This is an automated email from the ASF dual-hosted git repository.
onikolas pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/airflow.git
The following commit(s) were added to refs/heads/main by this push:
new be057d84ed Attempt at fixing sagemaker system test (#32220)
be057d84ed is described below
commit be057d84ed3dc235610eea20a33a7f41a039d8fb
Author: Raphaël Vandon <[email protected]>
AuthorDate: Tue Jun 27 22:19:01 2023 -0700
Attempt at fixing sagemaker system test (#32220)
It has been failing somewhat randomly recently, with 400 errors coming from
the line that logs in to the public ECR repo.
Upon reading the docs
(https://docs.aws.amazon.com/AmazonECR/latest/public/public-registries.html),
it seems we shouldn't use the same credentials for the public registry as we
do for a private repo. Why it worked until now (and still works from time to
time) remains a mystery.
Hopefully this helps.
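For reference, the public-registry login this change switches to looks
roughly like the sketch below (a minimal standalone example, not the exact
test code; the subprocess call and the hard-coded values are assumptions).
Tokens for public.ecr.aws are always issued in us-east-1, regardless of
where the rest of the resources live:

    import subprocess

    # Sketch only: log in to the public ECR registry. Tokens for
    # public.ecr.aws always come from us-east-1, and the username for
    # token-based logins is always "AWS".
    public_login = (
        "aws ecr-public get-login-password --region us-east-1 | "
        "docker login --username AWS --password-stdin public.ecr.aws"
    )
    subprocess.run(public_login, shell=True, check=True)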
Also, docker warned that passing --password on the command line is insecure,
so piping the credentials via --password-stdin makes that warning go away.
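The private-repo login uses the same piping trick; a hedged sketch, assuming
a repository URI of the usual <account>.dkr.ecr.<region>.amazonaws.com/<name>
shape (the example URI below is made up):

    import subprocess

    # Sketch only: the region is the 4th dot-separated component of a
    # standard ECR repository URI.
    repository_uri = "123456789012.dkr.ecr.us-west-2.amazonaws.com/example-repo"
    ecr_region = repository_uri.split(".")[3]

    # --password-stdin keeps the token out of the process argument list,
    # which is exactly what docker's warning was about.
    private_login = (
        f"aws ecr get-login-password --region {ecr_region} | "
        f"docker login --username AWS --password-stdin {repository_uri}"
    )
    subprocess.run(private_login, shell=True, check=True)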
---
tests/system/providers/amazon/aws/example_sagemaker.py | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/tests/system/providers/amazon/aws/example_sagemaker.py b/tests/system/providers/amazon/aws/example_sagemaker.py
index 2b0f3fc6ef..c1f5745caf 100644
--- a/tests/system/providers/amazon/aws/example_sagemaker.py
+++ b/tests/system/providers/amazon/aws/example_sagemaker.py
@@ -28,7 +28,6 @@ from airflow import DAG
from airflow.decorators import task
from airflow.models.baseoperator import chain
from airflow.operators.python import get_current_context
-from airflow.providers.amazon.aws.hooks.ecr import EcrHook
from airflow.providers.amazon.aws.operators.s3 import (
S3CreateBucketOperator,
S3CreateObjectOperator,
@@ -135,10 +134,6 @@ def _build_and_upload_docker_image(preprocess_script, repository_uri):
- Has numpy, pandas, requests, and boto3 installed
- Has our data preprocessing script mounted and set as the entry point
"""
- ecr_region = repository_uri.split(".")[3]
- # Fetch ECR Token to be used for docker
- creds = EcrHook(region_name=ecr_region).get_temporary_credentials()[0]
-
with NamedTemporaryFile(mode="w+t") as preprocessing_script, NamedTemporaryFile(mode="w+t") as dockerfile:
preprocessing_script.write(preprocess_script)
preprocessing_script.flush()
@@ -156,17 +151,21 @@ def _build_and_upload_docker_image(preprocess_script, repository_uri):
)
dockerfile.flush()
+ ecr_region = repository_uri.split(".")[3]
docker_build_and_push_commands = f"""
cp /root/.aws/credentials /tmp/credentials &&
- # login to public ecr repo containing amazonlinux image
- docker login --username {creds.username} --password {creds.password} public.ecr.aws &&
+ # login to public ecr repo containing amazonlinux image (public login is always on us east 1)
+ aws ecr-public get-login-password --region us-east-1 |
+ docker login --username AWS --password-stdin public.ecr.aws &&
docker build --platform=linux/amd64 -f {dockerfile.name} -t {repository_uri} /tmp &&
rm /tmp/credentials &&
# login again, this time to the private repo we created to hold that specific image
- docker login --username {creds.username} --password {creds.password} {repository_uri} &&
+ aws ecr get-login-password --region {ecr_region} |
+ docker login --username AWS --password-stdin {repository_uri} &&
docker push {repository_uri}
"""
+ logging.info("building and uploading docker image for preprocessing...")
docker_build = subprocess.Popen(
docker_build_and_push_commands,
shell=True,