I'm confused about the KubernetesPodOperator in Airflow: how do I pass my load_users_into_table() function a conn_id parameter that is stored as an Airflow connection, so it can be used inside the Pod?
The official doc proposes putting the conn_id in a Secret, but I don't understand how I can pass it to my function load_users_into_table() after that.
https://airflow.apache.org/docs/stable/kubernetes.html
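As far as I understand, the Secret approach from that page looks roughly like this (the secret name airflow-secrets, the key postgres_conn and the environment variable name here are just placeholders I made up):

from airflow.contrib.kubernetes.secret import Secret

# Expose the value stored under key "postgres_conn" in the Kubernetes secret
# "airflow-secrets" as the environment variable POSTGRES_CONN_URI inside the pod,
# by passing secrets=[secret_conn] to the KubernetesPodOperator.
secret_conn = Secret('env', 'POSTGRES_CONN_URI', 'airflow-secrets', 'postgres_conn')

But I still don't see how my function would actually receive the connection from that.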
the function (task) to be executed in the pod:
from pandas import read_csv

def load_users_into_table(postgres_hook, schema, path):
    gdf = read_csv(path)
    gdf.to_sql('users', con=postgres_hook.get_sqlalchemy_engine(), schema=schema)
the dag:
_pg_hook = PostgresHook(postgres_conn_id=_conn_id)

with dag:
    test = KubernetesPodOperator(
        namespace=namespace,
        image=image_name,
        cmds=["python", "-c"],
        arguments=[load_users_into_table],
        labels={"dag-id": dag.dag_id},
        name="airflow-test-pod",
        task_id="task-1",
        is_delete_operator_pod=True,
        in_cluster=in_cluster,
        get_logs=True,
        config_file=config_file,
        executor_config={
            "KubernetesExecutor": {"request_memory": "512Mi",
                                   "limit_memory": "1024Mi",
                                   "request_cpu": "1",
                                   "limit_cpu": "2"}
        }
    )
Assuming you want to run this with the KubernetesPodOperator, you can use argparse and pass the values as arguments through the docker cmd. Something along these lines should do the job:
import argparse


def f(arg):
    print(arg)


parser = argparse.ArgumentParser()
parser.add_argument('--foo', help='foo help')
args = parser.parse_args()

if __name__ == '__main__':
    f(args.foo)
Dockerfile:
FROM python:3
COPY main.py main.py
CMD ["python", "main.py", "--foo", "somebar"]
There are other ways to solve this, such as using Secrets, ConfigMaps, or even Airflow Variables, but this should get you moving forward.
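For instance, the operator from the question could forward the stored connection's fields to that main.py as arguments (a sketch only: BaseHook.get_connection reads the Airflow connection on the Airflow side, and the argument names and CSV path are purely illustrative):

from airflow.hooks.base_hook import BaseHook

# Read the connection stored in Airflow and hand its fields to the container.
conn = BaseHook.get_connection(_conn_id)

test = KubernetesPodOperator(
    namespace=namespace,
    image=image_name,
    cmds=["python", "main.py"],
    arguments=[
        "--host", conn.host,
        "--schema", conn.schema,
        "--login", conn.login,
        "--password", conn.password,
        "--path", "/data/users.csv",
    ],
    name="airflow-test-pod",
    task_id="task-1",
    get_logs=True,
)

Inside the container, main.py would parse these values and build its own SQLAlchemy engine. Bear in mind that a password passed this way shows up in the pod spec and rendered arguments, which is why the Secret route is usually preferable for credentials.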
I am having trouble setting up the database configuration correctly for unit testing with pytest.
My goal is to create a brand new empty test database each time I run the tests.
I have a pytest.ini file that looks like this:
[pytest]
DJANGO_SETTINGS_MODULE = test_settings
python_files = test_*.py
python_functions = test_*
addopts = --ds=test_settings --create-db
My project structure is like this:
my_project
    app_1
    app_2
    tests
    my_project
        settings.py
    test_settings
        __init__.py
The test_settings/__init__.py file looks like this:
import os

from my_project.settings import *

POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_HOST = os.getenv('POSTGRES_HOST')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_DB = 'test_db'
POSTGRES_PORT = os.getenv('POSTGRES_PORT')
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': POSTGRES_DB,
        'USER': POSTGRES_USER,
        'PASSWORD': POSTGRES_PASSWORD,
        'HOST': POSTGRES_HOST,
        'PORT': POSTGRES_PORT,
    }
}
And I have test cases like this:
import pytest
from model_bakery import baker
from rest_framework.test import APIClient

from sales.models import ServiceLocation


# TODO: move this into a shared module somewhere
@pytest.fixture
def api_client():
    return APIClient


@pytest.mark.django_db
def test_get_service_locations(api_client):
    baker.make(ServiceLocation, _quantity=3)
    response = api_client().get('/mobile_api/v3/service_locations')
    assert response.status_code == 200
    assert len(response.json()['data']) == 3
When I run the pytest command, for some reason I get assert 28 == 3, which means that Python is connecting to the original database that I use for development.
Most surprisingly, I tried deleting one record from my development database table so that the number of rows becomes 24 (instead of 25), but Python still seems to be caching the database.
I am totally lost. Any ideas what I am doing wrong?
I am adding a task to an Airflow DAG as follows:
examples_task = KubernetesPodOperator(
    task_id='examples_generation',
    dag=dag,
    namespace='test',
    image='test_amazon_image',
    name='pipe-labelled-examples-generation-tf-record-operator',
    env={
        'GOOGLE_APPLICATION_CREDENTIALS': Variable.get('google_cloud_credentials')
    },
    arguments=[
        "--assets_path", Variable.get('assets_path'),
        "--folder_source", Variable.get('folder_source'),
        "--folder_destination", Variable.get('folder_destination'),
        "--gcs_folder_destination", Variable.get('gcs_folder_destination'),
        "--aws_region", Variable.get('aws_region'),
        "--s3_endpoint", Variable.get('s3_endpoint')
    ],
    get_logs=True)
I thought I could paste the service account JSON file into an Airflow Variable and reference it, but this doesn't work, and the Airflow/Google documentation isn't clear. How do you do this?
A solution is to port the JSON into an argument:
examples_task = KubernetesPodOperator(
    task_id='examples_generation',
    dag=dag,
    namespace='test',
    image='test_amazon_image',
    name='pipe-labelled-examples-generation-tf-record-operator',
    arguments=[
        "--folder_source", Variable.get('folder_source'),
        "--folder_destination", Variable.get('folder_destination'),
        "--gcs_folder_destination", Variable.get('gcs_folder_destination'),
        "--aws_region", Variable.get('aws_region'),
        "--s3_endpoint", Variable.get('s3_endpoint'),
        "--gcs_credentials", Variable.get('google_cloud_credentials')
    ],
    get_logs=True)
then, in the CLI script that runs inside the container, build the credentials from that argument:
import json
from google.cloud import storage
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_info(json.loads(gcs_credentials))
client = storage.Client(project='project_id', credentials=credentials)
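For completeness, a minimal sketch of how that script could obtain gcs_credentials from the --gcs_credentials argument added above (the argument handling here is illustrative):

import argparse
import json

from google.cloud import storage
from google.oauth2 import service_account

parser = argparse.ArgumentParser()
parser.add_argument('--gcs_credentials')  # the other --folder_source/--aws_region/... arguments are omitted here
args = parser.parse_args()
gcs_credentials = args.gcs_credentials

credentials = service_account.Credentials.from_service_account_info(json.loads(gcs_credentials))
client = storage.Client(project='project_id', credentials=credentials)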
Summary of my DAG:
I am using the SSHOperator to SSH into an EC2 instance and run a JAR file which connects to multiple DBs. I've declared the Airflow connections in my DAG file and am able to pass the variables to the EC2 instance. As you can see below, I'm passing the properties into the Java command.
Airflow version: 1.10.7
Package installed: apache-airflow[crypto]
from airflow import DAG
from datetime import datetime, timedelta
from airflow.contrib.hooks.ssh_hook import SSHHook
from airflow.contrib.operators.ssh_operator import SSHOperator
from airflow.hooks.base_hook import BaseHook
from airflow.models.connection import Connection

ssh_hook = SSHHook(ssh_conn_id='ssh_to_ec2')
ssh_hook.no_host_key_check = True

redshift_connection = BaseHook.get_connection("my_redshift")
rs_user = redshift_connection.login
rs_password = redshift_connection.password

mongo_connection = BaseHook.get_connection("my_mongo")
mongo_user = mongo_connection.login
mongo_password = mongo_connection.password

default_args = {
    'owner': 'AIRFLOW',
    'start_date': datetime(2020, 4, 1, 0, 0),
    'email': [],
    'retries': 1,
}

dag = DAG('connect_to_redshift', default_args=default_args)

t00_00 = SSHOperator(
    task_id='ssh_and_connect_db',
    ssh_hook=ssh_hook,
    command="java "
            "-Drs_user={rs_user} -Drs_pass={rs_pass} "
            "-Dmongo_user={mongo_user} -Dmongo_pass={mongo_pass} "
            "-jar /home/airflow/root.jar".format(rs_user=rs_user,
                                                 rs_pass=rs_password,
                                                 mongo_user=mongo_user,
                                                 mongo_pass=mongo_password),
    dag=dag)

t00_00
Problem
The values for rs_pass and mongo_pass are exposed in the Rendered Template and in the Airflow logs, which is not good. I would like a solution that hides all of this sensitive information from the logs and the rendered template when using the SSHOperator.
So far I've tried lowering the log verbosity to ERROR in airflow.cfg, but the values still show up in the Rendered Template.
Please enlighten me.
Thanks
We are running a self-managed Airflow 1.10.2 with the KubernetesExecutor on a GKE cluster in GCP. All built-in operators are working fine so far, except the KubernetesPodOperator, which we would like to use for running our custom docker images. It seems that the Airflow worker images don't have the privileges to start other pods inside the Kubernetes cluster. The DAG just does not seem to do anything after being started. This is what we found in the logs initially:
FileNotFoundError: [Errno 2] No such file or directory: '/root/.kube/config'
Next try: the in_cluster=True parameter on the KubernetesPodOperator does not seem to help. After that, we tried to use this parameter in airflow.cfg, section [kubernetes]:
gcp_service_account_keys = kubernetes-executor-private-key:/var/tmp/private/kubernetes_executor_private_key.json
and the error message was now TypeError: a bytes-like object is required, not 'str'
This is the parameter definition from GitHub:
# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
gcp_service_account_keys =
We have already tried various kinds of parentheses and quotes here, with no success.
DAG code:
from datetime import datetime, timedelta

from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.operators.dummy_operator import DummyOperator

default_args = {
    'owner': 'xxx',
    'depends_on_past': False,
    'start_date': datetime.utcnow(),
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}

dag = DAG(
    'kubernetes_sample', default_args=default_args, schedule_interval=timedelta(minutes=10))
start = DummyOperator(task_id='run_this_first', dag=dag)
passing = KubernetesPodOperator(namespace='default',
                                image="python:3.6",
                                cmds=["python", "-c"],
                                arguments=["print('hello world')"],
                                labels={"foo": "bar"},
                                name="passing-test",
                                task_id="passing-task",
                                in_cluster=True,
                                get_logs=True,
                                dag=dag
                                )
# This task is expected to fail (invalid image tag and capitalized command).
failing = KubernetesPodOperator(namespace='default',
                                image="ubuntu:1604",
                                cmds=["Python", "-c"],
                                arguments=["print('hello world')"],
                                labels={"foo": "bar"},
                                in_cluster=True,
                                name="fail",
                                task_id="failing-task",
                                get_logs=True,
                                dag=dag
                                )
passing.set_upstream(start)
failing.set_upstream(start)
Is anyone facing the same problem? Am I missing something here?
With:
default_args = {
    ...
    'retries': 1,
    'retry_delay': timedelta(seconds=1),
    ...
}
I can get a failing task to retry several times, but how can I make the whole DAG start over when a task fails?
Automatically, of course...
You can run a second "Fail Check" DAG that queries for any task instances where the task_id matches what you want and the state is failed using the provide_session util. Then, you'll want to optionally clear downstream tasks as well and set the state of the relevant DagRun to running.
from datetime import datetime, timedelta
from sqlalchemy import and_
import json

from airflow import DAG
from airflow.models import TaskInstance, DagRun
from airflow.utils.db import provide_session
from airflow.operators.python_operator import PythonOperator

default_args = {'start_date': datetime(2018, 6, 11),
                'retries': 2,
                'retry_delay': timedelta(minutes=2),
                'email': [],
                'email_on_failure': True}

dag = DAG('__RESET__FAILED_TASKS',
          default_args=default_args,
          schedule_interval='@daily',
          catchup=False
          )


@provide_session
def check_py(session=None, **kwargs):
    relevant_task_id = 'relevant_task_id'
    obj = (session
           .query(TaskInstance)
           .filter(and_(TaskInstance.task_id == relevant_task_id,
                        TaskInstance.state == 'failed'))
           .all())
    if not obj:
        raise KeyError('No failed Task Instances of {} exist.'.format(relevant_task_id))
    else:
        # Clear the relevant tasks.
        (session
         .query(TaskInstance)
         .filter(and_(TaskInstance.task_id == relevant_task_id,
                      TaskInstance.state == 'failed'))
         .delete())

        # Clear downstream tasks and set the relevant DAG state to RUNNING
        for _ in obj:
            _ = json.loads(_.val)

            # OPTIONAL: Clear downstream tasks in the specified Dag Run.
            for task in _['downstream_tasks']:
                (session
                 .query(TaskInstance)
                 .filter(and_(TaskInstance.task_id == task,
                              TaskInstance.dag_id == _['dag_id'],
                              TaskInstance.execution_date == datetime.strptime(_['ts'],
                                                                               "%Y-%m-%dT%H:%M:%S")))
                 .delete())

            # Set the Dag Run state to "running"
            dag_run = (session
                       .query(DagRun)
                       .filter(and_(DagRun.dag_id == _['dag_id'],
                                    DagRun.execution_date == datetime.strptime(_['ts'],
                                                                               "%Y-%m-%dT%H:%M:%S")))
                       .first())
            dag_run.set_state('running')


with dag:
    run_check = PythonOperator(task_id='run_check',
                               python_callable=check_py,
                               provide_context=True)

    run_check
The canonical solution to this in Airflow is to create a SubDagOperator that wraps all the other tasks in the DAG, and apply the retries to that.
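A rough sketch of that structure (Airflow 1.10-style imports; the sub-DAG factory, schedule and task names are purely illustrative):

from datetime import datetime, timedelta

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator

default_args = {'start_date': datetime(2020, 1, 1)}


def build_subdag(parent_dag_id, child_dag_id, args):
    # All the "real" tasks of the pipeline live inside this sub-DAG.
    subdag = DAG(dag_id='{}.{}'.format(parent_dag_id, child_dag_id),
                 default_args=args,
                 schedule_interval='@daily')
    with subdag:
        step_1 = DummyOperator(task_id='step_1')
        step_2 = DummyOperator(task_id='step_2')
        step_1 >> step_2
    return subdag


dag = DAG('wrapped_pipeline', default_args=default_args, schedule_interval='@daily')

wrapper = SubDagOperator(
    task_id='all_steps',
    subdag=build_subdag('wrapped_pipeline', 'all_steps', default_args),
    retries=2,                        # the retry budget applies to the wrapping task
    retry_delay=timedelta(minutes=1),
    dag=dag,
)

Depending on the Airflow version, a retried sub-DAG run may still need its failed task instances cleared before they re-run, so it is worth testing this behaviour before relying on it.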
You could potentially use the on_failure_callback feature to call a Python or Bash script that restarts the DAG. There is currently no feature provided by Airflow to automatically restart the DAG upon task failure.
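A minimal sketch of that callback idea, assuming the cleanup simply shells out to the Airflow 1.10 CLI to clear the run so the scheduler picks it up again (the DAG id, command and schedule are illustrative):

import subprocess
from datetime import datetime

from airflow import DAG
from airflow.operators.bash_operator import BashOperator


def restart_dag(context):
    # Airflow calls this with the task context once retries are exhausted.
    dag_id = context['dag'].dag_id
    execution_date = context['execution_date'].isoformat()
    # Clearing the task instances of this run puts them back into a
    # schedulable state, so the whole DAG run is executed again.
    subprocess.run(['airflow', 'clear', dag_id,
                    '-s', execution_date, '-e', execution_date, '-c'],
                   check=True)


default_args = {
    'start_date': datetime(2020, 1, 1),
    'on_failure_callback': restart_dag,   # propagated to every task in the DAG
}

dag = DAG('restart_on_failure_example', default_args=default_args,
          schedule_interval='@daily')

step = BashOperator(task_id='flaky_step', bash_command='./run_pipeline.sh', dag=dag)

Obviously this can loop forever if the task keeps failing, so in practice you would add a guard, for example a maximum number of clears tracked in an Airflow Variable.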