How to use on_failure_callback in Airflow 1.10.10 + Composer?

I wish to get an email notification when a single Airflow Operator fails. I need that because the failure of some tasks mustn't set the entire pipeline as failed.
To simulate the error, I set the source bucket to a non-existent bucket.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Stefano Giostra"
__credits__ = "Stefano Giostra"
__maintainer__ = "Stefano Giostra"
__version__ = "0.9.3"
__status__ = "Dev"

from airflow.models import Variable, DAG
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
# from lib.bb_utils import *
import logging
from airflow.utils import dates
from datetime import timedelta
from functools import partial
from lib.bb_utils import load_json_file
from airflow.utils.email import send_email

ROOT_PATH = '/home/airflow/gcs/dags'
logger = logging.getLogger("dag_demo_2")


def notify_email(context, config):  # **kwargs
    """Send custom email alerts."""
    alerting_email_address = config.get('email_address')
    print("---> notify_email -------------------")
    print(context)
    print(f"-->{alerting_email_address}")
    print("<------------------------------------")
    # print(context['dag'])
    # email title.
    # title = "Airflow alert: {task_name} Failed".format(context)
    #
    # # email contents
    # body = """
    # Hi, <br><br>
    # There's been an error in the {task_name} job.<br>
    # <br>
    # Forever yours,<br>
    # Airflow bot <br>
    # """.format(**contextDict)
    # for dest in dest_email:
    #     send_email(dest, title, body)


# ----------------------------------------------------------------------------------------------------------------------
# Data dictionary with the keys required by Airflow DAGs
my_email = 'abc@xyz.com'
default_args = {
    "owner": 'SG',
    "depends_on_past": False,
    "start_date": dates.days_ago(1),
    "end_date": None,
    "email_on_failure": True,
    "email_on_retry": False,
    "email": [my_email],
    "retries": 2,
    "retry_delay": timedelta(minutes=5),
    "max_active_runs": 1,
    "on_failure_callback": partial(notify_email, config={'email_address': my_email})
}

dag_name = 'SG-DagDemo-Once'
with DAG(dag_id=dag_name, default_args=default_args, schedule_interval="@once") as ldag:
    project = Variable.get("PROJECT")
    source_bucket = 'sg-dev'
    source_object = 'covid19_italy/national_trends_2.csv'
    bq_dataset = "covid19_italy"
    bq_table_name = "national_trends"
    bq_task_id = f'gcs_to_bq_load_{bq_table_name}'
    schema_fields = load_json_file(f"{ROOT_PATH}/source/{bq_dataset}/{bq_table_name}_tabschema.json")
    t = GoogleCloudStorageToBigQueryOperator(
        dag=ldag,
        task_id=bq_task_id,
        bucket=source_bucket,
        source_objects=[source_object],
        destination_project_dataset_table="{0}.{1}.{2}".format(project, bq_dataset, bq_table_name),
        schema_fields=schema_fields,
        source_format='CSV',
        skip_leading_rows=1,
        write_disposition="WRITE_TRUNCATE"
    )

To invoke notify_email() on a failure, it is enough to adjust default_args with:
"on_failure_callback": notify_email
and then default_args should be passed in the DAG creation statement:
with DAG(dag_id='SG-DagDemo-Once', default_args=default_args) as dag:
You can try something like the following to call the function notify_email() on operator failures; each operator will call the same function (example taken from gcs_to_bq):
import airflow
from airflow import DAG
from airflow.operators import bash_operator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator

args = {
    'owner': 'Airflow',
    'start_date': airflow.utils.dates.days_ago(1),
    'on_failure_callback': notify_email  # the callback function defined above
}

dag_name = 'SG-DagDemo-Once'
with DAG(dag_id=dag_name, default_args=args, schedule_interval=None) as dag:
    create_test_dataset = bash_operator.BashOperator(
        task_id='create_airflow_test_dataset',
        bash_command='bq mk airflow_test')

    # [START howto_operator_gcs_to_bq]
    load_csv = GoogleCloudStorageToBigQueryOperator(
        task_id='gcs_to_bq_example',
        bucket='cloud-samples-data',
        source_objects=['bigquery/us-states/us-states.csv'],
        destination_project_dataset_table='airflow_test.gcs_to_bq_table',
        schema_fields=[
            {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
            {'name': 'post_abbr', 'type': 'STRING', 'mode': 'NULLABLE'},
        ],
        write_disposition='WRITE_TRUNCATE')
    # [END howto_operator_gcs_to_bq]

    delete_test_dataset = bash_operator.BashOperator(
        task_id='delete_airflow_test_dataset',
        bash_command='bq rm -r -f -d airflow_test')

    create_test_dataset >> load_csv >> delete_test_dataset
You can simulate an error by changing a piece of configuration on any operator, and you will need to complete the email-sending configuration in notify_email().
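For reference, a minimal sketch of what a completed notify_email() might look like, using airflow.utils.email.send_email (already imported in the DAG above) and the task instance available in the callback context. It assumes SMTP (or SendGrid, on Composer) is configured for the environment, and the subject and body wording here is purely illustrative:

from airflow.utils.email import send_email

def notify_email(context, config):
    """Send a custom email alert for the failed task (illustrative sketch)."""
    alerting_email_address = config.get('email_address')
    ti = context['task_instance']  # TaskInstance that failed
    subject = f"Airflow alert: {ti.dag_id}.{ti.task_id} failed"
    body = (
        "Hi,<br><br>"
        f"Task <b>{ti.task_id}</b> in DAG <b>{ti.dag_id}</b> failed "
        f"on {context['execution_date']}.<br>"
        f"Log URL: {ti.log_url}<br>"
    )
    send_email(alerting_email_address, subject, body)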

Related

DAG Import Errors - Invalid arguments were passed

I'm trying to load data from PostgreSQL (local) to Google Cloud Storage using Airflow in Docker, but I got an error like this https://i.stack.imgur.com/pHzAF.png
Broken DAG: [/opt/airflow/dags/postgres_to_bigquery.py] Traceback (most recent call last):
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/baseoperator.py", line 408, in apply_defaults
    result = func(self, **kwargs, default_args=default_args)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/baseoperator.py", line 756, in __init__
    f"Invalid arguments were passed to {self.__class__.__name__} (task_id: {task_id}). "
airflow.exceptions.AirflowException: Invalid arguments were passed to PostgresToGCSOperator (task_id: postgres_to_gcs). Invalid arguments were:
**kwargs: {'google_cloud_storage_conn_id': 'gcp_conn'}
And this is part of my code:
GCS_CONN = Variable.get('GCS_CONN')

default_args = {
    'owner': 'airflow',
    'retries': 0,
    'retry_delay': timedelta(minutes=5),
}

with DAG(
    dag_id='postgres_to_bigquery',
    default_args=default_args,
    start_date=datetime(2022, 10, 3),
    schedule_interval='@once'
) as dag:

    start = DummyOperator(
        task_id='start',
    )

    postgres_to_gcs = PostgresToGCSOperator(
        task_id='postgres_to_gcs',
        postgres_conn_id='postgres_localhost',
        sql='select * from orders;',
        bucket='airflow_fakri',
        filename='airflow_fakri/data/orders.csv',
        export_format='csv',
        gzip=False,
        use_server_side_cursor=False,
        google_cloud_storage_conn_id=GCS_CONN
    )
It looks like you are indeed passing a wrong argument.
From the docs: https://airflow.apache.org/docs/apache-airflow-providers-google/stable/_api/airflow/providers/google/cloud/transfers/postgres_to_gcs/index.html
The operator does not accept google_cloud_storage_conn_id: the Postgres connection is passed as postgres_conn_id (which you already set), and the Google Cloud connection is passed as gcp_conn_id.
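For example, a hypothetical corrected version of the task, assuming the connection id stored in your GCS_CONN variable is the Google Cloud connection you want to use:

postgres_to_gcs = PostgresToGCSOperator(
    task_id='postgres_to_gcs',
    postgres_conn_id='postgres_localhost',  # Postgres side
    gcp_conn_id=GCS_CONN,                   # Google Cloud side (instead of google_cloud_storage_conn_id)
    sql='select * from orders;',
    bucket='airflow_fakri',
    filename='airflow_fakri/data/orders.csv',
    export_format='csv',
    gzip=False,
    use_server_side_cursor=False,
)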

FastAPI-Mail TEMPLATE_FOLDER does not exist

I was following this example for sending an email using FastAPI HTML templates, but it raised an error related to the templates directory.
conf = ConnectionConfig(
File "pydantic/env_settings.py", line 38, in pydantic.env_settings.BaseSettings.__init__
File "pydantic/main.py", line 341, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for ConnectionConfig
TEMPLATE_FOLDER
file or directory at path "/Users/abushoeb/myproject/templates" does not exist (type=value_error.path.not_exists; path=/Users/abushoeb/myproject/templates)
The solution is almost identical to the example, with a minor modification to the TEMPLATE_FOLDER value.
The whole codebase is available at abushoeb/fastapi.
from fastapi import (
    FastAPI,
    BackgroundTasks,
    UploadFile, File,
    Form,
    Query,
    Body,
    Depends
)
from starlette.responses import JSONResponse
from starlette.requests import Request
from fastapi_mail import FastMail, MessageSchema, ConnectionConfig
from pydantic import EmailStr, BaseModel
from typing import List, Dict, Any
from fastapi_mail.email_utils import DefaultChecker
from pathlib import Path


class EmailSchema(BaseModel):
    email: List[EmailStr]
    body: Dict[str, Any]

    class Config:
        schema_extra = {
            "example": {
                "email": ["type recipient email address"],
                "subject": "FastAPI Templated Mail",
                "body": {"first_name": "recipient first name",
                         "last_name": "recipient last name"},
            }
        }


BASE_DIR = Path(__file__).resolve().parent

conf = ConnectionConfig(
    MAIL_USERNAME="YourUsername",
    MAIL_PASSWORD="strong_password",
    MAIL_FROM="your@email.com",
    MAIL_PORT=587,
    MAIL_SERVER="your mail server",
    MAIL_FROM_NAME="Desired Name",
    MAIL_TLS=True,
    MAIL_SSL=False,
    # USE_CREDENTIALS = True,
    # VALIDATE_CERTS = True,
    TEMPLATE_FOLDER=Path(BASE_DIR, 'templates')
)

app = FastAPI(title="Email - FastAPI", description="Sample Email Script")


@app.post("/email")
async def send_with_template(email: EmailSchema) -> JSONResponse:
    message = MessageSchema(
        subject="Fastapi-Mail with HTML Templates",
        recipients=email.dict().get("email"),  # List of recipients, as many as you can pass
        template_body=email.dict().get("body"),
    )

    fm = FastMail(conf)
    await fm.send_message(message, template_name="email_template.html")
    return JSONResponse(status_code=200, content={"message": "email has been sent"})
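For context, the ValidationError in the question simply means that the path given to TEMPLATE_FOLDER does not exist on disk when ConnectionConfig is validated. A small, hypothetical sanity check you can run next to the config (the directory and template file names here are assumptions):

from pathlib import Path

BASE_DIR = Path(__file__).resolve().parent
TEMPLATE_DIR = BASE_DIR / "templates"

# Create the templates folder (and an empty template file) if they are missing,
# so ConnectionConfig path validation passes; normally you would commit
# templates/email_template.html alongside the application code instead.
TEMPLATE_DIR.mkdir(exist_ok=True)
(TEMPLATE_DIR / "email_template.html").touch(exist_ok=True)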

sla_miss_callback to send email on missing task SLA in Apache Airflow

I have a DAG A that is being triggered by a parent DAG B. So DAG A doesn't have any schedule interval defined in it.
1. I would like to set up an sla_miss_callback on one of the tasks in DAG A.
2. I would like to get an e-mail notification whenever the task misses its SLA.
I have tried the methods available on Google and Stack Overflow, but the e-mail is not getting triggered as expected.
Sharing the sample code I have used for testing.
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import timedelta, datetime
import logging


def print_sla_miss(*args, **kwargs):
    logging.info("SLA missed")


default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2021, 1, 1),
    'email': 'sample@xxx.com',
    'email_on_failure': True,
    'email_on_retry': False,
    'retries': 0
}

with DAG('sla_test', schedule_interval=None, max_active_runs=1, catchup=False,
         sla_miss_callback=print_sla_miss, default_args=default_args) as dag:
    sleep = BashOperator(
        task_id='timeout',
        sla=timedelta(seconds=5),
        bash_command='sleep 15',
        retries=0,
        dag=dag,
    )
Thanks in advance.
SLAs will only be evaluated on scheduled DAG Runs. Since you have schedule_interval=None the SLA you set is not being evaluated for this DAG.
If there is a certain amount of time within which you expect the triggered DAG to finish, you could set that SLA on the sensor task in the parent DAG that waits for the child DAG to finish.
Another possible workaround is to set up a Slack notification for when the child DAG finishes entirely, or when a certain task starts/finishes, so you can tell whether it has been running for too long.
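For the first option, a minimal sketch of what that could look like in the parent DAG, assuming it waits for the child DAG with an ExternalTaskSensor (the DAG id and timings here are illustrative, and you may need execution_delta/execution_date_fn if the run dates of the two DAGs do not line up):

from datetime import timedelta
from airflow.sensors.external_task_sensor import ExternalTaskSensor

# In the parent DAG (which is scheduled, so its SLAs are evaluated):
wait_for_dag_a = ExternalTaskSensor(
    task_id='wait_for_dag_a',
    external_dag_id='dag_a',     # hypothetical child DAG id
    external_task_id=None,       # None = wait for the whole child DAG run
    sla=timedelta(minutes=30),   # alert if the child has not finished within 30 minutes
    poke_interval=60,
    dag=dag,
)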
To achieve my requirement, I have created a separate DAG that watches the task run status every 5 minutes and sends e-mail notifications based on the run status, as below. To do this, I am writing the execution date of my main DAG to an Airflow Variable.
# importing operators and modules
from airflow import DAG
from airflow.operators.python_operator import BranchPythonOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.email_operator import EmailOperator
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.models import Variable
from datetime import datetime, timedelta, timezone
import dateutil

# setting default arguments
default_args = {
    'owner': 'test',
    'depends_on_past': False,
    'start_date': datetime(2021, 1, 1),
    'email': ['abc@example.com'],
    'email_on_failure': True,
    'email_on_retry': False,
    'retries': 0
}

# getting the current status of the task in the main DAG
exec_date = dateutil.parser.parse(Variable.get('main_dag_execution_date'))
ti = get_task_instance('main_dag', 'task_to_check', exec_date)
state = ti.current_state()
start_date = ti.start_date
end_date = ti.end_date
print("start_date", start_date, " end_date", end_date, " execution_date", exec_date)


# deciding the action based on the status of the task
def check_task_status(**kwargs):
    if state == 'running' and datetime.now(timezone.utc) > start_date + timedelta(minutes=10):
        breach_mail = 'breach_mail'
        return breach_mail
    elif state == 'failed':
        failure_mail = 'failure_mail'
        return failure_mail
    else:
        other_state = 'other_state'
        return other_state


# print statement when the task is not in a breached or failed state
def print_current_state(**context):
    if start_date is None:
        print("task is in wait state")
    else:
        print("task is in " + state + " state")


with DAG('sla_check', schedule_interval='0-59/5 9-23 * * *', max_active_runs=1, catchup=False,
         default_args=default_args) as dag:
    check_task_status = BranchPythonOperator(task_id='check_task_status', python_callable=check_task_status,
                                             provide_context=True,
                                             dag=dag)

    breach_mail = EmailOperator(task_id='breach_mail', to='Abc@example.com',
                                subject='SLA for task breached',
                                html_content="<p>Hi,<br><br>task running beyond SLA<br>", dag=dag)

    failure_mail = EmailOperator(task_id='failure_mail', to='Abc@example.com',
                                 subject='task failed',
                                 html_content="<p>Hi,<br><br>task failed. Please check.<br>", dag=dag)

    other_state = PythonOperator(task_id='other_state', python_callable=print_current_state,
                                 provide_context=True,
                                 dag=dag)

    check_task_status >> breach_mail
    check_task_status >> failure_mail
    check_task_status >> other_state

Defining Global Airflow Variables Using Kwargs Passed From POST Json

I'm creating a DAG that needs the ability to set global variables using kwargs passed in from the POST JSON used to trigger the job. So far, I have attempted this:
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from datetime import datetime, timedelta

DAG_Name = 'dag_test'

DEFAULT_ARGS = {
    'owner': '...',
    'depends_on_past': False,
    'email': ['...'],
    'email_on_failure': True,
    'start_date': datetime(2020, 8, 31)
}

dag = DAG(DAG_Name, default_args=DEFAULT_ARGS, dagrun_timeout=timedelta(hours=2))

snap_date = ''
output_loc = ''
recast = ''


def define_param(**kwargs):
    global snap_date
    global output_loc
    global recast
    snapshot = str(kwargs['dag_run'].conf['snap_date'])
    output_s3 = kwargs['dag_run'].conf['output_loc']
    recast = str(kwargs['dag_run'].conf['recast'])


DEFINE_PARAMETERS = PythonOperator(
    task_id='DEFINE_PARAMETERS',
    python_callable=define_param,
    provide_context=True,
    dag=dag)
But this does not work. How would I use kwargs to set global dag variables?
Use Variable.set, as it makes the actual update to the database and handles the session and serialization for you if needed.
Variable.set("snap_date", "2019-09-17")
Ref: https://github.com/apache/airflow/blob/1.10.1/airflow/models.py#L4558-L4569
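Applied to the question, a minimal sketch of define_param persisting the trigger payload with Variable.set so any downstream task can read the values back with Variable.get (variable names are taken from the question; the downstream reader task is hypothetical):

from airflow.models import Variable

def define_param(**kwargs):
    conf = kwargs['dag_run'].conf
    # Persist the values from the trigger payload instead of relying on module globals.
    Variable.set("snap_date", str(conf['snap_date']))
    Variable.set("output_loc", conf['output_loc'])
    Variable.set("recast", str(conf['recast']))

def use_param(**kwargs):
    # Any later task (even on another worker process) can read the values back.
    snap_date = Variable.get("snap_date")
    output_loc = Variable.get("output_loc")
    recast = Variable.get("recast")
    print(snap_date, output_loc, recast)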

Apache Airflow - trigger/schedule DAG rerun on completion (File Sensor)

Good Morning.
I'm trying to set up a DAG to
Watch/sense for a file to hit a network folder
Process the file
Archive the file
Using online tutorials and Stack Overflow I have been able to come up with the following DAG and operator that successfully achieve the objectives; however, I would like the DAG to be rescheduled or rerun on completion so it starts watching/sensing for another file.
I attempted to set max_active_runs: 1 and schedule_interval: timedelta(seconds=5); this does reschedule the DAG, but it starts queuing tasks and locks the file.
Any ideas welcome on how I could rerun the DAG after the archive_task?
Thanks
DAG CODE
from airflow import DAG
from airflow.operators import PythonOperator, OmegaFileSensor, ArchiveFileOperator
from datetime import datetime, timedelta
from airflow.models import Variable

default_args = {
    'owner': 'glsam',
    'depends_on_past': False,
    'start_date': datetime.now(),
    'provide_context': True,
    'retries': 100,
    'retry_delay': timedelta(seconds=30),
    'max_active_runs': 1,
    'schedule_interval': timedelta(seconds=5),
}

dag = DAG('test_sensing_for_a_file', default_args=default_args)

filepath = Variable.get("soucePath_Test")
filepattern = Variable.get("filePattern_Test")
archivepath = Variable.get("archivePath_Test")

sensor_task = OmegaFileSensor(
    task_id='file_sensor_task',
    filepath=filepath,
    filepattern=filepattern,
    poke_interval=3,
    dag=dag)


def process_file(**context):
    file_to_process = context['task_instance'].xcom_pull(
        key='file_name', task_ids='file_sensor_task')
    file = open(filepath + file_to_process, 'w')
    file.write('This is a test\n')
    file.write('of processing the file')
    file.close()


proccess_task = PythonOperator(
    task_id='process_the_file',
    python_callable=process_file,
    provide_context=True,
    dag=dag
)

archive_task = ArchiveFileOperator(
    task_id='archive_file',
    filepath=filepath,
    archivepath=archivepath,
    dag=dag)

sensor_task >> proccess_task >> archive_task
FILE SENSOR OPERATOR
import os
import re
from datetime import datetime
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
from airflow.operators.sensors import BaseSensorOperator


class ArchiveFileOperator(BaseOperator):
    @apply_defaults
    def __init__(self, filepath, archivepath, *args, **kwargs):
        super(ArchiveFileOperator, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.archivepath = archivepath

    def execute(self, context):
        file_name = context['task_instance'].xcom_pull(
            'file_sensor_task', key='file_name')
        os.rename(self.filepath + file_name, self.archivepath + file_name)


class OmegaFileSensor(BaseSensorOperator):
    @apply_defaults
    def __init__(self, filepath, filepattern, *args, **kwargs):
        super(OmegaFileSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.filepattern = filepattern

    def poke(self, context):
        full_path = self.filepath
        file_pattern = re.compile(self.filepattern)
        directory = os.listdir(full_path)
        for files in directory:
            if re.match(file_pattern, files):
                context['task_instance'].xcom_push('file_name', files)
                return True
        return False


class OmegaPlugin(AirflowPlugin):
    name = "omega_plugin"
    operators = [OmegaFileSensor, ArchiveFileOperator]
Dmitri's method worked perfectly.
I also found in my reading that setting schedule_interval=None and then using the TriggerDagRunOperator works equally well:
trigger = TriggerDagRunOperator(
    task_id='trigger_dag_RBCPV99_rerun',
    trigger_dag_id="RBCPV99_v2",
    dag=dag)

sensor_task >> proccess_task >> archive_task >> trigger
Set schedule_interval=None and use the airflow trigger_dag command from a BashOperator to launch the next execution at the completion of the previous one.
trigger_next = BashOperator(
    task_id="trigger_next",
    bash_command="airflow trigger_dag 'your_dag_id'",
    dag=dag)

sensor_task >> proccess_task >> archive_task >> trigger_next
You can start your first run manually with the same airflow trigger_dag command, and then the trigger_next task will automatically trigger the next one. We have used this in production for many months now and it runs perfectly.