Apache Airflow - trigger/schedule DAG rerun on completion (File Sensor)

Good Morning.
I'm trying to set up a DAG to:
Watch/sense for a file to hit a network folder
Process the file
Archive the file
Using online tutorials and Stack Overflow, I have been able to come up with the following DAG and operator that successfully achieve these objectives. However, I would like the DAG to be rescheduled or rerun on completion so that it starts watching/sensing for another file.
I attempted to set max_active_runs=1 together with schedule_interval=timedelta(seconds=5). This does reschedule the DAG, but it starts queuing tasks and locks the file.
Any ideas on how I could rerun the DAG after the archive_task would be welcome.
Thanks
DAG CODE
from airflow import DAG
from airflow.operators import PythonOperator, OmegaFileSensor, ArchiveFileOperator
from datetime import datetime, timedelta
from airflow.models import Variable

default_args = {
    'owner': 'glsam',
    'depends_on_past': False,
    'start_date': datetime.now(),
    'provide_context': True,
    'retries': 100,
    'retry_delay': timedelta(seconds=30),
    'max_active_runs': 1,
    'schedule_interval': timedelta(seconds=5),
}

dag = DAG('test_sensing_for_a_file', default_args=default_args)

filepath = Variable.get("soucePath_Test")
filepattern = Variable.get("filePattern_Test")
archivepath = Variable.get("archivePath_Test")

sensor_task = OmegaFileSensor(
    task_id='file_sensor_task',
    filepath=filepath,
    filepattern=filepattern,
    poke_interval=3,
    dag=dag)


def process_file(**context):
    file_to_process = context['task_instance'].xcom_pull(
        key='file_name', task_ids='file_sensor_task')
    file = open(filepath + file_to_process, 'w')
    file.write('This is a test\n')
    file.write('of processing the file')
    file.close()


proccess_task = PythonOperator(
    task_id='process_the_file',
    python_callable=process_file,
    provide_context=True,
    dag=dag)

archive_task = ArchiveFileOperator(
    task_id='archive_file',
    filepath=filepath,
    archivepath=archivepath,
    dag=dag)

sensor_task >> proccess_task >> archive_task
FILE SENSOR OPERATOR
import os
import re
from datetime import datetime
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
from airflow.operators.sensors import BaseSensorOperator


class ArchiveFileOperator(BaseOperator):
    @apply_defaults
    def __init__(self, filepath, archivepath, *args, **kwargs):
        super(ArchiveFileOperator, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.archivepath = archivepath

    def execute(self, context):
        file_name = context['task_instance'].xcom_pull(
            'file_sensor_task', key='file_name')
        os.rename(self.filepath + file_name, self.archivepath + file_name)


class OmegaFileSensor(BaseSensorOperator):
    @apply_defaults
    def __init__(self, filepath, filepattern, *args, **kwargs):
        super(OmegaFileSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.filepattern = filepattern

    def poke(self, context):
        full_path = self.filepath
        file_pattern = re.compile(self.filepattern)
        directory = os.listdir(full_path)
        for files in directory:
            if re.match(file_pattern, files):
                context['task_instance'].xcom_push('file_name', files)
                return True
        return False


class OmegaPlugin(AirflowPlugin):
    name = "omega_plugin"
    operators = [OmegaFileSensor, ArchiveFileOperator]

Dmitri's method worked perfectly.
I also found in my reading that setting schedule_interval=None and then using the TriggerDagRunOperator worked equally well:
from airflow.operators.dagrun_operator import TriggerDagRunOperator

trigger = TriggerDagRunOperator(
    task_id='trigger_dag_RBCPV99_rerun',
    trigger_dag_id="RBCPV99_v2",
    dag=dag)
sensor_task >> proccess_task >> archive_task >> trigger

Set schedule_interval=None and use the airflow trigger_dag command from a BashOperator to launch the next execution at the completion of the previous one.
trigger_next = BashOperator(
    task_id="trigger_next",
    bash_command="airflow trigger_dag 'your_dag_id'",
    dag=dag)
sensor_task >> proccess_task >> archive_task >> trigger_next
You can start your first run manually with the same airflow trigger_dag command, and then the trigger_next task will automatically trigger the next one. We have used this in production for many months now and it runs perfectly.
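Putting both suggestions together, here is a minimal sketch (not the original poster's exact code; the custom operators and the three tasks are assumed to be defined as in the question) of the self-triggering pattern, with schedule_interval=None set on the DAG object itself rather than in default_args:

from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dagrun_operator import TriggerDagRunOperator

default_args = {
    'owner': 'glsam',
    'depends_on_past': False,
    'start_date': datetime(2017, 1, 1),  # a fixed start_date instead of datetime.now()
    'retries': 1,
    'retry_delay': timedelta(seconds=30),
}

# schedule_interval and max_active_runs belong on the DAG, not in default_args.
dag = DAG(
    'test_sensing_for_a_file',
    default_args=default_args,
    schedule_interval=None,   # run only when triggered
    max_active_runs=1)        # never let two runs overlap

# sensor_task, proccess_task and archive_task defined as in the question ...

retrigger = TriggerDagRunOperator(
    task_id='retrigger_self',
    trigger_dag_id='test_sensing_for_a_file',  # the DAG triggers itself after archiving
    dag=dag)

# sensor_task >> proccess_task >> archive_task >> retrigger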

Related

"Dag Seems to be missing" error in a Cloud Composer Airflow Dynamic DAG

I have a dynamic Airflow DAG in Google Cloud Composer that gets created, is listed in the web server, and runs (backfill) without error.
However, there are issues:
When clicking on the DAG in the web UI, it says "DAG seems to be missing"
Can't see the Graph view/Tree view because of the error above
Can't manually trigger the DAG because of the error above
I have been trying to fix this for a couple of days... any hint will be helpful. Thank you!
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
from google.cloud import storage
from airflow.models import Variable
import json

args = {
    'owner': 'xxx',
    'start_date': '2020-11-5',
    'provide_context': True
}

dag = DAG(
    dag_id='dynamic',
    default_args=args
)


def return_bucket_files(bucket_name='xxxxx', **kwargs):
    client = storage.Client()
    bucket = client.get_bucket(bucket_name)
    blobs = bucket.list_blobs()
    file_list = [blob.name for blob in blobs]
    return file_list


def dynamic_gcs_to_gbq_etl(file, **kwargs):
    mapping = json.loads(Variable.get("xxxxx"))
    database = mapping[0][file]
    table = mapping[1][file]
    task = GoogleCloudStorageToBigQueryOperator(
        task_id=f'gcs_load_{file}_to_gbq',
        bucket='xxxxxxx',
        source_objects=[f'{file}'],
        destination_project_dataset_table=f'xxx.{database}.{table}',
        write_disposition="WRITE_TRUNCATE",
        autodetect=True,
        skip_leading_rows=1,
        source_format='CSV',
        dag=dag)
    return task


start_task = DummyOperator(
    task_id='start',
    dag=dag)

end_task = DummyOperator(
    task_id='end',
    dag=dag)

push_bucket_files = PythonOperator(
    task_id="return_bucket_files",
    provide_context=True,
    python_callable=return_bucket_files,
    dag=dag)

for file in return_bucket_files():
    gcs_load_task = dynamic_gcs_to_gbq_etl(file)
    start_task >> push_bucket_files >> gcs_load_task >> end_task
This issue means that the web server is failing to fill its DAG bag on its side; the problem is most likely not with your DAG specifically.
My suggestion right now would be to try restarting the web server (for example, by installing some dummy package, which forces a web server restart in Cloud Composer).
Similar issues are reported in this post as well as here.

Why does the Excel process stay open after printing?

I use these packages:
openpyxl - to copy Excel templates, open the copied file, fill it with data from the DB, print it, and then delete it;
pywin32 - to send the file for printing to a remote network printer by its network name.
After some of these steps (I don't know which), an Excel process window stays open (screenshot attached).
I attach the functions used most in my project.
This program works like a web service which listens on port 5000 and prints using the needed template.
I delete all created files because there is no need to store them.
from openpyxl import load_workbook
import os
import app_config as config
import printers.printers as p
from datetime import datetime
import shutil
import time


class EditExcelTemplate:
    def __init__(self, template_name):
        now = datetime.now()
        report_name = "_{}{}{}_{}{}{}_{}".format(now.year, now.month, now.day,
                                                 now.hour, now.minute, now.second,
                                                 now.microsecond)
        self.report_path = config.EXCEL_REPORT_PATH.format(template_name +
                                                           report_name)
        shutil.copy(src=config.EXCEL_TEMPLATE_PATH.format(template_name),
                    dst=self.report_path)
        # self.wb = load_workbook(filename=config.EXCEL_TEMPLATE_PATH.format(template_name))
        start_load = time.time()
        self.wb = load_workbook(filename=self.report_path,
                                keep_links=False,
                                keep_vba=False,
                                data_only=True)
        end_load = time.time()
        print('LOAD WORKBOOK|{}'.format(str(end_load - start_load)))
        self.ws = self.wb.active
        self.answer = {'file_name': template_name.upper()}

    def write_workbook(self, row_dest, column_dest, value):
        c = self.ws.cell(row=row_dest, column=column_dest)
        c.value = value

    def save_excel(self):
        self.wb.save(self.report_path)

    def print_excel(self, printer_no):
        p.print_excel(printer_no=printer_no, path_to_file=self.report_path)

    def print_excel_file(self, printer_name):
        p.print_excel_file(printer_name=printer_name, path_to_file=self.report_path)


import win32api
import app_config
import os, time


def print_excel(printer_no, path_to_file):
    printer_name = app_config.PRINTER_NAMES[printer_no]
    win32api.ShellExecute(
        1,
        'printto',
        path_to_file,
        '{}'.format(printer_name),
        '.',
        0
    )


def delete_file(path_to_file, try_count=1):
    if os.path.exists(path=path_to_file):
        file_name = path_to_file.split('\\')[-1]
        while try_count < 60:
            try:
                os.remove(path_to_file)
                print('File {} deleted!'.format(file_name))
                break
            except PermissionError:
                print('Can not delete file {}. Hold {} sec.'.format(file_name, try_count))
                time.sleep(1.0)
                try_count += 1

Airflow - How to push xcom from ecs operator?

In my Airflow DAG, I have an ECSOperator task followed by a PythonOperator task. I want to push some messages from the ECS task to the Python task using the XCom feature of Airflow. I tried the option do_xcom_push=True with no result. Find a sample DAG below.
dag = DAG(
    dag_name, default_args=default_args, schedule_interval=None)

start = DummyOperator(task_id='start', dag=dag)

end = DummyOperator(task_id='end', dag=dag)

ecs_operator_args = {
    'launch_type': 'FARGATE',
    'task_definition': 'task-def:2',
    'cluster': 'cluster-name',
    'region_name': 'region',
    'network_configuration': {
        'awsvpcConfiguration':
            {}
    }
}

ecs_task = ECSOperator(
    task_id='x_com_test',
    **ecs_operator_args,
    do_xcom_push=True,
    params={'my_param': 'Parameter-1'},
    dag=dag)


def pull_function(**kwargs):
    ti = kwargs['ti']
    msg = ti.xcom_pull(task_ids='x_com_test', key='the_message')
    print("received message: '%s'" % msg)


pull_task = PythonOperator(
    task_id='pull_task',
    python_callable=pull_function,
    provide_context=True,
    dag=dag)

start >> ecs_task >> pull_task >> end
You need to set up a CloudWatch log group for the container.
The ECSOperator needs to be extended to support pushing to XCom:
from collections import deque
from airflow.utils import apply_defaults
from airflow.contrib.operators.ecs_operator import ECSOperator


class MyECSOperator(ECSOperator):
    @apply_defaults
    def __init__(self, xcom_push=False, **kwargs):
        super(MyECSOperator, self).__init__(**kwargs)
        self.xcom_push_flag = xcom_push

    def execute(self, context):
        super().execute(context)
        if self.xcom_push_flag:
            return self._last_log_event()

    def _last_log_event(self):
        if self.awslogs_group and self.awslogs_stream_prefix:
            task_id = self.arn.split("/")[-1]
            stream_name = "{}/{}".format(self.awslogs_stream_prefix, task_id)
            events = self.get_logs_hook().get_log_events(self.awslogs_group, stream_name)
            last_event = deque(events, maxlen=1).pop()
            return last_event["message"]
dag = DAG(
    dag_name, default_args=default_args, schedule_interval=None)

start = DummyOperator(task_id='start', dag=dag)

end = DummyOperator(task_id='end', dag=dag)

ecs_operator_args = {
    'launch_type': 'FARGATE',
    'task_definition': 'task-def:2',
    'cluster': 'cluster-name',
    'region_name': 'region',
    'awslogs_group': '/aws/ecs/myLogGroup',
    'awslogs_stream_prefix': 'myStreamPrefix',
    'network_configuration': {
        'awsvpcConfiguration':
            {}
    }
}

ecs_task = MyECSOperator(
    task_id='x_com_test',
    **ecs_operator_args,
    xcom_push=True,
    params={'my_param': 'Parameter-1'},
    dag=dag)


def pull_function(**kwargs):
    ti = kwargs['ti']
    msg = ti.xcom_pull(task_ids='x_com_test', key='return_value')
    print("received message: '%s'" % msg)


pull_task = PythonOperator(
    task_id='pull_task',
    python_callable=pull_function,
    provide_context=True,
    dag=dag)

start >> ecs_task >> pull_task >> end
ecs_task will take the last event from the log group before it finishes executing and push it to XCom.
Apache-AWS has a new commit that pretty much implements what @Бојан-Аџиевски mentioned above, so you don't need to write a custom ECSOperator. Available as of version 1.1.0.
All you have to do is provide do_xcom_push=True when calling the ECSOperator, along with the correct awslogs_group and awslogs_stream_prefix.
Make sure your awslogs_stream_prefix follows this format:
prefix-name/container-name
as this is where ECS directs the logs.
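For illustration, here is a minimal sketch of that newer approach (the cluster, task definition, log group, and stream prefix below are placeholder values, not taken from the original post):

# Sketch only: assumes a provider version where ECSOperator can push the last
# CloudWatch log line to XCom when do_xcom_push=True; all names are placeholders.
ecs_task = ECSOperator(
    task_id='x_com_test',
    cluster='cluster-name',
    task_definition='task-def:2',
    launch_type='FARGATE',
    overrides={},                                     # container overrides; empty here
    awslogs_group='/ecs/my-log-group',                # CloudWatch log group of the container
    awslogs_stream_prefix='my-prefix/my-container',   # must be prefix-name/container-name
    do_xcom_push=True,                                # last log message becomes the return_value XCom
    network_configuration={'awsvpcConfiguration': {}},
    dag=dag)

# Downstream, read it back with the default key:
#   ti.xcom_pull(task_ids='x_com_test', key='return_value')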

Flask blueprint uses a Celery task and I get a circular import

I have an application with blueprints and Celery.
The code is here:
config.py
import os
from celery.schedules import crontab

basedir = os.path.abspath(os.path.dirname(__file__))


class Config:
    SECRET_KEY = os.environ.get('SECRET_KEY') or ''
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    RECORDS_PER_PAGE = 40
    SQLALCHEMY_DATABASE_URI = ''
    CELERY_BROKER_URL = ''
    CELERY_RESULT_BACKEND = ''
    CELERY_RESULT_DBURI = ''
    CELERY_TIMEZONE = 'Europe/Kiev'
    CELERY_ENABLE_UTC = False
    CELERYBEAT_SCHEDULE = {}

    @staticmethod
    def init_app(app):
        pass


class DevelopmentConfig(Config):
    DEBUG = True
    WTF_CSRF_ENABLED = True
    APP_HOME = ''
    SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://...'
    CELERY_BROKER_URL = 'sqla+mysql://...'
    CELERY_RESULT_BACKEND = "database"
    CELERY_RESULT_DBURI = 'mysql://...'
    CELERY_TIMEZONE = 'Europe/Kiev'
    CELERY_ENABLE_UTC = False
    CELERYBEAT_SCHEDULE = {
        'send-email-every-morning': {
            'task': 'app.workers.tasks.send_email_task',
            'schedule': crontab(hour=6, minute=15),
        },
    }


class TestConfig(Config):
    DEBUG = True
    WTF_CSRF_ENABLED = False
    TESTING = True
    SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://...'


class ProdConfig(Config):
    DEBUG = False
    WTF_CSRF_ENABLED = True
    SQLALCHEMY_DATABASE_URI = 'mysql+mysqldb://...'
    CELERY_BROKER_URL = 'sqla+mysql://...celery'
    CELERY_RESULT_BACKEND = "database"
    CELERY_RESULT_DBURI = 'mysql://.../celery'
    CELERY_TIMEZONE = 'Europe/Kiev'
    CELERY_ENABLE_UTC = False
    CELERYBEAT_SCHEDULE = {
        'send-email-every-morning': {
            'task': 'app.workers.tasks.send_email_task',
            'schedule': crontab(hour=6, minute=15),
        },
    }


config = {
    'development': DevelopmentConfig,
    'default': ProdConfig,
    'production': ProdConfig,
    'testing': TestConfig,
}


class AppConf:
    """
    Class to store current config even out of context
    """
    def __init__(self):
        self.app = None
        self.config = {}

    def init_app(self, app):
        if hasattr(app, 'config'):
            self.app = app
            self.config = app.config.copy()
        else:
            raise TypeError
__init__.py:
import os
from flask import Flask
from celery import Celery
from config import config, AppConf

app_conf = AppConf()


def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    app_conf.init_app(app)

    # Connect to Staging view
    from staging.views import staging as staging_blueprint
    app.register_blueprint(staging_blueprint)
    return app


def make_celery(app=None):
    app = app or create_app(os.getenv('FLASK_CONFIG') or 'default')
    celery = Celery(__name__, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
tasks.py:
from app import make_celery, app_conf

cel = make_celery(app_conf.app)


@cel.task
def send_realm_to_fabricdb(realm, form):
    # some actions...
    pass
And here is the problem:
The blueprint "staging" uses the task send_realm_to_fabricdb, so it does from tasks import send_realm_to_fabricdb.
When I just run the application, everything works fine.
BUT when I try to run Celery with celery -A app.tasks worker -l info --beat, it gets to cel = make_celery(app_conf.app) in tasks.py with app=None and tries to create the application again, registering the blueprint... so I have a circular import here.
Could you tell me how to break this cycle?
Thanks in advance.
I don't have the code to try this out, but I think things would work better if you moved the creation of the Celery instance out of tasks.py and into the create_app function, so that it happens at the same time the app instance is created.
The argument you give to the Celery worker in the -A option does not need to contain the tasks; Celery just needs the celery object. So, for example, you could create a separate starter script, say celery_worker.py, that calls create_app to create app and cel, and then give that to the worker as -A celery_worker.cel, without involving the blueprint at all.
Hope this helps.
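For illustration, a minimal sketch of such a celery_worker.py, assuming the create_app/make_celery factory shown above (the module layout is an assumption, not code from the answer):

# celery_worker.py - sketch only; assumes create_app and make_celery are importable from the app package
import os

from app import create_app, make_celery

app = create_app(os.getenv('FLASK_CONFIG') or 'default')  # build the Flask app once
cel = make_celery(app)                                    # build the Celery instance from that app

# Start the worker with:
#   celery -A celery_worker.cel worker -l info --beat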
What I do to solve this error is to create two Flask instances: one for the web app, and another for initializing the Celery instance.
Like @Miguel said, I have
celery_app.py for the Celery instance
manager.py for the Flask instance
and in these two files, each module has its own Flask instance.
This way I can use celery.task in my views, and I can start the Celery worker separately.
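As a rough sketch of that layout (the file names follow the answer; everything else is an assumption):

# celery_app.py - sketch of the "second Flask instance" idea; names are assumptions
import os

from celery import Celery
from app import create_app  # the application factory shown above

flask_app = create_app(os.getenv('FLASK_CONFIG') or 'default')  # Flask instance used only to configure Celery
celery = Celery(__name__, broker=flask_app.config['CELERY_BROKER_URL'])
celery.conf.update(flask_app.config)

# manager.py would call create_app() separately to build the Flask instance
# that actually serves web requests, so the two modules never import each other.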
Thanks Bob Jordan, you can find the answer at https://stackoverflow.com/a/50665633/2794539.
Key points:
1. make_celery does two things at the same time: it creates the Celery app and runs Celery with the Flask context, so you can split make_celery's job into two functions.
2. The Celery app must be initialized before the blueprints are registered.
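A minimal sketch of that split, assuming the factory pattern above is kept (the helper name init_celery is illustrative, not from the linked answer):

# __init__.py - sketch: the Celery object is created first and configured
# inside create_app() before any blueprint is registered.
from flask import Flask
from celery import Celery
from config import config

celery = Celery(__name__)  # created at import time, not yet configured


def init_celery(app):
    celery.conf.broker_url = app.config['CELERY_BROKER_URL']
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask


def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    init_celery(app)  # Celery is fully set up before the blueprint (and its tasks) are imported
    from staging.views import staging as staging_blueprint
    app.register_blueprint(staging_blueprint)
    return app

Tasks can then use the shared celery object (e.g. @celery.task) without re-creating the application.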
Having the same problem, I ended up solving it very easily using shared_task (docs), keeping a single app.py file and not having to instantiate the Flask app multiple times.
The original situation that led to the circular import:
from src.app import celery  # src.app is ALSO importing the blueprints, which import this file, which causes the circular import


@celery.task(bind=True)
def celery_test(self):
    sleep(5)
    logger.info("Task processed by Celery.")
The current code that works fine and avoids the circular import:
# from src.app import celery  <- not needed anymore!
from celery import shared_task


@shared_task(bind=True)
def celery_test(self):
    sleep(5)
    logger.info("Task processed by Celery.")
Please note that I'm pretty new to Celery, so I might be overlooking important stuff; it would be great if someone more experienced could give their opinion.

web automation - auto check link

I'm new to web apps and I want to check when there's a new version of the Dota map, so I'll check the links on getdota.com.
How can I do this, and in which language? I want it to check every time you start Warcraft, and auto-download the new map to a specific folder.
My question is: can you give a link to a specific article about web automation or something like that?
Thanks first :)
Below is an example in Python.
It parses the getdota.com page, reads the parameters for the POST request for downloading a map, gets the file, and saves it in the configured directory (by default the current directory).
#!/usr/bin/env python
import urllib
import urllib2
import sgmllib
from pprint import pprint
import os.path
import sys

url = 'http://www.getdota.com/'
download_url = 'http://www.getdota.com/app/getmap/'
chunk = 10000
directory = ''  # directory where file should be saved, if empty uses current dir


class DotaParser(sgmllib.SGMLParser):
    def parse(self, s):
        self.feed(s)
        self.close()

    def __init__(self, verbose=0):
        sgmllib.SGMLParser.__init__(self, verbose)
        self.URL = ''
        self.post_args = {}

    def getArgs(self):
        return self.post_args

    def start_input(self, attributes):
        d = dict(attributes)
        if d.get('id', None) == None:
            return
        if d['id'] in ["input_mirror2", "input_file_name2", "input_map_id2", "input_language2", "input_language_id2"]:
            self.post_args[d['name']] = d['value']


if __name__ == '__main__':
    dotap = DotaParser()
    data = urllib2.urlopen(urllib2.Request('http://www.getdota.com/')).read()
    dotap.parse(data)
    data = urllib.urlencode(dotap.getArgs())
    request = urllib2.Request(download_url, data)
    response = urllib2.urlopen(request)
    page = response.read()

    # download file
    fname = directory + page.split('/')[-1]
    if os.path.isfile(fname):
        print "No newer file available"
        sys.exit(0)
    f = open(fname, 'w')
    print "New file available. Saving in: %s" % fname
    webFile = urllib.urlopen(page)
    c = webFile.read(chunk)
    while(c):
        f.write(c)
        c = webFile.read(chunk)
    f.close()
    webFile.close()