diff --git a/.env.sample b/.env.sample
new file mode 100644
index 0000000..24bcced
--- /dev/null
+++ b/.env.sample
@@ -0,0 +1,2 @@
+AIRFLOW_UID=50000
+AIRFLOW_GID=0
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 0dbf2f2..48a4811 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
 # ---> Python
+logs/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/README.md b/README.md
index 8cea8ae..26bd02b 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,44 @@
 # job-manager-by-airflow
-Apach airflow
\ No newline at end of file
+Apache Airflow
+
+* apache/airflow:3.1.2
+* https://github.com/apache/airflow
+
+![APP](./docs/images/app01.png)
+
+## What is Apache Airflow?
+
+Apache Airflow is the best-known platform for managing and automating
+workflows such as data-processing and batch (ETL/ELT) pipelines.
+
+* **Strengths**
+  * Strong fit for large-scale production ETL
+  * Workflows are written as code (Python) in the form of DAGs
+  * The scheduler runs them on a recurring schedule
+  * Solid retries and alerting on failure
+  * Powerful web UI
+  * Rich plugin ecosystem and high extensibility
+
+* **Prerequisites / limitations**
+  * Airflow tracks state such as "how far has this DAG run?" in a database, so it cannot run without a DB.
+
+## Develop
+
+### Build with Docker
+
+```sh
+docker compose up -d
+```
+
+The default user is airflow / airflow.
+
+Airflow is officially developed with Linux / macOS in mind, and quite a few parts
+assume systemd and Unix-style commands. On Windows, run it under WSL.
+
+#### How to fetch the official Docker Compose file
+
+```sh
+curl -LfO 'https://airflow.apache.org/docs/apache-airflow/stable/docker-compose.yaml'
+```
\ No newline at end of file
diff --git a/config/airflow.cfg b/config/airflow.cfg
new file mode 100644
index 0000000..2f9ca08
--- /dev/null
+++ b/config/airflow.cfg
@@ -0,0 +1,3225 @@
+[core]
+# The folder where your airflow pipelines live, most likely a
+# subfolder in a code repository. This path must be absolute.
+#
+# Variable: AIRFLOW__CORE__DAGS_FOLDER
+#
+dags_folder = /opt/airflow/dags
+
+# Hostname by providing a path to a callable, which will resolve the hostname.
+# The format is "package.function".
+#
+# For example, default value ``airflow.utils.net.getfqdn`` means that result from patched
+# version of `socket.getfqdn() `__,
+# see related `CPython Issue `__.
+#
+# No argument should be required in the function specified.
+# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address``
+#
+# Variable: AIRFLOW__CORE__HOSTNAME_CALLABLE
+#
+hostname_callable = airflow.utils.net.getfqdn
+
+# A callable to check if a python file has airflow dags defined or not and should
+# return ``True`` if it has dags otherwise ``False``.
+# If this is not provided, Airflow uses its own heuristic rules.
+#
+# The function should have the following signature
+#
+# .. code-block:: python
+#
+#     def func_name(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool: ...
+#
+# Variable: AIRFLOW__CORE__MIGHT_CONTAIN_DAG_CALLABLE
+#
+might_contain_dag_callable = airflow.utils.file.might_contain_dag_via_default_heuristic
+
+# Default timezone in case supplied date times are naive
+# can be `UTC` (default), `system`, or any `IANA `
+# timezone string (e.g. Europe/Amsterdam)
+#
+# Variable: AIRFLOW__CORE__DEFAULT_TIMEZONE
+#
+default_timezone = utc
+
+# The executor class that airflow should use. Choices include
+# ``LocalExecutor``, ``CeleryExecutor``,
+# ``KubernetesExecutor`` or the full import path to the class when using a custom executor.
+#
+# Variable: AIRFLOW__CORE__EXECUTOR
+#
+executor = LocalExecutor
+
+# The auth manager class that airflow should use. Full import path to the auth manager class.
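+# The value below keeps the bundled simple auth manager. As an illustration only (it assumes
+# the separate FAB auth manager provider package is installed), a deployment could instead
+# point this at that provider's auth manager:
+#
+# Example: auth_manager = airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager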
+# +# Variable: AIRFLOW__CORE__AUTH_MANAGER +# +auth_manager = airflow.api_fastapi.auth.managers.simple.simple_auth_manager.SimpleAuthManager + +# The list of users and their associated role in simple auth manager. If the simple auth manager is +# used in your environment, this list controls who can access the environment. +# +# List of user-role delimited with a comma. Each user-role is a colon delimited couple of username and +# role. Roles are predefined in simple auth managers: viewer, user, op, admin. +# +# Example: simple_auth_manager_users = bob:admin,peter:viewer +# +# Variable: AIRFLOW__CORE__SIMPLE_AUTH_MANAGER_USERS +# +simple_auth_manager_users = admin:admin + +# Whether to disable authentication and allow everyone as admin in the environment. +# +# Variable: AIRFLOW__CORE__SIMPLE_AUTH_MANAGER_ALL_ADMINS +# +simple_auth_manager_all_admins = False + +# The json file where the simple auth manager stores passwords for the configured users. +# By default this is ``AIRFLOW_HOME/simple_auth_manager_passwords.json.generated``. +# +# Example: simple_auth_manager_passwords_file = /path/to/passwords.json +# +# Variable: AIRFLOW__CORE__SIMPLE_AUTH_MANAGER_PASSWORDS_FILE +# +# simple_auth_manager_passwords_file = + +# This defines the maximum number of task instances that can run concurrently per scheduler in +# Airflow, regardless of the worker count. Generally this value, multiplied by the number of +# schedulers in your cluster, is the maximum number of task instances with the running +# state in the metadata database. The value must be larger or equal 1. +# +# Variable: AIRFLOW__CORE__PARALLELISM +# +parallelism = 32 + +# The maximum number of task instances allowed to run concurrently in each DAG. To calculate +# the number of tasks that is running concurrently for a DAG, add up the number of running +# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``max_active_tasks``, +# which is defaulted as ``[core] max_active_tasks_per_dag``. +# +# An example scenario when this would be useful is when you want to stop a new dag with an early +# start date from stealing all the executor slots in a cluster. +# +# Variable: AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG +# +max_active_tasks_per_dag = 16 + +# Are DAGs paused by default at creation +# +# Variable: AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION +# +dags_are_paused_at_creation = True + +# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs +# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``, +# which is defaulted as ``[core] max_active_runs_per_dag``. +# +# Variable: AIRFLOW__CORE__MAX_ACTIVE_RUNS_PER_DAG +# +max_active_runs_per_dag = 16 + +# (experimental) The maximum number of consecutive DAG failures before DAG is automatically paused. +# This is also configurable per DAG level with ``max_consecutive_failed_dag_runs``, +# which is defaulted as ``[core] max_consecutive_failed_dag_runs_per_dag``. +# If not specified, then the value is considered as 0, +# meaning that the dags are never paused out by default. +# +# Variable: AIRFLOW__CORE__MAX_CONSECUTIVE_FAILED_DAG_RUNS_PER_DAG +# +max_consecutive_failed_dag_runs_per_dag = 0 + +# The name of the method used in order to start Python processes via the multiprocessing module. +# This corresponds directly with the options available in the Python docs: +# `multiprocessing.set_start_method +# `__ +# must be one of the values returned by `multiprocessing.get_all_start_methods() +# `__. 
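+# One way to see which values are valid on the machine that runs the Airflow components is to
+# ask the standard library directly:
+#
+# .. code-block:: python
+#
+#     import multiprocessing
+#
+#     # On Linux this typically prints ['fork', 'spawn', 'forkserver'].
+#     print(multiprocessing.get_all_start_methods())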
+# +# Example: mp_start_method = fork +# +# Variable: AIRFLOW__CORE__MP_START_METHOD +# +# mp_start_method = + +# Whether to load the DAG examples that ship with Airflow. It's good to +# get started, but you probably want to set this to ``False`` in a production +# environment +# +# Variable: AIRFLOW__CORE__LOAD_EXAMPLES +# +load_examples = True + +# Path to the folder containing Airflow plugins +# +# Variable: AIRFLOW__CORE__PLUGINS_FOLDER +# +plugins_folder = /opt/airflow/plugins + +# Should tasks be executed via forking of the parent process +# +# * ``False``: Execute via forking of the parent process +# * ``True``: Spawning a new python process, slower than fork, but means plugin changes picked +# up by tasks straight away +# +# Variable: AIRFLOW__CORE__EXECUTE_TASKS_NEW_PYTHON_INTERPRETER +# +execute_tasks_new_python_interpreter = False + +# Secret key to save connection passwords in the db +# +# Variable: AIRFLOW__CORE__FERNET_KEY +# +fernet_key = FGnQ2hpBWTfxvE3AuxyOZlvYml6ka5PxbVzRZcIg384= + +# How long before timing out a python file import +# +# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_TIMEOUT +# +dagbag_import_timeout = 30.0 + +# Should a traceback be shown in the UI for dagbag import errors, +# instead of just the exception message +# +# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACKS +# +dagbag_import_error_tracebacks = True + +# If tracebacks are shown, how many entries from the traceback should be shown +# +# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACK_DEPTH +# +dagbag_import_error_traceback_depth = 2 + +# If set, tasks without a ``run_as_user`` argument will be run with this user +# Can be used to de-elevate a sudo user running Airflow when executing tasks +# +# Variable: AIRFLOW__CORE__DEFAULT_IMPERSONATION +# +default_impersonation = + +# What security module to use (for example kerberos) +# +# Variable: AIRFLOW__CORE__SECURITY +# +security = + +# Turn unit test mode on (overwrites many configuration options with test +# values at runtime) +# +# Variable: AIRFLOW__CORE__UNIT_TEST_MODE +# +unit_test_mode = False + +# Space-separated list of classes that may be imported during deserialization. Items can be glob +# expressions. Python built-in classes (like dict) are always allowed. +# +# Example: allowed_deserialization_classes = airflow.* my_mod.my_other_mod.TheseClasses* +# +# Variable: AIRFLOW__CORE__ALLOWED_DESERIALIZATION_CLASSES +# +allowed_deserialization_classes = airflow.* + +# Space-separated list of classes that may be imported during deserialization. Items are processed +# as regex expressions. Python built-in classes (like dict) are always allowed. +# This is a secondary option to ``[core] allowed_deserialization_classes``. +# +# Variable: AIRFLOW__CORE__ALLOWED_DESERIALIZATION_CLASSES_REGEXP +# +allowed_deserialization_classes_regexp = + +# When a task is killed forcefully, this is the amount of time in seconds that +# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED +# +# Variable: AIRFLOW__CORE__KILLED_TASK_CLEANUP_TIME +# +killed_task_cleanup_time = 60 + +# Whether to override params with dag_run.conf. If you pass some key-value pairs +# through ``airflow dags backfill -c`` or +# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. +# +# Variable: AIRFLOW__CORE__DAG_RUN_CONF_OVERRIDES_PARAMS +# +dag_run_conf_overrides_params = True + +# If enabled, Airflow will only scan files containing both ``DAG`` and ``airflow`` (case-insensitive). 
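+# A DAG file as small as the sketch below already contains both words, so safe mode still picks
+# it up. The imports follow the Airflow 3 task SDK; treat the exact module path and the
+# file/task names as illustrative assumptions rather than a prescribed layout.
+#
+# .. code-block:: python
+#
+#     # dags/hello_airflow.py
+#     from airflow.sdk import dag, task
+#
+#     @dag(schedule=None)  # no schedule; run it manually from the UI
+#     def hello_airflow():
+#         @task
+#         def say_hello():
+#             print("hello from a DAG")
+#
+#         say_hello()
+#
+#     hello_airflow()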
+# +# Variable: AIRFLOW__CORE__DAG_DISCOVERY_SAFE_MODE +# +dag_discovery_safe_mode = True + +# The pattern syntax used in the +# `.airflowignore +# `__ +# files in the DAG directories. Valid values are ``regexp`` or ``glob``. +# +# Variable: AIRFLOW__CORE__DAG_IGNORE_FILE_SYNTAX +# +dag_ignore_file_syntax = glob + +# The number of retries each task is going to have by default. Can be overridden at dag or task level. +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_RETRIES +# +default_task_retries = 0 + +# The number of seconds each task is going to wait by default between retries. Can be overridden at +# dag or task level. +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_RETRY_DELAY +# +default_task_retry_delay = 300 + +# The maximum delay (in seconds) each task is going to wait by default between retries. +# This is a global setting and cannot be overridden at task or DAG level. +# +# Variable: AIRFLOW__CORE__MAX_TASK_RETRY_DELAY +# +max_task_retry_delay = 86400 + +# The weighting method used for the effective total priority weight of the task +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_WEIGHT_RULE +# +default_task_weight_rule = downstream + +# Maximum possible time (in seconds) that task will have for execution of auxiliary processes +# (like listeners, mini scheduler...) after task is marked as success.. +# +# Variable: AIRFLOW__CORE__TASK_SUCCESS_OVERTIME +# +task_success_overtime = 20 + +# The default task execution_timeout value for the operators. Expected an integer value to +# be passed into timedelta as seconds. If not specified, then the value is considered as None, +# meaning that the operators are never timed out by default. +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_EXECUTION_TIMEOUT +# +default_task_execution_timeout = + +# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. +# +# Variable: AIRFLOW__CORE__MIN_SERIALIZED_DAG_UPDATE_INTERVAL +# +min_serialized_dag_update_interval = 30 + +# If ``True``, serialized DAGs are compressed before writing to DB. +# +# .. note:: +# +# This will disable the DAG dependencies view +# +# Variable: AIRFLOW__CORE__COMPRESS_SERIALIZED_DAGS +# +compress_serialized_dags = False + +# Fetching serialized DAG can not be faster than a minimum interval to reduce database +# read rate. This config controls when your DAGs are updated in the Webserver +# +# Variable: AIRFLOW__CORE__MIN_SERIALIZED_DAG_FETCH_INTERVAL +# +min_serialized_dag_fetch_interval = 10 + +# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store +# in the Database. +# All the template_fields for each of Task Instance are stored in the Database. +# Keeping this number small may cause an error when you try to view ``Rendered`` tab in +# TaskInstance view for older tasks. +# +# Variable: AIRFLOW__CORE__MAX_NUM_RENDERED_TI_FIELDS_PER_TASK +# +max_num_rendered_ti_fields_per_task = 30 + +# Path to custom XCom class that will be used to store and resolve operators results +# +# Example: xcom_backend = path.to.CustomXCom +# +# Variable: AIRFLOW__CORE__XCOM_BACKEND +# +xcom_backend = airflow.sdk.execution_time.xcom.BaseXCom + +# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``, +# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module. +# +# Variable: AIRFLOW__CORE__LAZY_LOAD_PLUGINS +# +lazy_load_plugins = True + +# By default Airflow providers are lazily-discovered (discovery and imports happen only when required). 
+# Set it to ``False``, if you want to discover providers whenever 'airflow' is invoked via cli or +# loaded from module. +# +# Variable: AIRFLOW__CORE__LAZY_DISCOVER_PROVIDERS +# +lazy_discover_providers = True + +# Hide sensitive **Variables** or **Connection extra json keys** from UI +# and task logs when set to ``True`` +# +# .. note:: +# +# Connection passwords are always hidden in logs +# +# Variable: AIRFLOW__CORE__HIDE_SENSITIVE_VAR_CONN_FIELDS +# +hide_sensitive_var_conn_fields = True + +# A comma-separated list of extra sensitive keywords to look for in variables names or connection's +# extra JSON. +# +# Variable: AIRFLOW__CORE__SENSITIVE_VAR_CONN_NAMES +# +sensitive_var_conn_names = + +# Task Slot counts for ``default_pool``. This setting would not have any effect in an existing +# deployment where the ``default_pool`` is already created. For existing deployments, users can +# change the number of slots using Webserver, API or the CLI +# +# Variable: AIRFLOW__CORE__DEFAULT_POOL_TASK_SLOT_COUNT +# +default_pool_task_slot_count = 128 + +# The maximum list/dict length an XCom can push to trigger task mapping. If the pushed list/dict has a +# length exceeding this value, the task pushing the XCom will be failed automatically to prevent the +# mapped tasks from clogging the scheduler. +# +# Variable: AIRFLOW__CORE__MAX_MAP_LENGTH +# +max_map_length = 1024 + +# The default umask to use for process when run in daemon mode (scheduler, worker, etc.) +# +# This controls the file-creation mode mask which determines the initial value of file permission bits +# for newly created files. +# +# This value is treated as an octal-integer. +# +# Variable: AIRFLOW__CORE__DAEMON_UMASK +# +daemon_umask = 0o077 + +# Class to use as asset manager. +# +# Example: asset_manager_class = airflow.assets.manager.AssetManager +# +# Variable: AIRFLOW__CORE__ASSET_MANAGER_CLASS +# +# asset_manager_class = + +# Kwargs to supply to asset manager. +# +# Example: asset_manager_kwargs = {"some_param": "some_value"} +# +# Variable: AIRFLOW__CORE__ASSET_MANAGER_KWARGS +# +# asset_manager_kwargs = + +# The ability to allow testing connections across Airflow UI, API and CLI. +# Supported options: ``Disabled``, ``Enabled``, ``Hidden``. Default: Disabled +# Disabled - Disables the test connection functionality and disables the Test Connection button in UI. +# Enabled - Enables the test connection functionality and shows the Test Connection button in UI. +# Hidden - Disables the test connection functionality and hides the Test Connection button in UI. +# Before setting this to Enabled, make sure that you review the users who are able to add/edit +# connections and ensure they are trusted. Connection testing can be done maliciously leading to +# undesired and insecure outcomes. +# See `Airflow Security Model: Capabilities of authenticated UI users +# `__ +# for more details. +# +# Variable: AIRFLOW__CORE__TEST_CONNECTION +# +test_connection = Disabled + +# The maximum length of the rendered template field. If the value to be stored in the +# rendered template field exceeds this size, it's redacted. +# +# Variable: AIRFLOW__CORE__MAX_TEMPLATED_FIELD_LENGTH +# +max_templated_field_length = 4096 + +# The url of the execution api server. Default is ``{BASE_URL}/execution/`` +# where ``{BASE_URL}`` is the base url of the API Server. If ``{BASE_URL}`` is not set, +# it will use ``http://localhost:8080`` as the default base url. 
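+# Putting those two pieces together, the fully expanded default looks like the value below
+# (shown only as an illustration of the ``{BASE_URL}/execution/`` pattern):
+#
+# Example: execution_api_server_url = http://localhost:8080/execution/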
+# +# Variable: AIRFLOW__CORE__EXECUTION_API_SERVER_URL +# +# execution_api_server_url = + +[database] +# Path to the ``alembic.ini`` file. You can either provide the file path relative +# to the Airflow home directory or the absolute path if it is located elsewhere. +# +# Variable: AIRFLOW__DATABASE__ALEMBIC_INI_FILE_PATH +# +alembic_ini_file_path = alembic.ini + +# The SQLAlchemy connection string to the metadata database. +# SQLAlchemy supports many different database engines. +# See: `Set up a Database Backend: Database URI +# `__ +# for more details. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN +# +sql_alchemy_conn = sqlite:////opt/airflow/airflow.db + +# The SQLAlchemy connection string to the metadata database used for async connections. +# If this is not set, Airflow automatically derives a string by converting ``sql_alchemy_conn``. +# Unfortunately, this conversion logic does not always work due to various incompatibilities +# between sync and async db driver implementations. This sets the connection string directly +# without any conversion instead. +# +# Example: sql_alchemy_conn_async = postgresql+asyncpg://postgres:airflow@postgres/airflow +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN_ASYNC +# +# sql_alchemy_conn_async = + +# Extra engine specific keyword args passed to SQLAlchemy's create_engine, as a JSON-encoded value +# +# Example: sql_alchemy_engine_args = {"arg1": true} +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_ENGINE_ARGS +# +# sql_alchemy_engine_args = + +# The encoding for the databases +# +# Variable: AIRFLOW__DATABASE__SQL_ENGINE_ENCODING +# +sql_engine_encoding = utf-8 + +# Collation for ``dag_id``, ``task_id``, ``key``, ``external_executor_id`` columns +# in case they have different encoding. +# By default this collation is the same as the database collation, however for ``mysql`` and ``mariadb`` +# the default is ``utf8mb3_bin`` so that the index sizes of our index keys will not exceed +# the maximum size of allowed index when collation is set to ``utf8mb4`` variant, see +# `GitHub Issue Comment `__ +# for more details. +# +# Variable: AIRFLOW__DATABASE__SQL_ENGINE_COLLATION_FOR_IDS +# +# sql_engine_collation_for_ids = + +# If SQLAlchemy should pool database connections. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_ENABLED +# +sql_alchemy_pool_enabled = True + +# The SQLAlchemy pool size is the maximum number of database connections +# in the pool. 0 indicates no limit. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_SIZE +# +sql_alchemy_pool_size = 5 + +# The maximum overflow size of the pool. +# When the number of checked-out connections reaches the size set in pool_size, +# additional connections will be returned up to this limit. +# When those additional connections are returned to the pool, they are disconnected and discarded. +# It follows then that the total number of simultaneous connections the pool will allow +# is **pool_size** + **max_overflow**, +# and the total number of "sleeping" connections the pool will allow is pool_size. +# max_overflow can be set to ``-1`` to indicate no overflow limit; +# no limit will be placed on the total number of concurrent connections. Defaults to ``10``. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_OVERFLOW +# +sql_alchemy_max_overflow = 10 + +# The SQLAlchemy pool recycle is the number of seconds a connection +# can be idle in the pool before it is invalidated. This config does +# not apply to sqlite. 
If the number of DB connections is ever exceeded, +# a lower config value will allow the system to recover faster. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_RECYCLE +# +sql_alchemy_pool_recycle = 1800 + +# Check connection at the start of each connection pool checkout. +# Typically, this is a simple statement like "SELECT 1". +# See `SQLAlchemy Pooling: Disconnect Handling - Pessimistic +# `__ +# for more details. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_PRE_PING +# +sql_alchemy_pool_pre_ping = True + +# The schema to use for the metadata database. +# SQLAlchemy supports databases with the concept of multiple schemas. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_SCHEMA +# +sql_alchemy_schema = + +# Import path for connect args in SQLAlchemy. Defaults to an empty dict. +# This is useful when you want to configure db engine args that SQLAlchemy won't parse +# in connection string. This can be set by passing a dictionary containing the create engine parameters. +# For more details about passing create engine parameters (keepalives variables, timeout etc) +# in Postgres DB Backend see `Setting up a PostgreSQL Database +# `__ +# e.g ``connect_args={"timeout":30}`` can be defined in ``airflow_local_settings.py`` and +# can be imported as shown below +# +# *Changed in 3.1.0*: This configuration is only applied to synchronous engines, such as psycopg2. +# See ``sql_alchemy_connect_args_async``. +# +# Example: sql_alchemy_connect_args = airflow_local_settings.connect_args +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONNECT_ARGS +# +# sql_alchemy_connect_args = + +# Import path for connect args in SQLAlchemy. Defaults to an empty dict. +# This is similar to ``sql_alchemy_connect_args``, but only for async connections. +# +# This configuration is only applied to async engines, such as asyncpg. +# +# Example: sql_alchemy_connect_args_async = airflow_local_settings.connect_args_async +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONNECT_ARGS_ASYNC +# +# sql_alchemy_connect_args_async = + +# Important Warning: Use of sql_alchemy_session_maker Highly Discouraged +# Import path for function which returns 'sqlalchemy.orm.sessionmaker'. +# Improper configuration of sql_alchemy_session_maker can lead to serious issues, +# including data corruption, unrecoverable application crashes. Please review the SQLAlchemy +# documentation for detailed guidance on proper configuration and best practices. +# +# Example: sql_alchemy_session_maker = airflow_local_settings._sessionmaker +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_SESSION_MAKER +# +# sql_alchemy_session_maker = + +# Number of times the code should be retried in case of DB Operational Errors. +# Not all transactions will be retried as it can cause undesired state. +# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. +# +# Variable: AIRFLOW__DATABASE__MAX_DB_RETRIES +# +max_db_retries = 3 + +# Whether to run alembic migrations during Airflow start up. Sometimes this operation can be expensive, +# and the users can assert the correct version through other means (e.g. through a Helm chart). +# Accepts ``True`` or ``False``. +# +# Variable: AIRFLOW__DATABASE__CHECK_MIGRATIONS +# +check_migrations = True + +# List of DB managers to use to migrate external tables in airflow database. The managers must inherit +# from BaseDBManager. If ``FabAuthManager`` is configured in the environment, +# ``airflow.providers.fab.auth_manager.models.db.FABDBManager`` is automatically added. 
+# +# Example: external_db_managers = airflow.providers.fab.auth_manager.models.db.FABDBManager +# +# Variable: AIRFLOW__DATABASE__EXTERNAL_DB_MANAGERS +# +# external_db_managers = + +# The number of rows to process in each batch when performing a migration. +# This is useful for large tables to avoid locking and failure due to query timeouts. +# +# Variable: AIRFLOW__DATABASE__MIGRATION_BATCH_SIZE +# +migration_batch_size = 10000 + +[logging] +# The folder where airflow should store its log files. +# This path must be absolute. +# There are a few existing configurations that assume this is set to the default. +# If you choose to override this you may need to update the +# ``[logging] dag_processor_manager_log_location`` and +# ``[logging] dag_processor_child_process_log_directory settings`` as well. +# +# Variable: AIRFLOW__LOGGING__BASE_LOG_FOLDER +# +base_log_folder = /opt/airflow/logs + +# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. +# Set this to ``True`` if you want to enable remote logging. +# +# Variable: AIRFLOW__LOGGING__REMOTE_LOGGING +# +remote_logging = False + +# Users must supply an Airflow connection id that provides access to the storage +# location. Depending on your remote logging service, this may only be used for +# reading logs, not writing them. +# +# Variable: AIRFLOW__LOGGING__REMOTE_LOG_CONN_ID +# +remote_log_conn_id = + +# Whether the local log files for GCS, S3, WASB, HDFS and OSS remote logging should be deleted after +# they are uploaded to the remote location. +# +# Variable: AIRFLOW__LOGGING__DELETE_LOCAL_LOGS +# +delete_local_logs = False + +# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default +# Credentials +# `__ will +# be used. +# +# Variable: AIRFLOW__LOGGING__GOOGLE_KEY_PATH +# +google_key_path = + +# Storage bucket URL for remote logging +# S3 buckets should start with **s3://** +# Cloudwatch log groups should start with **cloudwatch://** +# GCS buckets should start with **gs://** +# WASB buckets should start with **wasb** just to help Airflow select correct handler +# Stackdriver logs should start with **stackdriver://** +# +# Variable: AIRFLOW__LOGGING__REMOTE_BASE_LOG_FOLDER +# +remote_base_log_folder = + +# The remote_task_handler_kwargs param is loaded into a dictionary and passed to the ``__init__`` +# of remote task handler and it overrides the values provided by Airflow config. For example if you set +# ``delete_local_logs=False`` and you provide ``{"delete_local_copy": true}``, then the local +# log files will be deleted after they are uploaded to remote location. +# +# Example: remote_task_handler_kwargs = {"delete_local_copy": true} +# +# Variable: AIRFLOW__LOGGING__REMOTE_TASK_HANDLER_KWARGS +# +remote_task_handler_kwargs = + +# Use server-side encryption for logs stored in S3 +# +# Variable: AIRFLOW__LOGGING__ENCRYPT_S3_LOGS +# +encrypt_s3_logs = False + +# Logging level. +# +# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. +# +# Variable: AIRFLOW__LOGGING__LOGGING_LEVEL +# +logging_level = INFO + +# Logging level for celery. If not set, it uses the value of logging_level +# +# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. +# +# Variable: AIRFLOW__LOGGING__CELERY_LOGGING_LEVEL +# +celery_logging_level = + +# Logging level for Flask-appbuilder UI. +# +# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. 
+# +# Variable: AIRFLOW__LOGGING__FAB_LOGGING_LEVEL +# +fab_logging_level = WARNING + +# Logging class +# Specify the class that will specify the logging configuration +# This class has to be on the python classpath +# +# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG +# +# Variable: AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS +# +logging_config_class = + +# Flag to enable/disable Colored logs in Console +# Colour the logs when the controlling terminal is a TTY. +# +# Variable: AIRFLOW__LOGGING__COLORED_CONSOLE_LOG +# +colored_console_log = True + +# Format of Log line +# +# *Changed in 3.1.0*: This can now contain color escape sequences ``%(blue)s`` etc which will only +# result in colours if :ref:`config:logging__colored_console_log` is true. +# +# Example: log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s +# +# Variable: AIRFLOW__LOGGING__LOG_FORMAT +# +log_format = + +# A comma separated list of information about the callsite (such as line number of filename etc) of +# logger calls to include in each message. +# +# See :class:`structlog.processors.CallsiteParameter` for the possible values.The values should be the +# constant names (``FUNC_NAME``) or the values (``func_name``) +# +# Including these in a log message adds a lot to the usability to the logs, but collecting these has a +# (tiny) cost -- if you are super concerned with eking out every last ounce of performance you could +# turn these off (by setting this value to an empty string) +# +# Variable: AIRFLOW__LOGGING__CALLSITE_PARAMETERS +# +callsite_parameters = filename,lineno + +# Defines the format of log messages for simple logging configuration +# +# Variable: AIRFLOW__LOGGING__SIMPLE_LOG_FORMAT +# +simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s + +# Where to send dag parser logs. If "file", logs are sent to log files defined by child_process_log_directory. +# +# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_LOG_TARGET +# +dag_processor_log_target = file + +# Format of Dag Processor Log line +# +# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_LOG_FORMAT +# +dag_processor_log_format = [%%(asctime)s] [SOURCE:DAG_PROCESSOR] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s + +# Determines the directory where logs for the child processes of the dag processor will be stored +# +# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_CHILD_PROCESS_LOG_DIRECTORY +# +dag_processor_child_process_log_directory = /opt/airflow/logs/dag_processor + +# Determines the formatter class used by Airflow for structuring its log messages +# The default formatter class is timezone-aware, which means that timestamps attached to log entries +# will be adjusted to reflect the local timezone of the Airflow instance +# +# Variable: AIRFLOW__LOGGING__LOG_FORMATTER_CLASS +# +log_formatter_class = airflow.utils.log.timezone_aware.TimezoneAware + +# An import path to a function to add adaptations of each secret added with +# ``airflow.sdk.execution_time.secrets_masker.mask_secret`` to be masked in log messages. +# The given function is expected to require a single parameter: the secret to be adapted. +# It may return a single adaptation of the secret or an iterable of adaptations to each be +# masked as secrets. The original secret will be masked as well as any adaptations returned. +# +# Example: secret_mask_adapter = urllib.parse.quote +# +# Variable: AIRFLOW__LOGGING__SECRET_MASK_ADAPTER +# +secret_mask_adapter = + +# The minimum length of a secret to be masked in log messages. 
+# Secrets shorter than this length will not be masked. +# +# Variable: AIRFLOW__LOGGING__MIN_LENGTH_MASKED_SECRET +# +min_length_masked_secret = 5 + +# Specify prefix pattern like mentioned below with stream handler ``TaskHandlerWithCustomFormatter`` +# +# Example: task_log_prefix_template = {{ti.dag_id}}-{{ti.task_id}}-{{logical_date}}-{{ti.try_number}} +# +# Variable: AIRFLOW__LOGGING__TASK_LOG_PREFIX_TEMPLATE +# +task_log_prefix_template = + +# Formatting for how airflow generates file names/paths for each task run. +# +# Variable: AIRFLOW__LOGGING__LOG_FILENAME_TEMPLATE +# +log_filename_template = dag_id={{ ti.dag_id }}/run_id={{ ti.run_id }}/task_id={{ ti.task_id }}/{%% if ti.map_index >= 0 %%}map_index={{ ti.map_index }}/{%% endif %%}attempt={{ try_number|default(ti.try_number) }}.log + +# Name of handler to read task instance logs. +# Defaults to use ``task`` handler. +# +# Variable: AIRFLOW__LOGGING__TASK_LOG_READER +# +task_log_reader = task + +# A comma\-separated list of third-party logger names that will be configured to print messages to +# consoles\. +# +# Example: extra_logger_names = fastapi,sqlalchemy +# +# Variable: AIRFLOW__LOGGING__EXTRA_LOGGER_NAMES +# +extra_logger_names = + +# When you start an Airflow worker, Airflow starts a tiny web server +# subprocess to serve the workers local log files to the airflow main +# web server, who then builds pages and sends them to users. This defines +# the port on which the logs are served. It needs to be unused, and open +# visible from the main web server to connect into the workers. +# +# Variable: AIRFLOW__LOGGING__WORKER_LOG_SERVER_PORT +# +worker_log_server_port = 8793 + +# Port to serve logs from for triggerer. +# See ``[logging] worker_log_server_port`` description for more info. +# +# Variable: AIRFLOW__LOGGING__TRIGGER_LOG_SERVER_PORT +# +trigger_log_server_port = 8794 + +# We must parse timestamps to interleave logs between trigger and task. To do so, +# we need to parse timestamps in log files. In case your log format is non-standard, +# you may provide import path to callable which takes a string log line and returns +# the timestamp (datetime.datetime compatible). +# +# Example: interleave_timestamp_parser = path.to.my_func +# +# Variable: AIRFLOW__LOGGING__INTERLEAVE_TIMESTAMP_PARSER +# +# interleave_timestamp_parser = + +# Permissions in the form or of octal string as understood by chmod. The permissions are important +# when you use impersonation, when logs are written by a different user than airflow. The most secure +# way of configuring it in this case is to add both users to the same group and make it the default +# group of both users. Group-writeable logs are default in airflow, but you might decide that you are +# OK with having the logs other-writeable, in which case you should set it to ``0o777``. You might +# decide to add more security if you do not use impersonation and change it to ``0o755`` to make it +# only owner-writeable. You can also make it just readable only for owner by changing it to ``0o700`` +# if all the access (read/write) for your logs happens from the same user. +# +# Example: file_task_handler_new_folder_permissions = 0o775 +# +# Variable: AIRFLOW__LOGGING__FILE_TASK_HANDLER_NEW_FOLDER_PERMISSIONS +# +file_task_handler_new_folder_permissions = 0o775 + +# Permissions in the form or of octal string as understood by chmod. The permissions are important +# when you use impersonation, when logs are written by a different user than airflow. 
The most secure +# way of configuring it in this case is to add both users to the same group and make it the default +# group of both users. Group-writeable logs are default in airflow, but you might decide that you are +# OK with having the logs other-writeable, in which case you should set it to ``0o666``. You might +# decide to add more security if you do not use impersonation and change it to ``0o644`` to make it +# only owner-writeable. You can also make it just readable only for owner by changing it to ``0o600`` +# if all the access (read/write) for your logs happens from the same user. +# +# Example: file_task_handler_new_file_permissions = 0o664 +# +# Variable: AIRFLOW__LOGGING__FILE_TASK_HANDLER_NEW_FILE_PERMISSIONS +# +file_task_handler_new_file_permissions = 0o664 + +# By default Celery sends all logs into stderr. +# If enabled any previous logging handlers will get *removed*. +# With this option AirFlow will create new handlers +# and send low level logs like INFO and WARNING to stdout, +# while sending higher severity logs to stderr. +# +# Variable: AIRFLOW__LOGGING__CELERY_STDOUT_STDERR_SEPARATION +# +celery_stdout_stderr_separation = False + +# A comma separated list of keywords related to errors whose presence should display the line in red +# color in UI +# +# Variable: AIRFLOW__LOGGING__COLOR_LOG_ERROR_KEYWORDS +# +color_log_error_keywords = error,exception + +# A comma separated list of keywords related to warning whose presence should display the line in yellow +# color in UI +# +# Variable: AIRFLOW__LOGGING__COLOR_LOG_WARNING_KEYWORDS +# +color_log_warning_keywords = warn + +[metrics] +# `StatsD `__ integration settings. + +# Configure an allow list (comma separated regex patterns to match) to send only certain metrics. +# +# Example: metrics_allow_list = "scheduler,executor,dagrun,pool,triggerer,celery" or "^scheduler,^executor,heartbeat|timeout" +# +# Variable: AIRFLOW__METRICS__METRICS_ALLOW_LIST +# +metrics_allow_list = + +# Configure a block list (comma separated regex patterns to match) to block certain metrics +# from being emitted. +# If ``[metrics] metrics_allow_list`` and ``[metrics] metrics_block_list`` are both configured, +# ``[metrics] metrics_block_list`` is ignored. +# +# Example: metrics_block_list = "scheduler,executor,dagrun,pool,triggerer,celery" or "^scheduler,^executor,heartbeat|timeout" +# +# Variable: AIRFLOW__METRICS__METRICS_BLOCK_LIST +# +metrics_block_list = + +# Enables sending metrics to StatsD. +# +# Variable: AIRFLOW__METRICS__STATSD_ON +# +statsd_on = False + +# Specifies the host address where the StatsD daemon (or server) is running +# +# Variable: AIRFLOW__METRICS__STATSD_HOST +# +statsd_host = localhost + +# Enables the statsd host to be resolved into IPv6 address +# +# Variable: AIRFLOW__METRICS__STATSD_IPV6 +# +statsd_ipv6 = False + +# Specifies the port on which the StatsD daemon (or server) is listening to +# +# Variable: AIRFLOW__METRICS__STATSD_PORT +# +statsd_port = 8125 + +# Defines the namespace for all metrics sent from Airflow to StatsD +# +# Variable: AIRFLOW__METRICS__STATSD_PREFIX +# +statsd_prefix = airflow + +# A function that validate the StatsD stat name, apply changes to the stat name if necessary and return +# the transformed stat name. +# +# The function should have the following signature +# +# .. code-block:: python +# +# def func_name(stat_name: str) -> str: ... +# +# Variable: AIRFLOW__METRICS__STAT_NAME_HANDLER +# +stat_name_handler = + +# To enable datadog integration to send airflow metrics. 
+# +# Variable: AIRFLOW__METRICS__STATSD_DATADOG_ENABLED +# +statsd_datadog_enabled = False + +# List of datadog tags attached to all metrics(e.g: ``key1:value1,key2:value2``) +# +# Variable: AIRFLOW__METRICS__STATSD_DATADOG_TAGS +# +statsd_datadog_tags = + +# Set to ``False`` to disable metadata tags for some of the emitted metrics +# +# Variable: AIRFLOW__METRICS__STATSD_DATADOG_METRICS_TAGS +# +statsd_datadog_metrics_tags = True + +# If you want to utilise your own custom StatsD client set the relevant +# module path below. +# Note: The module path must exist on your +# `PYTHONPATH ` +# for Airflow to pick it up +# +# Variable: AIRFLOW__METRICS__STATSD_CUSTOM_CLIENT_PATH +# +# statsd_custom_client_path = + +# If you want to avoid sending all the available metrics tags to StatsD, +# you can configure a block list of prefixes (comma separated) to filter out metric tags +# that start with the elements of the list (e.g: ``job_id,run_id``) +# +# Example: statsd_disabled_tags = job_id,run_id,dag_id,task_id +# +# Variable: AIRFLOW__METRICS__STATSD_DISABLED_TAGS +# +statsd_disabled_tags = job_id,run_id + +# To enable sending Airflow metrics with StatsD-Influxdb tagging convention. +# +# Variable: AIRFLOW__METRICS__STATSD_INFLUXDB_ENABLED +# +statsd_influxdb_enabled = False + +# Enables sending metrics to OpenTelemetry. +# +# Variable: AIRFLOW__METRICS__OTEL_ON +# +otel_on = False + +# Specifies the hostname or IP address of the OpenTelemetry Collector to which Airflow sends +# metrics and traces. +# +# Variable: AIRFLOW__METRICS__OTEL_HOST +# +otel_host = localhost + +# Specifies the port of the OpenTelemetry Collector that is listening to. +# +# Variable: AIRFLOW__METRICS__OTEL_PORT +# +otel_port = 8889 + +# The prefix for the Airflow metrics. +# +# Variable: AIRFLOW__METRICS__OTEL_PREFIX +# +otel_prefix = airflow + +# Defines the interval, in milliseconds, at which Airflow sends batches of metrics and traces +# to the configured OpenTelemetry Collector. +# +# Variable: AIRFLOW__METRICS__OTEL_INTERVAL_MILLISECONDS +# +otel_interval_milliseconds = 60000 + +# If ``True``, all metrics are also emitted to the console. Defaults to ``False``. +# +# Variable: AIRFLOW__METRICS__OTEL_DEBUGGING_ON +# +otel_debugging_on = False + +# The default service name of traces. +# +# Variable: AIRFLOW__METRICS__OTEL_SERVICE +# +otel_service = Airflow + +# If ``True``, SSL will be enabled. Defaults to ``False``. +# To establish an HTTPS connection to the OpenTelemetry collector, +# you need to configure the SSL certificate and key within the OpenTelemetry collector's +# ``config.yml`` file. +# +# Variable: AIRFLOW__METRICS__OTEL_SSL_ACTIVE +# +otel_ssl_active = False + +[traces] +# Distributed traces integration settings. + +# Enables sending traces to OpenTelemetry. +# +# Variable: AIRFLOW__TRACES__OTEL_ON +# +otel_on = False + +# Specifies the hostname or IP address of the OpenTelemetry Collector to which Airflow sends +# traces. +# +# Variable: AIRFLOW__TRACES__OTEL_HOST +# +otel_host = localhost + +# Specifies the port of the OpenTelemetry Collector that is listening to. +# +# Variable: AIRFLOW__TRACES__OTEL_PORT +# +otel_port = 8889 + +# The default service name of traces. +# +# Variable: AIRFLOW__TRACES__OTEL_SERVICE +# +otel_service = Airflow + +# If True, all traces are also emitted to the console. Defaults to False. +# +# Variable: AIRFLOW__TRACES__OTEL_DEBUGGING_ON +# +otel_debugging_on = False + +# If True, SSL will be enabled. Defaults to False. 
+# To establish an HTTPS connection to the OpenTelemetry collector, +# you need to configure the SSL certificate and key within the OpenTelemetry collector's +# config.yml file. +# +# Variable: AIRFLOW__TRACES__OTEL_SSL_ACTIVE +# +otel_ssl_active = False + +# If True, then traces from Airflow internal methods are exported. Defaults to False. +# +# Variable: AIRFLOW__TRACES__OTEL_DEBUG_TRACES_ON +# +otel_debug_traces_on = False + +[secrets] +# Full class name of secrets backend to enable (will precede env vars and metastore in search path) +# +# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend +# +# Variable: AIRFLOW__SECRETS__BACKEND +# +backend = + +# The backend_kwargs param is loaded into a dictionary and passed to ``__init__`` +# of secrets backend class. See documentation for the secrets backend you are using. +# JSON is expected. +# +# Example for AWS Systems Manager ParameterStore: +# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}`` +# +# Variable: AIRFLOW__SECRETS__BACKEND_KWARGS +# +backend_kwargs = + +# .. note:: |experimental| +# +# Enables local caching of Variables, when parsing DAGs only. +# Using this option can make dag parsing faster if Variables are used in top level code, at the expense +# of longer propagation time for changes. +# Please note that this cache concerns only the DAG parsing step. There is no caching in place when DAG +# tasks are run. +# +# Variable: AIRFLOW__SECRETS__USE_CACHE +# +use_cache = False + +# .. note:: |experimental| +# +# When the cache is enabled, this is the duration for which we consider an entry in the cache to be +# valid. Entries are refreshed if they are older than this many seconds. +# It means that when the cache is enabled, this is the maximum amount of time you need to wait to see a +# Variable change take effect. +# +# Variable: AIRFLOW__SECRETS__CACHE_TTL_SECONDS +# +cache_ttl_seconds = 900 + +[api] +# Sets a custom homepage heading and site title for all Airflow UI pages. +# If set, the Dashboard will display this value instead of the default +# "Welcome" message. +# +# Variable: AIRFLOW__API__INSTANCE_NAME +# +# instance_name = + +# Boolean for running SwaggerUI in the webserver. +# +# Variable: AIRFLOW__API__ENABLE_SWAGGER_UI +# +enable_swagger_ui = True + +# Secret key used to run your api server. It should be as random as possible. However, when running +# more than 1 instances of the api, make sure all of them use the same ``secret_key`` otherwise +# one of them will error with "CSRF session token is missing". +# The api key is also used to authorize requests to Celery workers when logs are retrieved. +# The token generated using the secret key has a short expiry time though - make sure that time on +# ALL the machines that you run airflow components on is synchronized (for example using ntpd) +# otherwise you might get "forbidden" errors when the logs are accessed. +# +# Variable: AIRFLOW__API__SECRET_KEY +# +secret_key = IPTIPKlON0TQzwLWnMtppA== + +# Expose the configuration file in the web server. Set to ``non-sensitive-only`` to show all values +# except those that have security implications. ``True`` shows all values. ``False`` hides the +# configuration completely. +# +# Variable: AIRFLOW__API__EXPOSE_CONFIG +# +expose_config = False + +# Expose stacktrace in the web server +# +# Variable: AIRFLOW__API__EXPOSE_STACKTRACE +# +expose_stacktrace = False + +# The base url of the API server. 
Airflow cannot guess what domain or CNAME you are using. +# If the Airflow console (the front-end) and the API server are on a different domain, this config +# should contain the API server endpoint. +# +# Example: base_url = https://my-airflow.company.com +# +# Variable: AIRFLOW__API__BASE_URL +# +# base_url = + +# The ip specified when starting the api server +# +# Variable: AIRFLOW__API__HOST +# +host = 0.0.0.0 + +# The port on which to run the api server +# +# Variable: AIRFLOW__API__PORT +# +port = 8080 + +# Number of workers to run on the API server. Should be roughly equal to the number of cpu cores +# available. If you need to scale the API server, strongly consider deploying multiple API servers +# instead of increasing the number of workers; See https://github.com/apache/airflow/issues/52270. +# +# Variable: AIRFLOW__API__WORKERS +# +workers = 1 + +# Number of seconds the API server waits before timing out on a worker +# +# Variable: AIRFLOW__API__WORKER_TIMEOUT +# +worker_timeout = 120 + +# Path to the logging configuration file for the uvicorn server. +# If not set, the default uvicorn logging configuration will be used. +# +# Example: log_config = path/to/logging_config.yaml +# +# Variable: AIRFLOW__API__LOG_CONFIG +# +# log_config = + +# Paths to the SSL certificate and key for the api server. When both are +# provided SSL will be enabled. This does not change the api server port. +# The same SSL certificate will also be loaded into the worker to enable +# it to be trusted when a self-signed certificate is used. +# +# Variable: AIRFLOW__API__SSL_CERT +# +ssl_cert = + +# Paths to the SSL certificate and key for the api server. When both are +# provided SSL will be enabled. This does not change the api server port. +# +# Variable: AIRFLOW__API__SSL_KEY +# +ssl_key = + +# Used to set the maximum page limit for API requests. If limit passed as param +# is greater than maximum page limit, it will be ignored and maximum page limit value +# will be set as the limit +# +# Variable: AIRFLOW__API__MAXIMUM_PAGE_LIMIT +# +maximum_page_limit = 100 + +# Used to set the default page limit when limit param is zero or not provided in API +# requests. Otherwise if positive integer is passed in the API requests as limit, the +# smallest number of user given limit or maximum page limit is taken as limit. +# +# Variable: AIRFLOW__API__FALLBACK_PAGE_LIMIT +# +fallback_page_limit = 50 + +# Used in response to a preflight request to indicate which HTTP +# headers can be used when making the actual request. This header is +# the server side response to the browser's +# Access-Control-Request-Headers header. +# +# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_HEADERS +# +access_control_allow_headers = + +# Specifies the method or methods allowed when accessing the resource. +# +# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_METHODS +# +access_control_allow_methods = + +# Indicates whether the response can be shared with requesting code from the given origins. +# Separate URLs with space. +# +# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_ORIGINS +# +access_control_allow_origins = + +# Sorting order in grid view. 
Valid values are: ``topological``, ``hierarchical_alphabetical`` +# +# Variable: AIRFLOW__API__GRID_VIEW_SORTING_ORDER +# +grid_view_sorting_order = topological + +# The amount of time (in secs) webserver will wait for initial handshake +# while fetching logs from other worker machine +# +# Variable: AIRFLOW__API__LOG_FETCH_TIMEOUT_SEC +# +log_fetch_timeout_sec = 5 + +# By default, the webserver shows paused DAGs. Flip this to hide paused +# DAGs by default +# +# Variable: AIRFLOW__API__HIDE_PAUSED_DAGS_BY_DEFAULT +# +hide_paused_dags_by_default = False + +# Consistent page size across all listing views in the UI +# +# Variable: AIRFLOW__API__PAGE_SIZE +# +page_size = 50 + +# Default setting for wrap toggle on DAG code and TI log views. +# +# Variable: AIRFLOW__API__DEFAULT_WRAP +# +default_wrap = False + +# How frequently, in seconds, the DAG data will auto-refresh in graph or grid view +# when auto-refresh is turned on +# +# Variable: AIRFLOW__API__AUTO_REFRESH_INTERVAL +# +auto_refresh_interval = 3 + +# Require confirmation when changing a DAG in the web UI. This is to prevent accidental changes +# to a DAG that may be running on sensitive environments like production. +# When set to ``True``, confirmation dialog will be shown when a user tries to Pause/Unpause, +# Trigger a DAG +# +# Variable: AIRFLOW__API__REQUIRE_CONFIRMATION_DAG_CHANGE +# +require_confirmation_dag_change = False + +[workers] +# Configuration related to workers that run Airflow tasks. + +# Full class name of secrets backend to enable for workers (will precede env vars backend) +# +# Example: secrets_backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend +# +# Variable: AIRFLOW__WORKERS__SECRETS_BACKEND +# +secrets_backend = + +# The secrets_backend_kwargs param is loaded into a dictionary and passed to ``__init__`` +# of secrets backend class. See documentation for the secrets backend you are using. +# JSON is expected. +# +# Example for AWS Systems Manager ParameterStore: +# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}`` +# +# Variable: AIRFLOW__WORKERS__SECRETS_BACKEND_KWARGS +# +secrets_backend_kwargs = + +# The minimum interval (in seconds) at which the worker checks the task instance's +# heartbeat status with the API server to confirm it is still alive. +# +# Variable: AIRFLOW__WORKERS__MIN_HEARTBEAT_INTERVAL +# +min_heartbeat_interval = 5 + +# The maximum number of consecutive failed heartbeats before terminating the task instance process. +# +# Variable: AIRFLOW__WORKERS__MAX_FAILED_HEARTBEATS +# +max_failed_heartbeats = 3 + +# The maximum number of retry attempts to the execution API server. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_RETRIES +# +execution_api_retries = 5 + +# The minimum amount of time (in seconds) to wait before retrying a failed API request. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_RETRY_WAIT_MIN +# +execution_api_retry_wait_min = 1.0 + +# The maximum amount of time (in seconds) to wait before retrying a failed API request. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_RETRY_WAIT_MAX +# +execution_api_retry_wait_max = 90.0 + +# The timeout (in seconds) for HTTP requests from workers to the Execution API server. +# This controls how long a worker will wait for a response from the API server before +# timing out. Increase this value if you experience timeout errors under high load. 
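+# For instance, a deployment that sees frequent timeouts might raise it to a higher value
+# (the number is only an illustration):
+#
+# Example: execution_api_timeout = 30.0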
+# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_TIMEOUT +# +execution_api_timeout = 5.0 + +# Number of seconds to wait after a task process exits before forcibly closing any +# remaining communication sockets. This helps prevent the task supervisor from hanging +# indefinitely due to missed EOF signals. +# +# Variable: AIRFLOW__WORKERS__SOCKET_CLEANUP_TIMEOUT +# +socket_cleanup_timeout = 60.0 + +[api_auth] +# Settings relating to authentication on the Airflow APIs + +# The audience claim to use when generating and validating JWTs for the API. +# +# This variable can be a single value, or a comma-separated string, in which case the first value is the +# one that will be used when generating, and the others are accepted at validation time. +# +# Not required, but strongly encouraged. +# +# See also :ref:`config:execution_api__jwt_audience` +# +# Example: jwt_audience = my-unique-airflow-id +# +# Variable: AIRFLOW__API_AUTH__JWT_AUDIENCE +# +# jwt_audience = + +# Number in seconds until the JWTs used for authentication expires. When the token expires, +# all API calls using this token will fail on authentication. +# +# Make sure that time on ALL the machines that you run airflow components on is synchronized +# (for example using ntpd) otherwise you might get "forbidden" errors. +# +# See also :ref:`config:execution_api__jwt_expiration_time` +# +# Variable: AIRFLOW__API_AUTH__JWT_EXPIRATION_TIME +# +jwt_expiration_time = 86400 + +# Number in seconds until the JWTs used for authentication expires for CLI commands. +# When the token expires, all CLI calls using this token will fail on authentication. +# +# Make sure that time on ALL the machines that you run airflow components on is synchronized +# (for example using ntpd) otherwise you might get "forbidden" errors. +# +# Variable: AIRFLOW__API_AUTH__JWT_CLI_EXPIRATION_TIME +# +jwt_cli_expiration_time = 3600 + +# Secret key used to encode and decode JWTs to authenticate to public and private APIs. +# +# It should be as random as possible. However, when running more than 1 instances of API services, +# make sure all of them use the same ``jwt_secret`` otherwise calls will fail on authentication. +# +# Mutually exclusive with ``jwt_private_key_path``. +# +# Variable: AIRFLOW__API_AUTH__JWT_SECRET +# +jwt_secret = 6+GNS8M7dlHn7iexrxXW1w== + +# The path to a file containing a PEM-encoded private key use when generating Task Identity tokens in +# the executor. +# +# Mutually exclusive with ``jwt_secret``. +# +# Example: jwt_private_key_path = /path/to/private_key.pem +# +# Variable: AIRFLOW__API_AUTH__JWT_PRIVATE_KEY_PATH +# +# jwt_private_key_path = + +# The algorithm name use when generating and validating JWT Task Identities. +# +# This value must be appropriate for the given private key type. +# +# If this is not specified Airflow makes some guesses as what algorithm is best based on the key type. +# +# ("HS512" if ``jwt_secret`` is set, otherwise a key-type specific guess) +# +# Example: jwt_algorithm = "EdDSA" or "HS512" +# +# Variable: AIRFLOW__API_AUTH__JWT_ALGORITHM +# +# jwt_algorithm = + +# The Key ID to place in header when generating JWTs. Not used in the validation path. +# +# If this is not specified the RFC7638 thumbprint of the private key will be used. +# +# Ignored when ``jwt_secret`` is used. +# +# Example: jwt_kid = my-key-id +# +# Variable: AIRFLOW__API_AUTH__JWT_KID +# +# jwt_kid = + +# The public signing keys of Task Execution token issuers to trust. 
It must contain the public key +# related to ``jwt_private_key_path`` else tasks will be unlikely to execute successfully. +# +# Can be a local file path (without the ``file://`` prefix) or an http or https URL. +# +# If a remote URL is given it will be polled periodically for changes. +# +# Mutually exclusive with ``jwt_secret``. +# +# If a ``jwt_private_key_path`` is given but this settings is not set then the private key will be +# trusted. If this is provided it is your responsibility to ensure that the private key used for +# generation is in this list. +# +# Example: trusted_jwks_url = "/path/to/public-jwks.json" or "https://my-issuer/.well-known/jwks.json" +# +# Variable: AIRFLOW__API_AUTH__TRUSTED_JWKS_URL +# +# trusted_jwks_url = + +# Issuer of the JWT. This becomes the ``iss`` claim of generated tokens, and is validated on incoming +# requests. +# +# Ideally this should be unique per individual airflow deployment +# +# Not required, but strongly recommended to be set. +# +# See also :ref:`config:api_auth__jwt_audience` +# +# Example: jwt_issuer = http://my-airflow.mycompany.com +# +# Variable: AIRFLOW__API_AUTH__JWT_ISSUER +# +# jwt_issuer = + +# Number of seconds leeway in validating expiry time of JWTs to account for clock skew between +# client and server +# +# Variable: AIRFLOW__API_AUTH__JWT_LEEWAY +# +jwt_leeway = 10 + +[execution_api] +# Settings related to the Execution API server. +# +# The ExecutionAPI also uses a lot of settings from the :ref:`config:api_auth` section. + +# Number in seconds until the JWT used for authentication expires. When the token expires, +# all API calls using this token will fail on authentication. +# +# Make sure that time on ALL the machines that you run airflow components on is synchronized +# (for example using ntpd) otherwise you might get "forbidden" errors. +# +# Variable: AIRFLOW__EXECUTION_API__JWT_EXPIRATION_TIME +# +jwt_expiration_time = 600 + +# The audience claim to use when generating and validating JWTs for the Execution API. +# +# This variable can be a single value, or a comma-separated string, in which case the first value is the +# one that will be used when generating, and the others are accepted at validation time. +# +# Not required, but strongly encouraged +# +# See also :ref:`config:api_auth__jwt_audience` +# +# Variable: AIRFLOW__EXECUTION_API__JWT_AUDIENCE +# +jwt_audience = urn:airflow.apache.org:task + +[lineage] +# what lineage backend to use +# +# Variable: AIRFLOW__LINEAGE__BACKEND +# +backend = + +[operators] +# The default owner assigned to each new operator, unless +# provided explicitly or passed via ``default_args`` +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_OWNER +# +default_owner = airflow + +# The default value of attribute "deferrable" in operators and sensors. 
+# +# Variable: AIRFLOW__OPERATORS__DEFAULT_DEFERRABLE +# +default_deferrable = false + +# Indicates the default number of CPU units allocated to each operator when no specific CPU request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_CPUS +# +default_cpus = 1 + +# Indicates the default number of RAM allocated to each operator when no specific RAM request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_RAM +# +default_ram = 512 + +# Indicates the default number of disk storage allocated to each operator when no specific disk request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_DISK +# +default_disk = 512 + +# Indicates the default number of GPUs allocated to each operator when no specific GPUs request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_GPUS +# +default_gpus = 0 + +# Default queue that tasks get assigned to and that worker listen on. +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_QUEUE +# +default_queue = default + +[email] +# Configuration email backend and whether to +# send email alerts on retry or failure + +# Email backend to use +# +# Variable: AIRFLOW__EMAIL__EMAIL_BACKEND +# +email_backend = airflow.utils.email.send_email_smtp + +# Email connection to use +# +# Variable: AIRFLOW__EMAIL__EMAIL_CONN_ID +# +email_conn_id = smtp_default + +# Whether email alerts should be sent when a task is retried +# +# Variable: AIRFLOW__EMAIL__DEFAULT_EMAIL_ON_RETRY +# +default_email_on_retry = True + +# Whether email alerts should be sent when a task failed +# +# Variable: AIRFLOW__EMAIL__DEFAULT_EMAIL_ON_FAILURE +# +default_email_on_failure = True + +# File that will be used as the template for Email subject (which will be rendered using Jinja2). +# If not set, Airflow uses a base template. +# +# Example: subject_template = /path/to/my_subject_template_file +# +# Variable: AIRFLOW__EMAIL__SUBJECT_TEMPLATE +# +# subject_template = + +# File that will be used as the template for Email content (which will be rendered using Jinja2). +# If not set, Airflow uses a base template. +# +# Example: html_content_template = /path/to/my_html_content_template_file +# +# Variable: AIRFLOW__EMAIL__HTML_CONTENT_TEMPLATE +# +# html_content_template = + +# Email address that will be used as sender address. +# It can either be raw email or the complete address in a format ``Sender Name `` +# +# Example: from_email = Airflow +# +# Variable: AIRFLOW__EMAIL__FROM_EMAIL +# +# from_email = + +# ssl context to use when using SMTP and IMAP SSL connections. By default, the context is "default" +# which sets it to ``ssl.create_default_context()`` which provides the right balance between +# compatibility and security, it however requires that certificates in your operating system are +# updated and that SMTP/IMAP servers of yours have valid certificates that have corresponding public +# keys installed on your machines. You can switch it to "none" if you want to disable checking +# of the certificates, but it is not recommended as it allows MITM (man-in-the-middle) attacks +# if your infrastructure is not sufficiently secured. It should only be set temporarily while you +# are fixing your certificate configuration. This can be typically done by upgrading to newer +# version of the operating system you run Airflow components on,by upgrading/refreshing proper +# certificates in the OS or by updating certificates for your mail servers. 
+# +# Example: ssl_context = default +# +# Variable: AIRFLOW__EMAIL__SSL_CONTEXT +# +ssl_context = default + +[smtp] +# If you want airflow to send emails on retries, failure, and you want to use +# the airflow.utils.email.send_email_smtp function, you have to configure an +# smtp server here + +# Specifies the host server address used by Airflow when sending out email notifications via SMTP. +# +# Variable: AIRFLOW__SMTP__SMTP_HOST +# +smtp_host = localhost + +# Determines whether to use the STARTTLS command when connecting to the SMTP server. +# +# Variable: AIRFLOW__SMTP__SMTP_STARTTLS +# +smtp_starttls = True + +# Determines whether to use an SSL connection when talking to the SMTP server. +# +# Variable: AIRFLOW__SMTP__SMTP_SSL +# +smtp_ssl = False + +# Defines the port number on which Airflow connects to the SMTP server to send email notifications. +# +# Variable: AIRFLOW__SMTP__SMTP_PORT +# +smtp_port = 25 + +# Specifies the default **from** email address used when Airflow sends email notifications. +# +# Variable: AIRFLOW__SMTP__SMTP_MAIL_FROM +# +smtp_mail_from = airflow@example.com + +# Determines the maximum time (in seconds) the Apache Airflow system will wait for a +# connection to the SMTP server to be established. +# +# Variable: AIRFLOW__SMTP__SMTP_TIMEOUT +# +smtp_timeout = 30 + +# Defines the maximum number of times Airflow will attempt to connect to the SMTP server. +# +# Variable: AIRFLOW__SMTP__SMTP_RETRY_LIMIT +# +smtp_retry_limit = 5 + +[sentry] +# `Sentry `__ integration. Here you can supply +# additional configuration options based on the Python platform. +# See `Python / Configuration / Basic Options +# `__ for more details. +# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, +# ``ignore_errors``, ``before_breadcrumb``, ``transport``. + +# Enable error reporting to Sentry +# +# Variable: AIRFLOW__SENTRY__SENTRY_ON +# +sentry_on = false + +# +# Variable: AIRFLOW__SENTRY__SENTRY_DSN +# +sentry_dsn = + +# Dotted path to a before_send function that the sentry SDK should be configured to use. +# +# Variable: AIRFLOW__SENTRY__BEFORE_SEND +# +# before_send = + +[scheduler] +# Task instances listen for external kill signal (when you clear tasks +# from the CLI or the UI), this defines the frequency at which they should +# listen (in seconds). +# +# Variable: AIRFLOW__SCHEDULER__JOB_HEARTBEAT_SEC +# +job_heartbeat_sec = 5 + +# The scheduler constantly tries to trigger new tasks (look at the +# scheduler section in the docs for more information). This defines +# how often the scheduler should run (in seconds). +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEARTBEAT_SEC +# +scheduler_heartbeat_sec = 5 + +# The frequency (in seconds) at which the LocalTaskJob should send heartbeat signals to the +# scheduler to notify it's still alive. If this value is set to 0, the heartbeat interval will default +# to the value of ``[scheduler] task_instance_heartbeat_timeout``. +# +# Variable: AIRFLOW__SCHEDULER__TASK_INSTANCE_HEARTBEAT_SEC +# +task_instance_heartbeat_sec = 0 + +# The number of times to try to schedule each DAG file +# -1 indicates unlimited number +# +# Variable: AIRFLOW__SCHEDULER__NUM_RUNS +# +num_runs = -1 + +# Controls how long the scheduler will sleep between loops, but if there was nothing to do +# in the loop. i.e. if it scheduled something then it will start the next loop +# iteration straight away. 
+# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_IDLE_SLEEP_TIME +# +scheduler_idle_sleep_time = 1 + +# How often (in seconds) to check for stale DAGs (DAGs which are no longer present in +# the expected files) which should be deactivated, as well as assets that are no longer +# referenced and should be marked as orphaned. +# +# Variable: AIRFLOW__SCHEDULER__PARSING_CLEANUP_INTERVAL +# +parsing_cleanup_interval = 60 + +# How often (in seconds) should pool usage stats be sent to StatsD (if statsd_on is enabled) +# +# Variable: AIRFLOW__SCHEDULER__POOL_METRICS_INTERVAL +# +pool_metrics_interval = 5.0 + +# How often (in seconds) should running task instance stats be sent to StatsD (if statsd_on is enabled) +# +# Variable: AIRFLOW__SCHEDULER__RUNNING_METRICS_INTERVAL +# +running_metrics_interval = 30.0 + +# If the last scheduler heartbeat happened more than ``[scheduler] scheduler_health_check_threshold`` +# ago (in seconds), scheduler is considered unhealthy. +# This is used by the health check in the **/health** endpoint and in ``airflow jobs check`` CLI +# for SchedulerJob. +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_THRESHOLD +# +scheduler_health_check_threshold = 30 + +# When you start a scheduler, airflow starts a tiny web server +# subprocess to serve a health check if this is set to ``True`` +# +# Variable: AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK +# +enable_health_check = False + +# When you start a scheduler, airflow starts a tiny web server +# subprocess to serve a health check on this host +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_SERVER_HOST +# +scheduler_health_check_server_host = 0.0.0.0 + +# When you start a scheduler, airflow starts a tiny web server +# subprocess to serve a health check on this port +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_SERVER_PORT +# +scheduler_health_check_server_port = 8974 + +# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs +# +# Variable: AIRFLOW__SCHEDULER__ORPHANED_TASKS_CHECK_INTERVAL +# +orphaned_tasks_check_interval = 300.0 + +# Local task jobs periodically heartbeat to the DB. If the job has +# not heartbeat in this many seconds, the scheduler will mark the +# associated task instance as failed and will re-schedule the task. +# +# Variable: AIRFLOW__SCHEDULER__TASK_INSTANCE_HEARTBEAT_TIMEOUT +# +task_instance_heartbeat_timeout = 300 + +# How often (in seconds) should the scheduler check for task instances whose heartbeats have timed out. +# +# Variable: AIRFLOW__SCHEDULER__TASK_INSTANCE_HEARTBEAT_TIMEOUT_DETECTION_INTERVAL +# +task_instance_heartbeat_timeout_detection_interval = 10.0 + +# Turn on scheduler catchup by setting this to ``True``. +# Default behavior is unchanged and +# Command Line Backfills still work, but the scheduler +# will not do scheduler catchup if this is ``False``, +# however it can be set on a per DAG basis in the +# DAG definition (catchup) +# +# Variable: AIRFLOW__SCHEDULER__CATCHUP_BY_DEFAULT +# +catchup_by_default = False + +# Setting this to ``True`` will make first task instance of a task +# ignore depends_on_past setting. A task instance will be considered +# as the first task instance of a task when there is no task instance +# in the DB with a logical_date earlier than it., i.e. no manual marking +# success will be needed for a newly added task to be scheduled. 
+# +# Variable: AIRFLOW__SCHEDULER__IGNORE_FIRST_DEPENDS_ON_PAST_BY_DEFAULT +# +ignore_first_depends_on_past_by_default = True + +# This determines the number of task instances to be evaluated for scheduling +# during each scheduler loop. +# Set this to 0 to use the value of ``[core] parallelism`` +# +# Variable: AIRFLOW__SCHEDULER__MAX_TIS_PER_QUERY +# +max_tis_per_query = 16 + +# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries. +# If this is set to ``False`` then you should not run more than a single +# scheduler at once +# +# Variable: AIRFLOW__SCHEDULER__USE_ROW_LEVEL_LOCKING +# +use_row_level_locking = True + +# Max number of DAGs to create DagRuns for per scheduler loop. +# +# Variable: AIRFLOW__SCHEDULER__MAX_DAGRUNS_TO_CREATE_PER_LOOP +# +max_dagruns_to_create_per_loop = 10 + +# How many DagRuns should a scheduler examine (and lock) when scheduling +# and queuing tasks. +# +# Variable: AIRFLOW__SCHEDULER__MAX_DAGRUNS_PER_LOOP_TO_SCHEDULE +# +max_dagruns_per_loop_to_schedule = 20 + +# Turn off scheduler use of cron intervals by setting this to ``False``. +# DAGs submitted manually in the web UI or with trigger_dag will still run. +# +# Variable: AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE +# +use_job_schedule = True + +# How often to check for expired trigger requests that have not run yet. +# +# Variable: AIRFLOW__SCHEDULER__TRIGGER_TIMEOUT_CHECK_INTERVAL +# +trigger_timeout_check_interval = 15 + +# Amount of time a task can be in the queued state before being retried or set to failed. +# +# Variable: AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT +# +task_queued_timeout = 600.0 + +# How often to check for tasks that have been in the queued state for +# longer than ``[scheduler] task_queued_timeout``. +# +# Variable: AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT_CHECK_INTERVAL +# +task_queued_timeout_check_interval = 120.0 + +# The run_id pattern used to verify the validity of user input to the run_id parameter when +# triggering a DAG. This pattern cannot change the pattern used by scheduler to generate run_id +# for scheduled DAG runs or DAG runs triggered without changing the run_id parameter. +# +# Variable: AIRFLOW__SCHEDULER__ALLOWED_RUN_ID_PATTERN +# +allowed_run_id_pattern = ^[A-Za-z0-9_.~:+-]+$ + +# Whether to create DAG runs that span an interval or one single point in time for cron schedules, when +# a cron string is provided to ``schedule`` argument of a DAG. +# +# * ``True``: **CronDataIntervalTimetable** is used, which is suitable +# for DAGs with well-defined data interval. You get contiguous intervals from the end of the previous +# interval up to the scheduled datetime. +# * ``False``: **CronTriggerTimetable** is used, which is closer to the behavior of cron itself. +# +# Notably, for **CronTriggerTimetable**, the logical date is the same as the time the DAG Run will +# try to schedule, while for **CronDataIntervalTimetable**, the logical date is the beginning of +# the data interval, but the DAG Run will try to schedule at the end of the data interval. +# +# Variable: AIRFLOW__SCHEDULER__CREATE_CRON_DATA_INTERVALS +# +create_cron_data_intervals = False + +# Whether to create DAG runs that span an interval or one single point in time when a timedelta or +# relativedelta is provided to ``schedule`` argument of a DAG. +# +# * ``True``: **DeltaDataIntervalTimetable** is used, which is suitable for DAGs with well-defined data +# interval. You get contiguous intervals from the end of the previous interval up to the scheduled +# datetime. 
+# * ``False``: **DeltaTriggerTimetable** is used, which is suitable for DAGs that simply want to say +# e.g. "run this every day" and do not care about the data interval. +# +# Notably, for **DeltaTriggerTimetable**, the logical date is the same as the time the DAG Run will +# try to schedule, while for **DeltaDataIntervalTimetable**, the logical date is the beginning of +# the data interval, but the DAG Run will try to schedule at the end of the data interval. +# +# Variable: AIRFLOW__SCHEDULER__CREATE_DELTA_DATA_INTERVALS +# +create_delta_data_intervals = False + +# Whether to enable memory allocation tracing in the scheduler. If enabled, Airflow will start +# tracing memory allocation and log the top 10 memory usages at the error level upon receiving the +# signal SIGUSR1. +# This is an expensive operation and generally should not be used except for debugging purposes. +# +# Variable: AIRFLOW__SCHEDULER__ENABLE_TRACEMALLOC +# +enable_tracemalloc = False + +[triggerer] +# How many triggers a single Triggerer will run at once, by default. +# +# Variable: AIRFLOW__TRIGGERER__CAPACITY +# +capacity = 1000 + +# How often to heartbeat the Triggerer job to ensure it hasn't been killed. +# +# Variable: AIRFLOW__TRIGGERER__JOB_HEARTBEAT_SEC +# +job_heartbeat_sec = 5 + +# If the last triggerer heartbeat happened more than ``[triggerer] triggerer_health_check_threshold`` +# ago (in seconds), triggerer is considered unhealthy. +# This is used by the health check in the **/health** endpoint and in ``airflow jobs check`` CLI +# for TriggererJob. +# +# Variable: AIRFLOW__TRIGGERER__TRIGGERER_HEALTH_CHECK_THRESHOLD +# +triggerer_health_check_threshold = 30 + +[kerberos] +# Location of your ccache file once kinit has been performed. +# +# Variable: AIRFLOW__KERBEROS__CCACHE +# +ccache = /tmp/airflow_krb5_ccache + +# gets augmented with fqdn +# +# Variable: AIRFLOW__KERBEROS__PRINCIPAL +# +principal = airflow + +# Determines the frequency at which initialization or re-initialization processes occur. +# +# Variable: AIRFLOW__KERBEROS__REINIT_FREQUENCY +# +reinit_frequency = 3600 + +# Path to the kinit executable +# +# Variable: AIRFLOW__KERBEROS__KINIT_PATH +# +kinit_path = kinit + +# Designates the path to the Kerberos keytab file for the Airflow user +# +# Variable: AIRFLOW__KERBEROS__KEYTAB +# +keytab = airflow.keytab + +# Allow to disable ticket forwardability. +# +# Variable: AIRFLOW__KERBEROS__FORWARDABLE +# +forwardable = True + +# Allow to remove source IP from token, useful when using token behind NATted Docker host. +# +# Variable: AIRFLOW__KERBEROS__INCLUDE_IP +# +include_ip = True + +[sensors] +# Sensor default timeout, 7 days by default (7 * 24 * 60 * 60). +# +# Variable: AIRFLOW__SENSORS__DEFAULT_TIMEOUT +# +default_timeout = 604800 + +[dag_processor] +# Configuration for the Airflow DAG processor. This includes, for example: +# - DAG bundles, which allows Airflow to load DAGs from different sources +# - Parsing configuration, like: +# - how often to refresh DAGs from those sources +# - how many files to parse concurrently + +# String path to folder where Airflow bundles can store files locally. Not templated. +# If no path is provided, Airflow will use ``Path(tempfile.gettempdir()) / "airflow"``. +# This path must be absolute. +# +# Example: dag_bundle_storage_path = /tmp/some-place +# +# Variable: AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_STORAGE_PATH +# +# dag_bundle_storage_path = + +# List of backend configs. Must supply name, classpath, and kwargs for each backend. 
+# +# By default, ``refresh_interval`` is set to ``[dag_processor] refresh_interval``, but that can +# also be overridden in kwargs if desired. +# +# The default is the dags folder dag bundle. +# +# Note: As shown below, you can split your json config over multiple lines by indenting. +# See configparser documentation for an example: +# https://docs.python.org/3/library/configparser.html#supported-ini-file-structure. +# +# Example: dag_bundle_config_list = [ +# { +# "name": "my-git-repo", +# "classpath": "airflow.providers.git.bundles.git.GitDagBundle", +# "kwargs": { +# "subdir": "dags", +# "tracking_ref": "main", +# "refresh_interval": 0 +# } +# } +# ] +# +# Variable: AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST +# +dag_bundle_config_list = [ + { + "name": "dags-folder", + "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle", + "kwargs": {} + } + ] + + +# How often (in seconds) to refresh, or look for new files, in a DAG bundle. +# +# Variable: AIRFLOW__DAG_PROCESSOR__REFRESH_INTERVAL +# +refresh_interval = 300 + +# The DAG processor can run multiple processes in parallel to parse dags. +# This defines how many processes will run. +# +# Variable: AIRFLOW__DAG_PROCESSOR__PARSING_PROCESSES +# +parsing_processes = 2 + +# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``. +# The DAG processor will list and sort the dag files to decide the parsing order. +# +# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the +# recently modified DAGs first. +# * ``random_seeded_by_host``: Sort randomly across multiple DAG processors but with same order on the +# same host, allowing each processor to parse the files in a different order. +# * ``alphabetical``: Sort by filename +# +# Variable: AIRFLOW__DAG_PROCESSOR__FILE_PARSING_SORT_MODE +# +file_parsing_sort_mode = modified_time + +# The maximum number of callbacks that are fetched during a single loop. +# +# Variable: AIRFLOW__DAG_PROCESSOR__MAX_CALLBACKS_PER_LOOP +# +max_callbacks_per_loop = 20 + +# Number of seconds after which a DAG file is parsed. The DAG file is parsed every +# ``[dag_processor] min_file_process_interval`` number of seconds. Updates to DAGs are reflected after +# this interval. Keeping this number low will increase CPU usage. +# +# Variable: AIRFLOW__DAG_PROCESSOR__MIN_FILE_PROCESS_INTERVAL +# +min_file_process_interval = 30 + +# How long (in seconds) to wait after we have re-parsed a DAG file before deactivating stale +# DAGs (DAGs which are no longer present in the expected files). The reason why we need +# this threshold is to account for the time between when the file is parsed and when the +# DAG is loaded. The absolute maximum that this could take is +# ``[dag_processor] dag_file_processor_timeout``, but when you have a long timeout configured, +# it results in a significant delay in the deactivation of stale dags. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_DAG_THRESHOLD +# +stale_dag_threshold = 50 + +# How long before timing out a DagFileProcessor, which processes a dag file +# +# Variable: AIRFLOW__DAG_PROCESSOR__DAG_FILE_PROCESSOR_TIMEOUT +# +dag_file_processor_timeout = 50 + +# How often should DAG processor stats be printed to the logs. Setting to 0 will disable printing stats +# +# Variable: AIRFLOW__DAG_PROCESSOR__PRINT_STATS_INTERVAL +# +print_stats_interval = 30 + +# Always run tasks with the latest code. If set to True, the bundle version will not +# be stored on the dag run and therefore, the latest code will always be used. 
+# +# Variable: AIRFLOW__DAG_PROCESSOR__DISABLE_BUNDLE_VERSIONING +# +disable_bundle_versioning = False + +# How often the DAG processor should check if any DAG bundles are ready for a refresh, either by hitting +# the bundles refresh_interval or because another DAG processor has seen a newer version of the bundle. +# A low value means we check more frequently, and have a smaller window of time where DAG processors are +# out of sync with each other, parsing different versions of the same bundle. +# +# Variable: AIRFLOW__DAG_PROCESSOR__BUNDLE_REFRESH_CHECK_INTERVAL +# +bundle_refresh_check_interval = 5 + +# On shared workers, bundle copies accumulate in local storage as tasks run +# and version of the bundle changes. +# This setting represents the delta in seconds between checks for these stale bundles. +# Bundles which are older than `stale_bundle_cleanup_age_threshold` may be removed. But +# we always keep `stale_bundle_cleanup_min_versions` versions locally. +# Set to 0 or negative to disable. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_BUNDLE_CLEANUP_INTERVAL +# +stale_bundle_cleanup_interval = 1800 + +# Bundle versions used more recently than this threshold will not be removed. +# Recency of use is determined by when the task began running on the worker, +# that age is compared with this setting, given as time delta in seconds. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_BUNDLE_CLEANUP_AGE_THRESHOLD +# +stale_bundle_cleanup_age_threshold = 21600 + +# Minimum number of local bundle versions to retain on disk. +# Local bundle versions older than `stale_bundle_cleanup_age_threshold` will +# only be deleted we have more than `stale_bundle_cleanup_min_versions` versions +# accumulated on the worker. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_BUNDLE_CLEANUP_MIN_VERSIONS +# +stale_bundle_cleanup_min_versions = 10 + +# The dag_processor reads dag files to extract the airflow modules that are going to be used, +# and imports them ahead of time to avoid having to re-do it for each parsing process. +# This flag can be set to ``False`` to disable this behavior in case an airflow module needs +# to be freshly imported each time (at the cost of increased DAG parsing time). +# +# Variable: AIRFLOW__DAG_PROCESSOR__PARSING_PRE_IMPORT_MODULES +# +parsing_pre_import_modules = True + +[aws] +# This section contains settings for Amazon Web Services (AWS) integration. + +# Full import path to the class which implements a custom session factory for +# ``boto3.session.Session``. For more details please have a look at +# :ref:`howto/connection:aws:session-factory`. +# +# Example: session_factory = my_company.aws.MyCustomSessionFactory +# +# Variable: AIRFLOW__AWS__SESSION_FACTORY +# +# session_factory = + +cloudwatch_task_handler_json_serializer = airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize_legacy + +[aws_batch_executor] +# This section only applies if you are using the AwsBatchExecutor in +# Airflow's ``[core]`` configuration. 
+# For more information on any of these execution parameters, see the link below: +# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.submit_job +# For boto3 credential management, see +# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html + +conn_id = aws_default +# region_name = +max_submit_job_attempts = 3 +check_health_on_startup = True +# job_name = +# job_queue = +# job_definition = +# submit_job_kwargs = + +[aws_lambda_executor] +# This section only applies if you are using the AwsLambdaExecutor in +# Airflow's ``[core.executor]`` configuration. +# For more information see: +# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda/client/invoke.html +# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html +# https://airflow.apache.org/docs/apache-airflow-providers-amazon/stable/executors/lambda-executor.html + +conn_id = aws_default +# region_name = +check_health_on_startup = True +max_run_task_attempts = 3 +# queue_url = +# dead_letter_queue_url = +# function_name = +# qualifier = +end_wait_timeout = 0 + +[aws_ecs_executor] +# This section only applies if you are using the AwsEcsExecutor in +# Airflow's ``[core]`` configuration. +# For more information on any of these execution parameters, see the link below: +# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs/client/run_task.html +# For boto3 credential management, see +# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html + +conn_id = aws_default +# region_name = +assign_public_ip = False +# cluster = +# capacity_provider_strategy = +# container_name = +# launch_type = +platform_version = LATEST +# security_groups = +# subnets = +# task_definition = +max_run_task_attempts = 3 +# run_task_kwargs = +check_health_on_startup = True + +[aws_auth_manager] +# This section only applies if you are using the AwsAuthManager. In other words, if you set +# ``[core] auth_manager = airflow.providers.amazon.aws.auth_manager.aws_auth_manager.AwsAuthManager`` in +# Airflow's configuration. + +conn_id = aws_default +# region_name = +# saml_metadata_url = +# avp_policy_store_id = + +[celery_kubernetes_executor] +# This section only applies if you are using the ``CeleryKubernetesExecutor`` in +# ``[core]`` section above + +# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. +# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), +# the task is executed via ``KubernetesExecutor``, +# otherwise via ``CeleryExecutor`` +# +# Variable: AIRFLOW__CELERY_KUBERNETES_EXECUTOR__KUBERNETES_QUEUE +# +kubernetes_queue = kubernetes + +[celery] +# This section only applies if you are using the CeleryExecutor in +# ``[core]`` section above + +# The app name that will be used by celery +# +# Variable: AIRFLOW__CELERY__CELERY_APP_NAME +# +celery_app_name = airflow.providers.celery.executors.celery_executor + +# The concurrency that will be used when starting workers with the +# ``airflow celery worker`` command. 
This defines the number of task instances that +# a worker will take, so size up your workers based on the resources on +# your worker box and the nature of your tasks +# +# Variable: AIRFLOW__CELERY__WORKER_CONCURRENCY +# +worker_concurrency = 16 + +# The maximum and minimum number of pool processes that will be used to dynamically resize +# the pool based on load.Enable autoscaling by providing max_concurrency,min_concurrency +# with the ``airflow celery worker`` command (always keep minimum processes, +# but grow to maximum if necessary). +# Pick these numbers based on resources on worker box and the nature of the task. +# If autoscale option is available, worker_concurrency will be ignored. +# https://docs.celeryq.dev/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale +# +# Example: worker_autoscale = 16,12 +# +# Variable: AIRFLOW__CELERY__WORKER_AUTOSCALE +# +# worker_autoscale = + +# Used to increase the number of tasks that a worker prefetches which can improve performance. +# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks +# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily +# blocked if there are multiple workers and one worker prefetches tasks that sit behind long +# running tasks while another worker has unutilized processes that are unable to process the already +# claimed blocked tasks. +# https://docs.celeryq.dev/en/stable/userguide/optimizing.html#prefetch-limits +# +# Variable: AIRFLOW__CELERY__WORKER_PREFETCH_MULTIPLIER +# +worker_prefetch_multiplier = 1 + +# Specify if remote control of the workers is enabled. +# In some cases when the broker does not support remote control, Celery creates lots of +# ``.*reply-celery-pidbox`` queues. You can prevent this by setting this to false. +# However, with this disabled Flower won't work. +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/index.html#broker-overview +# +# Variable: AIRFLOW__CELERY__WORKER_ENABLE_REMOTE_CONTROL +# +worker_enable_remote_control = true + +# The Celery broker URL. Celery supports multiple broker types. See: +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/index.html#broker-overview +# +# Variable: AIRFLOW__CELERY__BROKER_URL +# +broker_url = redis://redis:6379/0 + +# The Celery result_backend. When a job finishes, it needs to update the +# metadata of the job. Therefore it will post a message on a message bus, +# or insert it into a database (depending of the backend) +# This status is used by the scheduler to update the state of the task +# The use of a database is highly recommended +# When not specified, sql_alchemy_conn with a db+ scheme prefix will be used +# https://docs.celeryq.dev/en/latest/userguide/configuration.html#task-result-backend-settings +# +# Example: result_backend = db+postgresql://postgres:airflow@postgres/airflow +# +# Variable: AIRFLOW__CELERY__RESULT_BACKEND +# +# result_backend = + +# Optional configuration dictionary to pass to the Celery result backend SQLAlchemy engine. +# +# Example: result_backend_sqlalchemy_engine_options = {"pool_recycle": 1800} +# +# Variable: AIRFLOW__CELERY__RESULT_BACKEND_SQLALCHEMY_ENGINE_OPTIONS +# +result_backend_sqlalchemy_engine_options = + +# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start +# it ``airflow celery flower``. 
This defines the IP that Celery Flower runs on +# +# Variable: AIRFLOW__CELERY__FLOWER_HOST +# +flower_host = 0.0.0.0 + +# The root URL for Flower +# +# Example: flower_url_prefix = /flower +# +# Variable: AIRFLOW__CELERY__FLOWER_URL_PREFIX +# +flower_url_prefix = + +# This defines the port that Celery Flower runs on +# +# Variable: AIRFLOW__CELERY__FLOWER_PORT +# +flower_port = 5555 + +# Securing Flower with Basic Authentication +# Accepts user:password pairs separated by a comma +# +# Example: flower_basic_auth = user1:password1,user2:password2 +# +# Variable: AIRFLOW__CELERY__FLOWER_BASIC_AUTH +# +flower_basic_auth = + +# How many processes CeleryExecutor uses to sync task state. +# 0 means to use max(1, number of cores - 1) processes. +# +# Variable: AIRFLOW__CELERY__SYNC_PARALLELISM +# +sync_parallelism = 0 + +# Import path for celery configuration options +# +# Variable: AIRFLOW__CELERY__CELERY_CONFIG_OPTIONS +# +celery_config_options = airflow.providers.celery.executors.default_celery.DEFAULT_CELERY_CONFIG + +# +# Variable: AIRFLOW__CELERY__SSL_ACTIVE +# +ssl_active = False + +# Path to the client key. +# +# Variable: AIRFLOW__CELERY__SSL_KEY +# +ssl_key = + +# Path to the client certificate. +# +# Variable: AIRFLOW__CELERY__SSL_CERT +# +ssl_cert = + +# Path to the CA certificate. +# +# Variable: AIRFLOW__CELERY__SSL_CACERT +# +ssl_cacert = + +# Celery Pool implementation. +# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``. +# See: +# https://docs.celeryq.dev/en/latest/userguide/workers.html#concurrency +# https://docs.celeryq.dev/en/latest/userguide/concurrency/eventlet.html +# +# Variable: AIRFLOW__CELERY__POOL +# +pool = prefork + +# The number of seconds to wait before timing out ``send_task_to_executor`` or +# ``fetch_celery_task_state`` operations. +# +# Variable: AIRFLOW__CELERY__OPERATION_TIMEOUT +# +operation_timeout = 1.0 + +task_acks_late = True +# Celery task will report its status as 'started' when the task is executed by a worker. +# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted +# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob. +# +# Variable: AIRFLOW__CELERY__TASK_TRACK_STARTED +# +task_track_started = True + +# The Maximum number of retries for publishing task messages to the broker when failing +# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed. +# +# Variable: AIRFLOW__CELERY__TASK_PUBLISH_MAX_RETRIES +# +task_publish_max_retries = 3 + +# Extra celery configs to include in the celery worker. +# Any of the celery config can be added to this config and it +# will be applied while starting the celery worker. e.g. {"worker_max_tasks_per_child": 10} +# See also: +# https://docs.celeryq.dev/en/stable/userguide/configuration.html#configuration-and-defaults +# +# Variable: AIRFLOW__CELERY__EXTRA_CELERY_CONFIG +# +extra_celery_config = {} + +# The default umask to use for celery worker when run in daemon mode +# +# This controls the file-creation mode mask which determines the initial value of file permission bits +# for newly created files. +# +# This value is treated as an octal-integer. +# +# Variable: AIRFLOW__CELERY__WORKER_UMASK +# +# worker_umask = + +[celery_broker_transport_options] +# This section is for specifying options which can be passed to the +# underlying celery broker transport. 
See: +# https://docs.celeryq.dev/en/latest/userguide/configuration.html#std:setting-broker_transport_options + +# The visibility timeout defines the number of seconds to wait for the worker +# to acknowledge the task before the message is redelivered to another worker. +# Make sure to increase the visibility timeout to match the time of the longest +# ETA you're planning to use. +# visibility_timeout is only supported for Redis and SQS celery brokers. +# See: +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#visibility-timeout +# +# Example: visibility_timeout = 21600 +# +# Variable: AIRFLOW__CELERY_BROKER_TRANSPORT_OPTIONS__VISIBILITY_TIMEOUT +# +# visibility_timeout = + +# The sentinel_kwargs parameter allows passing additional options to the Sentinel client. +# In a typical scenario where Redis Sentinel is used as the broker and Redis servers are +# password-protected, the password needs to be passed through this parameter. Although its +# type is string, it is required to pass a string that conforms to the dictionary format. +# See: +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#configuration +# +# Example: sentinel_kwargs = {"password": "password_for_redis_server"} +# +# Variable: AIRFLOW__CELERY_BROKER_TRANSPORT_OPTIONS__SENTINEL_KWARGS +# +# sentinel_kwargs = + +[local_kubernetes_executor] +# This section only applies if you are using the ``LocalKubernetesExecutor`` in +# ``[core]`` section above + +# Define when to send a task to ``KubernetesExecutor`` when using ``LocalKubernetesExecutor``. +# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), +# the task is executed via ``KubernetesExecutor``, +# otherwise via ``LocalExecutor`` +# +# Variable: AIRFLOW__LOCAL_KUBERNETES_EXECUTOR__KUBERNETES_QUEUE +# +kubernetes_queue = kubernetes + +[kubernetes_executor] +# Kwargs to override the default urllib3 Retry used in the kubernetes API client +# +# Example: api_client_retry_configuration = { "total": 3, "backoff_factor": 0.5 } +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__API_CLIENT_RETRY_CONFIGURATION +# +api_client_retry_configuration = + +# Flag to control the information added to kubernetes executor logs for better traceability +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__LOGS_TASK_METADATA +# +logs_task_metadata = False + +# Path to the YAML pod file that forms the basis for KubernetesExecutor workers. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__POD_TEMPLATE_FILE +# +pod_template_file = + +# The repository of the Kubernetes Image for the Worker to Run +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_CONTAINER_REPOSITORY +# +worker_container_repository = + +# The tag of the Kubernetes Image for the Worker to Run +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_CONTAINER_TAG +# +worker_container_tag = + +# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__NAMESPACE +# +namespace = default + +# If True, all worker pods will be deleted upon termination +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_WORKER_PODS +# +delete_worker_pods = True + +# If False (and delete_worker_pods is True), +# failed worker pods will not be deleted so users can investigate them. +# This only prevents removal of worker pods where the worker itself failed, +# not when the task it ran failed. 
+# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_WORKER_PODS_ON_FAILURE +# +delete_worker_pods_on_failure = False + +worker_pod_pending_fatal_container_state_reasons = CreateContainerConfigError,ErrImagePull,CreateContainerError,ImageInspectError,InvalidImageName +# Number of Kubernetes Worker Pod creation calls per scheduler loop. +# Note that the current default of "1" will only launch a single pod +# per-heartbeat. It is HIGHLY recommended that users increase this +# number to match the tolerance of their kubernetes cluster for +# better performance. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_PODS_CREATION_BATCH_SIZE +# +worker_pods_creation_batch_size = 1 + +# Allows users to launch pods in multiple namespaces. +# Will require creating a cluster-role for the scheduler, +# or use multi_namespace_mode_namespace_list configuration. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__MULTI_NAMESPACE_MODE +# +multi_namespace_mode = False + +# If multi_namespace_mode is True while scheduler does not have a cluster-role, +# give the list of namespaces where the scheduler will schedule jobs +# Scheduler needs to have the necessary permissions in these namespaces. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__MULTI_NAMESPACE_MODE_NAMESPACE_LIST +# +multi_namespace_mode_namespace_list = + +# Use the service account kubernetes gives to pods to connect to kubernetes cluster. +# It's intended for clients that expect to be running inside a pod running on kubernetes. +# It will raise an exception if called from a process not running in a kubernetes environment. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__IN_CLUSTER +# +in_cluster = True + +# When running with in_cluster=False change the default cluster_context or config_file +# options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__CLUSTER_CONTEXT +# +# cluster_context = + +# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__CONFIG_FILE +# +# config_file = + +# Keyword parameters to pass while calling a kubernetes client core_v1_api methods +# from Kubernetes Executor provided as a single line formatted JSON dictionary string. +# List of supported params are similar for all core_v1_apis, hence a single config +# variable for all apis. See: +# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__KUBE_CLIENT_REQUEST_ARGS +# +kube_client_request_args = + +# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client +# ``core_v1_api`` method when using the Kubernetes Executor. +# This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` +# class defined here: +# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 +# +# Example: delete_option_kwargs = {"grace_period_seconds": 10} +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_OPTION_KWARGS +# +delete_option_kwargs = + +# Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely +# when idle connection is time-outed on services like cloud load balancers or firewalls. 
+# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__ENABLE_TCP_KEEPALIVE +# +enable_tcp_keepalive = True + +# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has +# been idle for `tcp_keep_idle` seconds. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_IDLE +# +tcp_keep_idle = 120 + +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_INTVL +# +tcp_keep_intvl = 30 + +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before +# a connection is considered to be broken. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_CNT +# +tcp_keep_cnt = 6 + +# Set this to false to skip verifying SSL certificate of Kubernetes python client. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__VERIFY_SSL +# +verify_ssl = True + +# Path to a CA certificate to be used by the Kubernetes client to verify the server's SSL certificate. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__SSL_CA_CERT +# +ssl_ca_cert = + +# The Maximum number of retries for queuing the task to the kubernetes scheduler when +# failing due to Kube API exceeded quota errors before giving up and marking task as failed. +# -1 for unlimited times. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TASK_PUBLISH_MAX_RETRIES +# +task_publish_max_retries = 0 + +[common.io] +# Common IO configuration section + +# Path to a location on object storage where XComs can be stored in url format. +# +# Example: xcom_objectstorage_path = s3://conn_id@bucket/path +# +# Variable: AIRFLOW__COMMON.IO__XCOM_OBJECTSTORAGE_PATH +# +xcom_objectstorage_path = + +# Threshold in bytes for storing XComs in object storage. -1 means always store in the +# database. 0 means always store in object storage. Any positive number means +# it will be stored in object storage if the size of the value is greater than the threshold. +# +# Example: xcom_objectstorage_threshold = 1000000 +# +# Variable: AIRFLOW__COMMON.IO__XCOM_OBJECTSTORAGE_THRESHOLD +# +xcom_objectstorage_threshold = -1 + +# Compression algorithm to use when storing XComs in object storage. Supported algorithms +# are a.o.: snappy, zip, gzip, bz2, and lzma. If not specified, no compression will be used. +# Note that the compression algorithm must be available in the Python installation (e.g. +# python-snappy for snappy). Zip, gz, bz2 are available by default. +# +# Example: xcom_objectstorage_compression = gz +# +# Variable: AIRFLOW__COMMON.IO__XCOM_OBJECTSTORAGE_COMPRESSION +# +xcom_objectstorage_compression = + +[elasticsearch] +# Elasticsearch host +# +# Variable: AIRFLOW__ELASTICSEARCH__HOST +# +host = + +# Format of the log_id, which is used to query for a given tasks logs +# +# Variable: AIRFLOW__ELASTICSEARCH__LOG_ID_TEMPLATE +# +log_id_template = {dag_id}-{task_id}-{run_id}-{map_index}-{try_number} + +# Used to mark the end of a log stream for a task +# +# Variable: AIRFLOW__ELASTICSEARCH__END_OF_LOG_MARK +# +end_of_log_mark = end_of_log + +# Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id +# Code will construct log_id using the log_id template from the argument above. 
+# NOTE: scheme will default to https if one is not provided +# +# Example: frontend = http://localhost:5601/app/kibana#/discover?_a=(columns:!(message),query:(language:kuery,query:'log_id: "{log_id}"'),sort:!(log.offset,asc)) +# +# Variable: AIRFLOW__ELASTICSEARCH__FRONTEND +# +frontend = + +# Write the task logs to the stdout of the worker, rather than the default files +# +# Variable: AIRFLOW__ELASTICSEARCH__WRITE_STDOUT +# +write_stdout = False + +write_to_es = False +target_index = airflow-logs +# Instead of the default log formatter, write the log lines as JSON +# +# Variable: AIRFLOW__ELASTICSEARCH__JSON_FORMAT +# +json_format = False + +# Log fields to also attach to the json output, if enabled +# +# Variable: AIRFLOW__ELASTICSEARCH__JSON_FIELDS +# +json_fields = asctime, filename, lineno, levelname, message + +# The field where host name is stored (normally either `host` or `host.name`) +# +# Variable: AIRFLOW__ELASTICSEARCH__HOST_FIELD +# +host_field = host + +# The field where offset is stored (normally either `offset` or `log.offset`) +# +# Variable: AIRFLOW__ELASTICSEARCH__OFFSET_FIELD +# +offset_field = offset + +# Comma separated list of index patterns to use when searching for logs (default: `_all`). +# The index_patterns_callable takes precedence over this. +# +# Example: index_patterns = something-* +# +# Variable: AIRFLOW__ELASTICSEARCH__INDEX_PATTERNS +# +index_patterns = _all + +index_patterns_callable = + +[elasticsearch_configs] +# +# Variable: AIRFLOW__ELASTICSEARCH_CONFIGS__HTTP_COMPRESS +# +http_compress = False + +# +# Variable: AIRFLOW__ELASTICSEARCH_CONFIGS__VERIFY_CERTS +# +verify_certs = True + +[fab] +# This section contains configs specific to FAB provider. + +# Cookie with the secure attribute is only sent to the server with an HTTPS connection. +# +# Variable: AIRFLOW__FAB__COOKIE_SECURE +# +cookie_secure = False + +# Whether the cookie is restricted to a first-party or same-site context. +# +# Variable: AIRFLOW__FAB__COOKIE_SAMESITE +# +cookie_samesite = Lax + +# Define the color of navigation bar +# +# Variable: AIRFLOW__FAB__NAVBAR_COLOR +# +navbar_color = #fff + +# Define the color of text in the navigation bar +# +# Variable: AIRFLOW__FAB__NAVBAR_TEXT_COLOR +# +navbar_text_color = #51504f + +# Define the color of navigation bar links when hovered +# +# Variable: AIRFLOW__FAB__NAVBAR_HOVER_COLOR +# +navbar_hover_color = #eee + +# Define the color of text in the navigation bar when hovered +# +# Variable: AIRFLOW__FAB__NAVBAR_TEXT_HOVER_COLOR +# +navbar_text_hover_color = #51504f + +# The message displayed when a user attempts to execute actions beyond their authorised privileges. +# +# Variable: AIRFLOW__FAB__ACCESS_DENIED_MESSAGE +# +access_denied_message = Access is Denied + +# Expose hostname in the web server +# +# Variable: AIRFLOW__FAB__EXPOSE_HOSTNAME +# +expose_hostname = False + +# Boolean for enabling rate limiting on authentication endpoints. +# +# Variable: AIRFLOW__FAB__AUTH_RATE_LIMITED +# +auth_rate_limited = True + +# Rate limit for authentication endpoints. +# +# Variable: AIRFLOW__FAB__AUTH_RATE_LIMIT +# +auth_rate_limit = 5 per 40 second + +# Update FAB permissions and sync security manager roles +# on webserver startup +# +# Variable: AIRFLOW__FAB__UPDATE_FAB_PERMS +# +update_fab_perms = True + +# Comma separated list of auth backends to authenticate users of the API. 
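+# Backends are tried in order and a request is accepted as soon as one of them
+# authenticates it, so several backends can be combined in a comma separated list.
+# A hedged illustration only (the basic_auth module path below is assumed, not
+# verified against the FAB provider version installed in this image):
+#
+# Example: auth_backends = airflow.providers.fab.auth_manager.api.auth.backend.session,airflow.providers.fab.auth_manager.api.auth.backend.basic_auth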
+# +# Variable: AIRFLOW__FAB__AUTH_BACKENDS +# +auth_backends = airflow.providers.fab.auth_manager.api.auth.backend.session + +# Path of webserver config file used for configuring the webserver parameters +# +# Variable: AIRFLOW__FAB__CONFIG_FILE +# +config_file = /opt/airflow/webserver_config.py + +# The type of backend used to store web session data, can be ``database`` or ``securecookie``. For the +# ``database`` backend, sessions are store in the database and they can be +# managed there (for example when you reset password of the user, all sessions for that user are +# deleted). For the ``securecookie`` backend, sessions are stored in encrypted cookies on the client +# side. The ``securecookie`` mechanism is 'lighter' than database backend, but sessions are not +# deleted when you reset password of the user, which means that other than waiting for expiry time, +# the only way to invalidate all sessions for a user is to change secret_key and restart webserver +# (which also invalidates and logs out all other user's sessions). +# +# When you are using ``database`` backend, make sure to keep your database session table small +# by periodically running ``airflow db clean --table session`` command, especially if you have +# automated API calls that will create a new session for each call rather than reuse the sessions +# stored in browser cookies. +# +# Example: session_backend = securecookie +# +# Variable: AIRFLOW__FAB__SESSION_BACKEND +# +session_backend = database + +# The UI cookie lifetime in minutes. User will be logged out from UI after +# ``[fab] session_lifetime_minutes`` of non-activity +# +# Variable: AIRFLOW__FAB__SESSION_LIFETIME_MINUTES +# +session_lifetime_minutes = 43200 + +# Enable werkzeug ``ProxyFix`` middleware for reverse proxy +# +# Variable: AIRFLOW__FAB__ENABLE_PROXY_FIX +# +enable_proxy_fix = False + +# Number of values to trust for ``X-Forwarded-For``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_FOR +# +proxy_fix_x_for = 1 + +# Number of values to trust for ``X-Forwarded-Proto``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_PROTO +# +proxy_fix_x_proto = 1 + +# Number of values to trust for ``X-Forwarded-Host``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_HOST +# +proxy_fix_x_host = 1 + +# Number of values to trust for ``X-Forwarded-Port``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_PORT +# +proxy_fix_x_port = 1 + +# Number of values to trust for ``X-Forwarded-Prefix``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_PREFIX +# +proxy_fix_x_prefix = 1 + +[azure_remote_logging] +# Configuration that needs to be set for enable remote logging in Azure Blob Storage + +remote_wasb_log_container = airflow-logs + +[openlineage] +# This section applies settings for OpenLineage integration. +# More about configuration and its precedence can be found in the `user's guide +# `_. + +# Disable sending events without uninstalling the OpenLineage Provider by setting this to true. +# +# Variable: AIRFLOW__OPENLINEAGE__DISABLED +# +disabled = False + +# Exclude some Operators from emitting OpenLineage events by passing a string of semicolon separated +# full import paths of Operators to disable. 
+# +# Example: disabled_for_operators = airflow.providers.standard.operators.bash.BashOperator; airflow.providers.standard.operators.python.PythonOperator +# +# Variable: AIRFLOW__OPENLINEAGE__DISABLED_FOR_OPERATORS +# +disabled_for_operators = + +# If this setting is enabled, OpenLineage integration won't collect and emit metadata, +# unless you explicitly enable it per `DAG` or `Task` using `enable_lineage` method. +# +# Variable: AIRFLOW__OPENLINEAGE__SELECTIVE_ENABLE +# +selective_enable = False + +# Set namespace that the lineage data belongs to, so that if you use multiple OpenLineage producers, +# events coming from them will be logically separated. +# +# Example: namespace = my_airflow_instance_1 +# +# Variable: AIRFLOW__OPENLINEAGE__NAMESPACE +# +# namespace = + +# Register custom OpenLineage Extractors by passing a string of semicolon separated full import paths. +# +# Example: extractors = full.path.to.ExtractorClass;full.path.to.AnotherExtractorClass +# +# Variable: AIRFLOW__OPENLINEAGE__EXTRACTORS +# +# extractors = + +# Register custom run facet functions by passing a string of semicolon separated full import paths. +# +# Example: custom_run_facets = full.path.to.custom_facet_function;full.path.to.another_custom_facet_function +# +# Variable: AIRFLOW__OPENLINEAGE__CUSTOM_RUN_FACETS +# +custom_run_facets = + +# Specify the path to the YAML configuration file. +# This ensures backwards compatibility with passing config through the `openlineage.yml` file. +# +# Example: config_path = full/path/to/openlineage.yml +# +# Variable: AIRFLOW__OPENLINEAGE__CONFIG_PATH +# +config_path = + +# Pass OpenLineage Client transport configuration as a JSON string, including the transport type +# and any additional options specific to that type, as described in `OpenLineage docs +# `_. +# +# Currently supported types are: +# +# * HTTP +# * Kafka +# * Console +# * File +# * Composite +# * Custom +# +# Example: transport = {"type": "http", "url": "http://localhost:5000", "endpoint": "api/v1/lineage"} +# +# Variable: AIRFLOW__OPENLINEAGE__TRANSPORT +# +transport = + +# Disable the inclusion of source code in OpenLineage events by setting this to `true`. +# By default, several Operators (e.g. Python, Bash) will include their source code in the events +# unless disabled. +# +# Variable: AIRFLOW__OPENLINEAGE__DISABLE_SOURCE_CODE +# +disable_source_code = False + +# Number of processes to utilize for processing DAG state changes +# in an asynchronous manner within the scheduler process. +# +# Variable: AIRFLOW__OPENLINEAGE__DAG_STATE_CHANGE_PROCESS_POOL_SIZE +# +dag_state_change_process_pool_size = 1 + +# Maximum amount of time (in seconds) that OpenLineage can spend executing metadata extraction. +# Note that other configurations, sometimes with higher priority, such as +# `[core] task_success_overtime +# `_, +# may also affect how much time OpenLineage has for execution. +# +# Variable: AIRFLOW__OPENLINEAGE__EXECUTION_TIMEOUT +# +execution_timeout = 10 + +# If true, OpenLineage event will include full task info - potentially containing large fields. +# +# Variable: AIRFLOW__OPENLINEAGE__INCLUDE_FULL_TASK_INFO +# +include_full_task_info = False + +# If true, OpenLineage events will include information useful for debugging - potentially +# containing large fields e.g. all installed packages and their versions. 
+# +# Variable: AIRFLOW__OPENLINEAGE__DEBUG_MODE +# +debug_mode = False + +# Automatically inject OpenLineage's parent job (namespace, job name, run id) information into Spark +# application properties for supported Operators. +# +# Variable: AIRFLOW__OPENLINEAGE__SPARK_INJECT_PARENT_JOB_INFO +# +spark_inject_parent_job_info = False + +# Automatically inject OpenLineage's transport information into Spark application properties +# for supported Operators. +# +# Variable: AIRFLOW__OPENLINEAGE__SPARK_INJECT_TRANSPORT_INFO +# +spark_inject_transport_info = False + +[postgres] +# Configuration for Postgres hooks and operators. + +azure_oauth_scope = https://ossrdbms-aad.database.windows.net/.default + +[snowflake] +# Configuration for Snowflake hooks and operators. + +azure_oauth_scope = api://snowflake_oauth_server/.default + +[standard] +# Options for the standard provider operators. + +# Which python tooling should be used to install the virtual environment. +# +# The following options are available: +# - ``auto``: Automatically select, use ``uv`` if available, otherwise use ``pip``. +# - ``pip``: Use pip to install the virtual environment. +# - ``uv``: Use uv to install the virtual environment. Must be available in environment PATH. +# +# Example: venv_install_method = uv +# +# Variable: AIRFLOW__STANDARD__VENV_INSTALL_METHOD +# +venv_install_method = auto + diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..ecb1102 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,337 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Basic Airflow cluster configuration for CeleryExecutor with Redis and PostgreSQL. +# +# WARNING: This configuration is for local development. Do not use it in a production deployment. +# +# This configuration supports basic configuration using environment variables or an .env file +# The following variables are supported: +# +# AIRFLOW_IMAGE_NAME - Docker image name used to run Airflow. +# Default: apache/airflow:3.1.2 +# AIRFLOW_UID - User ID in Airflow containers +# Default: 50000 +# AIRFLOW_PROJ_DIR - Base path to which all the files will be volumed. +# Default: . +# Those configurations are useful mostly in case of standalone testing/running Airflow in test/try-out mode +# +# _AIRFLOW_WWW_USER_USERNAME - Username for the administrator account (if requested). +# Default: airflow +# _AIRFLOW_WWW_USER_PASSWORD - Password for the administrator account (if requested). +# Default: airflow +# _PIP_ADDITIONAL_REQUIREMENTS - Additional PIP requirements to add when starting all containers. +# Use this option ONLY for quick checks. Installing requirements at container +# startup is done EVERY TIME the service is started. 
+# A better way is to build a custom image or extend the official image +# as described in https://airflow.apache.org/docs/docker-stack/build.html. +# Default: '' +# +# Feel free to modify this file to suit your needs. +--- +x-airflow-common: + &airflow-common + # In order to add custom dependencies or upgrade provider distributions you can use your extended image. + # Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml + # and uncomment the "build" line below, then run `docker-compose build` to build the images. + image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:3.1.2} + # build: . + env_file: + - ${ENV_FILE_PATH:-.env} + environment: + &airflow-common-env + AIRFLOW__CORE__EXECUTOR: CeleryExecutor + AIRFLOW__CORE__AUTH_MANAGER: airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager + AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow + AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow@postgres/airflow + AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0 + AIRFLOW__CORE__FERNET_KEY: '' + AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true' + AIRFLOW__CORE__LOAD_EXAMPLES: 'true' + AIRFLOW__CORE__EXECUTION_API_SERVER_URL: 'http://airflow-apiserver:8080/execution/' + # yamllint disable rule:line-length + # Use simple http server on scheduler for health checks + # See https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/check-health.html#scheduler-health-check-server + # yamllint enable rule:line-length + AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: 'true' + # WARNING: Use _PIP_ADDITIONAL_REQUIREMENTS option ONLY for quick checks + # for other purposes (development, test and especially production usage) build/extend Airflow image.
+ _PIP_ADDITIONAL_REQUIREMENTS: ${_PIP_ADDITIONAL_REQUIREMENTS:-} + # The following line can be used to set a custom config file, stored in the local config folder + AIRFLOW_CONFIG: '/opt/airflow/config/airflow.cfg' + volumes: + - ${AIRFLOW_PROJ_DIR:-.}/dags:/opt/airflow/dags + - ${AIRFLOW_PROJ_DIR:-.}/logs:/opt/airflow/logs + - ${AIRFLOW_PROJ_DIR:-.}/config:/opt/airflow/config + - ${AIRFLOW_PROJ_DIR:-.}/plugins:/opt/airflow/plugins + user: "${AIRFLOW_UID:-50000}:0" + depends_on: + &airflow-common-depends-on + redis: + condition: service_healthy + postgres: + condition: service_healthy + +services: + postgres: + image: postgres:16 + environment: + POSTGRES_USER: airflow + POSTGRES_PASSWORD: airflow + POSTGRES_DB: airflow + volumes: + - postgres-db-volume:/var/lib/postgresql/data + healthcheck: + test: ["CMD", "pg_isready", "-U", "airflow"] + interval: 10s + retries: 5 + start_period: 5s + restart: always + + redis: + # Redis is limited to 7.2-bookworm due to licencing change + # https://redis.io/blog/redis-adopts-dual-source-available-licensing/ + image: redis:7.2-bookworm + expose: + - 6379 + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 30s + retries: 50 + start_period: 30s + restart: always + + airflow-apiserver: + <<: *airflow-common + command: api-server + ports: + - "8080:8080" + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost:8080/api/v2/version"] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-scheduler: + <<: *airflow-common + command: scheduler + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost:8974/health"] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-dag-processor: + <<: *airflow-common + command: dag-processor + healthcheck: + test: ["CMD-SHELL", 'airflow jobs check --job-type DagProcessorJob --hostname "$${HOSTNAME}"'] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-worker: + <<: *airflow-common + command: celery worker + healthcheck: + # yamllint disable rule:line-length + test: + - "CMD-SHELL" + - 'celery --app airflow.providers.celery.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}" || celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"' + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + environment: + <<: *airflow-common-env + # Required to handle warm shutdown of the celery workers properly + # See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation + DUMB_INIT_SETSID: "0" + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-apiserver: + condition: service_healthy + airflow-init: + condition: service_completed_successfully + + airflow-triggerer: + <<: *airflow-common + command: triggerer + healthcheck: + test: ["CMD-SHELL", 'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"'] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-init: + <<: *airflow-common + entrypoint: 
/bin/bash + # yamllint disable rule:line-length + command: + - -c + - | + if [[ -z "${AIRFLOW_UID}" ]]; then + echo + echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m" + echo "If you are on Linux, you SHOULD follow the instructions below to set " + echo "AIRFLOW_UID environment variable, otherwise files will be owned by root." + echo "For other operating systems you can get rid of the warning with manually created .env file:" + echo " See: https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#setting-the-right-airflow-user" + echo + export AIRFLOW_UID=$$(id -u) + fi + one_meg=1048576 + mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg)) + cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat) + disk_available=$$(df / | tail -1 | awk '{print $$4}') + warning_resources="false" + if (( mem_available < 4000 )) ; then + echo + echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m" + echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))" + echo + warning_resources="true" + fi + if (( cpus_available < 2 )); then + echo + echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m" + echo "At least 2 CPUs recommended. You have $${cpus_available}" + echo + warning_resources="true" + fi + if (( disk_available < one_meg * 10 )); then + echo + echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m" + echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))" + echo + warning_resources="true" + fi + if [[ $${warning_resources} == "true" ]]; then + echo + echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m" + echo "Please follow the instructions to increase amount of resources available:" + echo " https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#before-you-begin" + echo + fi + echo + echo "Creating missing opt dirs if missing:" + echo + mkdir -v -p /opt/airflow/{logs,dags,plugins,config} + echo + echo "Airflow version:" + /entrypoint airflow version + echo + echo "Files in shared volumes:" + echo + ls -la /opt/airflow/{logs,dags,plugins,config} + echo + echo "Running airflow config list to create default config file if missing." + echo + /entrypoint airflow config list >/dev/null + echo + echo "Files in shared volumes:" + echo + ls -la /opt/airflow/{logs,dags,plugins,config} + echo + echo "Change ownership of files in /opt/airflow to ${AIRFLOW_UID}:0" + echo + chown -R "${AIRFLOW_UID}:0" /opt/airflow/ + echo + echo "Change ownership of files in shared volumes to ${AIRFLOW_UID}:0" + echo + chown -v -R "${AIRFLOW_UID}:0" /opt/airflow/{logs,dags,plugins,config} + echo + echo "Files in shared volumes:" + echo + ls -la /opt/airflow/{logs,dags,plugins,config} + + # yamllint enable rule:line-length + environment: + <<: *airflow-common-env + _AIRFLOW_DB_MIGRATE: 'true' + _AIRFLOW_WWW_USER_CREATE: 'true' + _AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow} + _AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow} + _PIP_ADDITIONAL_REQUIREMENTS: '' + user: "0:0" + + airflow-cli: + <<: *airflow-common + profiles: + - debug + environment: + <<: *airflow-common-env + CONNECTION_CHECK_MAX_COUNT: "0" + # Workaround for entrypoint issue. 
See: https://github.com/apache/airflow/issues/16252 + command: + - bash + - -c + - airflow + depends_on: + <<: *airflow-common-depends-on + + # You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up + # or by explicitly targeting it on the command line e.g. docker-compose up flower. + # See: https://docs.docker.com/compose/profiles/ + flower: + <<: *airflow-common + command: celery flower + profiles: + - flower + ports: + - "5555:5555" + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost:5555/"] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + +volumes: + postgres-db-volume:
diff --git a/docs/airflow.md b/docs/airflow.md
new file mode 100644
index 0000000..9c70896
--- /dev/null
+++ b/docs/airflow.md
@@ -0,0 +1,68 @@
+# Apache Airflow
+
+The best-known platform for managing and automating workflows such as data processing and batch (ETL/ELT) jobs.
+
+* **What it can do / strengths**
+  * Strong for large-scale production ETL
+  * Workflows are written as code (Python) in the form of DAGs
+  * The scheduler runs them on a regular schedule
+  * Solid retry and alerting behaviour on failure
+  * Powerful web UI
+  * Rich plugin ecosystem and high extensibility
+
+## Writing workflows
+
+Airflow's defining feature is that the flow of processing is expressed as code using a DAG (directed acyclic graph).
+
+A DAG (Directed Acyclic Graph) is a set of tasks with an ordering and dependencies.
+
+```python
+from airflow.sdk import DAG  # in Airflow 3 the DAG class is provided by the Task SDK
+
+with DAG("my_etl_job") as dag:
+    extract >> transform >> load  # extract / transform / load are tasks defined elsewhere
+```
+
+* Directed → the flow has a direction (A → B → C)
+* Acyclic → no cycles (the flow must never return to an earlier task)
+* Graph → represented as nodes (tasks) and edges (dependencies)
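+
+For a more concrete picture, here is a minimal sketch of what `extract >> transform >> load` could look like as a complete DAG file (for example `dags/my_etl_job.py`; the file name, schedule and task bodies are illustrative placeholders, not code that exists in this repository), using the TaskFlow API of Airflow 3:
+
+```python
+from airflow.sdk import DAG, task  # Airflow 3 Task SDK
+
+with DAG("my_etl_job", schedule=None) as dag:
+
+    @task
+    def extract() -> list[dict]:
+        # placeholder: pull rows from a source system
+        return [{"id": 1, "amount": 100}]
+
+    @task
+    def transform(rows: list[dict]) -> list[dict]:
+        # placeholder: clean / reshape the extracted rows
+        return [r for r in rows if r["amount"] > 0]
+
+    @task
+    def load(rows: list[dict]) -> None:
+        # placeholder: write the rows to the warehouse
+        print(f"loaded {len(rows)} rows")
+
+    # calling the decorated functions wires up the extract -> transform -> load dependencies
+    load(transform(extract()))
+```
+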
+```mermaid
+flowchart LR
+    source1[("Data source 1<br/>MySQL")]
+    source2[("Data source 2<br/>API")]
+    source3[("Data source 3<br/>CSV")]
+
+    extract["Extract<br/>(data extraction)"]
+    transform["Transform<br/>(data transformation)"]
+    load["Load<br/>(data loading)"]
+
+    dwh[("Data<br/>warehouse")]
+
+    source1 --> extract
+    source2 --> extract
+    source3 --> extract
+    extract --> transform
+    transform --> load
+    load --> dwh
+```
+
+### Task
+
+An individual unit of work executed inside a DAG.
+It is responsible for things such as data processing or running a script.
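+
+As an illustration (the operator, command and retry values here are arbitrary examples, not part of this repository), a task is usually an operator instance, and retry behaviour can be configured per task:
+
+```python
+from datetime import timedelta
+
+from airflow.providers.standard.operators.bash import BashOperator
+from airflow.sdk import DAG
+
+with DAG("retry_example") as dag:
+    # one task = one unit of work; retried up to 3 times if the command fails
+    refresh_staging = BashOperator(
+        task_id="refresh_staging",
+        bash_command="echo 'refreshing the staging area'",
+        retries=3,
+        retry_delay=timedelta(minutes=5),
+    )
+```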
+
+---
+
+## Develop
+
+### Prepare the environment
+
+
+## Link
+
+* https://airflow.apache.org/docs/apache-airflow/stable/howto/variable.html
+* https://zenn.dev/iwatagumi/articles/c8c61771ae49fc
\ No newline at end of file
diff --git a/docs/images/app01.png b/docs/images/app01.png
new file mode 100644
index 0000000..ecc1c54
Binary files /dev/null and b/docs/images/app01.png differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..86e0c49
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+requests
+python-dotenv
+apache-airflow==3.1.2
+pandas==2.3.2
+duckdb==1.3.2
+google-cloud-storage
\ No newline at end of file