From d5027a5e025efa304ab82c18b7f178860328fcd2 Mon Sep 17 00:00:00 2001
From: "ry.yamafuji"
Date: Thu, 13 Nov 2025 22:21:35 +0900
Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E6=9C=9F=E6=A7=8B=E7=AF=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .env.sample | 2 +
 .gitignore | 2 +
 README.md | 43 +-
 config/airflow.cfg | 3225 +++++++++++++++++++++++++++++++++++++++++
 docker-compose.yaml | 337 +++++
 docs/airflow.md | 68 +
 docs/images/app01.png | Bin 0 -> 53010 bytes
 requirements.txt | 6 +
 8 files changed, 3682 insertions(+), 1 deletion(-)
 create mode 100644 .env.sample
 create mode 100644 config/airflow.cfg
 create mode 100644 docker-compose.yaml
 create mode 100644 docs/airflow.md
 create mode 100644 docs/images/app01.png
 create mode 100644 requirements.txt

diff --git a/.env.sample b/.env.sample
new file mode 100644
index 0000000..24bcced
--- /dev/null
+++ b/.env.sample
@@ -0,0 +1,2 @@
+AIRFLOW_UID=50000
+AIRFLOW_GID=0
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 0dbf2f2..48a4811 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
 # ---> Python
+logs/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/README.md b/README.md
index 8cea8ae..26bd02b 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,44 @@
 # job-manager-by-airflow
-Apach airflow
\ No newline at end of file
+Apache Airflow
+
+* apache/airflow:3.1.2
+* https://github.com/apache/airflow
+
+![APP](./docs/images/app01.png)
+
+## What is Apache Airflow?
+
+Apache Airflow is the best-known platform for managing and automating
+workflows such as data processing and batch (ETL/ELT) pipelines.
+
+* **Capabilities and strengths**
+  * Airflow excels at large-scale production ETL
+  * Workflows are written as code (Python) in the form of DAGs
+  * The scheduler runs them on a regular schedule
+  * Solid retry and alerting behavior on failure
+  * Powerful Web UI
+  * Rich plugin ecosystem with high extensibility
+
+* **Prerequisites and limitations**
+  * Airflow keeps state such as "how far has this DAG run progressed?" in a database, so it cannot run without a DB.
+
+
+## Develop
+
+### Build with Docker
+
+```sh
+docker compose up -d
+```
+
+The default user is airflow / airflow.
+
+Airflow is officially built with Linux / macOS in mind, and quite a few parts
+assume systemd and other Unix-style commands. On Windows, run it under WSL.
+
+#### How to fetch the official Docker Compose file
+
+```sh
+curl -LfO 'https://airflow.apache.org/docs/apache-airflow/stable/docker-compose.yaml'
+```
\ No newline at end of file
diff --git a/config/airflow.cfg b/config/airflow.cfg
new file mode 100644
index 0000000..2f9ca08
--- /dev/null
+++ b/config/airflow.cfg
@@ -0,0 +1,3225 @@
+[core]
+# The folder where your airflow pipelines live, most likely a
+# subfolder in a code repository. This path must be absolute.
+#
+# Variable: AIRFLOW__CORE__DAGS_FOLDER
+#
+dags_folder = /opt/airflow/dags
+
+# Hostname by providing a path to a callable, which will resolve the hostname.
+# The format is "package.function".
+#
+# For example, default value ``airflow.utils.net.getfqdn`` means that result from patched
+# version of `socket.getfqdn() `__,
+# see related `CPython Issue `__.
+#
+# No argument should be required in the function specified.
+# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address``
+#
+# Variable: AIRFLOW__CORE__HOSTNAME_CALLABLE
+#
+hostname_callable = airflow.utils.net.getfqdn
+
+# A callable to check if a python file has airflow dags defined or not and should
+# return ``True`` if it has dags otherwise ``False``.
+# If this is not provided, Airflow uses its own heuristic rules.
+#
+# The function should have the following signature
+#
+# .. code-block:: python
+#
+#     def func_name(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool: ...
+# +# Variable: AIRFLOW__CORE__MIGHT_CONTAIN_DAG_CALLABLE +# +might_contain_dag_callable = airflow.utils.file.might_contain_dag_via_default_heuristic + +# Default timezone in case supplied date times are naive +# can be `UTC` (default), `system`, or any `IANA ` +# timezone string (e.g. Europe/Amsterdam) +# +# Variable: AIRFLOW__CORE__DEFAULT_TIMEZONE +# +default_timezone = utc + +# The executor class that airflow should use. Choices include +# ``LocalExecutor``, ``CeleryExecutor``, +# ``KubernetesExecutor`` or the full import path to the class when using a custom executor. +# +# Variable: AIRFLOW__CORE__EXECUTOR +# +executor = LocalExecutor + +# The auth manager class that airflow should use. Full import path to the auth manager class. +# +# Variable: AIRFLOW__CORE__AUTH_MANAGER +# +auth_manager = airflow.api_fastapi.auth.managers.simple.simple_auth_manager.SimpleAuthManager + +# The list of users and their associated role in simple auth manager. If the simple auth manager is +# used in your environment, this list controls who can access the environment. +# +# List of user-role delimited with a comma. Each user-role is a colon delimited couple of username and +# role. Roles are predefined in simple auth managers: viewer, user, op, admin. +# +# Example: simple_auth_manager_users = bob:admin,peter:viewer +# +# Variable: AIRFLOW__CORE__SIMPLE_AUTH_MANAGER_USERS +# +simple_auth_manager_users = admin:admin + +# Whether to disable authentication and allow everyone as admin in the environment. +# +# Variable: AIRFLOW__CORE__SIMPLE_AUTH_MANAGER_ALL_ADMINS +# +simple_auth_manager_all_admins = False + +# The json file where the simple auth manager stores passwords for the configured users. +# By default this is ``AIRFLOW_HOME/simple_auth_manager_passwords.json.generated``. +# +# Example: simple_auth_manager_passwords_file = /path/to/passwords.json +# +# Variable: AIRFLOW__CORE__SIMPLE_AUTH_MANAGER_PASSWORDS_FILE +# +# simple_auth_manager_passwords_file = + +# This defines the maximum number of task instances that can run concurrently per scheduler in +# Airflow, regardless of the worker count. Generally this value, multiplied by the number of +# schedulers in your cluster, is the maximum number of task instances with the running +# state in the metadata database. The value must be larger or equal 1. +# +# Variable: AIRFLOW__CORE__PARALLELISM +# +parallelism = 32 + +# The maximum number of task instances allowed to run concurrently in each DAG. To calculate +# the number of tasks that is running concurrently for a DAG, add up the number of running +# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``max_active_tasks``, +# which is defaulted as ``[core] max_active_tasks_per_dag``. +# +# An example scenario when this would be useful is when you want to stop a new dag with an early +# start date from stealing all the executor slots in a cluster. +# +# Variable: AIRFLOW__CORE__MAX_ACTIVE_TASKS_PER_DAG +# +max_active_tasks_per_dag = 16 + +# Are DAGs paused by default at creation +# +# Variable: AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION +# +dags_are_paused_at_creation = True + +# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs +# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``, +# which is defaulted as ``[core] max_active_runs_per_dag``. 
+# +# Variable: AIRFLOW__CORE__MAX_ACTIVE_RUNS_PER_DAG +# +max_active_runs_per_dag = 16 + +# (experimental) The maximum number of consecutive DAG failures before DAG is automatically paused. +# This is also configurable per DAG level with ``max_consecutive_failed_dag_runs``, +# which is defaulted as ``[core] max_consecutive_failed_dag_runs_per_dag``. +# If not specified, then the value is considered as 0, +# meaning that the dags are never paused out by default. +# +# Variable: AIRFLOW__CORE__MAX_CONSECUTIVE_FAILED_DAG_RUNS_PER_DAG +# +max_consecutive_failed_dag_runs_per_dag = 0 + +# The name of the method used in order to start Python processes via the multiprocessing module. +# This corresponds directly with the options available in the Python docs: +# `multiprocessing.set_start_method +# `__ +# must be one of the values returned by `multiprocessing.get_all_start_methods() +# `__. +# +# Example: mp_start_method = fork +# +# Variable: AIRFLOW__CORE__MP_START_METHOD +# +# mp_start_method = + +# Whether to load the DAG examples that ship with Airflow. It's good to +# get started, but you probably want to set this to ``False`` in a production +# environment +# +# Variable: AIRFLOW__CORE__LOAD_EXAMPLES +# +load_examples = True + +# Path to the folder containing Airflow plugins +# +# Variable: AIRFLOW__CORE__PLUGINS_FOLDER +# +plugins_folder = /opt/airflow/plugins + +# Should tasks be executed via forking of the parent process +# +# * ``False``: Execute via forking of the parent process +# * ``True``: Spawning a new python process, slower than fork, but means plugin changes picked +# up by tasks straight away +# +# Variable: AIRFLOW__CORE__EXECUTE_TASKS_NEW_PYTHON_INTERPRETER +# +execute_tasks_new_python_interpreter = False + +# Secret key to save connection passwords in the db +# +# Variable: AIRFLOW__CORE__FERNET_KEY +# +fernet_key = FGnQ2hpBWTfxvE3AuxyOZlvYml6ka5PxbVzRZcIg384= + +# How long before timing out a python file import +# +# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_TIMEOUT +# +dagbag_import_timeout = 30.0 + +# Should a traceback be shown in the UI for dagbag import errors, +# instead of just the exception message +# +# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACKS +# +dagbag_import_error_tracebacks = True + +# If tracebacks are shown, how many entries from the traceback should be shown +# +# Variable: AIRFLOW__CORE__DAGBAG_IMPORT_ERROR_TRACEBACK_DEPTH +# +dagbag_import_error_traceback_depth = 2 + +# If set, tasks without a ``run_as_user`` argument will be run with this user +# Can be used to de-elevate a sudo user running Airflow when executing tasks +# +# Variable: AIRFLOW__CORE__DEFAULT_IMPERSONATION +# +default_impersonation = + +# What security module to use (for example kerberos) +# +# Variable: AIRFLOW__CORE__SECURITY +# +security = + +# Turn unit test mode on (overwrites many configuration options with test +# values at runtime) +# +# Variable: AIRFLOW__CORE__UNIT_TEST_MODE +# +unit_test_mode = False + +# Space-separated list of classes that may be imported during deserialization. Items can be glob +# expressions. Python built-in classes (like dict) are always allowed. +# +# Example: allowed_deserialization_classes = airflow.* my_mod.my_other_mod.TheseClasses* +# +# Variable: AIRFLOW__CORE__ALLOWED_DESERIALIZATION_CLASSES +# +allowed_deserialization_classes = airflow.* + +# Space-separated list of classes that may be imported during deserialization. Items are processed +# as regex expressions. Python built-in classes (like dict) are always allowed. 
+# This is a secondary option to ``[core] allowed_deserialization_classes``. +# +# Variable: AIRFLOW__CORE__ALLOWED_DESERIALIZATION_CLASSES_REGEXP +# +allowed_deserialization_classes_regexp = + +# When a task is killed forcefully, this is the amount of time in seconds that +# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED +# +# Variable: AIRFLOW__CORE__KILLED_TASK_CLEANUP_TIME +# +killed_task_cleanup_time = 60 + +# Whether to override params with dag_run.conf. If you pass some key-value pairs +# through ``airflow dags backfill -c`` or +# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. +# +# Variable: AIRFLOW__CORE__DAG_RUN_CONF_OVERRIDES_PARAMS +# +dag_run_conf_overrides_params = True + +# If enabled, Airflow will only scan files containing both ``DAG`` and ``airflow`` (case-insensitive). +# +# Variable: AIRFLOW__CORE__DAG_DISCOVERY_SAFE_MODE +# +dag_discovery_safe_mode = True + +# The pattern syntax used in the +# `.airflowignore +# `__ +# files in the DAG directories. Valid values are ``regexp`` or ``glob``. +# +# Variable: AIRFLOW__CORE__DAG_IGNORE_FILE_SYNTAX +# +dag_ignore_file_syntax = glob + +# The number of retries each task is going to have by default. Can be overridden at dag or task level. +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_RETRIES +# +default_task_retries = 0 + +# The number of seconds each task is going to wait by default between retries. Can be overridden at +# dag or task level. +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_RETRY_DELAY +# +default_task_retry_delay = 300 + +# The maximum delay (in seconds) each task is going to wait by default between retries. +# This is a global setting and cannot be overridden at task or DAG level. +# +# Variable: AIRFLOW__CORE__MAX_TASK_RETRY_DELAY +# +max_task_retry_delay = 86400 + +# The weighting method used for the effective total priority weight of the task +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_WEIGHT_RULE +# +default_task_weight_rule = downstream + +# Maximum possible time (in seconds) that task will have for execution of auxiliary processes +# (like listeners, mini scheduler...) after task is marked as success.. +# +# Variable: AIRFLOW__CORE__TASK_SUCCESS_OVERTIME +# +task_success_overtime = 20 + +# The default task execution_timeout value for the operators. Expected an integer value to +# be passed into timedelta as seconds. If not specified, then the value is considered as None, +# meaning that the operators are never timed out by default. +# +# Variable: AIRFLOW__CORE__DEFAULT_TASK_EXECUTION_TIMEOUT +# +default_task_execution_timeout = + +# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. +# +# Variable: AIRFLOW__CORE__MIN_SERIALIZED_DAG_UPDATE_INTERVAL +# +min_serialized_dag_update_interval = 30 + +# If ``True``, serialized DAGs are compressed before writing to DB. +# +# .. note:: +# +# This will disable the DAG dependencies view +# +# Variable: AIRFLOW__CORE__COMPRESS_SERIALIZED_DAGS +# +compress_serialized_dags = False + +# Fetching serialized DAG can not be faster than a minimum interval to reduce database +# read rate. This config controls when your DAGs are updated in the Webserver +# +# Variable: AIRFLOW__CORE__MIN_SERIALIZED_DAG_FETCH_INTERVAL +# +min_serialized_dag_fetch_interval = 10 + +# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store +# in the Database. +# All the template_fields for each of Task Instance are stored in the Database. 
+# Keeping this number small may cause an error when you try to view ``Rendered`` tab in +# TaskInstance view for older tasks. +# +# Variable: AIRFLOW__CORE__MAX_NUM_RENDERED_TI_FIELDS_PER_TASK +# +max_num_rendered_ti_fields_per_task = 30 + +# Path to custom XCom class that will be used to store and resolve operators results +# +# Example: xcom_backend = path.to.CustomXCom +# +# Variable: AIRFLOW__CORE__XCOM_BACKEND +# +xcom_backend = airflow.sdk.execution_time.xcom.BaseXCom + +# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``, +# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module. +# +# Variable: AIRFLOW__CORE__LAZY_LOAD_PLUGINS +# +lazy_load_plugins = True + +# By default Airflow providers are lazily-discovered (discovery and imports happen only when required). +# Set it to ``False``, if you want to discover providers whenever 'airflow' is invoked via cli or +# loaded from module. +# +# Variable: AIRFLOW__CORE__LAZY_DISCOVER_PROVIDERS +# +lazy_discover_providers = True + +# Hide sensitive **Variables** or **Connection extra json keys** from UI +# and task logs when set to ``True`` +# +# .. note:: +# +# Connection passwords are always hidden in logs +# +# Variable: AIRFLOW__CORE__HIDE_SENSITIVE_VAR_CONN_FIELDS +# +hide_sensitive_var_conn_fields = True + +# A comma-separated list of extra sensitive keywords to look for in variables names or connection's +# extra JSON. +# +# Variable: AIRFLOW__CORE__SENSITIVE_VAR_CONN_NAMES +# +sensitive_var_conn_names = + +# Task Slot counts for ``default_pool``. This setting would not have any effect in an existing +# deployment where the ``default_pool`` is already created. For existing deployments, users can +# change the number of slots using Webserver, API or the CLI +# +# Variable: AIRFLOW__CORE__DEFAULT_POOL_TASK_SLOT_COUNT +# +default_pool_task_slot_count = 128 + +# The maximum list/dict length an XCom can push to trigger task mapping. If the pushed list/dict has a +# length exceeding this value, the task pushing the XCom will be failed automatically to prevent the +# mapped tasks from clogging the scheduler. +# +# Variable: AIRFLOW__CORE__MAX_MAP_LENGTH +# +max_map_length = 1024 + +# The default umask to use for process when run in daemon mode (scheduler, worker, etc.) +# +# This controls the file-creation mode mask which determines the initial value of file permission bits +# for newly created files. +# +# This value is treated as an octal-integer. +# +# Variable: AIRFLOW__CORE__DAEMON_UMASK +# +daemon_umask = 0o077 + +# Class to use as asset manager. +# +# Example: asset_manager_class = airflow.assets.manager.AssetManager +# +# Variable: AIRFLOW__CORE__ASSET_MANAGER_CLASS +# +# asset_manager_class = + +# Kwargs to supply to asset manager. +# +# Example: asset_manager_kwargs = {"some_param": "some_value"} +# +# Variable: AIRFLOW__CORE__ASSET_MANAGER_KWARGS +# +# asset_manager_kwargs = + +# The ability to allow testing connections across Airflow UI, API and CLI. +# Supported options: ``Disabled``, ``Enabled``, ``Hidden``. Default: Disabled +# Disabled - Disables the test connection functionality and disables the Test Connection button in UI. +# Enabled - Enables the test connection functionality and shows the Test Connection button in UI. +# Hidden - Disables the test connection functionality and hides the Test Connection button in UI. 
+# Before setting this to Enabled, make sure that you review the users who are able to add/edit +# connections and ensure they are trusted. Connection testing can be done maliciously leading to +# undesired and insecure outcomes. +# See `Airflow Security Model: Capabilities of authenticated UI users +# `__ +# for more details. +# +# Variable: AIRFLOW__CORE__TEST_CONNECTION +# +test_connection = Disabled + +# The maximum length of the rendered template field. If the value to be stored in the +# rendered template field exceeds this size, it's redacted. +# +# Variable: AIRFLOW__CORE__MAX_TEMPLATED_FIELD_LENGTH +# +max_templated_field_length = 4096 + +# The url of the execution api server. Default is ``{BASE_URL}/execution/`` +# where ``{BASE_URL}`` is the base url of the API Server. If ``{BASE_URL}`` is not set, +# it will use ``http://localhost:8080`` as the default base url. +# +# Variable: AIRFLOW__CORE__EXECUTION_API_SERVER_URL +# +# execution_api_server_url = + +[database] +# Path to the ``alembic.ini`` file. You can either provide the file path relative +# to the Airflow home directory or the absolute path if it is located elsewhere. +# +# Variable: AIRFLOW__DATABASE__ALEMBIC_INI_FILE_PATH +# +alembic_ini_file_path = alembic.ini + +# The SQLAlchemy connection string to the metadata database. +# SQLAlchemy supports many different database engines. +# See: `Set up a Database Backend: Database URI +# `__ +# for more details. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN +# +sql_alchemy_conn = sqlite:////opt/airflow/airflow.db + +# The SQLAlchemy connection string to the metadata database used for async connections. +# If this is not set, Airflow automatically derives a string by converting ``sql_alchemy_conn``. +# Unfortunately, this conversion logic does not always work due to various incompatibilities +# between sync and async db driver implementations. This sets the connection string directly +# without any conversion instead. +# +# Example: sql_alchemy_conn_async = postgresql+asyncpg://postgres:airflow@postgres/airflow +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN_ASYNC +# +# sql_alchemy_conn_async = + +# Extra engine specific keyword args passed to SQLAlchemy's create_engine, as a JSON-encoded value +# +# Example: sql_alchemy_engine_args = {"arg1": true} +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_ENGINE_ARGS +# +# sql_alchemy_engine_args = + +# The encoding for the databases +# +# Variable: AIRFLOW__DATABASE__SQL_ENGINE_ENCODING +# +sql_engine_encoding = utf-8 + +# Collation for ``dag_id``, ``task_id``, ``key``, ``external_executor_id`` columns +# in case they have different encoding. +# By default this collation is the same as the database collation, however for ``mysql`` and ``mariadb`` +# the default is ``utf8mb3_bin`` so that the index sizes of our index keys will not exceed +# the maximum size of allowed index when collation is set to ``utf8mb4`` variant, see +# `GitHub Issue Comment `__ +# for more details. +# +# Variable: AIRFLOW__DATABASE__SQL_ENGINE_COLLATION_FOR_IDS +# +# sql_engine_collation_for_ids = + +# If SQLAlchemy should pool database connections. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_ENABLED +# +sql_alchemy_pool_enabled = True + +# The SQLAlchemy pool size is the maximum number of database connections +# in the pool. 0 indicates no limit. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_SIZE +# +sql_alchemy_pool_size = 5 + +# The maximum overflow size of the pool. 
+# When the number of checked-out connections reaches the size set in pool_size, +# additional connections will be returned up to this limit. +# When those additional connections are returned to the pool, they are disconnected and discarded. +# It follows then that the total number of simultaneous connections the pool will allow +# is **pool_size** + **max_overflow**, +# and the total number of "sleeping" connections the pool will allow is pool_size. +# max_overflow can be set to ``-1`` to indicate no overflow limit; +# no limit will be placed on the total number of concurrent connections. Defaults to ``10``. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_OVERFLOW +# +sql_alchemy_max_overflow = 10 + +# The SQLAlchemy pool recycle is the number of seconds a connection +# can be idle in the pool before it is invalidated. This config does +# not apply to sqlite. If the number of DB connections is ever exceeded, +# a lower config value will allow the system to recover faster. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_RECYCLE +# +sql_alchemy_pool_recycle = 1800 + +# Check connection at the start of each connection pool checkout. +# Typically, this is a simple statement like "SELECT 1". +# See `SQLAlchemy Pooling: Disconnect Handling - Pessimistic +# `__ +# for more details. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_POOL_PRE_PING +# +sql_alchemy_pool_pre_ping = True + +# The schema to use for the metadata database. +# SQLAlchemy supports databases with the concept of multiple schemas. +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_SCHEMA +# +sql_alchemy_schema = + +# Import path for connect args in SQLAlchemy. Defaults to an empty dict. +# This is useful when you want to configure db engine args that SQLAlchemy won't parse +# in connection string. This can be set by passing a dictionary containing the create engine parameters. +# For more details about passing create engine parameters (keepalives variables, timeout etc) +# in Postgres DB Backend see `Setting up a PostgreSQL Database +# `__ +# e.g ``connect_args={"timeout":30}`` can be defined in ``airflow_local_settings.py`` and +# can be imported as shown below +# +# *Changed in 3.1.0*: This configuration is only applied to synchronous engines, such as psycopg2. +# See ``sql_alchemy_connect_args_async``. +# +# Example: sql_alchemy_connect_args = airflow_local_settings.connect_args +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONNECT_ARGS +# +# sql_alchemy_connect_args = + +# Import path for connect args in SQLAlchemy. Defaults to an empty dict. +# This is similar to ``sql_alchemy_connect_args``, but only for async connections. +# +# This configuration is only applied to async engines, such as asyncpg. +# +# Example: sql_alchemy_connect_args_async = airflow_local_settings.connect_args_async +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONNECT_ARGS_ASYNC +# +# sql_alchemy_connect_args_async = + +# Important Warning: Use of sql_alchemy_session_maker Highly Discouraged +# Import path for function which returns 'sqlalchemy.orm.sessionmaker'. +# Improper configuration of sql_alchemy_session_maker can lead to serious issues, +# including data corruption, unrecoverable application crashes. Please review the SQLAlchemy +# documentation for detailed guidance on proper configuration and best practices. 
+# +# Example: sql_alchemy_session_maker = airflow_local_settings._sessionmaker +# +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_SESSION_MAKER +# +# sql_alchemy_session_maker = + +# Number of times the code should be retried in case of DB Operational Errors. +# Not all transactions will be retried as it can cause undesired state. +# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. +# +# Variable: AIRFLOW__DATABASE__MAX_DB_RETRIES +# +max_db_retries = 3 + +# Whether to run alembic migrations during Airflow start up. Sometimes this operation can be expensive, +# and the users can assert the correct version through other means (e.g. through a Helm chart). +# Accepts ``True`` or ``False``. +# +# Variable: AIRFLOW__DATABASE__CHECK_MIGRATIONS +# +check_migrations = True + +# List of DB managers to use to migrate external tables in airflow database. The managers must inherit +# from BaseDBManager. If ``FabAuthManager`` is configured in the environment, +# ``airflow.providers.fab.auth_manager.models.db.FABDBManager`` is automatically added. +# +# Example: external_db_managers = airflow.providers.fab.auth_manager.models.db.FABDBManager +# +# Variable: AIRFLOW__DATABASE__EXTERNAL_DB_MANAGERS +# +# external_db_managers = + +# The number of rows to process in each batch when performing a migration. +# This is useful for large tables to avoid locking and failure due to query timeouts. +# +# Variable: AIRFLOW__DATABASE__MIGRATION_BATCH_SIZE +# +migration_batch_size = 10000 + +[logging] +# The folder where airflow should store its log files. +# This path must be absolute. +# There are a few existing configurations that assume this is set to the default. +# If you choose to override this you may need to update the +# ``[logging] dag_processor_manager_log_location`` and +# ``[logging] dag_processor_child_process_log_directory settings`` as well. +# +# Variable: AIRFLOW__LOGGING__BASE_LOG_FOLDER +# +base_log_folder = /opt/airflow/logs + +# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. +# Set this to ``True`` if you want to enable remote logging. +# +# Variable: AIRFLOW__LOGGING__REMOTE_LOGGING +# +remote_logging = False + +# Users must supply an Airflow connection id that provides access to the storage +# location. Depending on your remote logging service, this may only be used for +# reading logs, not writing them. +# +# Variable: AIRFLOW__LOGGING__REMOTE_LOG_CONN_ID +# +remote_log_conn_id = + +# Whether the local log files for GCS, S3, WASB, HDFS and OSS remote logging should be deleted after +# they are uploaded to the remote location. +# +# Variable: AIRFLOW__LOGGING__DELETE_LOCAL_LOGS +# +delete_local_logs = False + +# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default +# Credentials +# `__ will +# be used. +# +# Variable: AIRFLOW__LOGGING__GOOGLE_KEY_PATH +# +google_key_path = + +# Storage bucket URL for remote logging +# S3 buckets should start with **s3://** +# Cloudwatch log groups should start with **cloudwatch://** +# GCS buckets should start with **gs://** +# WASB buckets should start with **wasb** just to help Airflow select correct handler +# Stackdriver logs should start with **stackdriver://** +# +# Variable: AIRFLOW__LOGGING__REMOTE_BASE_LOG_FOLDER +# +remote_base_log_folder = + +# The remote_task_handler_kwargs param is loaded into a dictionary and passed to the ``__init__`` +# of remote task handler and it overrides the values provided by Airflow config. 
For example if you set +# ``delete_local_logs=False`` and you provide ``{"delete_local_copy": true}``, then the local +# log files will be deleted after they are uploaded to remote location. +# +# Example: remote_task_handler_kwargs = {"delete_local_copy": true} +# +# Variable: AIRFLOW__LOGGING__REMOTE_TASK_HANDLER_KWARGS +# +remote_task_handler_kwargs = + +# Use server-side encryption for logs stored in S3 +# +# Variable: AIRFLOW__LOGGING__ENCRYPT_S3_LOGS +# +encrypt_s3_logs = False + +# Logging level. +# +# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. +# +# Variable: AIRFLOW__LOGGING__LOGGING_LEVEL +# +logging_level = INFO + +# Logging level for celery. If not set, it uses the value of logging_level +# +# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. +# +# Variable: AIRFLOW__LOGGING__CELERY_LOGGING_LEVEL +# +celery_logging_level = + +# Logging level for Flask-appbuilder UI. +# +# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. +# +# Variable: AIRFLOW__LOGGING__FAB_LOGGING_LEVEL +# +fab_logging_level = WARNING + +# Logging class +# Specify the class that will specify the logging configuration +# This class has to be on the python classpath +# +# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG +# +# Variable: AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS +# +logging_config_class = + +# Flag to enable/disable Colored logs in Console +# Colour the logs when the controlling terminal is a TTY. +# +# Variable: AIRFLOW__LOGGING__COLORED_CONSOLE_LOG +# +colored_console_log = True + +# Format of Log line +# +# *Changed in 3.1.0*: This can now contain color escape sequences ``%(blue)s`` etc which will only +# result in colours if :ref:`config:logging__colored_console_log` is true. +# +# Example: log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s +# +# Variable: AIRFLOW__LOGGING__LOG_FORMAT +# +log_format = + +# A comma separated list of information about the callsite (such as line number of filename etc) of +# logger calls to include in each message. +# +# See :class:`structlog.processors.CallsiteParameter` for the possible values.The values should be the +# constant names (``FUNC_NAME``) or the values (``func_name``) +# +# Including these in a log message adds a lot to the usability to the logs, but collecting these has a +# (tiny) cost -- if you are super concerned with eking out every last ounce of performance you could +# turn these off (by setting this value to an empty string) +# +# Variable: AIRFLOW__LOGGING__CALLSITE_PARAMETERS +# +callsite_parameters = filename,lineno + +# Defines the format of log messages for simple logging configuration +# +# Variable: AIRFLOW__LOGGING__SIMPLE_LOG_FORMAT +# +simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s + +# Where to send dag parser logs. If "file", logs are sent to log files defined by child_process_log_directory. 
+# +# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_LOG_TARGET +# +dag_processor_log_target = file + +# Format of Dag Processor Log line +# +# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_LOG_FORMAT +# +dag_processor_log_format = [%%(asctime)s] [SOURCE:DAG_PROCESSOR] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s + +# Determines the directory where logs for the child processes of the dag processor will be stored +# +# Variable: AIRFLOW__LOGGING__DAG_PROCESSOR_CHILD_PROCESS_LOG_DIRECTORY +# +dag_processor_child_process_log_directory = /opt/airflow/logs/dag_processor + +# Determines the formatter class used by Airflow for structuring its log messages +# The default formatter class is timezone-aware, which means that timestamps attached to log entries +# will be adjusted to reflect the local timezone of the Airflow instance +# +# Variable: AIRFLOW__LOGGING__LOG_FORMATTER_CLASS +# +log_formatter_class = airflow.utils.log.timezone_aware.TimezoneAware + +# An import path to a function to add adaptations of each secret added with +# ``airflow.sdk.execution_time.secrets_masker.mask_secret`` to be masked in log messages. +# The given function is expected to require a single parameter: the secret to be adapted. +# It may return a single adaptation of the secret or an iterable of adaptations to each be +# masked as secrets. The original secret will be masked as well as any adaptations returned. +# +# Example: secret_mask_adapter = urllib.parse.quote +# +# Variable: AIRFLOW__LOGGING__SECRET_MASK_ADAPTER +# +secret_mask_adapter = + +# The minimum length of a secret to be masked in log messages. +# Secrets shorter than this length will not be masked. +# +# Variable: AIRFLOW__LOGGING__MIN_LENGTH_MASKED_SECRET +# +min_length_masked_secret = 5 + +# Specify prefix pattern like mentioned below with stream handler ``TaskHandlerWithCustomFormatter`` +# +# Example: task_log_prefix_template = {{ti.dag_id}}-{{ti.task_id}}-{{logical_date}}-{{ti.try_number}} +# +# Variable: AIRFLOW__LOGGING__TASK_LOG_PREFIX_TEMPLATE +# +task_log_prefix_template = + +# Formatting for how airflow generates file names/paths for each task run. +# +# Variable: AIRFLOW__LOGGING__LOG_FILENAME_TEMPLATE +# +log_filename_template = dag_id={{ ti.dag_id }}/run_id={{ ti.run_id }}/task_id={{ ti.task_id }}/{%% if ti.map_index >= 0 %%}map_index={{ ti.map_index }}/{%% endif %%}attempt={{ try_number|default(ti.try_number) }}.log + +# Name of handler to read task instance logs. +# Defaults to use ``task`` handler. +# +# Variable: AIRFLOW__LOGGING__TASK_LOG_READER +# +task_log_reader = task + +# A comma\-separated list of third-party logger names that will be configured to print messages to +# consoles\. +# +# Example: extra_logger_names = fastapi,sqlalchemy +# +# Variable: AIRFLOW__LOGGING__EXTRA_LOGGER_NAMES +# +extra_logger_names = + +# When you start an Airflow worker, Airflow starts a tiny web server +# subprocess to serve the workers local log files to the airflow main +# web server, who then builds pages and sends them to users. This defines +# the port on which the logs are served. It needs to be unused, and open +# visible from the main web server to connect into the workers. +# +# Variable: AIRFLOW__LOGGING__WORKER_LOG_SERVER_PORT +# +worker_log_server_port = 8793 + +# Port to serve logs from for triggerer. +# See ``[logging] worker_log_server_port`` description for more info. 
+# +# Variable: AIRFLOW__LOGGING__TRIGGER_LOG_SERVER_PORT +# +trigger_log_server_port = 8794 + +# We must parse timestamps to interleave logs between trigger and task. To do so, +# we need to parse timestamps in log files. In case your log format is non-standard, +# you may provide import path to callable which takes a string log line and returns +# the timestamp (datetime.datetime compatible). +# +# Example: interleave_timestamp_parser = path.to.my_func +# +# Variable: AIRFLOW__LOGGING__INTERLEAVE_TIMESTAMP_PARSER +# +# interleave_timestamp_parser = + +# Permissions in the form or of octal string as understood by chmod. The permissions are important +# when you use impersonation, when logs are written by a different user than airflow. The most secure +# way of configuring it in this case is to add both users to the same group and make it the default +# group of both users. Group-writeable logs are default in airflow, but you might decide that you are +# OK with having the logs other-writeable, in which case you should set it to ``0o777``. You might +# decide to add more security if you do not use impersonation and change it to ``0o755`` to make it +# only owner-writeable. You can also make it just readable only for owner by changing it to ``0o700`` +# if all the access (read/write) for your logs happens from the same user. +# +# Example: file_task_handler_new_folder_permissions = 0o775 +# +# Variable: AIRFLOW__LOGGING__FILE_TASK_HANDLER_NEW_FOLDER_PERMISSIONS +# +file_task_handler_new_folder_permissions = 0o775 + +# Permissions in the form or of octal string as understood by chmod. The permissions are important +# when you use impersonation, when logs are written by a different user than airflow. The most secure +# way of configuring it in this case is to add both users to the same group and make it the default +# group of both users. Group-writeable logs are default in airflow, but you might decide that you are +# OK with having the logs other-writeable, in which case you should set it to ``0o666``. You might +# decide to add more security if you do not use impersonation and change it to ``0o644`` to make it +# only owner-writeable. You can also make it just readable only for owner by changing it to ``0o600`` +# if all the access (read/write) for your logs happens from the same user. +# +# Example: file_task_handler_new_file_permissions = 0o664 +# +# Variable: AIRFLOW__LOGGING__FILE_TASK_HANDLER_NEW_FILE_PERMISSIONS +# +file_task_handler_new_file_permissions = 0o664 + +# By default Celery sends all logs into stderr. +# If enabled any previous logging handlers will get *removed*. +# With this option AirFlow will create new handlers +# and send low level logs like INFO and WARNING to stdout, +# while sending higher severity logs to stderr. +# +# Variable: AIRFLOW__LOGGING__CELERY_STDOUT_STDERR_SEPARATION +# +celery_stdout_stderr_separation = False + +# A comma separated list of keywords related to errors whose presence should display the line in red +# color in UI +# +# Variable: AIRFLOW__LOGGING__COLOR_LOG_ERROR_KEYWORDS +# +color_log_error_keywords = error,exception + +# A comma separated list of keywords related to warning whose presence should display the line in yellow +# color in UI +# +# Variable: AIRFLOW__LOGGING__COLOR_LOG_WARNING_KEYWORDS +# +color_log_warning_keywords = warn + +[metrics] +# `StatsD `__ integration settings. + +# Configure an allow list (comma separated regex patterns to match) to send only certain metrics. 
+# +# Example: metrics_allow_list = "scheduler,executor,dagrun,pool,triggerer,celery" or "^scheduler,^executor,heartbeat|timeout" +# +# Variable: AIRFLOW__METRICS__METRICS_ALLOW_LIST +# +metrics_allow_list = + +# Configure a block list (comma separated regex patterns to match) to block certain metrics +# from being emitted. +# If ``[metrics] metrics_allow_list`` and ``[metrics] metrics_block_list`` are both configured, +# ``[metrics] metrics_block_list`` is ignored. +# +# Example: metrics_block_list = "scheduler,executor,dagrun,pool,triggerer,celery" or "^scheduler,^executor,heartbeat|timeout" +# +# Variable: AIRFLOW__METRICS__METRICS_BLOCK_LIST +# +metrics_block_list = + +# Enables sending metrics to StatsD. +# +# Variable: AIRFLOW__METRICS__STATSD_ON +# +statsd_on = False + +# Specifies the host address where the StatsD daemon (or server) is running +# +# Variable: AIRFLOW__METRICS__STATSD_HOST +# +statsd_host = localhost + +# Enables the statsd host to be resolved into IPv6 address +# +# Variable: AIRFLOW__METRICS__STATSD_IPV6 +# +statsd_ipv6 = False + +# Specifies the port on which the StatsD daemon (or server) is listening to +# +# Variable: AIRFLOW__METRICS__STATSD_PORT +# +statsd_port = 8125 + +# Defines the namespace for all metrics sent from Airflow to StatsD +# +# Variable: AIRFLOW__METRICS__STATSD_PREFIX +# +statsd_prefix = airflow + +# A function that validate the StatsD stat name, apply changes to the stat name if necessary and return +# the transformed stat name. +# +# The function should have the following signature +# +# .. code-block:: python +# +# def func_name(stat_name: str) -> str: ... +# +# Variable: AIRFLOW__METRICS__STAT_NAME_HANDLER +# +stat_name_handler = + +# To enable datadog integration to send airflow metrics. +# +# Variable: AIRFLOW__METRICS__STATSD_DATADOG_ENABLED +# +statsd_datadog_enabled = False + +# List of datadog tags attached to all metrics(e.g: ``key1:value1,key2:value2``) +# +# Variable: AIRFLOW__METRICS__STATSD_DATADOG_TAGS +# +statsd_datadog_tags = + +# Set to ``False`` to disable metadata tags for some of the emitted metrics +# +# Variable: AIRFLOW__METRICS__STATSD_DATADOG_METRICS_TAGS +# +statsd_datadog_metrics_tags = True + +# If you want to utilise your own custom StatsD client set the relevant +# module path below. +# Note: The module path must exist on your +# `PYTHONPATH ` +# for Airflow to pick it up +# +# Variable: AIRFLOW__METRICS__STATSD_CUSTOM_CLIENT_PATH +# +# statsd_custom_client_path = + +# If you want to avoid sending all the available metrics tags to StatsD, +# you can configure a block list of prefixes (comma separated) to filter out metric tags +# that start with the elements of the list (e.g: ``job_id,run_id``) +# +# Example: statsd_disabled_tags = job_id,run_id,dag_id,task_id +# +# Variable: AIRFLOW__METRICS__STATSD_DISABLED_TAGS +# +statsd_disabled_tags = job_id,run_id + +# To enable sending Airflow metrics with StatsD-Influxdb tagging convention. +# +# Variable: AIRFLOW__METRICS__STATSD_INFLUXDB_ENABLED +# +statsd_influxdb_enabled = False + +# Enables sending metrics to OpenTelemetry. +# +# Variable: AIRFLOW__METRICS__OTEL_ON +# +otel_on = False + +# Specifies the hostname or IP address of the OpenTelemetry Collector to which Airflow sends +# metrics and traces. +# +# Variable: AIRFLOW__METRICS__OTEL_HOST +# +otel_host = localhost + +# Specifies the port of the OpenTelemetry Collector that is listening to. +# +# Variable: AIRFLOW__METRICS__OTEL_PORT +# +otel_port = 8889 + +# The prefix for the Airflow metrics. 
+# +# Variable: AIRFLOW__METRICS__OTEL_PREFIX +# +otel_prefix = airflow + +# Defines the interval, in milliseconds, at which Airflow sends batches of metrics and traces +# to the configured OpenTelemetry Collector. +# +# Variable: AIRFLOW__METRICS__OTEL_INTERVAL_MILLISECONDS +# +otel_interval_milliseconds = 60000 + +# If ``True``, all metrics are also emitted to the console. Defaults to ``False``. +# +# Variable: AIRFLOW__METRICS__OTEL_DEBUGGING_ON +# +otel_debugging_on = False + +# The default service name of traces. +# +# Variable: AIRFLOW__METRICS__OTEL_SERVICE +# +otel_service = Airflow + +# If ``True``, SSL will be enabled. Defaults to ``False``. +# To establish an HTTPS connection to the OpenTelemetry collector, +# you need to configure the SSL certificate and key within the OpenTelemetry collector's +# ``config.yml`` file. +# +# Variable: AIRFLOW__METRICS__OTEL_SSL_ACTIVE +# +otel_ssl_active = False + +[traces] +# Distributed traces integration settings. + +# Enables sending traces to OpenTelemetry. +# +# Variable: AIRFLOW__TRACES__OTEL_ON +# +otel_on = False + +# Specifies the hostname or IP address of the OpenTelemetry Collector to which Airflow sends +# traces. +# +# Variable: AIRFLOW__TRACES__OTEL_HOST +# +otel_host = localhost + +# Specifies the port of the OpenTelemetry Collector that is listening to. +# +# Variable: AIRFLOW__TRACES__OTEL_PORT +# +otel_port = 8889 + +# The default service name of traces. +# +# Variable: AIRFLOW__TRACES__OTEL_SERVICE +# +otel_service = Airflow + +# If True, all traces are also emitted to the console. Defaults to False. +# +# Variable: AIRFLOW__TRACES__OTEL_DEBUGGING_ON +# +otel_debugging_on = False + +# If True, SSL will be enabled. Defaults to False. +# To establish an HTTPS connection to the OpenTelemetry collector, +# you need to configure the SSL certificate and key within the OpenTelemetry collector's +# config.yml file. +# +# Variable: AIRFLOW__TRACES__OTEL_SSL_ACTIVE +# +otel_ssl_active = False + +# If True, then traces from Airflow internal methods are exported. Defaults to False. +# +# Variable: AIRFLOW__TRACES__OTEL_DEBUG_TRACES_ON +# +otel_debug_traces_on = False + +[secrets] +# Full class name of secrets backend to enable (will precede env vars and metastore in search path) +# +# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend +# +# Variable: AIRFLOW__SECRETS__BACKEND +# +backend = + +# The backend_kwargs param is loaded into a dictionary and passed to ``__init__`` +# of secrets backend class. See documentation for the secrets backend you are using. +# JSON is expected. +# +# Example for AWS Systems Manager ParameterStore: +# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}`` +# +# Variable: AIRFLOW__SECRETS__BACKEND_KWARGS +# +backend_kwargs = + +# .. note:: |experimental| +# +# Enables local caching of Variables, when parsing DAGs only. +# Using this option can make dag parsing faster if Variables are used in top level code, at the expense +# of longer propagation time for changes. +# Please note that this cache concerns only the DAG parsing step. There is no caching in place when DAG +# tasks are run. +# +# Variable: AIRFLOW__SECRETS__USE_CACHE +# +use_cache = False + +# .. note:: |experimental| +# +# When the cache is enabled, this is the duration for which we consider an entry in the cache to be +# valid. Entries are refreshed if they are older than this many seconds. 
+# It means that when the cache is enabled, this is the maximum amount of time you need to wait to see a +# Variable change take effect. +# +# Variable: AIRFLOW__SECRETS__CACHE_TTL_SECONDS +# +cache_ttl_seconds = 900 + +[api] +# Sets a custom homepage heading and site title for all Airflow UI pages. +# If set, the Dashboard will display this value instead of the default +# "Welcome" message. +# +# Variable: AIRFLOW__API__INSTANCE_NAME +# +# instance_name = + +# Boolean for running SwaggerUI in the webserver. +# +# Variable: AIRFLOW__API__ENABLE_SWAGGER_UI +# +enable_swagger_ui = True + +# Secret key used to run your api server. It should be as random as possible. However, when running +# more than 1 instances of the api, make sure all of them use the same ``secret_key`` otherwise +# one of them will error with "CSRF session token is missing". +# The api key is also used to authorize requests to Celery workers when logs are retrieved. +# The token generated using the secret key has a short expiry time though - make sure that time on +# ALL the machines that you run airflow components on is synchronized (for example using ntpd) +# otherwise you might get "forbidden" errors when the logs are accessed. +# +# Variable: AIRFLOW__API__SECRET_KEY +# +secret_key = IPTIPKlON0TQzwLWnMtppA== + +# Expose the configuration file in the web server. Set to ``non-sensitive-only`` to show all values +# except those that have security implications. ``True`` shows all values. ``False`` hides the +# configuration completely. +# +# Variable: AIRFLOW__API__EXPOSE_CONFIG +# +expose_config = False + +# Expose stacktrace in the web server +# +# Variable: AIRFLOW__API__EXPOSE_STACKTRACE +# +expose_stacktrace = False + +# The base url of the API server. Airflow cannot guess what domain or CNAME you are using. +# If the Airflow console (the front-end) and the API server are on a different domain, this config +# should contain the API server endpoint. +# +# Example: base_url = https://my-airflow.company.com +# +# Variable: AIRFLOW__API__BASE_URL +# +# base_url = + +# The ip specified when starting the api server +# +# Variable: AIRFLOW__API__HOST +# +host = 0.0.0.0 + +# The port on which to run the api server +# +# Variable: AIRFLOW__API__PORT +# +port = 8080 + +# Number of workers to run on the API server. Should be roughly equal to the number of cpu cores +# available. If you need to scale the API server, strongly consider deploying multiple API servers +# instead of increasing the number of workers; See https://github.com/apache/airflow/issues/52270. +# +# Variable: AIRFLOW__API__WORKERS +# +workers = 1 + +# Number of seconds the API server waits before timing out on a worker +# +# Variable: AIRFLOW__API__WORKER_TIMEOUT +# +worker_timeout = 120 + +# Path to the logging configuration file for the uvicorn server. +# If not set, the default uvicorn logging configuration will be used. +# +# Example: log_config = path/to/logging_config.yaml +# +# Variable: AIRFLOW__API__LOG_CONFIG +# +# log_config = + +# Paths to the SSL certificate and key for the api server. When both are +# provided SSL will be enabled. This does not change the api server port. +# The same SSL certificate will also be loaded into the worker to enable +# it to be trusted when a self-signed certificate is used. +# +# Variable: AIRFLOW__API__SSL_CERT +# +ssl_cert = + +# Paths to the SSL certificate and key for the api server. When both are +# provided SSL will be enabled. This does not change the api server port. 
+# +# Variable: AIRFLOW__API__SSL_KEY +# +ssl_key = + +# Used to set the maximum page limit for API requests. If limit passed as param +# is greater than maximum page limit, it will be ignored and maximum page limit value +# will be set as the limit +# +# Variable: AIRFLOW__API__MAXIMUM_PAGE_LIMIT +# +maximum_page_limit = 100 + +# Used to set the default page limit when limit param is zero or not provided in API +# requests. Otherwise if positive integer is passed in the API requests as limit, the +# smallest number of user given limit or maximum page limit is taken as limit. +# +# Variable: AIRFLOW__API__FALLBACK_PAGE_LIMIT +# +fallback_page_limit = 50 + +# Used in response to a preflight request to indicate which HTTP +# headers can be used when making the actual request. This header is +# the server side response to the browser's +# Access-Control-Request-Headers header. +# +# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_HEADERS +# +access_control_allow_headers = + +# Specifies the method or methods allowed when accessing the resource. +# +# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_METHODS +# +access_control_allow_methods = + +# Indicates whether the response can be shared with requesting code from the given origins. +# Separate URLs with space. +# +# Variable: AIRFLOW__API__ACCESS_CONTROL_ALLOW_ORIGINS +# +access_control_allow_origins = + +# Sorting order in grid view. Valid values are: ``topological``, ``hierarchical_alphabetical`` +# +# Variable: AIRFLOW__API__GRID_VIEW_SORTING_ORDER +# +grid_view_sorting_order = topological + +# The amount of time (in secs) webserver will wait for initial handshake +# while fetching logs from other worker machine +# +# Variable: AIRFLOW__API__LOG_FETCH_TIMEOUT_SEC +# +log_fetch_timeout_sec = 5 + +# By default, the webserver shows paused DAGs. Flip this to hide paused +# DAGs by default +# +# Variable: AIRFLOW__API__HIDE_PAUSED_DAGS_BY_DEFAULT +# +hide_paused_dags_by_default = False + +# Consistent page size across all listing views in the UI +# +# Variable: AIRFLOW__API__PAGE_SIZE +# +page_size = 50 + +# Default setting for wrap toggle on DAG code and TI log views. +# +# Variable: AIRFLOW__API__DEFAULT_WRAP +# +default_wrap = False + +# How frequently, in seconds, the DAG data will auto-refresh in graph or grid view +# when auto-refresh is turned on +# +# Variable: AIRFLOW__API__AUTO_REFRESH_INTERVAL +# +auto_refresh_interval = 3 + +# Require confirmation when changing a DAG in the web UI. This is to prevent accidental changes +# to a DAG that may be running on sensitive environments like production. +# When set to ``True``, confirmation dialog will be shown when a user tries to Pause/Unpause, +# Trigger a DAG +# +# Variable: AIRFLOW__API__REQUIRE_CONFIRMATION_DAG_CHANGE +# +require_confirmation_dag_change = False + +[workers] +# Configuration related to workers that run Airflow tasks. + +# Full class name of secrets backend to enable for workers (will precede env vars backend) +# +# Example: secrets_backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend +# +# Variable: AIRFLOW__WORKERS__SECRETS_BACKEND +# +secrets_backend = + +# The secrets_backend_kwargs param is loaded into a dictionary and passed to ``__init__`` +# of secrets backend class. See documentation for the secrets backend you are using. +# JSON is expected. 
+# +# Example for AWS Systems Manager ParameterStore: +# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}`` +# +# Variable: AIRFLOW__WORKERS__SECRETS_BACKEND_KWARGS +# +secrets_backend_kwargs = + +# The minimum interval (in seconds) at which the worker checks the task instance's +# heartbeat status with the API server to confirm it is still alive. +# +# Variable: AIRFLOW__WORKERS__MIN_HEARTBEAT_INTERVAL +# +min_heartbeat_interval = 5 + +# The maximum number of consecutive failed heartbeats before terminating the task instance process. +# +# Variable: AIRFLOW__WORKERS__MAX_FAILED_HEARTBEATS +# +max_failed_heartbeats = 3 + +# The maximum number of retry attempts to the execution API server. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_RETRIES +# +execution_api_retries = 5 + +# The minimum amount of time (in seconds) to wait before retrying a failed API request. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_RETRY_WAIT_MIN +# +execution_api_retry_wait_min = 1.0 + +# The maximum amount of time (in seconds) to wait before retrying a failed API request. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_RETRY_WAIT_MAX +# +execution_api_retry_wait_max = 90.0 + +# The timeout (in seconds) for HTTP requests from workers to the Execution API server. +# This controls how long a worker will wait for a response from the API server before +# timing out. Increase this value if you experience timeout errors under high load. +# +# Variable: AIRFLOW__WORKERS__EXECUTION_API_TIMEOUT +# +execution_api_timeout = 5.0 + +# Number of seconds to wait after a task process exits before forcibly closing any +# remaining communication sockets. This helps prevent the task supervisor from hanging +# indefinitely due to missed EOF signals. +# +# Variable: AIRFLOW__WORKERS__SOCKET_CLEANUP_TIMEOUT +# +socket_cleanup_timeout = 60.0 + +[api_auth] +# Settings relating to authentication on the Airflow APIs + +# The audience claim to use when generating and validating JWTs for the API. +# +# This variable can be a single value, or a comma-separated string, in which case the first value is the +# one that will be used when generating, and the others are accepted at validation time. +# +# Not required, but strongly encouraged. +# +# See also :ref:`config:execution_api__jwt_audience` +# +# Example: jwt_audience = my-unique-airflow-id +# +# Variable: AIRFLOW__API_AUTH__JWT_AUDIENCE +# +# jwt_audience = + +# Number in seconds until the JWTs used for authentication expires. When the token expires, +# all API calls using this token will fail on authentication. +# +# Make sure that time on ALL the machines that you run airflow components on is synchronized +# (for example using ntpd) otherwise you might get "forbidden" errors. +# +# See also :ref:`config:execution_api__jwt_expiration_time` +# +# Variable: AIRFLOW__API_AUTH__JWT_EXPIRATION_TIME +# +jwt_expiration_time = 86400 + +# Number in seconds until the JWTs used for authentication expires for CLI commands. +# When the token expires, all CLI calls using this token will fail on authentication. +# +# Make sure that time on ALL the machines that you run airflow components on is synchronized +# (for example using ntpd) otherwise you might get "forbidden" errors. +# +# Variable: AIRFLOW__API_AUTH__JWT_CLI_EXPIRATION_TIME +# +jwt_cli_expiration_time = 3600 + +# Secret key used to encode and decode JWTs to authenticate to public and private APIs. +# +# It should be as random as possible. 
However, when running more than 1 instances of API services, +# make sure all of them use the same ``jwt_secret`` otherwise calls will fail on authentication. +# +# Mutually exclusive with ``jwt_private_key_path``. +# +# Variable: AIRFLOW__API_AUTH__JWT_SECRET +# +jwt_secret = 6+GNS8M7dlHn7iexrxXW1w== + +# The path to a file containing a PEM-encoded private key use when generating Task Identity tokens in +# the executor. +# +# Mutually exclusive with ``jwt_secret``. +# +# Example: jwt_private_key_path = /path/to/private_key.pem +# +# Variable: AIRFLOW__API_AUTH__JWT_PRIVATE_KEY_PATH +# +# jwt_private_key_path = + +# The algorithm name use when generating and validating JWT Task Identities. +# +# This value must be appropriate for the given private key type. +# +# If this is not specified Airflow makes some guesses as what algorithm is best based on the key type. +# +# ("HS512" if ``jwt_secret`` is set, otherwise a key-type specific guess) +# +# Example: jwt_algorithm = "EdDSA" or "HS512" +# +# Variable: AIRFLOW__API_AUTH__JWT_ALGORITHM +# +# jwt_algorithm = + +# The Key ID to place in header when generating JWTs. Not used in the validation path. +# +# If this is not specified the RFC7638 thumbprint of the private key will be used. +# +# Ignored when ``jwt_secret`` is used. +# +# Example: jwt_kid = my-key-id +# +# Variable: AIRFLOW__API_AUTH__JWT_KID +# +# jwt_kid = + +# The public signing keys of Task Execution token issuers to trust. It must contain the public key +# related to ``jwt_private_key_path`` else tasks will be unlikely to execute successfully. +# +# Can be a local file path (without the ``file://`` prefix) or an http or https URL. +# +# If a remote URL is given it will be polled periodically for changes. +# +# Mutually exclusive with ``jwt_secret``. +# +# If a ``jwt_private_key_path`` is given but this settings is not set then the private key will be +# trusted. If this is provided it is your responsibility to ensure that the private key used for +# generation is in this list. +# +# Example: trusted_jwks_url = "/path/to/public-jwks.json" or "https://my-issuer/.well-known/jwks.json" +# +# Variable: AIRFLOW__API_AUTH__TRUSTED_JWKS_URL +# +# trusted_jwks_url = + +# Issuer of the JWT. This becomes the ``iss`` claim of generated tokens, and is validated on incoming +# requests. +# +# Ideally this should be unique per individual airflow deployment +# +# Not required, but strongly recommended to be set. +# +# See also :ref:`config:api_auth__jwt_audience` +# +# Example: jwt_issuer = http://my-airflow.mycompany.com +# +# Variable: AIRFLOW__API_AUTH__JWT_ISSUER +# +# jwt_issuer = + +# Number of seconds leeway in validating expiry time of JWTs to account for clock skew between +# client and server +# +# Variable: AIRFLOW__API_AUTH__JWT_LEEWAY +# +jwt_leeway = 10 + +[execution_api] +# Settings related to the Execution API server. +# +# The ExecutionAPI also uses a lot of settings from the :ref:`config:api_auth` section. + +# Number in seconds until the JWT used for authentication expires. When the token expires, +# all API calls using this token will fail on authentication. +# +# Make sure that time on ALL the machines that you run airflow components on is synchronized +# (for example using ntpd) otherwise you might get "forbidden" errors. +# +# Variable: AIRFLOW__EXECUTION_API__JWT_EXPIRATION_TIME +# +jwt_expiration_time = 600 + +# The audience claim to use when generating and validating JWTs for the Execution API. 
+# +# This variable can be a single value, or a comma-separated string, in which case the first value is the +# one that will be used when generating, and the others are accepted at validation time. +# +# Not required, but strongly encouraged +# +# See also :ref:`config:api_auth__jwt_audience` +# +# Variable: AIRFLOW__EXECUTION_API__JWT_AUDIENCE +# +jwt_audience = urn:airflow.apache.org:task + +[lineage] +# what lineage backend to use +# +# Variable: AIRFLOW__LINEAGE__BACKEND +# +backend = + +[operators] +# The default owner assigned to each new operator, unless +# provided explicitly or passed via ``default_args`` +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_OWNER +# +default_owner = airflow + +# The default value of attribute "deferrable" in operators and sensors. +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_DEFERRABLE +# +default_deferrable = false + +# Indicates the default number of CPU units allocated to each operator when no specific CPU request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_CPUS +# +default_cpus = 1 + +# Indicates the default number of RAM allocated to each operator when no specific RAM request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_RAM +# +default_ram = 512 + +# Indicates the default number of disk storage allocated to each operator when no specific disk request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_DISK +# +default_disk = 512 + +# Indicates the default number of GPUs allocated to each operator when no specific GPUs request +# is specified in the operator's configuration +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_GPUS +# +default_gpus = 0 + +# Default queue that tasks get assigned to and that worker listen on. +# +# Variable: AIRFLOW__OPERATORS__DEFAULT_QUEUE +# +default_queue = default + +[email] +# Configuration email backend and whether to +# send email alerts on retry or failure + +# Email backend to use +# +# Variable: AIRFLOW__EMAIL__EMAIL_BACKEND +# +email_backend = airflow.utils.email.send_email_smtp + +# Email connection to use +# +# Variable: AIRFLOW__EMAIL__EMAIL_CONN_ID +# +email_conn_id = smtp_default + +# Whether email alerts should be sent when a task is retried +# +# Variable: AIRFLOW__EMAIL__DEFAULT_EMAIL_ON_RETRY +# +default_email_on_retry = True + +# Whether email alerts should be sent when a task failed +# +# Variable: AIRFLOW__EMAIL__DEFAULT_EMAIL_ON_FAILURE +# +default_email_on_failure = True + +# File that will be used as the template for Email subject (which will be rendered using Jinja2). +# If not set, Airflow uses a base template. +# +# Example: subject_template = /path/to/my_subject_template_file +# +# Variable: AIRFLOW__EMAIL__SUBJECT_TEMPLATE +# +# subject_template = + +# File that will be used as the template for Email content (which will be rendered using Jinja2). +# If not set, Airflow uses a base template. +# +# Example: html_content_template = /path/to/my_html_content_template_file +# +# Variable: AIRFLOW__EMAIL__HTML_CONTENT_TEMPLATE +# +# html_content_template = + +# Email address that will be used as sender address. +# It can either be raw email or the complete address in a format ``Sender Name `` +# +# Example: from_email = Airflow +# +# Variable: AIRFLOW__EMAIL__FROM_EMAIL +# +# from_email = + +# ssl context to use when using SMTP and IMAP SSL connections. 
By default, the context is "default" +# which sets it to ``ssl.create_default_context()`` which provides the right balance between +# compatibility and security, it however requires that certificates in your operating system are +# updated and that SMTP/IMAP servers of yours have valid certificates that have corresponding public +# keys installed on your machines. You can switch it to "none" if you want to disable checking +# of the certificates, but it is not recommended as it allows MITM (man-in-the-middle) attacks +# if your infrastructure is not sufficiently secured. It should only be set temporarily while you +# are fixing your certificate configuration. This can be typically done by upgrading to newer +# version of the operating system you run Airflow components on,by upgrading/refreshing proper +# certificates in the OS or by updating certificates for your mail servers. +# +# Example: ssl_context = default +# +# Variable: AIRFLOW__EMAIL__SSL_CONTEXT +# +ssl_context = default + +[smtp] +# If you want airflow to send emails on retries, failure, and you want to use +# the airflow.utils.email.send_email_smtp function, you have to configure an +# smtp server here + +# Specifies the host server address used by Airflow when sending out email notifications via SMTP. +# +# Variable: AIRFLOW__SMTP__SMTP_HOST +# +smtp_host = localhost + +# Determines whether to use the STARTTLS command when connecting to the SMTP server. +# +# Variable: AIRFLOW__SMTP__SMTP_STARTTLS +# +smtp_starttls = True + +# Determines whether to use an SSL connection when talking to the SMTP server. +# +# Variable: AIRFLOW__SMTP__SMTP_SSL +# +smtp_ssl = False + +# Defines the port number on which Airflow connects to the SMTP server to send email notifications. +# +# Variable: AIRFLOW__SMTP__SMTP_PORT +# +smtp_port = 25 + +# Specifies the default **from** email address used when Airflow sends email notifications. +# +# Variable: AIRFLOW__SMTP__SMTP_MAIL_FROM +# +smtp_mail_from = airflow@example.com + +# Determines the maximum time (in seconds) the Apache Airflow system will wait for a +# connection to the SMTP server to be established. +# +# Variable: AIRFLOW__SMTP__SMTP_TIMEOUT +# +smtp_timeout = 30 + +# Defines the maximum number of times Airflow will attempt to connect to the SMTP server. +# +# Variable: AIRFLOW__SMTP__SMTP_RETRY_LIMIT +# +smtp_retry_limit = 5 + +[sentry] +# `Sentry `__ integration. Here you can supply +# additional configuration options based on the Python platform. +# See `Python / Configuration / Basic Options +# `__ for more details. +# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, +# ``ignore_errors``, ``before_breadcrumb``, ``transport``. + +# Enable error reporting to Sentry +# +# Variable: AIRFLOW__SENTRY__SENTRY_ON +# +sentry_on = false + +# +# Variable: AIRFLOW__SENTRY__SENTRY_DSN +# +sentry_dsn = + +# Dotted path to a before_send function that the sentry SDK should be configured to use. +# +# Variable: AIRFLOW__SENTRY__BEFORE_SEND +# +# before_send = + +[scheduler] +# Task instances listen for external kill signal (when you clear tasks +# from the CLI or the UI), this defines the frequency at which they should +# listen (in seconds). +# +# Variable: AIRFLOW__SCHEDULER__JOB_HEARTBEAT_SEC +# +job_heartbeat_sec = 5 + +# The scheduler constantly tries to trigger new tasks (look at the +# scheduler section in the docs for more information). This defines +# how often the scheduler should run (in seconds). 
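+#
+# Like most options in this file, this can also be overridden from the environment (for instance via
+# the ``.env`` file consumed by the docker-compose setup in this repository), using the variable name
+# listed below. A hypothetical override:
+#
+# Example: AIRFLOW__SCHEDULER__SCHEDULER_HEARTBEAT_SEC=10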
+# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEARTBEAT_SEC +# +scheduler_heartbeat_sec = 5 + +# The frequency (in seconds) at which the LocalTaskJob should send heartbeat signals to the +# scheduler to notify it's still alive. If this value is set to 0, the heartbeat interval will default +# to the value of ``[scheduler] task_instance_heartbeat_timeout``. +# +# Variable: AIRFLOW__SCHEDULER__TASK_INSTANCE_HEARTBEAT_SEC +# +task_instance_heartbeat_sec = 0 + +# The number of times to try to schedule each DAG file +# -1 indicates unlimited number +# +# Variable: AIRFLOW__SCHEDULER__NUM_RUNS +# +num_runs = -1 + +# Controls how long the scheduler will sleep between loops, but if there was nothing to do +# in the loop. i.e. if it scheduled something then it will start the next loop +# iteration straight away. +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_IDLE_SLEEP_TIME +# +scheduler_idle_sleep_time = 1 + +# How often (in seconds) to check for stale DAGs (DAGs which are no longer present in +# the expected files) which should be deactivated, as well as assets that are no longer +# referenced and should be marked as orphaned. +# +# Variable: AIRFLOW__SCHEDULER__PARSING_CLEANUP_INTERVAL +# +parsing_cleanup_interval = 60 + +# How often (in seconds) should pool usage stats be sent to StatsD (if statsd_on is enabled) +# +# Variable: AIRFLOW__SCHEDULER__POOL_METRICS_INTERVAL +# +pool_metrics_interval = 5.0 + +# How often (in seconds) should running task instance stats be sent to StatsD (if statsd_on is enabled) +# +# Variable: AIRFLOW__SCHEDULER__RUNNING_METRICS_INTERVAL +# +running_metrics_interval = 30.0 + +# If the last scheduler heartbeat happened more than ``[scheduler] scheduler_health_check_threshold`` +# ago (in seconds), scheduler is considered unhealthy. +# This is used by the health check in the **/health** endpoint and in ``airflow jobs check`` CLI +# for SchedulerJob. +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_THRESHOLD +# +scheduler_health_check_threshold = 30 + +# When you start a scheduler, airflow starts a tiny web server +# subprocess to serve a health check if this is set to ``True`` +# +# Variable: AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK +# +enable_health_check = False + +# When you start a scheduler, airflow starts a tiny web server +# subprocess to serve a health check on this host +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_SERVER_HOST +# +scheduler_health_check_server_host = 0.0.0.0 + +# When you start a scheduler, airflow starts a tiny web server +# subprocess to serve a health check on this port +# +# Variable: AIRFLOW__SCHEDULER__SCHEDULER_HEALTH_CHECK_SERVER_PORT +# +scheduler_health_check_server_port = 8974 + +# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs +# +# Variable: AIRFLOW__SCHEDULER__ORPHANED_TASKS_CHECK_INTERVAL +# +orphaned_tasks_check_interval = 300.0 + +# Local task jobs periodically heartbeat to the DB. If the job has +# not heartbeat in this many seconds, the scheduler will mark the +# associated task instance as failed and will re-schedule the task. +# +# Variable: AIRFLOW__SCHEDULER__TASK_INSTANCE_HEARTBEAT_TIMEOUT +# +task_instance_heartbeat_timeout = 300 + +# How often (in seconds) should the scheduler check for task instances whose heartbeats have timed out. +# +# Variable: AIRFLOW__SCHEDULER__TASK_INSTANCE_HEARTBEAT_TIMEOUT_DETECTION_INTERVAL +# +task_instance_heartbeat_timeout_detection_interval = 10.0 + +# Turn on scheduler catchup by setting this to ``True``. 
+# Default behavior is unchanged and +# Command Line Backfills still work, but the scheduler +# will not do scheduler catchup if this is ``False``, +# however it can be set on a per DAG basis in the +# DAG definition (catchup) +# +# Variable: AIRFLOW__SCHEDULER__CATCHUP_BY_DEFAULT +# +catchup_by_default = False + +# Setting this to ``True`` will make first task instance of a task +# ignore depends_on_past setting. A task instance will be considered +# as the first task instance of a task when there is no task instance +# in the DB with a logical_date earlier than it., i.e. no manual marking +# success will be needed for a newly added task to be scheduled. +# +# Variable: AIRFLOW__SCHEDULER__IGNORE_FIRST_DEPENDS_ON_PAST_BY_DEFAULT +# +ignore_first_depends_on_past_by_default = True + +# This determines the number of task instances to be evaluated for scheduling +# during each scheduler loop. +# Set this to 0 to use the value of ``[core] parallelism`` +# +# Variable: AIRFLOW__SCHEDULER__MAX_TIS_PER_QUERY +# +max_tis_per_query = 16 + +# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries. +# If this is set to ``False`` then you should not run more than a single +# scheduler at once +# +# Variable: AIRFLOW__SCHEDULER__USE_ROW_LEVEL_LOCKING +# +use_row_level_locking = True + +# Max number of DAGs to create DagRuns for per scheduler loop. +# +# Variable: AIRFLOW__SCHEDULER__MAX_DAGRUNS_TO_CREATE_PER_LOOP +# +max_dagruns_to_create_per_loop = 10 + +# How many DagRuns should a scheduler examine (and lock) when scheduling +# and queuing tasks. +# +# Variable: AIRFLOW__SCHEDULER__MAX_DAGRUNS_PER_LOOP_TO_SCHEDULE +# +max_dagruns_per_loop_to_schedule = 20 + +# Turn off scheduler use of cron intervals by setting this to ``False``. +# DAGs submitted manually in the web UI or with trigger_dag will still run. +# +# Variable: AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE +# +use_job_schedule = True + +# How often to check for expired trigger requests that have not run yet. +# +# Variable: AIRFLOW__SCHEDULER__TRIGGER_TIMEOUT_CHECK_INTERVAL +# +trigger_timeout_check_interval = 15 + +# Amount of time a task can be in the queued state before being retried or set to failed. +# +# Variable: AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT +# +task_queued_timeout = 600.0 + +# How often to check for tasks that have been in the queued state for +# longer than ``[scheduler] task_queued_timeout``. +# +# Variable: AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT_CHECK_INTERVAL +# +task_queued_timeout_check_interval = 120.0 + +# The run_id pattern used to verify the validity of user input to the run_id parameter when +# triggering a DAG. This pattern cannot change the pattern used by scheduler to generate run_id +# for scheduled DAG runs or DAG runs triggered without changing the run_id parameter. +# +# Variable: AIRFLOW__SCHEDULER__ALLOWED_RUN_ID_PATTERN +# +allowed_run_id_pattern = ^[A-Za-z0-9_.~:+-]+$ + +# Whether to create DAG runs that span an interval or one single point in time for cron schedules, when +# a cron string is provided to ``schedule`` argument of a DAG. +# +# * ``True``: **CronDataIntervalTimetable** is used, which is suitable +# for DAGs with well-defined data interval. You get contiguous intervals from the end of the previous +# interval up to the scheduled datetime. +# * ``False``: **CronTriggerTimetable** is used, which is closer to the behavior of cron itself. 
+# +# Notably, for **CronTriggerTimetable**, the logical date is the same as the time the DAG Run will +# try to schedule, while for **CronDataIntervalTimetable**, the logical date is the beginning of +# the data interval, but the DAG Run will try to schedule at the end of the data interval. +# +# Variable: AIRFLOW__SCHEDULER__CREATE_CRON_DATA_INTERVALS +# +create_cron_data_intervals = False + +# Whether to create DAG runs that span an interval or one single point in time when a timedelta or +# relativedelta is provided to ``schedule`` argument of a DAG. +# +# * ``True``: **DeltaDataIntervalTimetable** is used, which is suitable for DAGs with well-defined data +# interval. You get contiguous intervals from the end of the previous interval up to the scheduled +# datetime. +# * ``False``: **DeltaTriggerTimetable** is used, which is suitable for DAGs that simply want to say +# e.g. "run this every day" and do not care about the data interval. +# +# Notably, for **DeltaTriggerTimetable**, the logical date is the same as the time the DAG Run will +# try to schedule, while for **DeltaDataIntervalTimetable**, the logical date is the beginning of +# the data interval, but the DAG Run will try to schedule at the end of the data interval. +# +# Variable: AIRFLOW__SCHEDULER__CREATE_DELTA_DATA_INTERVALS +# +create_delta_data_intervals = False + +# Whether to enable memory allocation tracing in the scheduler. If enabled, Airflow will start +# tracing memory allocation and log the top 10 memory usages at the error level upon receiving the +# signal SIGUSR1. +# This is an expensive operation and generally should not be used except for debugging purposes. +# +# Variable: AIRFLOW__SCHEDULER__ENABLE_TRACEMALLOC +# +enable_tracemalloc = False + +[triggerer] +# How many triggers a single Triggerer will run at once, by default. +# +# Variable: AIRFLOW__TRIGGERER__CAPACITY +# +capacity = 1000 + +# How often to heartbeat the Triggerer job to ensure it hasn't been killed. +# +# Variable: AIRFLOW__TRIGGERER__JOB_HEARTBEAT_SEC +# +job_heartbeat_sec = 5 + +# If the last triggerer heartbeat happened more than ``[triggerer] triggerer_health_check_threshold`` +# ago (in seconds), triggerer is considered unhealthy. +# This is used by the health check in the **/health** endpoint and in ``airflow jobs check`` CLI +# for TriggererJob. +# +# Variable: AIRFLOW__TRIGGERER__TRIGGERER_HEALTH_CHECK_THRESHOLD +# +triggerer_health_check_threshold = 30 + +[kerberos] +# Location of your ccache file once kinit has been performed. +# +# Variable: AIRFLOW__KERBEROS__CCACHE +# +ccache = /tmp/airflow_krb5_ccache + +# gets augmented with fqdn +# +# Variable: AIRFLOW__KERBEROS__PRINCIPAL +# +principal = airflow + +# Determines the frequency at which initialization or re-initialization processes occur. +# +# Variable: AIRFLOW__KERBEROS__REINIT_FREQUENCY +# +reinit_frequency = 3600 + +# Path to the kinit executable +# +# Variable: AIRFLOW__KERBEROS__KINIT_PATH +# +kinit_path = kinit + +# Designates the path to the Kerberos keytab file for the Airflow user +# +# Variable: AIRFLOW__KERBEROS__KEYTAB +# +keytab = airflow.keytab + +# Allow to disable ticket forwardability. +# +# Variable: AIRFLOW__KERBEROS__FORWARDABLE +# +forwardable = True + +# Allow to remove source IP from token, useful when using token behind NATted Docker host. +# +# Variable: AIRFLOW__KERBEROS__INCLUDE_IP +# +include_ip = True + +[sensors] +# Sensor default timeout, 7 days by default (7 * 24 * 60 * 60). 
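+#
+# For instance, capping every sensor at one day instead (24 * 60 * 60), unless a sensor sets its
+# own ``timeout`` argument, would look like:
+#
+# Example: default_timeout = 86400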
+# +# Variable: AIRFLOW__SENSORS__DEFAULT_TIMEOUT +# +default_timeout = 604800 + +[dag_processor] +# Configuration for the Airflow DAG processor. This includes, for example: +# - DAG bundles, which allows Airflow to load DAGs from different sources +# - Parsing configuration, like: +# - how often to refresh DAGs from those sources +# - how many files to parse concurrently + +# String path to folder where Airflow bundles can store files locally. Not templated. +# If no path is provided, Airflow will use ``Path(tempfile.gettempdir()) / "airflow"``. +# This path must be absolute. +# +# Example: dag_bundle_storage_path = /tmp/some-place +# +# Variable: AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_STORAGE_PATH +# +# dag_bundle_storage_path = + +# List of backend configs. Must supply name, classpath, and kwargs for each backend. +# +# By default, ``refresh_interval`` is set to ``[dag_processor] refresh_interval``, but that can +# also be overridden in kwargs if desired. +# +# The default is the dags folder dag bundle. +# +# Note: As shown below, you can split your json config over multiple lines by indenting. +# See configparser documentation for an example: +# https://docs.python.org/3/library/configparser.html#supported-ini-file-structure. +# +# Example: dag_bundle_config_list = [ +# { +# "name": "my-git-repo", +# "classpath": "airflow.providers.git.bundles.git.GitDagBundle", +# "kwargs": { +# "subdir": "dags", +# "tracking_ref": "main", +# "refresh_interval": 0 +# } +# } +# ] +# +# Variable: AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST +# +dag_bundle_config_list = [ + { + "name": "dags-folder", + "classpath": "airflow.dag_processing.bundles.local.LocalDagBundle", + "kwargs": {} + } + ] + + +# How often (in seconds) to refresh, or look for new files, in a DAG bundle. +# +# Variable: AIRFLOW__DAG_PROCESSOR__REFRESH_INTERVAL +# +refresh_interval = 300 + +# The DAG processor can run multiple processes in parallel to parse dags. +# This defines how many processes will run. +# +# Variable: AIRFLOW__DAG_PROCESSOR__PARSING_PROCESSES +# +parsing_processes = 2 + +# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``. +# The DAG processor will list and sort the dag files to decide the parsing order. +# +# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the +# recently modified DAGs first. +# * ``random_seeded_by_host``: Sort randomly across multiple DAG processors but with same order on the +# same host, allowing each processor to parse the files in a different order. +# * ``alphabetical``: Sort by filename +# +# Variable: AIRFLOW__DAG_PROCESSOR__FILE_PARSING_SORT_MODE +# +file_parsing_sort_mode = modified_time + +# The maximum number of callbacks that are fetched during a single loop. +# +# Variable: AIRFLOW__DAG_PROCESSOR__MAX_CALLBACKS_PER_LOOP +# +max_callbacks_per_loop = 20 + +# Number of seconds after which a DAG file is parsed. The DAG file is parsed every +# ``[dag_processor] min_file_process_interval`` number of seconds. Updates to DAGs are reflected after +# this interval. Keeping this number low will increase CPU usage. +# +# Variable: AIRFLOW__DAG_PROCESSOR__MIN_FILE_PROCESS_INTERVAL +# +min_file_process_interval = 30 + +# How long (in seconds) to wait after we have re-parsed a DAG file before deactivating stale +# DAGs (DAGs which are no longer present in the expected files). The reason why we need +# this threshold is to account for the time between when the file is parsed and when the +# DAG is loaded. 
The absolute maximum that this could take is +# ``[dag_processor] dag_file_processor_timeout``, but when you have a long timeout configured, +# it results in a significant delay in the deactivation of stale dags. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_DAG_THRESHOLD +# +stale_dag_threshold = 50 + +# How long before timing out a DagFileProcessor, which processes a dag file +# +# Variable: AIRFLOW__DAG_PROCESSOR__DAG_FILE_PROCESSOR_TIMEOUT +# +dag_file_processor_timeout = 50 + +# How often should DAG processor stats be printed to the logs. Setting to 0 will disable printing stats +# +# Variable: AIRFLOW__DAG_PROCESSOR__PRINT_STATS_INTERVAL +# +print_stats_interval = 30 + +# Always run tasks with the latest code. If set to True, the bundle version will not +# be stored on the dag run and therefore, the latest code will always be used. +# +# Variable: AIRFLOW__DAG_PROCESSOR__DISABLE_BUNDLE_VERSIONING +# +disable_bundle_versioning = False + +# How often the DAG processor should check if any DAG bundles are ready for a refresh, either by hitting +# the bundles refresh_interval or because another DAG processor has seen a newer version of the bundle. +# A low value means we check more frequently, and have a smaller window of time where DAG processors are +# out of sync with each other, parsing different versions of the same bundle. +# +# Variable: AIRFLOW__DAG_PROCESSOR__BUNDLE_REFRESH_CHECK_INTERVAL +# +bundle_refresh_check_interval = 5 + +# On shared workers, bundle copies accumulate in local storage as tasks run +# and version of the bundle changes. +# This setting represents the delta in seconds between checks for these stale bundles. +# Bundles which are older than `stale_bundle_cleanup_age_threshold` may be removed. But +# we always keep `stale_bundle_cleanup_min_versions` versions locally. +# Set to 0 or negative to disable. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_BUNDLE_CLEANUP_INTERVAL +# +stale_bundle_cleanup_interval = 1800 + +# Bundle versions used more recently than this threshold will not be removed. +# Recency of use is determined by when the task began running on the worker, +# that age is compared with this setting, given as time delta in seconds. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_BUNDLE_CLEANUP_AGE_THRESHOLD +# +stale_bundle_cleanup_age_threshold = 21600 + +# Minimum number of local bundle versions to retain on disk. +# Local bundle versions older than `stale_bundle_cleanup_age_threshold` will +# only be deleted we have more than `stale_bundle_cleanup_min_versions` versions +# accumulated on the worker. +# +# Variable: AIRFLOW__DAG_PROCESSOR__STALE_BUNDLE_CLEANUP_MIN_VERSIONS +# +stale_bundle_cleanup_min_versions = 10 + +# The dag_processor reads dag files to extract the airflow modules that are going to be used, +# and imports them ahead of time to avoid having to re-do it for each parsing process. +# This flag can be set to ``False`` to disable this behavior in case an airflow module needs +# to be freshly imported each time (at the cost of increased DAG parsing time). +# +# Variable: AIRFLOW__DAG_PROCESSOR__PARSING_PRE_IMPORT_MODULES +# +parsing_pre_import_modules = True + +[aws] +# This section contains settings for Amazon Web Services (AWS) integration. + +# Full import path to the class which implements a custom session factory for +# ``boto3.session.Session``. For more details please have a look at +# :ref:`howto/connection:aws:session-factory`. 
+# +# Example: session_factory = my_company.aws.MyCustomSessionFactory +# +# Variable: AIRFLOW__AWS__SESSION_FACTORY +# +# session_factory = + +cloudwatch_task_handler_json_serializer = airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize_legacy + +[aws_batch_executor] +# This section only applies if you are using the AwsBatchExecutor in +# Airflow's ``[core]`` configuration. +# For more information on any of these execution parameters, see the link below: +# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.submit_job +# For boto3 credential management, see +# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html + +conn_id = aws_default +# region_name = +max_submit_job_attempts = 3 +check_health_on_startup = True +# job_name = +# job_queue = +# job_definition = +# submit_job_kwargs = + +[aws_lambda_executor] +# This section only applies if you are using the AwsLambdaExecutor in +# Airflow's ``[core.executor]`` configuration. +# For more information see: +# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda/client/invoke.html +# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html +# https://airflow.apache.org/docs/apache-airflow-providers-amazon/stable/executors/lambda-executor.html + +conn_id = aws_default +# region_name = +check_health_on_startup = True +max_run_task_attempts = 3 +# queue_url = +# dead_letter_queue_url = +# function_name = +# qualifier = +end_wait_timeout = 0 + +[aws_ecs_executor] +# This section only applies if you are using the AwsEcsExecutor in +# Airflow's ``[core]`` configuration. +# For more information on any of these execution parameters, see the link below: +# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs/client/run_task.html +# For boto3 credential management, see +# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html + +conn_id = aws_default +# region_name = +assign_public_ip = False +# cluster = +# capacity_provider_strategy = +# container_name = +# launch_type = +platform_version = LATEST +# security_groups = +# subnets = +# task_definition = +max_run_task_attempts = 3 +# run_task_kwargs = +check_health_on_startup = True + +[aws_auth_manager] +# This section only applies if you are using the AwsAuthManager. In other words, if you set +# ``[core] auth_manager = airflow.providers.amazon.aws.auth_manager.aws_auth_manager.AwsAuthManager`` in +# Airflow's configuration. + +conn_id = aws_default +# region_name = +# saml_metadata_url = +# avp_policy_store_id = + +[celery_kubernetes_executor] +# This section only applies if you are using the ``CeleryKubernetesExecutor`` in +# ``[core]`` section above + +# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. +# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), +# the task is executed via ``KubernetesExecutor``, +# otherwise via ``CeleryExecutor`` +# +# Variable: AIRFLOW__CELERY_KUBERNETES_EXECUTOR__KUBERNETES_QUEUE +# +kubernetes_queue = kubernetes + +[celery] +# This section only applies if you are using the CeleryExecutor in +# ``[core]`` section above + +# The app name that will be used by celery +# +# Variable: AIRFLOW__CELERY__CELERY_APP_NAME +# +celery_app_name = airflow.providers.celery.executors.celery_executor + +# The concurrency that will be used when starting workers with the +# ``airflow celery worker`` command. 
This defines the number of task instances that +# a worker will take, so size up your workers based on the resources on +# your worker box and the nature of your tasks +# +# Variable: AIRFLOW__CELERY__WORKER_CONCURRENCY +# +worker_concurrency = 16 + +# The maximum and minimum number of pool processes that will be used to dynamically resize +# the pool based on load.Enable autoscaling by providing max_concurrency,min_concurrency +# with the ``airflow celery worker`` command (always keep minimum processes, +# but grow to maximum if necessary). +# Pick these numbers based on resources on worker box and the nature of the task. +# If autoscale option is available, worker_concurrency will be ignored. +# https://docs.celeryq.dev/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale +# +# Example: worker_autoscale = 16,12 +# +# Variable: AIRFLOW__CELERY__WORKER_AUTOSCALE +# +# worker_autoscale = + +# Used to increase the number of tasks that a worker prefetches which can improve performance. +# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks +# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily +# blocked if there are multiple workers and one worker prefetches tasks that sit behind long +# running tasks while another worker has unutilized processes that are unable to process the already +# claimed blocked tasks. +# https://docs.celeryq.dev/en/stable/userguide/optimizing.html#prefetch-limits +# +# Variable: AIRFLOW__CELERY__WORKER_PREFETCH_MULTIPLIER +# +worker_prefetch_multiplier = 1 + +# Specify if remote control of the workers is enabled. +# In some cases when the broker does not support remote control, Celery creates lots of +# ``.*reply-celery-pidbox`` queues. You can prevent this by setting this to false. +# However, with this disabled Flower won't work. +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/index.html#broker-overview +# +# Variable: AIRFLOW__CELERY__WORKER_ENABLE_REMOTE_CONTROL +# +worker_enable_remote_control = true + +# The Celery broker URL. Celery supports multiple broker types. See: +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/index.html#broker-overview +# +# Variable: AIRFLOW__CELERY__BROKER_URL +# +broker_url = redis://redis:6379/0 + +# The Celery result_backend. When a job finishes, it needs to update the +# metadata of the job. Therefore it will post a message on a message bus, +# or insert it into a database (depending of the backend) +# This status is used by the scheduler to update the state of the task +# The use of a database is highly recommended +# When not specified, sql_alchemy_conn with a db+ scheme prefix will be used +# https://docs.celeryq.dev/en/latest/userguide/configuration.html#task-result-backend-settings +# +# Example: result_backend = db+postgresql://postgres:airflow@postgres/airflow +# +# Variable: AIRFLOW__CELERY__RESULT_BACKEND +# +# result_backend = + +# Optional configuration dictionary to pass to the Celery result backend SQLAlchemy engine. +# +# Example: result_backend_sqlalchemy_engine_options = {"pool_recycle": 1800} +# +# Variable: AIRFLOW__CELERY__RESULT_BACKEND_SQLALCHEMY_ENGINE_OPTIONS +# +result_backend_sqlalchemy_engine_options = + +# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start +# it ``airflow celery flower``. 
This defines the IP that Celery Flower runs on +# +# Variable: AIRFLOW__CELERY__FLOWER_HOST +# +flower_host = 0.0.0.0 + +# The root URL for Flower +# +# Example: flower_url_prefix = /flower +# +# Variable: AIRFLOW__CELERY__FLOWER_URL_PREFIX +# +flower_url_prefix = + +# This defines the port that Celery Flower runs on +# +# Variable: AIRFLOW__CELERY__FLOWER_PORT +# +flower_port = 5555 + +# Securing Flower with Basic Authentication +# Accepts user:password pairs separated by a comma +# +# Example: flower_basic_auth = user1:password1,user2:password2 +# +# Variable: AIRFLOW__CELERY__FLOWER_BASIC_AUTH +# +flower_basic_auth = + +# How many processes CeleryExecutor uses to sync task state. +# 0 means to use max(1, number of cores - 1) processes. +# +# Variable: AIRFLOW__CELERY__SYNC_PARALLELISM +# +sync_parallelism = 0 + +# Import path for celery configuration options +# +# Variable: AIRFLOW__CELERY__CELERY_CONFIG_OPTIONS +# +celery_config_options = airflow.providers.celery.executors.default_celery.DEFAULT_CELERY_CONFIG + +# +# Variable: AIRFLOW__CELERY__SSL_ACTIVE +# +ssl_active = False + +# Path to the client key. +# +# Variable: AIRFLOW__CELERY__SSL_KEY +# +ssl_key = + +# Path to the client certificate. +# +# Variable: AIRFLOW__CELERY__SSL_CERT +# +ssl_cert = + +# Path to the CA certificate. +# +# Variable: AIRFLOW__CELERY__SSL_CACERT +# +ssl_cacert = + +# Celery Pool implementation. +# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``. +# See: +# https://docs.celeryq.dev/en/latest/userguide/workers.html#concurrency +# https://docs.celeryq.dev/en/latest/userguide/concurrency/eventlet.html +# +# Variable: AIRFLOW__CELERY__POOL +# +pool = prefork + +# The number of seconds to wait before timing out ``send_task_to_executor`` or +# ``fetch_celery_task_state`` operations. +# +# Variable: AIRFLOW__CELERY__OPERATION_TIMEOUT +# +operation_timeout = 1.0 + +task_acks_late = True +# Celery task will report its status as 'started' when the task is executed by a worker. +# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted +# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob. +# +# Variable: AIRFLOW__CELERY__TASK_TRACK_STARTED +# +task_track_started = True + +# The Maximum number of retries for publishing task messages to the broker when failing +# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed. +# +# Variable: AIRFLOW__CELERY__TASK_PUBLISH_MAX_RETRIES +# +task_publish_max_retries = 3 + +# Extra celery configs to include in the celery worker. +# Any of the celery config can be added to this config and it +# will be applied while starting the celery worker. e.g. {"worker_max_tasks_per_child": 10} +# See also: +# https://docs.celeryq.dev/en/stable/userguide/configuration.html#configuration-and-defaults +# +# Variable: AIRFLOW__CELERY__EXTRA_CELERY_CONFIG +# +extra_celery_config = {} + +# The default umask to use for celery worker when run in daemon mode +# +# This controls the file-creation mode mask which determines the initial value of file permission bits +# for newly created files. +# +# This value is treated as an octal-integer. +# +# Variable: AIRFLOW__CELERY__WORKER_UMASK +# +# worker_umask = + +[celery_broker_transport_options] +# This section is for specifying options which can be passed to the +# underlying celery broker transport. 
See: +# https://docs.celeryq.dev/en/latest/userguide/configuration.html#std:setting-broker_transport_options + +# The visibility timeout defines the number of seconds to wait for the worker +# to acknowledge the task before the message is redelivered to another worker. +# Make sure to increase the visibility timeout to match the time of the longest +# ETA you're planning to use. +# visibility_timeout is only supported for Redis and SQS celery brokers. +# See: +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#visibility-timeout +# +# Example: visibility_timeout = 21600 +# +# Variable: AIRFLOW__CELERY_BROKER_TRANSPORT_OPTIONS__VISIBILITY_TIMEOUT +# +# visibility_timeout = + +# The sentinel_kwargs parameter allows passing additional options to the Sentinel client. +# In a typical scenario where Redis Sentinel is used as the broker and Redis servers are +# password-protected, the password needs to be passed through this parameter. Although its +# type is string, it is required to pass a string that conforms to the dictionary format. +# See: +# https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#configuration +# +# Example: sentinel_kwargs = {"password": "password_for_redis_server"} +# +# Variable: AIRFLOW__CELERY_BROKER_TRANSPORT_OPTIONS__SENTINEL_KWARGS +# +# sentinel_kwargs = + +[local_kubernetes_executor] +# This section only applies if you are using the ``LocalKubernetesExecutor`` in +# ``[core]`` section above + +# Define when to send a task to ``KubernetesExecutor`` when using ``LocalKubernetesExecutor``. +# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), +# the task is executed via ``KubernetesExecutor``, +# otherwise via ``LocalExecutor`` +# +# Variable: AIRFLOW__LOCAL_KUBERNETES_EXECUTOR__KUBERNETES_QUEUE +# +kubernetes_queue = kubernetes + +[kubernetes_executor] +# Kwargs to override the default urllib3 Retry used in the kubernetes API client +# +# Example: api_client_retry_configuration = { "total": 3, "backoff_factor": 0.5 } +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__API_CLIENT_RETRY_CONFIGURATION +# +api_client_retry_configuration = + +# Flag to control the information added to kubernetes executor logs for better traceability +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__LOGS_TASK_METADATA +# +logs_task_metadata = False + +# Path to the YAML pod file that forms the basis for KubernetesExecutor workers. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__POD_TEMPLATE_FILE +# +pod_template_file = + +# The repository of the Kubernetes Image for the Worker to Run +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_CONTAINER_REPOSITORY +# +worker_container_repository = + +# The tag of the Kubernetes Image for the Worker to Run +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_CONTAINER_TAG +# +worker_container_tag = + +# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__NAMESPACE +# +namespace = default + +# If True, all worker pods will be deleted upon termination +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_WORKER_PODS +# +delete_worker_pods = True + +# If False (and delete_worker_pods is True), +# failed worker pods will not be deleted so users can investigate them. +# This only prevents removal of worker pods where the worker itself failed, +# not when the task it ran failed. 
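+#
+# If there is no need to keep pods of failed workers around for inspection, this can be flipped:
+#
+# Example: delete_worker_pods_on_failure = True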
+# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_WORKER_PODS_ON_FAILURE +# +delete_worker_pods_on_failure = False + +worker_pod_pending_fatal_container_state_reasons = CreateContainerConfigError,ErrImagePull,CreateContainerError,ImageInspectError,InvalidImageName +# Number of Kubernetes Worker Pod creation calls per scheduler loop. +# Note that the current default of "1" will only launch a single pod +# per-heartbeat. It is HIGHLY recommended that users increase this +# number to match the tolerance of their kubernetes cluster for +# better performance. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__WORKER_PODS_CREATION_BATCH_SIZE +# +worker_pods_creation_batch_size = 1 + +# Allows users to launch pods in multiple namespaces. +# Will require creating a cluster-role for the scheduler, +# or use multi_namespace_mode_namespace_list configuration. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__MULTI_NAMESPACE_MODE +# +multi_namespace_mode = False + +# If multi_namespace_mode is True while scheduler does not have a cluster-role, +# give the list of namespaces where the scheduler will schedule jobs +# Scheduler needs to have the necessary permissions in these namespaces. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__MULTI_NAMESPACE_MODE_NAMESPACE_LIST +# +multi_namespace_mode_namespace_list = + +# Use the service account kubernetes gives to pods to connect to kubernetes cluster. +# It's intended for clients that expect to be running inside a pod running on kubernetes. +# It will raise an exception if called from a process not running in a kubernetes environment. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__IN_CLUSTER +# +in_cluster = True + +# When running with in_cluster=False change the default cluster_context or config_file +# options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__CLUSTER_CONTEXT +# +# cluster_context = + +# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__CONFIG_FILE +# +# config_file = + +# Keyword parameters to pass while calling a kubernetes client core_v1_api methods +# from Kubernetes Executor provided as a single line formatted JSON dictionary string. +# List of supported params are similar for all core_v1_apis, hence a single config +# variable for all apis. See: +# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__KUBE_CLIENT_REQUEST_ARGS +# +kube_client_request_args = + +# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client +# ``core_v1_api`` method when using the Kubernetes Executor. +# This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` +# class defined here: +# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 +# +# Example: delete_option_kwargs = {"grace_period_seconds": 10} +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__DELETE_OPTION_KWARGS +# +delete_option_kwargs = + +# Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely +# when idle connection is time-outed on services like cloud load balancers or firewalls. 
+# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__ENABLE_TCP_KEEPALIVE +# +enable_tcp_keepalive = True + +# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has +# been idle for `tcp_keep_idle` seconds. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_IDLE +# +tcp_keep_idle = 120 + +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_INTVL +# +tcp_keep_intvl = 30 + +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before +# a connection is considered to be broken. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TCP_KEEP_CNT +# +tcp_keep_cnt = 6 + +# Set this to false to skip verifying SSL certificate of Kubernetes python client. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__VERIFY_SSL +# +verify_ssl = True + +# Path to a CA certificate to be used by the Kubernetes client to verify the server's SSL certificate. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__SSL_CA_CERT +# +ssl_ca_cert = + +# The Maximum number of retries for queuing the task to the kubernetes scheduler when +# failing due to Kube API exceeded quota errors before giving up and marking task as failed. +# -1 for unlimited times. +# +# Variable: AIRFLOW__KUBERNETES_EXECUTOR__TASK_PUBLISH_MAX_RETRIES +# +task_publish_max_retries = 0 + +[common.io] +# Common IO configuration section + +# Path to a location on object storage where XComs can be stored in url format. +# +# Example: xcom_objectstorage_path = s3://conn_id@bucket/path +# +# Variable: AIRFLOW__COMMON.IO__XCOM_OBJECTSTORAGE_PATH +# +xcom_objectstorage_path = + +# Threshold in bytes for storing XComs in object storage. -1 means always store in the +# database. 0 means always store in object storage. Any positive number means +# it will be stored in object storage if the size of the value is greater than the threshold. +# +# Example: xcom_objectstorage_threshold = 1000000 +# +# Variable: AIRFLOW__COMMON.IO__XCOM_OBJECTSTORAGE_THRESHOLD +# +xcom_objectstorage_threshold = -1 + +# Compression algorithm to use when storing XComs in object storage. Supported algorithms +# are a.o.: snappy, zip, gzip, bz2, and lzma. If not specified, no compression will be used. +# Note that the compression algorithm must be available in the Python installation (e.g. +# python-snappy for snappy). Zip, gz, bz2 are available by default. +# +# Example: xcom_objectstorage_compression = gz +# +# Variable: AIRFLOW__COMMON.IO__XCOM_OBJECTSTORAGE_COMPRESSION +# +xcom_objectstorage_compression = + +[elasticsearch] +# Elasticsearch host +# +# Variable: AIRFLOW__ELASTICSEARCH__HOST +# +host = + +# Format of the log_id, which is used to query for a given tasks logs +# +# Variable: AIRFLOW__ELASTICSEARCH__LOG_ID_TEMPLATE +# +log_id_template = {dag_id}-{task_id}-{run_id}-{map_index}-{try_number} + +# Used to mark the end of a log stream for a task +# +# Variable: AIRFLOW__ELASTICSEARCH__END_OF_LOG_MARK +# +end_of_log_mark = end_of_log + +# Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id +# Code will construct log_id using the log_id template from the argument above. 
+# NOTE: scheme will default to https if one is not provided +# +# Example: frontend = http://localhost:5601/app/kibana#/discover?_a=(columns:!(message),query:(language:kuery,query:'log_id: "{log_id}"'),sort:!(log.offset,asc)) +# +# Variable: AIRFLOW__ELASTICSEARCH__FRONTEND +# +frontend = + +# Write the task logs to the stdout of the worker, rather than the default files +# +# Variable: AIRFLOW__ELASTICSEARCH__WRITE_STDOUT +# +write_stdout = False + +write_to_es = False +target_index = airflow-logs +# Instead of the default log formatter, write the log lines as JSON +# +# Variable: AIRFLOW__ELASTICSEARCH__JSON_FORMAT +# +json_format = False + +# Log fields to also attach to the json output, if enabled +# +# Variable: AIRFLOW__ELASTICSEARCH__JSON_FIELDS +# +json_fields = asctime, filename, lineno, levelname, message + +# The field where host name is stored (normally either `host` or `host.name`) +# +# Variable: AIRFLOW__ELASTICSEARCH__HOST_FIELD +# +host_field = host + +# The field where offset is stored (normally either `offset` or `log.offset`) +# +# Variable: AIRFLOW__ELASTICSEARCH__OFFSET_FIELD +# +offset_field = offset + +# Comma separated list of index patterns to use when searching for logs (default: `_all`). +# The index_patterns_callable takes precedence over this. +# +# Example: index_patterns = something-* +# +# Variable: AIRFLOW__ELASTICSEARCH__INDEX_PATTERNS +# +index_patterns = _all + +index_patterns_callable = + +[elasticsearch_configs] +# +# Variable: AIRFLOW__ELASTICSEARCH_CONFIGS__HTTP_COMPRESS +# +http_compress = False + +# +# Variable: AIRFLOW__ELASTICSEARCH_CONFIGS__VERIFY_CERTS +# +verify_certs = True + +[fab] +# This section contains configs specific to FAB provider. + +# Cookie with the secure attribute is only sent to the server with an HTTPS connection. +# +# Variable: AIRFLOW__FAB__COOKIE_SECURE +# +cookie_secure = False + +# Whether the cookie is restricted to a first-party or same-site context. +# +# Variable: AIRFLOW__FAB__COOKIE_SAMESITE +# +cookie_samesite = Lax + +# Define the color of navigation bar +# +# Variable: AIRFLOW__FAB__NAVBAR_COLOR +# +navbar_color = #fff + +# Define the color of text in the navigation bar +# +# Variable: AIRFLOW__FAB__NAVBAR_TEXT_COLOR +# +navbar_text_color = #51504f + +# Define the color of navigation bar links when hovered +# +# Variable: AIRFLOW__FAB__NAVBAR_HOVER_COLOR +# +navbar_hover_color = #eee + +# Define the color of text in the navigation bar when hovered +# +# Variable: AIRFLOW__FAB__NAVBAR_TEXT_HOVER_COLOR +# +navbar_text_hover_color = #51504f + +# The message displayed when a user attempts to execute actions beyond their authorised privileges. +# +# Variable: AIRFLOW__FAB__ACCESS_DENIED_MESSAGE +# +access_denied_message = Access is Denied + +# Expose hostname in the web server +# +# Variable: AIRFLOW__FAB__EXPOSE_HOSTNAME +# +expose_hostname = False + +# Boolean for enabling rate limiting on authentication endpoints. +# +# Variable: AIRFLOW__FAB__AUTH_RATE_LIMITED +# +auth_rate_limited = True + +# Rate limit for authentication endpoints. +# +# Variable: AIRFLOW__FAB__AUTH_RATE_LIMIT +# +auth_rate_limit = 5 per 40 second + +# Update FAB permissions and sync security manager roles +# on webserver startup +# +# Variable: AIRFLOW__FAB__UPDATE_FAB_PERMS +# +update_fab_perms = True + +# Comma separated list of auth backends to authenticate users of the API. 
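+#
+# For instance, to accept both HTTP basic auth and session cookies (assuming the FAB provider's
+# ``basic_auth`` backend is available in your installation):
+#
+# Example: auth_backends = airflow.providers.fab.auth_manager.api.auth.backend.basic_auth,airflow.providers.fab.auth_manager.api.auth.backend.session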
+# +# Variable: AIRFLOW__FAB__AUTH_BACKENDS +# +auth_backends = airflow.providers.fab.auth_manager.api.auth.backend.session + +# Path of webserver config file used for configuring the webserver parameters +# +# Variable: AIRFLOW__FAB__CONFIG_FILE +# +config_file = /opt/airflow/webserver_config.py + +# The type of backend used to store web session data, can be ``database`` or ``securecookie``. For the +# ``database`` backend, sessions are store in the database and they can be +# managed there (for example when you reset password of the user, all sessions for that user are +# deleted). For the ``securecookie`` backend, sessions are stored in encrypted cookies on the client +# side. The ``securecookie`` mechanism is 'lighter' than database backend, but sessions are not +# deleted when you reset password of the user, which means that other than waiting for expiry time, +# the only way to invalidate all sessions for a user is to change secret_key and restart webserver +# (which also invalidates and logs out all other user's sessions). +# +# When you are using ``database`` backend, make sure to keep your database session table small +# by periodically running ``airflow db clean --table session`` command, especially if you have +# automated API calls that will create a new session for each call rather than reuse the sessions +# stored in browser cookies. +# +# Example: session_backend = securecookie +# +# Variable: AIRFLOW__FAB__SESSION_BACKEND +# +session_backend = database + +# The UI cookie lifetime in minutes. User will be logged out from UI after +# ``[fab] session_lifetime_minutes`` of non-activity +# +# Variable: AIRFLOW__FAB__SESSION_LIFETIME_MINUTES +# +session_lifetime_minutes = 43200 + +# Enable werkzeug ``ProxyFix`` middleware for reverse proxy +# +# Variable: AIRFLOW__FAB__ENABLE_PROXY_FIX +# +enable_proxy_fix = False + +# Number of values to trust for ``X-Forwarded-For``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_FOR +# +proxy_fix_x_for = 1 + +# Number of values to trust for ``X-Forwarded-Proto``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_PROTO +# +proxy_fix_x_proto = 1 + +# Number of values to trust for ``X-Forwarded-Host``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_HOST +# +proxy_fix_x_host = 1 + +# Number of values to trust for ``X-Forwarded-Port``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_PORT +# +proxy_fix_x_port = 1 + +# Number of values to trust for ``X-Forwarded-Prefix``. +# See `Werkzeug: X-Forwarded-For Proxy Fix +# `__ for more details. +# +# Variable: AIRFLOW__FAB__PROXY_FIX_X_PREFIX +# +proxy_fix_x_prefix = 1 + +[azure_remote_logging] +# Configuration that needs to be set for enable remote logging in Azure Blob Storage + +remote_wasb_log_container = airflow-logs + +[openlineage] +# This section applies settings for OpenLineage integration. +# More about configuration and its precedence can be found in the `user's guide +# `_. + +# Disable sending events without uninstalling the OpenLineage Provider by setting this to true. +# +# Variable: AIRFLOW__OPENLINEAGE__DISABLED +# +disabled = False + +# Exclude some Operators from emitting OpenLineage events by passing a string of semicolon separated +# full import paths of Operators to disable. 
+# +# Example: disabled_for_operators = airflow.providers.standard.operators.bash.BashOperator; airflow.providers.standard.operators.python.PythonOperator +# +# Variable: AIRFLOW__OPENLINEAGE__DISABLED_FOR_OPERATORS +# +disabled_for_operators = + +# If this setting is enabled, OpenLineage integration won't collect and emit metadata, +# unless you explicitly enable it per `DAG` or `Task` using `enable_lineage` method. +# +# Variable: AIRFLOW__OPENLINEAGE__SELECTIVE_ENABLE +# +selective_enable = False + +# Set namespace that the lineage data belongs to, so that if you use multiple OpenLineage producers, +# events coming from them will be logically separated. +# +# Example: namespace = my_airflow_instance_1 +# +# Variable: AIRFLOW__OPENLINEAGE__NAMESPACE +# +# namespace = + +# Register custom OpenLineage Extractors by passing a string of semicolon separated full import paths. +# +# Example: extractors = full.path.to.ExtractorClass;full.path.to.AnotherExtractorClass +# +# Variable: AIRFLOW__OPENLINEAGE__EXTRACTORS +# +# extractors = + +# Register custom run facet functions by passing a string of semicolon separated full import paths. +# +# Example: custom_run_facets = full.path.to.custom_facet_function;full.path.to.another_custom_facet_function +# +# Variable: AIRFLOW__OPENLINEAGE__CUSTOM_RUN_FACETS +# +custom_run_facets = + +# Specify the path to the YAML configuration file. +# This ensures backwards compatibility with passing config through the `openlineage.yml` file. +# +# Example: config_path = full/path/to/openlineage.yml +# +# Variable: AIRFLOW__OPENLINEAGE__CONFIG_PATH +# +config_path = + +# Pass OpenLineage Client transport configuration as a JSON string, including the transport type +# and any additional options specific to that type, as described in `OpenLineage docs +# `_. +# +# Currently supported types are: +# +# * HTTP +# * Kafka +# * Console +# * File +# * Composite +# * Custom +# +# Example: transport = {"type": "http", "url": "http://localhost:5000", "endpoint": "api/v1/lineage"} +# +# Variable: AIRFLOW__OPENLINEAGE__TRANSPORT +# +transport = + +# Disable the inclusion of source code in OpenLineage events by setting this to `true`. +# By default, several Operators (e.g. Python, Bash) will include their source code in the events +# unless disabled. +# +# Variable: AIRFLOW__OPENLINEAGE__DISABLE_SOURCE_CODE +# +disable_source_code = False + +# Number of processes to utilize for processing DAG state changes +# in an asynchronous manner within the scheduler process. +# +# Variable: AIRFLOW__OPENLINEAGE__DAG_STATE_CHANGE_PROCESS_POOL_SIZE +# +dag_state_change_process_pool_size = 1 + +# Maximum amount of time (in seconds) that OpenLineage can spend executing metadata extraction. +# Note that other configurations, sometimes with higher priority, such as +# `[core] task_success_overtime +# `_, +# may also affect how much time OpenLineage has for execution. +# +# Variable: AIRFLOW__OPENLINEAGE__EXECUTION_TIMEOUT +# +execution_timeout = 10 + +# If true, OpenLineage event will include full task info - potentially containing large fields. +# +# Variable: AIRFLOW__OPENLINEAGE__INCLUDE_FULL_TASK_INFO +# +include_full_task_info = False + +# If true, OpenLineage events will include information useful for debugging - potentially +# containing large fields e.g. all installed packages and their versions. 
+# +# Variable: AIRFLOW__OPENLINEAGE__DEBUG_MODE +# +debug_mode = False + +# Automatically inject OpenLineage's parent job (namespace, job name, run id) information into Spark +# application properties for supported Operators. +# +# Variable: AIRFLOW__OPENLINEAGE__SPARK_INJECT_PARENT_JOB_INFO +# +spark_inject_parent_job_info = False + +# Automatically inject OpenLineage's transport information into Spark application properties +# for supported Operators. +# +# Variable: AIRFLOW__OPENLINEAGE__SPARK_INJECT_TRANSPORT_INFO +# +spark_inject_transport_info = False + +[postgres] +# Configuration for Postgres hooks and operators. + +azure_oauth_scope = https://ossrdbms-aad.database.windows.net/.default + +[snowflake] +# Configuration for Snowflake hooks and operators. + +azure_oauth_scope = api://snowflake_oauth_server/.default + +[standard] +# Options for the standard provider operators. + +# Which python tooling should be used to install the virtual environment. +# +# The following options are available: +# - ``auto``: Automatically select, use ``uv`` if available, otherwise use ``pip``. +# - ``pip``: Use pip to install the virtual environment. +# - ``uv``: Use uv to install the virtual environment. Must be available in environment PATH. +# +# Example: venv_install_method = uv +# +# Variable: AIRFLOW__STANDARD__VENV_INSTALL_METHOD +# +venv_install_method = auto + diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..ecb1102 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,337 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Basic Airflow cluster configuration for CeleryExecutor with Redis and PostgreSQL. +# +# WARNING: This configuration is for local development. Do not use it in a production deployment. +# +# This configuration supports basic configuration using environment variables or an .env file +# The following variables are supported: +# +# AIRFLOW_IMAGE_NAME - Docker image name used to run Airflow. +# Default: apache/airflow:3.1.2 +# AIRFLOW_UID - User ID in Airflow containers +# Default: 50000 +# AIRFLOW_PROJ_DIR - Base path to which all the files will be volumed. +# Default: . +# Those configurations are useful mostly in case of standalone testing/running Airflow in test/try-out mode +# +# _AIRFLOW_WWW_USER_USERNAME - Username for the administrator account (if requested). +# Default: airflow +# _AIRFLOW_WWW_USER_PASSWORD - Password for the administrator account (if requested). +# Default: airflow +# _PIP_ADDITIONAL_REQUIREMENTS - Additional PIP requirements to add when starting all containers. +# Use this option ONLY for quick checks. Installing requirements at container +# startup is done EVERY TIME the service is started. 
+# A better way is to build a custom image or extend the official image +# as described in https://airflow.apache.org/docs/docker-stack/build.html. +# Default: '' +# +# Feel free to modify this file to suit your needs. +--- +x-airflow-common: + &airflow-common + # In order to add custom dependencies or upgrade provider distributions you can use your extended image. + # Comment the image line, place your Dockerfile in the directory where you placed the docker-compose.yaml + # and uncomment the "build" line below, Then run `docker-compose build` to build the images. + image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:3.1.2} + # build: . + env_file: + - ${ENV_FILE_PATH:-.env} + environment: + &airflow-common-env + AIRFLOW__CORE__EXECUTOR: CeleryExecutor + AIRFLOW__CORE__AUTH_MANAGER: airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager + AIRFLOW__DATABASE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow + AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow@postgres/airflow + AIRFLOW__CELERY__BROKER_URL: redis://:@redis:6379/0 + AIRFLOW__CORE__FERNET_KEY: '' + AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true' + AIRFLOW__CORE__LOAD_EXAMPLES: 'true' + AIRFLOW__CORE__EXECUTION_API_SERVER_URL: 'http://airflow-apiserver:8080/execution/' + # yamllint disable rule:line-length + # Use simple http server on scheduler for health checks + # See https://airflow.apache.org/docs/apache-airflow/stable/administration-and-deployment/logging-monitoring/check-health.html#scheduler-health-check-server + # yamllint enable rule:line-length + AIRFLOW__SCHEDULER__ENABLE_HEALTH_CHECK: 'true' + # WARNING: Use _PIP_ADDITIONAL_REQUIREMENTS option ONLY for a quick checks + # for other purpose (development, test and especially production usage) build/extend Airflow image. 
+ _PIP_ADDITIONAL_REQUIREMENTS: ${_PIP_ADDITIONAL_REQUIREMENTS:-} + # The following line can be used to set a custom config file, stored in the local config folder + AIRFLOW_CONFIG: '/opt/airflow/config/airflow.cfg' + volumes: + - ${AIRFLOW_PROJ_DIR:-.}/dags:/opt/airflow/dags + - ${AIRFLOW_PROJ_DIR:-.}/logs:/opt/airflow/logs + - ${AIRFLOW_PROJ_DIR:-.}/config:/opt/airflow/config + - ${AIRFLOW_PROJ_DIR:-.}/plugins:/opt/airflow/plugins + user: "${AIRFLOW_UID:-50000}:0" + depends_on: + &airflow-common-depends-on + redis: + condition: service_healthy + postgres: + condition: service_healthy + +services: + postgres: + image: postgres:16 + environment: + POSTGRES_USER: airflow + POSTGRES_PASSWORD: airflow + POSTGRES_DB: airflow + volumes: + - postgres-db-volume:/var/lib/postgresql/data + healthcheck: + test: ["CMD", "pg_isready", "-U", "airflow"] + interval: 10s + retries: 5 + start_period: 5s + restart: always + + redis: + # Redis is limited to 7.2-bookworm due to licencing change + # https://redis.io/blog/redis-adopts-dual-source-available-licensing/ + image: redis:7.2-bookworm + expose: + - 6379 + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 30s + retries: 50 + start_period: 30s + restart: always + + airflow-apiserver: + <<: *airflow-common + command: api-server + ports: + - "8080:8080" + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost:8080/api/v2/version"] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-scheduler: + <<: *airflow-common + command: scheduler + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost:8974/health"] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-dag-processor: + <<: *airflow-common + command: dag-processor + healthcheck: + test: ["CMD-SHELL", 'airflow jobs check --job-type DagProcessorJob --hostname "$${HOSTNAME}"'] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-worker: + <<: *airflow-common + command: celery worker + healthcheck: + # yamllint disable rule:line-length + test: + - "CMD-SHELL" + - 'celery --app airflow.providers.celery.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}" || celery --app airflow.executors.celery_executor.app inspect ping -d "celery@$${HOSTNAME}"' + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + environment: + <<: *airflow-common-env + # Required to handle warm shutdown of the celery workers properly + # See https://airflow.apache.org/docs/docker-stack/entrypoint.html#signal-propagation + DUMB_INIT_SETSID: "0" + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-apiserver: + condition: service_healthy + airflow-init: + condition: service_completed_successfully + + airflow-triggerer: + <<: *airflow-common + command: triggerer + healthcheck: + test: ["CMD-SHELL", 'airflow jobs check --job-type TriggererJob --hostname "$${HOSTNAME}"'] + interval: 30s + timeout: 10s + retries: 5 + start_period: 30s + restart: always + depends_on: + <<: *airflow-common-depends-on + airflow-init: + condition: service_completed_successfully + + airflow-init: + <<: *airflow-common + entrypoint: 
/bin/bash + # yamllint disable rule:line-length + command: + - -c + - | + if [[ -z "${AIRFLOW_UID}" ]]; then + echo + echo -e "\033[1;33mWARNING!!!: AIRFLOW_UID not set!\e[0m" + echo "If you are on Linux, you SHOULD follow the instructions below to set " + echo "AIRFLOW_UID environment variable, otherwise files will be owned by root." + echo "For other operating systems you can get rid of the warning with manually created .env file:" + echo " See: https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#setting-the-right-airflow-user" + echo + export AIRFLOW_UID=$$(id -u) + fi + one_meg=1048576 + mem_available=$$(($$(getconf _PHYS_PAGES) * $$(getconf PAGE_SIZE) / one_meg)) + cpus_available=$$(grep -cE 'cpu[0-9]+' /proc/stat) + disk_available=$$(df / | tail -1 | awk '{print $$4}') + warning_resources="false" + if (( mem_available < 4000 )) ; then + echo + echo -e "\033[1;33mWARNING!!!: Not enough memory available for Docker.\e[0m" + echo "At least 4GB of memory required. You have $$(numfmt --to iec $$((mem_available * one_meg)))" + echo + warning_resources="true" + fi + if (( cpus_available < 2 )); then + echo + echo -e "\033[1;33mWARNING!!!: Not enough CPUS available for Docker.\e[0m" + echo "At least 2 CPUs recommended. You have $${cpus_available}" + echo + warning_resources="true" + fi + if (( disk_available < one_meg * 10 )); then + echo + echo -e "\033[1;33mWARNING!!!: Not enough Disk space available for Docker.\e[0m" + echo "At least 10 GBs recommended. You have $$(numfmt --to iec $$((disk_available * 1024 )))" + echo + warning_resources="true" + fi + if [[ $${warning_resources} == "true" ]]; then + echo + echo -e "\033[1;33mWARNING!!!: You have not enough resources to run Airflow (see above)!\e[0m" + echo "Please follow the instructions to increase amount of resources available:" + echo " https://airflow.apache.org/docs/apache-airflow/stable/howto/docker-compose/index.html#before-you-begin" + echo + fi + echo + echo "Creating missing opt dirs if missing:" + echo + mkdir -v -p /opt/airflow/{logs,dags,plugins,config} + echo + echo "Airflow version:" + /entrypoint airflow version + echo + echo "Files in shared volumes:" + echo + ls -la /opt/airflow/{logs,dags,plugins,config} + echo + echo "Running airflow config list to create default config file if missing." + echo + /entrypoint airflow config list >/dev/null + echo + echo "Files in shared volumes:" + echo + ls -la /opt/airflow/{logs,dags,plugins,config} + echo + echo "Change ownership of files in /opt/airflow to ${AIRFLOW_UID}:0" + echo + chown -R "${AIRFLOW_UID}:0" /opt/airflow/ + echo + echo "Change ownership of files in shared volumes to ${AIRFLOW_UID}:0" + echo + chown -v -R "${AIRFLOW_UID}:0" /opt/airflow/{logs,dags,plugins,config} + echo + echo "Files in shared volumes:" + echo + ls -la /opt/airflow/{logs,dags,plugins,config} + + # yamllint enable rule:line-length + environment: + <<: *airflow-common-env + _AIRFLOW_DB_MIGRATE: 'true' + _AIRFLOW_WWW_USER_CREATE: 'true' + _AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow} + _AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow} + _PIP_ADDITIONAL_REQUIREMENTS: '' + user: "0:0" + + airflow-cli: + <<: *airflow-common + profiles: + - debug + environment: + <<: *airflow-common-env + CONNECTION_CHECK_MAX_COUNT: "0" + # Workaround for entrypoint issue. 
See: https://github.com/apache/airflow/issues/16252
+    command:
+      - bash
+      - -c
+      - airflow
+    depends_on:
+      <<: *airflow-common-depends-on
+
+  # You can enable flower by adding "--profile flower" option e.g. docker-compose --profile flower up
+  # or by explicitly targeting it on the command line e.g. docker-compose up flower.
+  # See: https://docs.docker.com/compose/profiles/
+  flower:
+    <<: *airflow-common
+    command: celery flower
+    profiles:
+      - flower
+    ports:
+      - "5555:5555"
+    healthcheck:
+      test: ["CMD", "curl", "--fail", "http://localhost:5555/"]
+      interval: 30s
+      timeout: 10s
+      retries: 5
+      start_period: 30s
+    restart: always
+    depends_on:
+      <<: *airflow-common-depends-on
+      airflow-init:
+        condition: service_completed_successfully
+
+volumes:
+  postgres-db-volume:
diff --git a/docs/airflow.md b/docs/airflow.md
new file mode 100644
index 0000000..9c70896
--- /dev/null
+++ b/docs/airflow.md
@@ -0,0 +1,68 @@
+# Apache Airflow
+
+The best-known platform for managing and automating workflows such as
+data processing and batch (ETL/ELT) jobs.
+
+* **What it can do / strengths**
+  * Airflow shines at large-scale, production ETL
+  * Workflows are written as code (Python) in the form of DAGs
+  * The scheduler runs them on a regular schedule
+  * Solid retry and alerting behaviour on failure
+  * Powerful web UI
+  * Rich plugin ecosystem and high extensibility
+
+## Writing workflows
+
+Airflow's defining feature is that the flow of processing is expressed
+as code using a structure called a DAG (directed acyclic graph).
+
+A DAG (Directed Acyclic Graph) is a set of tasks that have an order
+and dependencies between them.
+
+```python
+# extract, transform and load are tasks (operators) defined elsewhere in the file
+with DAG("my_etl_job") as dag:
+    extract >> transform >> load
+```
+
+* Directed → the flow has a direction (A → B → C)
+* Acyclic → no loops; the flow must never return to an earlier task
+* Graph → represented as nodes (tasks) and edges (dependencies)
+
+```mermaid
+flowchart LR
+    source1[("Data source 1
MySQL")]
+    source2[("Data source 2
API")]
+    source3[("Data source 3
CSV")] + + extract["Extract
Data extraction"]
+    transform["Transform
Data transformation"]
+    load["Load
Data loading"]
+
+    dwh[("Data
warehouse")]
+
+    source1 --> extract
+    source2 --> extract
+    source3 --> extract
+    extract --> transform
+    transform --> load
+    load --> dwh
+```
+
+### Task
+
+A task is the individual unit of work executed within a DAG.
+It takes care of things such as data processing or running a script.
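+
+Putting the pieces together, a DAG file dropped into `dags/` might look
+like the sketch below. This is only a sketch: the task bodies are
+placeholders, the schedule and start date are arbitrary, and the
+`PythonOperator` import path assumes the standard provider that ships
+with the official image.
+
+```python
+from datetime import datetime
+
+from airflow import DAG
+from airflow.providers.standard.operators.python import PythonOperator
+
+
+def _extract():
+    # Placeholder: pull data from the source systems (MySQL, API, CSV, ...)
+    print("extract")
+
+
+def _transform():
+    # Placeholder: clean up / reshape the extracted data
+    print("transform")
+
+
+def _load():
+    # Placeholder: write the result into the data warehouse
+    print("load")
+
+
+with DAG(
+    dag_id="my_etl_job",
+    start_date=datetime(2025, 1, 1),
+    schedule="@daily",
+    catchup=False,
+) as dag:
+    extract = PythonOperator(task_id="extract", python_callable=_extract)
+    transform = PythonOperator(task_id="transform", python_callable=_transform)
+    load = PythonOperator(task_id="load", python_callable=_load)
+
+    # The >> operator declares the dependency chain shown in the diagram above
+    extract >> transform >> load
+```
+
+Once the scheduler and DAG processor pick the file up, the DAG appears in the
+web UI as `my_etl_job` and can be triggered manually or left to its schedule.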
+
+---
+
+## Develop
+
+### Prepare the environment
+
+
+## Link
+
+* https://airflow.apache.org/docs/apache-airflow/stable/howto/variable.html
+* https://zenn.dev/iwatagumi/articles/c8c61771ae49fc
\ No newline at end of file
diff --git a/docs/images/app01.png b/docs/images/app01.png
new file mode 100644
index 0000000000000000000000000000000000000000..ecc1c5461ed872ef3181ecde1782126254e9abe1
GIT binary patch
literal 53010

[binary image data omitted]
zu{gWJRZPb=v)$ECXNNSPj^(-Fx${Czi160e)tw=^&zZ(!1xKL3pv1aXbstCt3n=Wf z3X99PZ6UnJFsEAO1TG67icv)H&i=A51>IIB$^{SysTvVn@hQCjVrC#myf@E1tB>gG zucl4)!{dA@oX;1FJuwZ93BfOsry&iZLn9jD!S7C&w~B~?9}Rcp!G4zN4q ze=lgzUNYha6Nly>INag*FD~6Gl{4jYc%|<1PV7q5Jw1C_Sp?mUcr3irwlE^A7WEHf(+{FdfRItw^3!_%A}9Tz zWthECXArxTVd&;#rsXbQKO8rY9r6Zxa(Dv06qCHGmYw==q7NuO`O7H}*XpI_7qP;0 zVUAatfavSJADe%oNj$st7T-V%mMS?T@PKCw3ApW@Guvd7(}bR0F7RS{fjRRGCBuPq z1W=eT#J=#cXy2)=3rKv{e0!{CDF0$<)JzsK-`Qhe{6jQ3LU$0+dV422<8eYMnpulV znk|cyly2bYJF_$ApRCzKL;ayvKoSa|HGA|izIRY2B2>5sIFBBu#A#Ps$JJaJgXJCl z-3zkL9-)xqM?fr^U#Tijnz(~~NM1BvrNE{fA#OZ4=tZ1ESBWoCr({|(H+Ns?fM zn6;OZvzD(P`ECA=6#l3$!?7`@tbV_#B~D#W*YRDtz_^2#WE7>%DanbxbSEsX z)P3ALxVL@5u~L=ae7m4~tcR@4WkJbrKj^G_UFRSRTnE{Z)SSp;tQ1s|U^$wMO)c+m z;A`V>1XM*U758$mf;Mw#K5;%BCkO%7VSK{eL^wNUT~IHqtM%sGp2A2Ga|k`Ng`OSe z_#aA%9!7Ptauy5NY>bvJ_mO!%d4nU%HOhzcj%l4|-SB2!uoy}#D9F@H zDU)#!jvxz)o%qTNtOF>))D%sfa5((!alIp-k29%pxKqf_>)j0{Dnl6jICV7uV!Sa% zu3?J(c}nrBQ`_)8oX{_&ZaD{kEsBdgIk%ea0)@=!Jv~8N*PDrs#T8cmedWte)Fx-y-Oa*0%J7 yrE2VRW!Lt_rHOLwp~_eC1VQ;veEXZp0{zNpE{mvk|2$OF>62zB$_y_2_P+rBtMOg{ literal 0 HcmV?d00001 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..86e0c49 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +requests +python-dotenv +apache-airflow==3.1.2 +pandas==2.3.2 +duckdb==1.3.2 +google-cloud-storage \ No newline at end of file