Merge branch 'main' into develop

ry.yamafuji 2025-12-06 00:50:36 +09:00
commit dda5257765
35 changed files with 869 additions and 79 deletions

.github/workflows/deploy_to_gcp.yml (new file)

@@ -0,0 +1,69 @@
name: Gitea Deploy to GCP
on:
  workflow_dispatch:
  pull_request:
    branches:
      - deploy-prd
      - deploy-dev
jobs:
  gcp-deploy:
    name: Deploy to GCP
    runs-on: gcloud-tf
    env:
      GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
      GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }}
      REPO_NAME: ${{ github.repository }}
      HASH_SUFFIX: ${{ github.sha }}
      JOB_NAME: ${{ vars.JOB_NAME }}
      BRANCH_NAME: ${{ github.ref_name }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Check Deploy Tools
        run: |
          ls -la
          echo "Checking gcloud and terraform versions..."
          gcloud --version
          terraform --version
      - name: Check Gcloud auth
        run: |
          echo "HOME: ${HOME}"
          printf '%s' "$GCP_SA_KEY" > "$HOME/sa.json"
          export GOOGLE_APPLICATION_CREDENTIALS="$HOME/sa.json"
          gcloud auth activate-service-account --key-file="$GOOGLE_APPLICATION_CREDENTIALS"
          gcloud config set project "$GCP_PROJECT_ID"
          echo "Check gcloud"
          gcloud config list
          gcloud --version
      - name: Exec Terraform init shell
        run: |
          export GOOGLE_APPLICATION_CREDENTIALS="$HOME/sa.json"
          ./scripts/deploy/init_terraform.sh
      - name: Exec Container Image Push to Artifact Registry
        run: |
          export GOOGLE_APPLICATION_CREDENTIALS="$HOME/sa.json"
          ./scripts/deploy/build_image_to_gar.sh
      - name: Exec Terraform plan shell
        run: |
          export GOOGLE_APPLICATION_CREDENTIALS="$HOME/sa.json"
          ./scripts/deploy/plan_terraform.sh
      - name: Exec Terraform apply shell
        run: |
          export GOOGLE_APPLICATION_CREDENTIALS="$HOME/sa.json"
          ./scripts/deploy/apply_terraform.sh
      - name: Clean up Gcloud auth file
        run: |
          rm -f "$HOME/sa.json"
          echo "Cleaned up Gcloud auth file."

@@ -32,15 +32,35 @@ jobs:
          pip install -r requirements.txt
          pip install -r requirements-dev.txt
      - name: Check Linter
        id: checkIniter
        run: |
          echo "Running Ruff Lint Check..."
          python -m ruff check . --exit-zero --no-cache --output-format json --output-file ruff-report.json
          echo "Ruff Lint Check completed. ruff-report.json"
      - name: Generate Linter Report
        id: generateLinterReport
        run: |
          python scripts/generate_linter.py
      - name: pull_request message with Ruff Lint results
        id: prMessageRuffLint
        run: |
          echo 'echo ${{ toJson(github.event_name) }}'
          echo 'echo ${{ toJson(github.event) }}'
          echo 'echo ${{ toJson(gitea.event_name) }}'
          echo 'echo ${{ toJson(gitea.event) }}'
          curl -v -X POST \
            -H "Content-Type: application/json" \
            -H "Authorization: token ${{ secrets.GITEA_TOKEN }}" \
            -d "{\"body\": \"## :mag: Ruff Lint Results\n\`\`\`\ntest\n\`\`\`\"}" \
            ${{ gitea.server_url }}/api/v1/repos/${{ gitea.repository }}/issues/${{ github.event.pull_request.number }}/comments
          # If the event is a pull request, post the results as a PR comment
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            echo "Posting Ruff Lint results to Pull Request..."
            curl -v -X POST \
              -H "Content-Type: application/json" \
              -H "Authorization: token ${{ secrets.GITEA_TOKEN }}" \
              -d @lint-result.json \
              ${{ gitea.server_url }}/api/v1/repos/${{ gitea.repository }}/issues/${{ github.event.pull_request.number }}/comments
          else
            echo "Not a pull request event."
            echo "Ruff Lint results:"
            echo "-------------------"
            cat lint-result.md
            echo "-------------------"
            echo "No PR detected. Skipping API comment."
          fi
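
For reference, `-d @lint-result.json` posts the file written by `scripts/generate_linter.py`; the Gitea comment API expects a single `body` field. A sketch of the payload shape (the actual body is the generated Markdown report):

```json
{
    "body": "## Linter review\n\n|Code|Severity|Message|File|Location|Auto-fix|\n..."
}
```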

.gitignore

@@ -4,6 +4,9 @@ __pycache__/
*.py[cod]
*$py.class
ruff-report.*
lint-result.md
lint-result.json
# C extensions
*.so
@@ -169,3 +172,10 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# terraform.tfstate files
_*.tfvars
.terraform/
.terraform.lock.hcl
*.tfstate
*.tfstate.backup
*deploy.env

Dockerfile (new file)

@@ -0,0 +1,19 @@
FROM python:3.12-slim
# Install the required OS packages
RUN apt-get update && apt-get install -y \
    curl
# Install the Python packages
COPY requirements.txt .
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r requirements.txt
# Set the working directory
WORKDIR /app
# Copy the application code
COPY ./src /app
# Command executed when the container starts
CMD ["python", "main.py"]

docs/Makefile (new file)

@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

docs/conf.py (new file)

@@ -0,0 +1,35 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys

sys.path.insert(0, os.path.abspath('../src'))

project = 'Set your project name'
copyright = '2025, Set the author name'
author = 'Set the author name'
release = '1.0.0'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',  # Support Google-style and NumPy-style docstrings
    'sphinx.ext.viewcode',  # Links to the highlighted source code
    'sphinx_rtd_theme']

templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
language = 'ja'

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']

docs/index.rst (new file)

@@ -0,0 +1,20 @@
.. Set your project name documentation master file, created by
   sphinx-quickstart on Fri Dec 5 01:02:07 2025.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Set your project name documentation
====================================

Add your content using ``reStructuredText`` syntax. See the
`reStructuredText <https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html>`_
documentation for details.

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   modules

docs/main.rst (new file)

@@ -0,0 +1,7 @@
main module
===========

.. automodule:: main
   :members:
   :show-inheritance:
   :undoc-members:

docs/make.bat (new file)

@@ -0,0 +1,35 @@
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=build

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

if "%1" == "" goto help

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd

docs/modules.rst (new file)

@@ -0,0 +1,9 @@
src
===

.. toctree::
   :maxdepth: 4

   main
   utils

docs/utils.rst (new file)

@@ -0,0 +1,29 @@
utils package
=============

Submodules
----------

utils.custom\_logger module
---------------------------

.. automodule:: utils.custom_logger
   :members:
   :show-inheritance:
   :undoc-members:

utils.singleton module
----------------------

.. automodule:: utils.singleton
   :members:
   :show-inheritance:
   :undoc-members:

Module contents
---------------

.. automodule:: utils
   :members:
   :show-inheritance:
   :undoc-members:

@@ -1,7 +1,9 @@
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")))
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
from utils.custom_logger import get_logger

@@ -0,0 +1,61 @@
# Purpose for the agent
Review the `terraform` folder and create a .drawio file (XML format) in the
`readme/components_design` folder.
## Role
You are an expert in designing system architectures.
## Conventions
[Requirements]
* Name the output file `system_components.drawio`.
* Unless the request specifies otherwise, prefer `_dev.tfvars` for environment variables.
* Service accounts, roles, and the like do not need to be shown.
* **Important:** do not use components that do not exist in the terraform configuration.
* Keep the component diagram for end users separate from the one for developers.
[Layout requirements]
- Represent the Region and the VPC as large frames
- Region: place it as the outermost frame
- VPC: place it inside the Region
- Stagger components in a stair-step layout so connection lines do not overlap
- Keep at least 100px of spacing between components
- Place users outside the VPC (assumed to access over the internet)
- Express the data flow from left to right (User → Frontend → Backend → Database)
[Style requirements]
**Frame styles:**
- VPC: `fillColor=#D5E8D4;strokeColor=#82b366;dashed=1;verticalAlign=top;fontStyle=1;fontSize=14`
- Region: `fillColor=#E1F5FE;strokeColor=#01579B;dashed=1;verticalAlign=top;fontStyle=1;fontSize=14`
- Place frame labels at the top left (`align=left;spacingLeft=10;spacingTop=5`)
**Connection lines:**
- Bidirectional communication: `endArrow=classic;startArrow=classic;strokeWidth=2`
- Unidirectional communication: `endArrow=classic;strokeWidth=2`
- HTTPS access: `strokeColor=#4285F4` (blue)
- Database connections: `strokeColor=#DB4437` (red)
- Storage access: `strokeColor=#34A853` (green)
- Label the connection lines (e.g. "HTTPS", "API", "SQL")
[Coordinate and size guidelines]
- Region frame: 800-1000px wide, 500-700px tall
- VPC frame: roughly 50px of margin inside the Region, 700-900px wide, 400-600px tall
- Component icons: 78x78 or 80x80
- Horizontal spacing between components: 150-200px
- Vertical spacing between components: 100-150px
**Icons:**
- User/client icon
  - `shape=mxgraph.aws4.resourceIcon;resIcon=mxgraph.aws4.user` (shared across diagrams)
- For component icons, refer to the map below
- When using the icons below, always use the corresponding mxCell template.
- You may adjust id / x / y / width / height / parent as needed.

readme/deploy.md (new file)

@@ -0,0 +1,76 @@
# How to deploy
## Installation
On macOS:
```sh
brew tap hashicorp/tap
brew install hashicorp/tap/terraform
# Verify
terraform -version
```
## Environment
* terraform
* google cloud
* Cloud Run Job
## How to run
```sh
# Run the initialization
cd terraform
# Initialize Terraform
terraform init
# Create the artifact registry and buckets first
terraform apply \
  -var-file=_dev.tfvars \
  -auto-approve \
  -target="google_artifact_registry_repository.repo"
# To build the Docker image and push it to GAR
cd ../
# 1. Configure authentication for Artifact Registry (first run only)
source deploy.env
gcloud auth configure-docker "${AR_REGION}-docker.pkg.dev"
# arm64
source deploy.env
gcloud builds submit --tag "${IMAGE_URI}" .
echo "${IMAGE_URI}"
# Review the components to be deployed
cd terraform
terraform plan \
  -var-file=_dev.tfvars \
  -var="hash_suffix=${HASH_SUFFIX}"
# Run the deployment
terraform apply \
  -var-file=_dev.tfvars \
  -var="hash_suffix=${HASH_SUFFIX}" \
  -auto-approve
```
To try a local build:
```sh
# Default build
docker build -t cloud-run-job-base .
# To build for arm64
docker buildx build --platform linux/amd64,linux/arm64 -t cloud-run-job-base .
# Run the container (--rm removes the container instance after a single run)
docker run --rm cloud-run-job-base:latest
```
### Deploying via CI/CD
**Running with GitHub (Gitea) Actions**
**Running with Cloud Build**
### Triggering the job when data lands in BigQuery
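This section is still a stub. One possible approach (an assumption, not something configured in this repository) is an Eventarc trigger on BigQuery audit-log events, routed to a destination that runs the job:

```sh
# Sketch only: the trigger name, workflow name, and service account are hypothetical.
gcloud eventarc triggers create bq-data-arrival-trigger \
  --location=asia-northeast1 \
  --event-filters="type=google.cloud.audit.log.v1.written" \
  --event-filters="serviceName=bigquery.googleapis.com" \
  --event-filters="methodName=google.cloud.bigquery.v2.JobService.InsertJob" \
  --destination-workflow=run-job-workflow \
  --service-account="sa-eventarc@YOUR_PROJECT.iam.gserviceaccount.com"
```

The destination (here a hypothetical Workflows workflow) would then call the Cloud Run Jobs `run` API to execute the job.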

@@ -80,3 +80,28 @@ ruff check . --output-format json --output-file ruff-report.json
Various output formats, such as `github`, can be specified via `--output-format`.
How to generate review comments from the results still needs investigation.
## Doc
Run the initial setup:
```sh
mkdir docs
cd docs
sphinx-quickstart
```
Auto-generate the rst files:
```sh
cd docs
sphinx-apidoc -o . ../src
```
Build the docs:
```sh
cd docs
make html
```
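With the `BUILDDIR = build` default from `docs/Makefile`, the generated pages end up under `docs/build/html`; for example:
```sh
open docs/build/html/index.html   # macOS; use xdg-open on Linux
```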

@@ -2,6 +2,10 @@
pytest
pytest-cov
coverage-badge
# Linting tool
ruff
ruff==0.14.7
# Docs
sphinx
sphinx-rtd-theme
autodoc

@@ -1,6 +1,12 @@
line-length = 88
line-length = 79
# E and W rules cover trailing-whitespace and blank-line checks
# E7xx/E9xx (possible syntax/runtime errors)
# W1xx/W5xx (style and formatting)
# Dxxx (docstring conventions)
# F (errors such as unused imports)
# Bxx (possible bugs)
[lint]
select = ["E", "W"]
select = ["F", "E", "W", "D101", "B"]
ignore = []

@@ -0,0 +1,26 @@
#!/bin/bash
# Safe mode (exit on error, undefined variables, or pipe failure)
set -euo pipefail

TF_DIR=${TF_DIR:-terraform}
ENV=${ENV:-dev}

cd "$TF_DIR"

# --- Deploy condition: only run on the deploy-* branches (see deploy_to_gcp.yml) ---
if [[ "${BRANCH_NAME:-}" =~ ^deploy- ]]; then
  echo "Start terraform apply (ENV=${ENV}, DIR=${TF_DIR}) ..."
else
  echo "Skip terraform apply (branch=${BRANCH_NAME:-})"
  exit 0
fi

# --- Check that the plan output exists ---
if [[ ! -f tfplan ]]; then
  echo "ERROR: tfplan not found in $(pwd). Run plan step first." >&2
  exit 1
fi

terraform apply -auto-approve tfplan
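
A minimal local invocation sketch (assuming the plan step has already produced `tfplan`; in CI these variables come from the workflow `env` block):

```sh
BRANCH_NAME=deploy-dev ENV=dev TF_DIR=terraform ./scripts/deploy/apply_terraform.sh
```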

@@ -0,0 +1,29 @@
#!/bin/bash
# Build a Docker image and push it to Artifact Registry
set -euo pipefail

# Environment variables
REGION=${REGION:-asia-northeast1}
ENV=${ENV:-dev}
JOB_NAME=${JOB_NAME:?JOB_NAME must be set}
AR_REPO_NAME="cicd-repo-${ENV}"
# For local runs, fall back to epoch seconds as the tag
HASH_SUFFIX=${HASH_SUFFIX:-$(date +%s)}

# Compose IMAGE_URI
IMAGE_URI="${REGION}-docker.pkg.dev/${GCP_PROJECT_ID}/${AR_REPO_NAME}/run-job-${JOB_NAME}-image:${HASH_SUFFIX}"

echo "REGION      : ${REGION}"
echo "ENV         : ${ENV}"
echo "JOB_NAME    : ${JOB_NAME}"
echo "HASH_SUFFIX : ${HASH_SUFFIX}"
echo "IMAGE_URI   : ${IMAGE_URI}"

# Configure Docker authentication for Artifact Registry
gcloud auth configure-docker "${REGION}-docker.pkg.dev"

# Build and push the Docker image to GAR via Cloud Build
gcloud builds submit --tag "${IMAGE_URI}" .
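
A local run sketch (assuming `gcloud` is already authenticated; `GCP_PROJECT_ID` has no default, so it must be exported; `my-project` is a hypothetical ID):

```sh
export GCP_PROJECT_ID=my-project
JOB_NAME=base HASH_SUFFIX=$(git rev-parse --short HEAD) \
  ./scripts/deploy/build_image_to_gar.sh
```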

@@ -0,0 +1,21 @@
#!/bin/bash
# Safe mode (exit on error, undefined variables, or pipe failure)
set -euo pipefail

TF_DIR=${TF_DIR:-terraform}
# The state is stored remotely (GCS, S3, etc.)
TF_STATE_BUCKET=${TF_STATE_BUCKET:-cicd-tfstate-bucket-20250906}
ENV=${ENV:-dev}
REPO_NAME=${REPO_NAME:-unknown}

cd "$TF_DIR"
echo "$REPO_NAME"

# --- Run terraform init ---
terraform init \
  -backend-config="bucket=${TF_STATE_BUCKET}" \
  -backend-config="prefix=${REPO_NAME}/${ENV}"

@@ -0,0 +1,23 @@
#!/bin/bash
# Safe mode (exit on error, undefined variables, or pipe failure)
set -euo pipefail

TF_DIR=${TF_DIR:-terraform}
ENV=${ENV:-dev}
HASH_SUFFIX=${HASH_SUFFIX:?HASH_SUFFIX must be set}

cd "$TF_DIR"

if [ -f "${ENV}.tfvars" ]; then
  terraform plan \
    -out=tfplan \
    -var-file="${ENV}.tfvars" \
    -var="hash_suffix=${HASH_SUFFIX}"
else
  # Fail if the tfvars file is missing
  echo "ERROR: ${ENV}.tfvars not found in $(pwd)" >&2
  exit 1
fi

@@ -2,9 +2,13 @@ import re
class GenerateCoverage:
    def __init__(self,
                 coverage_file="pytest-coverage.txt",
                 output_file="coverage_table.md"):
        """Parse coverage results and generate a Markdown table"""
    def __init__(
        self,
        coverage_file="pytest-coverage.txt",
        output_file="coverage_table.md",
    ):
        """
        Initialize
@@ -35,26 +39,33 @@ class GenerateCoverage:
            if in_coverage_section and line.strip().startswith("---"):
                continue
            # Detect the end of the coverage section (the TOTAL line or "=" separator)
            if in_coverage_section and (line.strip().startswith("TOTAL") or line.strip().startswith("=")):
            if in_coverage_section and (
                line.strip().startswith("TOTAL")
                or line.strip().startswith("=")
            ):
                break
            # Extract the coverage data
            if in_coverage_section:
                match = re.match(
                    r"(.+?)\s+(\d+)\s+(\d+)\s+(\d+%)\s*(.*)", line)
                    r"(.+?)\s+(\d+)\s+(\d+)\s+(\d+%)\s*(.*)", line
                )
                if match:
                    filename = match.group(1).strip()
                    statements = match.group(2).strip()
                    missed = match.group(3).strip()
                    coverage = match.group(4).strip()
                    missing_lines = match.group(
                        5).strip() if match.group(5) else "-"
                    coverage_info.append({
                        "filename": filename,
                        "statements": statements,
                        "missed": missed,
                        "coverage": coverage,
                        "missing_lines": missing_lines
                    })
                    missing_lines = (
                        match.group(5).strip() if match.group(5) else "-"
                    )
                    coverage_info.append(
                        {
                            "filename": filename,
                            "statements": statements,
                            "missed": missed,
                            "coverage": coverage,
                            "missing_lines": missing_lines,
                        }
                    )
        self.coverage_data = coverage_info
@@ -67,14 +78,20 @@ class GenerateCoverage:
        print("Parsed coverage data.")
        # Markdown table header
        table_header = "| File | Statements | Missed | Coverage | Missing Lines |\n"
        table_header += "|------|------------|--------|----------|---------------|\n"
        table_header = (
            "| File | Statements | Missed | Coverage | Missing Lines |\n"
        )
        table_header += (
            "|------|------------|--------|----------|---------------|\n"
        )
        # Generate the table rows
        table_rows = [
            (f"| {data['filename']} | {data['statements']} | "
             f"{data['missed']} | {data['coverage']} | "
             f"{data['missing_lines']} |")
            (
                f"| {data['filename']} | {data['statements']} | "
                f"{data['missed']} | {data['coverage']} | "
                f"{data['missing_lines']} |"
            )
            for data in self.coverage_data
        ]

scripts/generate_linter.py (new file)

@@ -0,0 +1,123 @@
import json
from pathlib import Path

PROJECT_NAME = Path(".").resolve().name
print(f"Project Name: {PROJECT_NAME}")

CODE_MAP = {
    "W291": {
        "message": "Trailing whitespace at the end of the line.",
        "severity": "🟢 Low",
    },
    "W292": {
        "message": "No newline at the end of the file.",
        "severity": "🟢 Low",
    },
    "E501": {
        "message": "Line too long; keep it within 79 characters.",
        "severity": "🟢 Low",
    },
    "D101": {
        "message": "The class is missing a docstring.",
        "severity": "⚪️ Harmless",
    },
}


def get_relative_path(absolute_path: str) -> str:
    """
    Get the path relative to the project root from an absolute path
    """
    try:
        index = absolute_path.index(PROJECT_NAME)
        return absolute_path[index + len(PROJECT_NAME) + 1 :]
    except ValueError:
        return absolute_path


class GenerateLinter:
    """Class that generates the linter report"""

    def __init__(
        self, json_file="ruff-report.json", output_file="lint-result"
    ):
        """
        Initialize
        """
        self.json_file = json_file
        self.output_file = output_file

    def _generate_lint_report(self, data: list) -> str:
        _str = ""
        if not data:
            _str += "## Linter: no issues found\n\n"
            _str += "Great code! 🎉\n"
            return _str
        _str += "## Linter review\n\n"
        _str += "The following issues were found. Please review the code.\n\n"
        _str += f"Total: {len(data)}\n"
        _str += "### Issues\n"
        _str += "|Code|Severity|Message|File|Location|Auto-fix|\n"
        _str += "|---|---|---|---|---|---|\n"
        for issue in data:
            code = issue.get("code", "-")
            severity = (
                CODE_MAP.get(code, {}).get("severity", "❓ Unknown")
                if code != "-"
                else "-"
            )
            message = CODE_MAP.get(code, {}).get(
                "message", issue.get("message", "-")
            )
            filename = get_relative_path(issue.get("filename", "-"))
            file_link = f"./{filename}"
            line = ""
            if issue.get("location") and issue["location"].get("row"):
                line = f"line {issue['location']['row']}"
                if issue["location"].get("column"):
                    line += f", col {issue['location']['column']}"
            if issue.get("end_location"):
                if issue["end_location"].get("row"):
                    line += f" - line {issue['end_location']['row']}"
                if issue["end_location"].get("column"):
                    line += f", col {issue['end_location']['column']}"
            auto_fix = "✅" if issue.get("fix") else "-"
            _str += f"|{code}|{severity}|{message}|"
            _str += f"[{filename}]({file_link})|{line}|{auto_fix}|\n"
        _str += "\n\n"
        _str += "### Auto-fix command\n"
        _str += (
            "For issues that can be fixed automatically, "
            "you can try the following command.\n\n"
        )
        _str += "```bash\n"
        _str += "ruff check --fix .\n"
        _str += "```\n\n"
        return _str

    def generate_lint_report_json(self):
        with open(self.json_file, "r") as f:
            data = json.load(f)
        with open(f"{self.output_file}.md", "w") as f:
            report_body = self._generate_lint_report(data)
            f.write(report_body)
        with open(f"{self.output_file}.json", "w") as f:
            report = {"body": self._generate_lint_report(data)}
            json.dump(report, f, ensure_ascii=False, indent=4)
        print(
            f"Linter report generated: {self.output_file}.md"
            f" and {self.output_file}.json"
        )


if __name__ == "__main__":
    generator = GenerateLinter()
    generator.generate_lint_report_json()
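
To generate the report locally the same way the workflow does:

```sh
python -m ruff check . --exit-zero --no-cache --output-format json --output-file ruff-report.json
python scripts/generate_linter.py
# -> writes lint-result.md and lint-result.json
```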

@@ -1,40 +0,0 @@
#!/usr/bin/env bash
# scripts/genarater_linter.sh
set -u  # fail on undefined variables (-e is intentionally not set)

RUFF_STATUS=0
OUTPUT_MD=${1:-lint-result.md}

{
  echo "## 🧹 Ruff Lint Result"
  echo
  echo "### Command"
  echo '```bash'
  echo '$ ruff check .'
  echo '```'
  echo
  echo "### Output"
  echo '```text'
  # Even if ruff fails, do not kill the script itself; just capture the status
  ruff check . || RUFF_STATUS=$?
  echo '```'
  echo
  echo "### Summary"
  echo
  echo "| Tool | Status |"
  echo "|------|--------|"
  if [ "$RUFF_STATUS" -eq 0 ]; then
    echo "| ruff | ✅ OK |"
  else
    echo "| ruff | ❌ Error (exit $RUFF_STATUS) |"
  fi
} | tee "$OUTPUT_MD"

# When called from GitHub Actions, expose the status as an output variable
if [ -n "${GITHUB_OUTPUT:-}" ]; then
  echo "ruff_status=$RUFF_STATUS" >> "$GITHUB_OUTPUT"
fi

# Always exit 0 here (whether to fail is controlled by the workflow)
exit 0

src/utils/__init__.py (new, empty file)

@@ -6,9 +6,8 @@ from .singleton import Singleton
class CustomLogger(Singleton):
    """
    Singleton logger class that initializes a logger with a specified name and log file.
    It provides a method to log entry and exit of functions.
    Singleton logger class that initializes a logger with a specified name
    and log file. It provides a method to log entry and exit of functions.
    """

    def __init__(self, name="main", log_file=None, level=logging.INFO):
@@ -22,7 +21,8 @@ class CustomLogger(Singleton):
        self.logger.propagate = False
        formatter = logging.Formatter(
            "%(asctime)s %(levelname)s [%(filename)s:%(lineno)3d]: %(message)s"
            "%(asctime)s %(levelname)s "
            "[%(filename)s:%(lineno)3d]: %(message)s"
        )
        # Console handler
@@ -11,6 +11,7 @@ import threading
class Singleton(object):
    """Base class for the singleton pattern"""
    _instances = {}
    _lock = threading.Lock()

terraform/af.tf (new file)

@@ -0,0 +1,5 @@
resource "google_artifact_registry_repository" "repo" {
location = var.region
repository_id = "cicd-repo-${var.env_name}"
format = "DOCKER"
}

terraform/dev.tfvars (new file)

@@ -0,0 +1,12 @@
project_id = "gcp-devel-project"
region     = "asia-northeast1"
env_name   = "dev"
job_name   = "base"
# Container image (expected to be passed in from CI/CD)
cpu_limit    = "1"
memory_limit = "512Mi"
timeout      = "1800s"

terraform/platform.tf (new file)

@@ -0,0 +1,10 @@
# Enable the Google Cloud APIs
resource "google_project_service" "services" {
  for_each = toset([
    "run.googleapis.com",
    "artifactregistry.googleapis.com",
    "cloudbuild.googleapis.com",
  ])
  service = each.key
}

terraform/provider.tf (new file)

@@ -0,0 +1,9 @@
terraform {
  backend "gcs" {}
}

# Google provider configuration
provider "google" {
  project = var.project_id
  region  = var.region
}

terraform/run_job.tf (new file)

@@ -0,0 +1,29 @@
# Cloud Run Job resource
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job
resource "google_cloud_run_v2_job" "job" {
  name     = "${var.job_name}-${var.env_name}-job"
  location = var.region

  template {
    template {
      service_account = google_service_account.job_sa.email
      containers {
        image = "${var.region}-docker.pkg.dev/${var.project_id}/cicd-repo-${var.env_name}/run-job-${var.job_name}-image:${var.hash_suffix}"
        resources {
          limits = {
            cpu    = var.cpu_limit
            memory = var.memory_limit
          }
        }
      }
      timeout = var.timeout
    }
  }
}
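
Once applied with the `dev.tfvars` values (`job_name = "base"`, `env_name = "dev"`, so the job is named `base-dev-job`), the job can be executed manually, for example:

```sh
gcloud run jobs execute base-dev-job --region asia-northeast1
```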

terraform/sa.tf (new file)

@@ -0,0 +1,14 @@
resource "google_service_account" "job_sa" {
account_id = "sa-${var.job_name}-${var.env_name}"
display_name = "Cloud Run Job Service Account for ${var.job_name} in ${var.env_name} environment"
description = "Cloud Run Job Service Account for ${var.job_name} in ${var.env_name} environment"
project = var.project_id
}
# IAM role assignment
# Cloud Run Job実行に必要な権限を付与
resource "google_project_iam_member" "run_job_invoker" {
project = var.project_id
role = "roles/run.invoker"
member = "serviceAccount:${google_service_account.job_sa.email}"
}

terraform/sample.tfvars (new file)

@@ -0,0 +1,10 @@
project_id = "set-your-project-id"
region     = "asia-northeast1"
env_name   = "dev"
job_name   = "set-your-job-name"
# Container image (expected to be passed in from CI/CD)
cpu_limit    = "1"
memory_limit = "512Mi"
timeout      = "1800s"

terraform/variables.tf (new file)

@@ -0,0 +1,54 @@
# GCP project ID and region
variable "project_id" {
  description = "The ID of the GCP project to deploy resources into."
  type        = string
}

variable "region" {
  description = "The GCP region to deploy resources into."
  type        = string
  default     = "asia-northeast1"
}

variable "env_name" {
  description = "The environment name for the deployment."
  type        = string
  default     = "dev"
  validation {
    condition     = contains(["dev", "staging", "prd"], var.env_name)
    error_message = "env_name must be one of: dev, staging, prd."
  }
}

variable "job_name" {
  description = "The name of the Cloud Run Job."
  type        = string
  default     = "get-news-ai"
}

# (expected to be passed in from CI/CD)
variable "hash_suffix" {
  description = "The image tag suffix for the Cloud Run Job container."
  type        = string
  default     = null
}

# Cloud Run Job configuration variables
variable "cpu_limit" {
  description = "The CPU limit for the Cloud Run Job container."
  type        = string
  default     = "1"
}

variable "memory_limit" {
  description = "The memory limit for the Cloud Run Job container."
  type        = string
  default     = "512Mi"
}

variable "timeout" {
  description = "The task timeout in seconds for the Cloud Run Job."
  type        = string
  default     = "1800s"
}