Initial commit

Zzzz
2026-04-27 18:40:30 +08:00
commit 2120774b05
112 changed files with 12308 additions and 0 deletions
+29
@@ -0,0 +1,29 @@
# Developer identity (local only)
.developer
# Current task pointer (each dev works on different task)
.current-task
# Ralph Loop state file
.ralph-state.json
# Agent runtime files
.agents/
.agent-log
.session-id
# Task directory runtime files
.plan-log
# Atomic update temp files
*.tmp
# Update backup directories
.backup-*
# Conflict resolution temp files
*.new
# Python cache
**/__pycache__/
**/*.pyc
+43
@@ -0,0 +1,43 @@
{
".trellis/config.yaml": "c3c4af7d82c09a1638f63c1f560119507735b060a4780ef7e6d0cdef447c215d",
".trellis/scripts/__init__.py": "1242be5b972094c2e141aecbe81a4efd478f6534e3d5e28306374e6a18fcf46c",
".trellis/scripts/add_session.py": "a97a6c88ff7def8045a5dffa5c698a823392d7f73c1641e8a0c08db0168bd913",
".trellis/scripts/common/__init__.py": "a8afa14ebe662723f96e4f5757c15359d76adf4cb5c52327c94dbe854bd1ab01",
".trellis/scripts/common/cli_adapter.py": "b10763292c8eb56affe7e3921ebf0dfaaceb148b3052fc9a01716589a5d4a6e9",
".trellis/scripts/common/config.py": "671a3591f97b75ec19f25814d2ee3f7e9b38e048f6f67442519fe0715c454eeb",
".trellis/scripts/common/developer.py": "f5f833123abe68890171b4da825a324216d24913f6b5ad9245afc556424ffd7b",
".trellis/scripts/common/git.py": "e14817be7de122d3a106f509c2825aeb9669d962ba73ba241642d2931cfdf1d6",
".trellis/scripts/common/git_context.py": "7533c08335791e50c3a6f9d551d5b1af0bdaa2a0a746721cb3e1a2140f4d9683",
".trellis/scripts/common/io.py": "6480b181f2bc505323b28ed7a66963d7b7edc96251e83b4c8e7a45907cc721c8",
".trellis/scripts/common/log.py": "471df6895cfac80f995edebbf9974f6b7440634b7a688f28b8331c868bc0f3cf",
".trellis/scripts/common/packages_context.py": "efe158d7c99c2268851d0216fbb08de22836e418a8dbeb73575b8cc249eed7b7",
".trellis/scripts/common/paths.py": "36f72bdc09e4f0db53250346a4744ff3699c634ea71380eed5b467095f3d946b",
".trellis/scripts/common/session_context.py": "2389eff1a66b172783fcb714a79385114d9b29746133a3e0db732c3b5cb23898",
".trellis/scripts/common/task_context.py": "1c16a7fa82d363010d0d0ebdc038296ae1552bf6e90214787d707f49567bc159",
".trellis/scripts/common/task_queue.py": "0be61f713462b1fe4574927c82fc4704e678afe72dcb9813543aedf2f9e9e0c5",
".trellis/scripts/common/task_store.py": "57fff744bce501ee2a0d25ac096301cb4288e02627197a513a00cd0a5cddb78d",
".trellis/scripts/common/task_utils.py": "f5ef4af87ba3e11d8b19630c0c96d009de1811fc9be56c2027a9c96e21ed103e",
".trellis/scripts/common/tasks.py": "eeefae693dadec54c8945394e288e90ed1e8f79545dfb2d4934a431496f5229d",
".trellis/scripts/common/types.py": "9962081cc2608fb9d1deb32c6880e336f62cdca6b338e7ae813304701e155ee9",
".trellis/scripts/common/workflow_phase.py": "b5736dab0587d78cfe25059435495e7631eeab1d03ea62c3db1a493dae19e553",
".trellis/scripts/get_context.py": "ca5bf9e90bdb1d75d3de182b95f820f9d108ab28793d29097b24fd71315adcf5",
".trellis/scripts/get_developer.py": "84c27076323c3e0f2c9c8ed16e8aa865e225d902a187c37e20ee1a46e7142d8f",
".trellis/scripts/hooks/linear_sync.py": "e09cc4ce4699aada908808718698f33f705a3edf55c4dcf8f777ad892f80ca79",
".trellis/scripts/init_developer.py": "f9e6c0d882406e81c8cd6b1c5abb204b0befc0069ff89cf650cd536a80f8c60e",
".trellis/scripts/task.py": "402e3a097b455e0880e5c61de2b1326da3a85da5d231cf4c2598376a7b6e0687",
".trellis/workflow.md": "3328b94491e79b1c2cc278f26b3dacd384cb874284ee9ae145146efa2588326f",
".codex/agents/trellis-check.toml": "c8d6a7cfa5428c06e0efd79115f873feab94fe4a4e3205a39fffa76621187b02",
".codex/agents/trellis-implement.toml": "0b4a88a7d5ba9c6ca2f4ed2e10c57d39d4e07e8d3f75393b17a7341ef1916ca1",
".codex/agents/trellis-research.toml": "1792ecfaaffade7cb4cd4e4376e9e87ae82b3bf42352054cb2fb0e74dfbee3a2",
".codex/config.toml": "78ae8265425631b6c554185b3f9470e383dfac72a8d759ae0b6022efe662450c",
".codex/hooks/inject-workflow-state.py": "281939a51a62467a5dd73947fdec7143d0e936b4b64b5224ce0484186eb5b018",
".codex/hooks/session-start.py": "f2f0a9cfd09b899a7d5878375a19b7a86a08c9ab1c8213b50e0df5a7e95caac5",
".codex/hooks/statusline.py": "87c01ee786ca9e1f25b591f0ed67564cd430586d57516d5b6fc57c34b6e7a3ec",
".codex/hooks.json": "a7d09984dde4938c9262658140ead0f1327b8251d7fae4f1ec9107b72be9c4fe",
".agents/skills/trellis-before-dev/SKILL.md": "208ad3fd5131fa0da603d4bc354a29826967397f5aeef483fa0564113df13e9e",
".agents/skills/trellis-brainstorm/SKILL.md": "03974d09545d3918f85f8b975ef18a8983fe9d5c30791f9d989a5bbe6268058b",
".agents/skills/trellis-break-loop/SKILL.md": "35afb53fef42cd494e566f1ef170dbf442ec2be7e19931f28a14079b4dda753f",
".agents/skills/trellis-check/SKILL.md": "a3f17aef687aa3b475d12ee64c3293e5491bb7474336be2c0f9ec22042f13b6e",
".agents/skills/trellis-continue/SKILL.md": "874c1a9a7d7fd5f7771e0c30dc74a124e846abd6cac6d72db3b41ef6c857e30a",
".agents/skills/trellis-finish-work/SKILL.md": "c0f3ffa74e1e9990f93e5d44d03bd088c8698da1db4750b74d3eee4809a84aea"
}
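The values are 64-hex-digit strings keyed by repo-relative path, which strongly suggests SHA-256 digests of each managed file. A minimal verification sketch under that assumption (the manifest's own filename is not shown in this commit, so it is passed in as a parameter):

import hashlib
import json
from pathlib import Path

def verify_manifest(manifest_path: Path, root: Path) -> list[str]:
    """Return manifest entries whose on-disk SHA-256 differs (or is missing)."""
    manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
    mismatched = []
    for rel_path, expected in manifest.items():
        target = root / rel_path
        if not target.is_file():
            mismatched.append(rel_path)  # treat a missing file as a mismatch
            continue
        if hashlib.sha256(target.read_bytes()).hexdigest() != expected:
            mismatched.append(rel_path)
    return mismatched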
+1
@@ -0,0 +1 @@
0.5.0-beta.14
+59
@@ -0,0 +1,59 @@
# Trellis Configuration
# Project-level settings for the Trellis workflow system
#
# All values have sensible defaults. Only override what you need.
#-------------------------------------------------------------------------------
# Session Recording
#-------------------------------------------------------------------------------
# Commit message used when auto-committing journal/index changes
# after running add_session.py
session_commit_message: "chore: record journal"
# Maximum lines per journal file before rotating to a new one
max_journal_lines: 2000
#-------------------------------------------------------------------------------
# Task Lifecycle Hooks
#-------------------------------------------------------------------------------
# Shell commands to run after task lifecycle events.
# Each hook receives TASK_JSON_PATH environment variable pointing to task.json.
# Hook failures print a warning but do not block the main operation.
#
# hooks:
# after_create:
# - "echo 'Task created'"
# after_start:
# - "echo 'Task started'"
# after_finish:
# - "echo 'Task finished'"
# after_archive:
# - "echo 'Task archived'"
#-------------------------------------------------------------------------------
# Monorepo / Packages
#-------------------------------------------------------------------------------
# Declare packages for monorepo projects.
# Trellis auto-detects workspaces during `trellis init`, but you can also
# configure them manually here.
#
# packages:
# frontend:
# path: packages/frontend
# backend:
# path: packages/backend
# docs:
# path: docs-site
# type: submodule
# # For polyrepo / meta-repo layouts (independent .git in each subdir),
# # mark the package with `git: true`. The runtime treats it as an
# # independent repository for things like git-context display.
# webapp:
# path: ./webapp
# git: true
# Default package used when --package is not specified.
# default_package: frontend
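Since each hook only receives TASK_JSON_PATH and failures merely warn, a hook command can be any short script that reads the task file. A hypothetical after_finish hook along those lines (the task.json keys used below are illustrative; the schema is not shown in this file):

#!/usr/bin/env python3
# Hypothetical lifecycle hook: log the task it was invoked for.
import json
import os
import sys

task_path = os.environ.get("TASK_JSON_PATH")
if not task_path:
    sys.exit("TASK_JSON_PATH not set; expected to run via a Trellis lifecycle hook")
with open(task_path, encoding="utf-8") as f:
    task = json.load(f)
# "title" is an assumed key for illustration only.
print(f"Task finished: {task.get('title', '(untitled)')}")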
+5
@@ -0,0 +1,5 @@
"""
Trellis Python Scripts
This module provides Python implementations of Trellis workflow scripts.
"""
+521
@@ -0,0 +1,521 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Add a new session to journal file and update index.md.
Usage:
python3 add_session.py --title "Title" --commit "hash" --summary "Summary" [--package cli]
python3 add_session.py --title "Title" --branch "feat/my-branch"
# Pipe detailed content via stdin (use --stdin to opt in):
cat << 'EOF' | python3 add_session.py --stdin --title "Title" --summary "Summary"
<session content here>
EOF
Branch resolution order:
1. --branch CLI arg (explicit)
2. task.json branch field (from active task)
3. git branch --show-current (auto-detect)
4. None (omitted gracefully)
"""
from __future__ import annotations
import argparse
import re
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from common.paths import (
FILE_JOURNAL_PREFIX,
get_repo_root,
get_current_task,
get_developer,
get_workspace_dir,
)
from common.developer import ensure_developer
from common.git import run_git
from common.tasks import load_task
from common.config import (
get_packages,
get_session_commit_message,
get_max_journal_lines,
is_monorepo,
resolve_package,
validate_package,
)
# =============================================================================
# Helper Functions
# =============================================================================
def get_latest_journal_info(dev_dir: Path) -> tuple[Path | None, int, int]:
"""Get latest journal file info.
Returns:
Tuple of (file_path, file_number, line_count).
"""
latest_file: Path | None = None
latest_num = -1
for f in dev_dir.glob(f"{FILE_JOURNAL_PREFIX}*.md"):
if not f.is_file():
continue
match = re.search(r"(\d+)$", f.stem)
if match:
num = int(match.group(1))
if num > latest_num:
latest_num = num
latest_file = f
if latest_file:
lines = len(latest_file.read_text(encoding="utf-8").splitlines())
return latest_file, latest_num, lines
return None, 0, 0
def get_current_session(index_file: Path) -> int:
"""Get current session number from index.md."""
if not index_file.is_file():
return 0
content = index_file.read_text(encoding="utf-8")
for line in content.splitlines():
if "Total Sessions" in line:
match = re.search(r":\s*(\d+)", line)
if match:
return int(match.group(1))
return 0
def _extract_journal_num(filename: str) -> int:
"""Extract journal number from filename for sorting."""
match = re.search(r"(\d+)", filename)
return int(match.group(1)) if match else 0
def count_journal_files(dev_dir: Path, active_num: int) -> str:
"""Count journal files and return table rows."""
active_file = f"{FILE_JOURNAL_PREFIX}{active_num}.md"
result_lines = []
files = sorted(
[f for f in dev_dir.glob(f"{FILE_JOURNAL_PREFIX}*.md") if f.is_file()],
key=lambda f: _extract_journal_num(f.stem),
reverse=True
)
for f in files:
filename = f.name
lines = len(f.read_text(encoding="utf-8").splitlines())
status = "Active" if filename == active_file else "Archived"
result_lines.append(f"| `{filename}` | ~{lines} | {status} |")
return "\n".join(result_lines)
def create_new_journal_file(
dev_dir: Path, num: int, developer: str, today: str, max_lines: int = 2000,
) -> Path:
"""Create a new journal file."""
prev_num = num - 1
new_file = dev_dir / f"{FILE_JOURNAL_PREFIX}{num}.md"
content = f"""# Journal - {developer} (Part {num})
> Continuation from `{FILE_JOURNAL_PREFIX}{prev_num}.md` (archived at ~{max_lines} lines)
> Started: {today}
---
"""
new_file.write_text(content, encoding="utf-8")
return new_file
def generate_session_content(
session_num: int,
title: str,
commit: str,
summary: str,
extra_content: str,
today: str,
package: str | None = None,
branch: str | None = None,
) -> str:
"""Generate session content."""
if commit and commit != "-":
commit_table = """| Hash | Message |
|------|---------|"""
for c in commit.split(","):
c = c.strip()
commit_table += f"\n| `{c}` | (see git log) |"
else:
commit_table = "(No commits - planning session)"
package_line = f"\n**Package**: {package}" if package else ""
branch_line = f"\n**Branch**: `{branch}`" if branch else ""
return f"""
## Session {session_num}: {title}
**Date**: {today}
**Task**: {title}{package_line}{branch_line}
### Summary
{summary}
### Main Changes
{extra_content}
### Git Commits
{commit_table}
### Testing
- [OK] (Add test results)
### Status
[OK] **Completed**
### Next Steps
- None - task complete
"""
def update_index(
index_file: Path,
dev_dir: Path,
title: str,
commit: str,
new_session: int,
active_file: str,
today: str,
branch: str | None = None,
) -> bool:
"""Update index.md with new session info."""
# Format commit for display
commit_display = "-"
if commit and commit != "-":
commit_display = re.sub(r"([a-f0-9]{7,})", r"`\1`", commit.replace(",", ", "))
# Get file number from active_file name
match = re.search(r"(\d+)", active_file)
active_num = int(match.group(1)) if match else 0
files_table = count_journal_files(dev_dir, active_num)
print(f"Updating index.md for session {new_session}...")
print(f" Title: {title}")
print(f" Commit: {commit_display}")
print(f" Active File: {active_file}")
print()
content = index_file.read_text(encoding="utf-8")
if "@@@auto:current-status" not in content:
print("Error: Markers not found in index.md. Please ensure markers exist.", file=sys.stderr)
return False
# Process sections
lines = content.splitlines()
new_lines = []
in_current_status = False
in_active_documents = False
in_session_history = False
header_written = False
for line in lines:
if "@@@auto:current-status" in line:
new_lines.append(line)
in_current_status = True
new_lines.append(f"- **Active File**: `{active_file}`")
new_lines.append(f"- **Total Sessions**: {new_session}")
new_lines.append(f"- **Last Active**: {today}")
continue
if "@@@/auto:current-status" in line:
in_current_status = False
new_lines.append(line)
continue
if "@@@auto:active-documents" in line:
new_lines.append(line)
in_active_documents = True
new_lines.append("| File | Lines | Status |")
new_lines.append("|------|-------|--------|")
new_lines.append(files_table)
continue
if "@@@/auto:active-documents" in line:
in_active_documents = False
new_lines.append(line)
continue
if "@@@auto:session-history" in line:
new_lines.append(line)
in_session_history = True
header_written = False
continue
if "@@@/auto:session-history" in line:
in_session_history = False
new_lines.append(line)
continue
if in_current_status:
continue
if in_active_documents:
continue
if in_session_history:
# Migrate old 4/6-column headers to 5-column Branch-only history.
if re.match(
r"^\|\s*#\s*\|\s*Date\s*\|\s*Title\s*\|\s*Commits\s*\|\s*Branch\s*\|\s*Base Branch\s*\|\s*$",
line,
):
new_lines.append("| # | Date | Title | Commits | Branch |")
continue
if re.match(r"^\|\s*#\s*\|\s*Date\s*\|\s*Title\s*\|\s*Commits\s*\|\s*Branch\s*\|\s*$", line):
new_lines.append("| # | Date | Title | Commits | Branch |")
continue
if re.match(r"^\|\s*#\s*\|\s*Date\s*\|\s*Title\s*\|\s*Commits\s*\|\s*$", line):
new_lines.append("| # | Date | Title | Commits | Branch |")
continue
if re.match(r"^\|[-| ]+\|\s*$", line) and not header_written:
new_lines.append("|---|------|-------|---------|--------|")
new_lines.append(f"| {new_session} | {today} | {title} | {commit_display} | `{branch or '-'}` |")
header_written = True
continue
new_lines.append(line)
continue
new_lines.append(line)
index_file.write_text("\n".join(new_lines), encoding="utf-8")
print("[OK] Updated index.md successfully!")
return True
# =============================================================================
# Main Function
# =============================================================================
def _auto_commit_workspace(repo_root: Path) -> None:
"""Stage .trellis/workspace and .trellis/tasks, then commit with a configured message."""
commit_msg = get_session_commit_message(repo_root)
add_result = subprocess.run(
["git", "add", "-A", ".trellis/workspace", ".trellis/tasks"],
cwd=repo_root,
capture_output=True,
text=True,
)
if add_result.returncode != 0:
print(f"[WARN] git add failed (exit {add_result.returncode}): {add_result.stderr.strip()}", file=sys.stderr)
print("[WARN] Please commit .trellis/ changes manually: git add .trellis && git commit", file=sys.stderr)
return
# Check if there are staged changes
result = subprocess.run(
["git", "diff", "--cached", "--quiet", "--", ".trellis/workspace", ".trellis/tasks"],
cwd=repo_root,
)
if result.returncode == 0:
print("[OK] No workspace changes to commit.", file=sys.stderr)
return
commit_result = subprocess.run(
["git", "commit", "-m", commit_msg],
cwd=repo_root,
capture_output=True,
text=True,
)
if commit_result.returncode == 0:
print(f"[OK] Auto-committed: {commit_msg}", file=sys.stderr)
else:
print(f"[WARN] Auto-commit failed: {commit_result.stderr.strip()}", file=sys.stderr)
def add_session(
title: str,
commit: str = "-",
summary: str = "(Add summary)",
extra_content: str = "(Add details)",
auto_commit: bool = True,
package: str | None = None,
branch: str | None = None,
) -> int:
"""Add a new session."""
repo_root = get_repo_root()
ensure_developer(repo_root)
developer = get_developer(repo_root)
if not developer:
print("Error: Developer not initialized", file=sys.stderr)
return 1
dev_dir = get_workspace_dir(repo_root)
if not dev_dir:
print("Error: Workspace directory not found", file=sys.stderr)
return 1
max_lines = get_max_journal_lines(repo_root)
index_file = dev_dir / "index.md"
today = datetime.now().strftime("%Y-%m-%d")
journal_file, current_num, current_lines = get_latest_journal_info(dev_dir)
current_session = get_current_session(index_file)
new_session = current_session + 1
session_content = generate_session_content(
new_session, title, commit, summary, extra_content, today, package,
branch,
)
content_lines = len(session_content.splitlines())
print("========================================", file=sys.stderr)
print("ADD SESSION", file=sys.stderr)
print("========================================", file=sys.stderr)
print("", file=sys.stderr)
print(f"Session: {new_session}", file=sys.stderr)
print(f"Title: {title}", file=sys.stderr)
print(f"Commit: {commit}", file=sys.stderr)
print("", file=sys.stderr)
print(f"Current journal file: {FILE_JOURNAL_PREFIX}{current_num}.md", file=sys.stderr)
print(f"Current lines: {current_lines}", file=sys.stderr)
print(f"New content lines: {content_lines}", file=sys.stderr)
print(f"Total after append: {current_lines + content_lines}", file=sys.stderr)
print("", file=sys.stderr)
target_file = journal_file
target_num = current_num
if journal_file is None or current_lines + content_lines > max_lines:
target_num = current_num + 1
if journal_file is None:
print(f"[!] No journal file found, creating {FILE_JOURNAL_PREFIX}{target_num}.md", file=sys.stderr)
else:
print(f"[!] Exceeds {max_lines} lines, creating {FILE_JOURNAL_PREFIX}{target_num}.md", file=sys.stderr)
target_file = create_new_journal_file(dev_dir, target_num, developer, today, max_lines)
print(f"Created: {target_file}", file=sys.stderr)
# Append session content
if target_file:
with target_file.open("a", encoding="utf-8") as f:
f.write(session_content)
print(f"[OK] Appended session to {target_file.name}", file=sys.stderr)
print("", file=sys.stderr)
# Update index.md
active_file = f"{FILE_JOURNAL_PREFIX}{target_num}.md"
if not update_index(
index_file,
dev_dir,
title,
commit,
new_session,
active_file,
today,
branch,
):
return 1
print("", file=sys.stderr)
print("========================================", file=sys.stderr)
print(f"[OK] Session {new_session} added successfully!", file=sys.stderr)
print("========================================", file=sys.stderr)
print("", file=sys.stderr)
print("Files updated:", file=sys.stderr)
print(f" - {target_file.name if target_file else 'journal'}", file=sys.stderr)
print(" - index.md", file=sys.stderr)
# Auto-commit workspace changes
if auto_commit:
print("", file=sys.stderr)
_auto_commit_workspace(repo_root)
return 0
# =============================================================================
# Main Entry
# =============================================================================
def main() -> int:
"""CLI entry point."""
parser = argparse.ArgumentParser(
description="Add a new session to journal file and update index.md"
)
parser.add_argument("--title", required=True, help="Session title")
parser.add_argument("--commit", default="-", help="Comma-separated commit hashes")
parser.add_argument("--summary", default="(Add summary)", help="Brief summary")
parser.add_argument("--content-file", help="Path to file with detailed content")
parser.add_argument("--package", help="Package name tag (e.g., cli, docs-site)")
parser.add_argument("--branch", help="Branch name (auto-detected if omitted)")
parser.add_argument("--no-commit", action="store_true",
help="Skip auto-commit of workspace changes")
parser.add_argument("--stdin", action="store_true",
help="Read extra content from stdin (explicit opt-in)")
args = parser.parse_args()
extra_content = "(Add details)"
if args.content_file:
content_path = Path(args.content_file)
if content_path.is_file():
extra_content = content_path.read_text(encoding="utf-8")
elif args.stdin:
extra_content = sys.stdin.read()
# Load active task once — shared by package and branch resolution
repo_root = get_repo_root()
current = get_current_task(repo_root)
task_data = load_task(repo_root / current) if current else None
package = args.package
if package:
# CLI source: fail-fast in monorepo, ignore in single-repo
if not is_monorepo(repo_root):
print("Warning: --package ignored in single-repo project", file=sys.stderr)
package = None
elif not validate_package(package, repo_root):
packages = get_packages(repo_root)
available = ", ".join(sorted(packages.keys())) if packages else "(none)"
print(f"Error: unknown package '{package}'. Available: {available}", file=sys.stderr)
return 1
else:
# Inferred: active task's task.json.package → default_package → None
task_package = task_data.package if task_data else None
package = resolve_package(task_package, repo_root)
# Resolve branch: CLI → task.json → git auto-detect → None
branch = args.branch
if not branch:
if task_data and task_data.raw.get("branch"):
branch = task_data.raw["branch"]
else:
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=repo_root)
detected = branch_out.strip()
if detected:
branch = detected
return add_session(
args.title, args.commit, args.summary, extra_content,
auto_commit=not args.no_commit,
package=package,
branch=branch,
)
if __name__ == "__main__":
sys.exit(main())
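Besides the CLI entry point, add_session() can be driven directly; a minimal sketch, assuming .trellis/scripts is on sys.path so the module imports resolve:

# Hypothetical programmatic use of add_session(); mirrors
# `python3 add_session.py --title ... --commit ... --no-commit`.
from add_session import add_session

exit_code = add_session(
    title="Refactor config loader",   # illustrative title
    commit="a1b2c3d,e4f5a6b",         # comma-separated hashes, as --commit expects
    summary="Split YAML parsing out of config.py",
    auto_commit=False,                # same effect as --no-commit
)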
+84
@@ -0,0 +1,84 @@
"""
Common utilities for Trellis workflow scripts.
This module provides shared functionality used by other Trellis scripts.
"""
import io
import sys
# =============================================================================
# Windows Encoding Fix (MUST be at top, before any other output)
# =============================================================================
# On Windows, stdout defaults to the system code page (often GBK/CP936).
# This causes UnicodeEncodeError when printing non-ASCII characters.
#
# Any script that imports from common will automatically get this fix.
# =============================================================================
def _configure_stream(stream: object) -> object:
"""Configure a stream for UTF-8 encoding on Windows."""
# Try reconfigure() first (Python 3.7+, more reliable)
if hasattr(stream, "reconfigure"):
stream.reconfigure(encoding="utf-8", errors="replace") # type: ignore[union-attr]
return stream
# Fallback: detach and rewrap with TextIOWrapper
elif hasattr(stream, "detach"):
return io.TextIOWrapper(
stream.detach(), # type: ignore[union-attr]
encoding="utf-8",
errors="replace",
)
return stream
if sys.platform == "win32":
sys.stdout = _configure_stream(sys.stdout) # type: ignore[assignment]
sys.stderr = _configure_stream(sys.stderr) # type: ignore[assignment]
sys.stdin = _configure_stream(sys.stdin) # type: ignore[assignment]
def configure_encoding() -> None:
"""
Configure stdout/stderr/stdin for UTF-8 encoding on Windows.
This is automatically called when importing from common,
but can be called manually for scripts that don't import common.
Safe to call multiple times.
"""
global sys
if sys.platform == "win32":
sys.stdout = _configure_stream(sys.stdout) # type: ignore[assignment]
sys.stderr = _configure_stream(sys.stderr) # type: ignore[assignment]
sys.stdin = _configure_stream(sys.stdin) # type: ignore[assignment]
from .paths import (
DIR_WORKFLOW,
DIR_WORKSPACE,
DIR_TASKS,
DIR_ARCHIVE,
DIR_SPEC,
DIR_SCRIPTS,
FILE_DEVELOPER,
FILE_CURRENT_TASK,
FILE_TASK_JSON,
FILE_JOURNAL_PREFIX,
get_repo_root,
get_developer,
check_developer,
get_tasks_dir,
get_workspace_dir,
get_active_journal_file,
count_lines,
get_current_task,
get_current_task_abs,
normalize_task_ref,
resolve_task_ref,
set_current_task,
clear_current_task,
has_current_task,
generate_task_date_prefix,
)
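Because the UTF-8 fix runs at import time, scripts normally get it for free; configure_encoding() exists for code that sets up streams without importing common first. A small sketch:

# Any import from `common` applies the Windows UTF-8 fix as a side effect;
# calling configure_encoding() again is documented as safe.
from common import configure_encoding

configure_encoding()  # no-op on non-Windows platforms
print("Unicode output (e.g. 中文) now survives Windows code pages")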
+776
@@ -0,0 +1,776 @@
"""
CLI Adapter for Multi-Platform Support.
Abstracts differences between Claude Code, OpenCode, Cursor, iFlow, Codex, Kilo, Kiro Code, Gemini CLI, Antigravity, Windsurf, Qoder, CodeBuddy, GitHub Copilot, and Factory Droid interfaces.
Supported platforms:
- claude: Claude Code (default)
- opencode: OpenCode
- cursor: Cursor IDE
- iflow: iFlow CLI
- codex: Codex CLI (skills-based)
- kilo: Kilo CLI
- kiro: Kiro Code (skills-based)
- gemini: Gemini CLI
- antigravity: Antigravity (workflow-based)
- windsurf: Windsurf (workflow-based)
- qoder: Qoder
- codebuddy: CodeBuddy
- copilot: GitHub Copilot (VS Code)
- droid: Factory Droid (commands-based)
Usage:
from common.cli_adapter import CLIAdapter
adapter = CLIAdapter("opencode")
cmd = adapter.build_run_command(
agent="dispatch",
session_id="abc123",
prompt="Start the pipeline"
)
"""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar, Literal
Platform = Literal[
"claude",
"opencode",
"cursor",
"iflow",
"codex",
"kilo",
"kiro",
"gemini",
"antigravity",
"windsurf",
"qoder",
"codebuddy",
"copilot",
"droid",
]
@dataclass
class CLIAdapter:
"""Adapter for different AI coding CLI tools."""
platform: Platform
# =========================================================================
# Agent Name Mapping
# =========================================================================
# OpenCode has built-in agents that cannot be overridden
# See: https://github.com/sst/opencode/issues/4271
# Note: Class-level constant, not a dataclass field
_AGENT_NAME_MAP: ClassVar[dict[Platform, dict[str, str]]] = {
"claude": {}, # No mapping needed
"opencode": {
"plan": "trellis-plan", # 'plan' is built-in in OpenCode
},
}
def get_agent_name(self, agent: str) -> str:
"""Get platform-specific agent name.
Args:
agent: Original agent name (e.g., 'plan', 'dispatch')
Returns:
Platform-specific agent name (e.g., 'trellis-plan' for OpenCode)
"""
mapping = self._AGENT_NAME_MAP.get(self.platform, {})
return mapping.get(agent, agent)
# =========================================================================
# Agent Path
# =========================================================================
@property
def config_dir_name(self) -> str:
"""Get platform-specific config directory name.
Returns:
Directory name ('.claude', '.opencode', '.cursor', '.iflow', '.codex', '.kilocode', '.kiro', '.gemini', '.agent', '.windsurf', '.qoder', '.codebuddy', '.github/copilot', or '.factory')
"""
if self.platform == "opencode":
return ".opencode"
elif self.platform == "cursor":
return ".cursor"
elif self.platform == "iflow":
return ".iflow"
elif self.platform == "codex":
return ".codex"
elif self.platform == "kilo":
return ".kilocode"
elif self.platform == "kiro":
return ".kiro"
elif self.platform == "gemini":
return ".gemini"
elif self.platform == "antigravity":
return ".agent"
elif self.platform == "windsurf":
return ".windsurf"
elif self.platform == "qoder":
return ".qoder"
elif self.platform == "codebuddy":
return ".codebuddy"
elif self.platform == "copilot":
return ".github/copilot"
elif self.platform == "droid":
return ".factory"
else:
return ".claude"
def get_config_dir(self, project_root: Path) -> Path:
"""Get platform-specific config directory.
Args:
project_root: Project root directory
Returns:
Path to config directory (.claude, .opencode, .cursor, .iflow, .codex, .kilocode, .kiro, .gemini, .agent, .windsurf, .qoder, .codebuddy, .github/copilot, or .factory)
"""
return project_root / self.config_dir_name
def get_agent_path(self, agent: str, project_root: Path) -> Path:
"""Get path to agent definition file.
Args:
agent: Agent name (original, before mapping)
project_root: Project root directory
Returns:
Path to agent definition file (.md for most platforms, .toml for Codex)
"""
mapped_name = self.get_agent_name(agent)
if self.platform == "codex":
return self.get_config_dir(project_root) / "agents" / f"{mapped_name}.toml"
return self.get_config_dir(project_root) / "agents" / f"{mapped_name}.md"
def get_commands_path(self, project_root: Path, *parts: str) -> Path:
"""Get path to commands directory or specific command file.
Args:
project_root: Project root directory
*parts: Additional path parts (e.g., 'trellis', 'finish-work.md')
Returns:
Path to commands directory or file
Note:
Cursor uses prefix naming: .cursor/commands/trellis-<name>.md
Antigravity uses workflow directory: .agent/workflows/<name>.md
Kilo uses workflow directory: .kilocode/workflows/<name>.md
Windsurf uses workflow directory: .windsurf/workflows/trellis-<name>.md
Copilot uses prompt files: .github/prompts/<name>.prompt.md
Claude/OpenCode use subdirectory: .claude/commands/trellis/<name>.md
"""
if self.platform == "windsurf":
workflow_dir = self.get_config_dir(project_root) / "workflows"
if not parts:
return workflow_dir
if len(parts) >= 2 and parts[0] == "trellis":
filename = parts[-1]
return workflow_dir / f"trellis-{filename}"
return workflow_dir / Path(*parts)
if self.platform in ("antigravity", "kilo"):
workflow_dir = self.get_config_dir(project_root) / "workflows"
if not parts:
return workflow_dir
if len(parts) >= 2 and parts[0] == "trellis":
filename = parts[-1]
return workflow_dir / filename
return workflow_dir / Path(*parts)
if self.platform == "copilot":
prompts_dir = project_root / ".github" / "prompts"
if not parts:
return prompts_dir
if len(parts) >= 2 and parts[0] == "trellis":
filename = parts[-1]
if filename.endswith(".md"):
filename = filename[:-3]
return prompts_dir / f"{filename}.prompt.md"
return prompts_dir / Path(*parts)
if not parts:
return self.get_config_dir(project_root) / "commands"
# Cursor uses prefix naming instead of subdirectory
if self.platform == "cursor" and len(parts) >= 2 and parts[0] == "trellis":
# Convert trellis/<name>.md to trellis-<name>.md
filename = parts[-1]
return (
self.get_config_dir(project_root) / "commands" / f"trellis-{filename}"
)
return self.get_config_dir(project_root) / "commands" / Path(*parts)
def get_trellis_command_path(self, name: str) -> str:
"""Get relative path to a trellis command file.
Args:
name: Command name without extension (e.g., 'finish-work', 'check')
Returns:
Relative path string for use in JSONL entries
Note:
Cursor: .cursor/commands/trellis-<name>.md
Codex: .agents/skills/trellis-<name>/SKILL.md
Kiro: .kiro/skills/trellis-<name>/SKILL.md
Gemini: .gemini/commands/trellis/<name>.toml
Antigravity: .agent/workflows/<name>.md
Windsurf: .windsurf/workflows/trellis-<name>.md
Others: .{platform}/commands/trellis/<name>.md
"""
if self.platform == "cursor":
return f".cursor/commands/trellis-{name}.md"
elif self.platform == "codex":
# 0.5.0-beta.0 renamed all skill dirs to add the `trellis-` prefix
# (see that release's manifest for the 60+ rename entries).
return f".agents/skills/trellis-{name}/SKILL.md"
elif self.platform == "kiro":
return f".kiro/skills/trellis-{name}/SKILL.md"
elif self.platform == "gemini":
return f".gemini/commands/trellis/{name}.toml"
elif self.platform == "antigravity":
return f".agent/workflows/{name}.md"
elif self.platform == "windsurf":
return f".windsurf/workflows/trellis-{name}.md"
elif self.platform == "kilo":
return f".kilocode/workflows/{name}.md"
elif self.platform == "copilot":
return f".github/prompts/{name}.prompt.md"
elif self.platform == "droid":
return f".factory/commands/trellis/{name}.md"
else:
return f"{self.config_dir_name}/commands/trellis/{name}.md"
# =========================================================================
# Environment Variables
# =========================================================================
def get_non_interactive_env(self) -> dict[str, str]:
"""Get environment variables for non-interactive mode.
Returns:
Dict of environment variables to set
"""
if self.platform == "opencode":
return {"OPENCODE_NON_INTERACTIVE": "1"}
elif self.platform == "iflow":
return {"IFLOW_NON_INTERACTIVE": "1"}
elif self.platform == "codex":
return {"CODEX_NON_INTERACTIVE": "1"}
elif self.platform == "kiro":
return {"KIRO_NON_INTERACTIVE": "1"}
elif self.platform == "gemini":
return {} # Gemini CLI doesn't have a non-interactive env var
elif self.platform == "antigravity":
return {}
elif self.platform == "windsurf":
return {}
elif self.platform == "qoder":
return {}
elif self.platform == "codebuddy":
return {}
elif self.platform == "copilot":
return {}
elif self.platform == "droid":
return {}
else:
return {"CLAUDE_NON_INTERACTIVE": "1"}
# =========================================================================
# CLI Command Building
# =========================================================================
def build_run_command(
self,
agent: str,
prompt: str,
session_id: str | None = None,
skip_permissions: bool = True,
verbose: bool = True,
json_output: bool = True,
) -> list[str]:
"""Build CLI command for running an agent.
Args:
agent: Agent name (will be mapped if needed)
prompt: Prompt to send to the agent
session_id: Optional session ID (Claude Code only for creation)
skip_permissions: Whether to skip permission prompts
verbose: Whether to enable verbose output
json_output: Whether to use JSON output format
Returns:
List of command arguments
"""
mapped_agent = self.get_agent_name(agent)
if self.platform == "opencode":
cmd = ["opencode", "run"]
cmd.extend(["--agent", mapped_agent])
# Note: OpenCode 'run' mode is non-interactive by default
# No equivalent to Claude Code's --dangerously-skip-permissions
# See: https://github.com/anomalyco/opencode/issues/9070
if json_output:
cmd.extend(["--format", "json"])
if verbose:
cmd.extend(["--log-level", "DEBUG", "--print-logs"])
# Note: OpenCode doesn't support --session-id on creation
# Session ID must be extracted from logs after startup
cmd.append(prompt)
elif self.platform == "iflow":
cmd = ["iflow", "-y", "-p"]
cmd.append(f"${mapped_agent} {prompt}")
elif self.platform == "codex":
cmd = ["codex", "exec"]
cmd.append(prompt)
elif self.platform == "kiro":
cmd = ["kiro", "run", prompt]
elif self.platform == "gemini":
cmd = ["gemini"]
cmd.append(prompt)
elif self.platform == "antigravity":
raise ValueError(
"Antigravity workflows are UI slash commands; CLI agent run is not supported."
)
elif self.platform == "windsurf":
raise ValueError(
"Windsurf workflows are UI slash commands; CLI agent run is not supported."
)
elif self.platform == "qoder":
cmd = ["qodercli", "-p", prompt]
elif self.platform == "codebuddy":
raise ValueError(
"CodeBuddy does not support non-interactive mode (no CLI agent)"
)
elif self.platform == "copilot":
raise ValueError(
"GitHub Copilot is IDE-only; CLI agent run is not supported."
)
elif self.platform == "droid":
raise ValueError(
"Factory Droid CLI agent run is not yet supported."
)
else: # claude
cmd = ["claude", "-p"]
cmd.extend(["--agent", mapped_agent])
if session_id:
cmd.extend(["--session-id", session_id])
if skip_permissions:
cmd.append("--dangerously-skip-permissions")
if json_output:
cmd.extend(["--output-format", "stream-json"])
if verbose:
cmd.append("--verbose")
cmd.append(prompt)
return cmd
def build_resume_command(self, session_id: str) -> list[str]:
"""Build CLI command for resuming a session.
Args:
session_id: Session ID to resume (ignored for iFlow)
Returns:
List of command arguments
"""
if self.platform == "opencode":
return ["opencode", "run", "--session", session_id]
elif self.platform == "iflow":
# iFlow uses -c to continue most recent conversation
# session_id is ignored as iFlow doesn't support session IDs
return ["iflow", "-c"]
elif self.platform == "codex":
return ["codex", "resume", session_id]
elif self.platform == "kiro":
return ["kiro", "resume", session_id]
elif self.platform == "gemini":
return ["gemini", "--resume", session_id]
elif self.platform == "antigravity":
raise ValueError(
"Antigravity workflows are UI slash commands; CLI resume is not supported."
)
elif self.platform == "windsurf":
raise ValueError(
"Windsurf workflows are UI slash commands; CLI resume is not supported."
)
elif self.platform == "qoder":
return ["qodercli", "--resume", session_id]
elif self.platform == "codebuddy":
raise ValueError(
"CodeBuddy does not support non-interactive mode (no CLI agent)"
)
elif self.platform == "copilot":
raise ValueError(
"GitHub Copilot is IDE-only; CLI resume is not supported."
)
elif self.platform == "droid":
raise ValueError(
"Factory Droid CLI resume is not yet supported."
)
else:
return ["claude", "--resume", session_id]
def get_resume_command_str(self, session_id: str, cwd: str | None = None) -> str:
"""Get human-readable resume command string.
Args:
session_id: Session ID to resume
cwd: Optional working directory to cd into
Returns:
Command string for display
"""
cmd = self.build_resume_command(session_id)
cmd_str = " ".join(cmd)
if cwd:
return f"cd {cwd} && {cmd_str}"
return cmd_str
# =========================================================================
# Platform Detection Helpers
# =========================================================================
@property
def is_opencode(self) -> bool:
"""Check if platform is OpenCode."""
return self.platform == "opencode"
@property
def is_claude(self) -> bool:
"""Check if platform is Claude Code."""
return self.platform == "claude"
@property
def is_cursor(self) -> bool:
"""Check if platform is Cursor."""
return self.platform == "cursor"
@property
def is_iflow(self) -> bool:
"""Check if platform is iFlow CLI."""
return self.platform == "iflow"
@property
def cli_name(self) -> str:
"""Get CLI executable name.
Note: Cursor is IDE-only; "cursor" is returned as a placeholder, not a real CLI.
"""
if self.is_opencode:
return "opencode"
elif self.is_cursor:
return "cursor" # Note: Cursor is IDE-only, no CLI
elif self.platform == "iflow":
return "iflow"
elif self.platform == "kiro":
return "kiro"
elif self.platform == "gemini":
return "gemini"
elif self.platform == "antigravity":
return "agy"
elif self.platform == "windsurf":
return "windsurf"
elif self.platform == "qoder":
return "qodercli"
elif self.platform == "codebuddy":
return "codebuddy"
elif self.platform == "copilot":
return "copilot"
elif self.platform == "droid":
return "droid"
else:
return "claude"
@property
def supports_cli_agents(self) -> bool:
"""Check if platform supports running agents via CLI.
Claude Code, OpenCode, iFlow, and Codex support CLI agent execution.
Cursor is IDE-only and doesn't support CLI agents.
"""
return self.platform in ("claude", "opencode", "iflow", "codex")
@property
def requires_agent_definition_file(self) -> bool:
"""Check if platform requires an agent definition file (.md/.toml) to run.
Claude Code, OpenCode, iFlow: require agent .md files (--agent flag).
Codex: auto-discovers agents from .codex/agents/*.toml, no --agent flag.
"""
return self.platform in ("claude", "opencode", "iflow")
# =========================================================================
# Session ID Handling
# =========================================================================
@property
def supports_session_id_on_create(self) -> bool:
"""Check if platform supports specifying session ID on creation.
Claude Code: Yes (--session-id)
OpenCode: No (auto-generated, extract from logs)
iFlow: No (no session ID support)
"""
return self.platform == "claude"
def extract_session_id_from_log(self, log_content: str) -> str | None:
"""Extract session ID from log output (OpenCode only).
OpenCode generates session IDs in format: ses_xxx
Args:
log_content: Log file content
Returns:
Session ID if found, None otherwise
"""
import re
# OpenCode session ID pattern
match = re.search(r"ses_[a-zA-Z0-9]+", log_content)
if match:
return match.group(0)
return None
# =============================================================================
# Factory Function
# =============================================================================
def get_cli_adapter(platform: str = "claude") -> CLIAdapter:
"""Get CLI adapter for the specified platform.
Args:
platform: Platform name ('claude', 'opencode', 'cursor', 'iflow', 'codex', 'kilo', 'kiro', 'gemini', 'antigravity', 'windsurf', 'qoder', 'codebuddy', 'copilot', or 'droid')
Returns:
CLIAdapter instance
Raises:
ValueError: If platform is not supported
"""
if platform not in (
"claude",
"opencode",
"cursor",
"iflow",
"codex",
"kilo",
"kiro",
"gemini",
"antigravity",
"windsurf",
"qoder",
"codebuddy",
"copilot",
"droid",
):
raise ValueError(
f"Unsupported platform: {platform} (must be 'claude', 'opencode', 'cursor', 'iflow', 'codex', 'kilo', 'kiro', 'gemini', 'antigravity', 'windsurf', 'qoder', 'codebuddy', 'copilot', or 'droid')"
)
return CLIAdapter(platform=platform) # type: ignore
_ALL_PLATFORM_CONFIG_DIRS = (
".claude",
".cursor",
".iflow",
".opencode",
".codex",
".kilocode",
".kiro",
".gemini",
".agent",
".windsurf",
".qoder",
".codebuddy",
".github/copilot",
".factory",
)
"""Platform-specific config directory names used by detect_platform exclusion
checks. `.agents/skills/` is NOT listed here: it is a shared cross-platform
layer (written by Codex, also consumed by Amp/Cline/Warp/etc. via the
agentskills.io standard), not a single-platform signal. Its presence must not
block detection of Kiro, Antigravity, Windsurf, or other platforms."""
def _has_other_platform_dir(project_root: Path, exclude: set[str]) -> bool:
"""Check if any platform config dir exists besides those in *exclude*."""
return any(
(project_root / d).is_dir()
for d in _ALL_PLATFORM_CONFIG_DIRS
if d not in exclude
)
def detect_platform(project_root: Path) -> Platform:
"""Auto-detect platform based on existing config directories.
Detection order:
1. TRELLIS_PLATFORM environment variable (if set)
2. .opencode directory exists → opencode
3. .iflow directory exists → iflow
4. .cursor directory exists (without .claude) → cursor
5. .gemini directory exists → gemini
6. .codex exists and no other platform dirs → codex
7. .kilocode directory exists → kilo
8. .kiro/skills exists and no other platform dirs → kiro
9. .agent/workflows exists and no other platform dirs → antigravity
10. .windsurf/workflows exists and no other platform dirs → windsurf
11. .codebuddy directory exists → codebuddy
12. .qoder directory exists → qoder
13. .github/copilot directory exists → copilot
14. .factory directory exists → droid
15. .agents/skills with trellis-* entries and no platform dirs → codex
16. Default → claude
Args:
project_root: Project root directory
Returns:
Detected platform, defaulting to 'claude'
"""
import os
# Check environment variable first
env_platform = os.environ.get("TRELLIS_PLATFORM", "").lower()
if env_platform in (
"claude",
"opencode",
"cursor",
"iflow",
"codex",
"kilo",
"kiro",
"gemini",
"antigravity",
"windsurf",
"qoder",
"codebuddy",
"copilot",
"droid",
):
return env_platform # type: ignore
# Check for .opencode directory (OpenCode-specific)
if (project_root / ".opencode").is_dir():
return "opencode"
# Check for .iflow directory (iFlow-specific)
if (project_root / ".iflow").is_dir():
return "iflow"
# Check for .cursor directory (Cursor-specific)
# Only detect as cursor if .claude doesn't exist (to avoid confusion)
if (project_root / ".cursor").is_dir() and not (project_root / ".claude").is_dir():
return "cursor"
# Check for .gemini directory (Gemini CLI-specific)
if (project_root / ".gemini").is_dir():
return "gemini"
# Check for .codex directory (Codex-specific)
# .agents/skills/ alone does NOT trigger codex detection (it's a shared standard)
if (project_root / ".codex").is_dir() and not _has_other_platform_dir(
project_root, {".codex", ".agents"}
):
return "codex"
# Check for .kilocode directory (Kilo-specific)
if (project_root / ".kilocode").is_dir():
return "kilo"
# Check for Kiro skills directory only when no other platform config exists
if (project_root / ".kiro" / "skills").is_dir() and not _has_other_platform_dir(
project_root, {".kiro"}
):
return "kiro"
# Check for Antigravity workflow directory only when no other platform config exists
if (
project_root / ".agent" / "workflows"
).is_dir() and not _has_other_platform_dir(
project_root, {".agent", ".gemini"}
):
return "antigravity"
# Check for Windsurf workflow directory only when no other platform config exists
if (
project_root / ".windsurf" / "workflows"
).is_dir() and not _has_other_platform_dir(
project_root, {".windsurf"}
):
return "windsurf"
# Check for .codebuddy directory (CodeBuddy-specific)
if (project_root / ".codebuddy").is_dir():
return "codebuddy"
# Check for .qoder directory (Qoder-specific)
if (project_root / ".qoder").is_dir():
return "qoder"
# Check for .github/copilot directory (GitHub Copilot-specific)
if (project_root / ".github" / "copilot").is_dir():
return "copilot"
# Check for .factory directory (Factory Droid-specific)
if (project_root / ".factory").is_dir():
return "droid"
# Fallback: checkout only has the Codex shared-skills layer
# (.agents/skills/trellis-* dirs) and no explicit platform config dir.
# Happens on fresh clones where .codex/ is gitignored/absent but the
# shared skills were committed to git. Must guard against the case
# where .claude/ or any other platform dir also exists — .agents/skills/
# can legitimately coexist with any platform as a shared consumption
# layer for Amp/Cline/Warp/etc.
agents_skills = project_root / ".agents" / "skills"
if agents_skills.is_dir() and not _has_other_platform_dir(
project_root, set()
):
try:
for entry in agents_skills.iterdir():
if entry.is_dir() and entry.name.startswith("trellis-"):
return "codex"
except OSError:
pass
return "claude"
def get_cli_adapter_auto(project_root: Path) -> CLIAdapter:
"""Get CLI adapter with auto-detected platform.
Args:
project_root: Project root directory
Returns:
CLIAdapter instance for detected platform
"""
platform = detect_platform(project_root)
return CLIAdapter(platform=platform)
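Tying the pieces together, the adapter is typically obtained via auto-detection and then asked to build a command; a minimal sketch using only the functions defined above:

from pathlib import Path

from common.cli_adapter import get_cli_adapter_auto

adapter = get_cli_adapter_auto(Path.cwd())  # falls back to "claude"
if adapter.supports_cli_agents:
    # On OpenCode the 'plan' agent is transparently mapped to 'trellis-plan'.
    cmd = adapter.build_run_command(agent="plan", prompt="Start the pipeline")
    print(cmd)
else:
    print(f"{adapter.platform} has no CLI agent mode; use its UI slash commands")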
+389
@@ -0,0 +1,389 @@
#!/usr/bin/env python3
"""
Trellis configuration reader.
Reads settings from .trellis/config.yaml with sensible defaults.
"""
from __future__ import annotations
import sys
from pathlib import Path
from .paths import DIR_WORKFLOW, get_repo_root
# =============================================================================
# YAML Simple Parser (no dependencies)
# =============================================================================
def _unquote(s: str) -> str:
"""Remove exactly one layer of matching surrounding quotes.
Unlike str.strip('"'), this only removes the outermost pair,
preserving any nested quotes inside the value.
Examples:
_unquote('"hello"') -> 'hello'
_unquote("'hello'") -> 'hello'
_unquote('"echo \\'hi\\'"') -> "echo 'hi'"
_unquote('hello') -> 'hello'
_unquote('"hello\\'') -> '"hello\\'' (mismatched, unchanged)
"""
if len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'"):
return s[1:-1]
return s
def parse_simple_yaml(content: str) -> dict:
"""Parse simple YAML with nested dict support (no dependencies).
Supports:
- key: value (string)
- key: (followed by list items)
- item1
- item2
- key: (followed by nested dict)
nested_key: value
nested_key2:
- item
Uses indentation to detect nesting (2+ spaces deeper = child).
Args:
content: YAML content string.
Returns:
Parsed dict (values can be str, list[str], or dict).
"""
lines = content.splitlines()
result: dict = {}
_parse_yaml_block(lines, 0, 0, result)
return result
def _parse_yaml_block(
lines: list[str], start: int, min_indent: int, target: dict
) -> int:
"""Parse a YAML block into target dict, returning next line index."""
i = start
current_list: list | None = None
while i < len(lines):
line = lines[i]
stripped = line.strip()
# Skip empty lines and comments
if not stripped or stripped.startswith("#"):
i += 1
continue
# Calculate indentation
indent = len(line) - len(line.lstrip())
# If dedented past our block, we're done
if indent < min_indent:
break
if stripped.startswith("- "):
if current_list is not None:
current_list.append(_unquote(stripped[2:].strip()))
i += 1
elif ":" in stripped:
key, _, value = stripped.partition(":")
key = key.strip()
value = _unquote(value.strip())
current_list = None
if value:
# key: value
target[key] = value
i += 1
else:
# key: (no value) — peek ahead to determine list vs nested dict
next_i, next_line = _next_content_line(lines, i + 1)
if next_i >= len(lines):
target[key] = {}
i = next_i
elif next_line.strip().startswith("- "):
# It's a list
current_list = []
target[key] = current_list
i += 1
else:
next_indent = len(next_line) - len(next_line.lstrip())
if next_indent > indent:
# It's a nested dict
nested: dict = {}
target[key] = nested
i = _parse_yaml_block(lines, i + 1, next_indent, nested)
else:
# Empty value, same or less indent follows
target[key] = {}
i += 1
else:
i += 1
return i
def _next_content_line(lines: list[str], start: int) -> tuple[int, str]:
"""Find the next non-empty, non-comment line."""
i = start
while i < len(lines):
stripped = lines[i].strip()
if stripped and not stripped.startswith("#"):
return i, lines[i]
i += 1
return i, ""
# Defaults
DEFAULT_SESSION_COMMIT_MESSAGE = "chore: record journal"
DEFAULT_MAX_JOURNAL_LINES = 2000
CONFIG_FILE = "config.yaml"
def _is_true_config_value(value: object) -> bool:
"""Return True when a config value represents an enabled flag."""
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.strip().lower() == "true"
return False
def _get_config_path(repo_root: Path | None = None) -> Path:
"""Get path to config.yaml."""
root = repo_root or get_repo_root()
return root / DIR_WORKFLOW / CONFIG_FILE
def _load_config(repo_root: Path | None = None) -> dict:
"""Load and parse config.yaml. Returns empty dict on any error."""
config_file = _get_config_path(repo_root)
try:
content = config_file.read_text(encoding="utf-8")
return parse_simple_yaml(content)
except (OSError, IOError):
return {}
def get_session_commit_message(repo_root: Path | None = None) -> str:
"""Get the commit message for auto-committing session records."""
config = _load_config(repo_root)
return config.get("session_commit_message", DEFAULT_SESSION_COMMIT_MESSAGE)
def get_max_journal_lines(repo_root: Path | None = None) -> int:
"""Get the maximum lines per journal file."""
config = _load_config(repo_root)
value = config.get("max_journal_lines", DEFAULT_MAX_JOURNAL_LINES)
try:
return int(value)
except (ValueError, TypeError):
return DEFAULT_MAX_JOURNAL_LINES
def get_hooks(event: str, repo_root: Path | None = None) -> list[str]:
"""Get hook commands for a lifecycle event.
Args:
event: Event name (e.g. "after_create", "after_archive").
repo_root: Repository root path.
Returns:
List of shell commands to execute, empty if none configured.
"""
config = _load_config(repo_root)
hooks = config.get("hooks")
if not isinstance(hooks, dict):
return []
commands = hooks.get(event)
if isinstance(commands, list):
return [str(c) for c in commands]
return []
# =============================================================================
# Monorepo / Packages
# =============================================================================
def get_packages(repo_root: Path | None = None) -> dict[str, dict] | None:
"""Get monorepo package declarations.
Returns:
Dict mapping package name to its config (path, type, etc.),
or None if not configured (single-repo mode).
Example return:
{"cli": {"path": "packages/cli"}, "docs-site": {"path": "docs-site", "type": "submodule"}}
"""
config = _load_config(repo_root)
packages = config.get("packages")
if not isinstance(packages, dict):
return None
# Ensure each value is a dict (filter out scalar entries)
filtered = {k: v for k, v in packages.items() if isinstance(v, dict)}
if not filtered:
return None
return filtered
def get_default_package(repo_root: Path | None = None) -> str | None:
"""Get the default package name from config.
Returns:
Package name string, or None if not configured.
"""
config = _load_config(repo_root)
value = config.get("default_package")
return str(value) if value else None
def get_submodule_packages(repo_root: Path | None = None) -> dict[str, str]:
"""Get packages that are git submodules.
Returns:
Dict mapping package name to its path for submodule-type packages.
Empty dict if none configured.
Example return:
{"docs-site": "docs-site"}
"""
packages = get_packages(repo_root)
if packages is None:
return {}
return {
name: cfg.get("path", name)
for name, cfg in packages.items()
if cfg.get("type") == "submodule"
}
def get_git_packages(repo_root: Path | None = None) -> dict[str, str]:
"""Get packages that have their own independent git repository.
These are sub-directories with their own .git (not submodules),
marked with ``git: true`` in config.yaml.
Returns:
Dict mapping package name to its path for git-repo packages.
Empty dict if none configured.
Example config::
packages:
backend:
path: iqs
git: true
Example return::
{"backend": "iqs"}
"""
packages = get_packages(repo_root)
if packages is None:
return {}
return {
name: cfg.get("path", name)
for name, cfg in packages.items()
if _is_true_config_value(cfg.get("git"))
}
def is_monorepo(repo_root: Path | None = None) -> bool:
"""Check if the project is configured as a monorepo (has packages in config)."""
return get_packages(repo_root) is not None
def get_spec_base(package: str | None = None, repo_root: Path | None = None) -> str:
"""Get the spec directory base path relative to .trellis/.
Single-repo: returns "spec"
Monorepo with package: returns "spec/<package>"
Monorepo without package: returns "spec" (caller should specify package)
"""
if package and is_monorepo(repo_root):
return f"spec/{package}"
return "spec"
def validate_package(package: str, repo_root: Path | None = None) -> bool:
"""Check if a package name is valid in this project.
Single-repo (no packages configured): always returns True.
Monorepo: returns True only if package exists in config.yaml packages.
"""
packages = get_packages(repo_root)
if packages is None:
return True # Single-repo, no validation needed
return package in packages
def resolve_package(
task_package: str | None = None,
repo_root: Path | None = None,
) -> str | None:
"""Resolve package from inferred sources with validation.
Checks in order: task_package → default_package.
Invalid inferred values print a warning to stderr and are skipped.
Returns:
Resolved package name, or None if no valid package found.
Note:
CLI --package should be validated separately by the caller
(fail-fast with available packages list on error).
"""
packages = get_packages(repo_root)
if packages is None:
return None # Single-repo, no package needed
# Try task_package (guard against non-string values from malformed JSON)
if task_package and isinstance(task_package, str):
if task_package in packages:
return task_package
print(
f"Warning: task.json package '{task_package}' not found in config, skipping",
file=sys.stderr,
)
# Try default_package
default = get_default_package(repo_root)
if default:
if default in packages:
return default
print(
f"Warning: default_package '{default}' not found in config, skipping",
file=sys.stderr,
)
return None
def get_spec_scope(repo_root: Path | None = None) -> list[str] | str | None:
"""Get session.spec_scope configuration.
Returns:
list[str]: Package names to include in spec scanning.
str: "active_task" to use current task's package.
None: No scope configured (scan all packages).
"""
config = _load_config(repo_root)
session = config.get("session")
if not isinstance(session, dict):
return None
scope = session.get("spec_scope")
if scope is None:
return None
if isinstance(scope, str):
return scope # e.g. "active_task"
if isinstance(scope, list):
return [str(s) for s in scope]
return None
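A quick check of what the minimal parser accepts, exercising scalar values, nested dicts, and lists in the same shapes config.yaml uses:

from common.config import parse_simple_yaml

sample = (
    'session_commit_message: "chore: record journal"\n'
    "packages:\n"
    "  frontend:\n"
    "    path: packages/frontend\n"
    "hooks:\n"
    "  after_finish:\n"
    "    - \"echo 'Task finished'\"\n"
)
parsed = parse_simple_yaml(sample)
assert parsed["session_commit_message"] == "chore: record journal"
assert parsed["packages"]["frontend"]["path"] == "packages/frontend"
assert parsed["hooks"]["after_finish"] == ["echo 'Task finished'"]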
+190
@@ -0,0 +1,190 @@
#!/usr/bin/env python3
"""
Developer management utilities.
Provides:
init_developer - Initialize developer
ensure_developer - Ensure developer is initialized (exit if not)
show_developer_info - Show developer information
"""
from __future__ import annotations
import sys
from datetime import datetime
from pathlib import Path
from .paths import (
DIR_WORKFLOW,
DIR_WORKSPACE,
DIR_TASKS,
FILE_DEVELOPER,
FILE_JOURNAL_PREFIX,
get_repo_root,
get_developer,
check_developer,
)
# =============================================================================
# Developer Initialization
# =============================================================================
def init_developer(name: str, repo_root: Path | None = None) -> bool:
"""Initialize developer.
Creates:
- .trellis/.developer file with developer info
- .trellis/workspace/<name>/ directory structure
- Initial journal file and index.md
Args:
name: Developer name.
repo_root: Repository root path. Defaults to auto-detected.
Returns:
True on success, False on error.
"""
if not name:
print("Error: developer name is required", file=sys.stderr)
return False
if repo_root is None:
repo_root = get_repo_root()
dev_file = repo_root / DIR_WORKFLOW / FILE_DEVELOPER
workspace_dir = repo_root / DIR_WORKFLOW / DIR_WORKSPACE / name
# Create .developer file
initialized_at = datetime.now().isoformat()
try:
dev_file.write_text(
f"name={name}\ninitialized_at={initialized_at}\n",
encoding="utf-8"
)
except (OSError, IOError) as e:
print(f"Error: Failed to create .developer file: {e}", file=sys.stderr)
return False
# Create workspace directory structure
try:
workspace_dir.mkdir(parents=True, exist_ok=True)
except (OSError, IOError) as e:
print(f"Error: Failed to create workspace directory: {e}", file=sys.stderr)
return False
# Create initial journal file
journal_file = workspace_dir / f"{FILE_JOURNAL_PREFIX}1.md"
if not journal_file.exists():
today = datetime.now().strftime("%Y-%m-%d")
journal_content = f"""# Journal - {name} (Part 1)
> AI development session journal
> Started: {today}
---
"""
try:
journal_file.write_text(journal_content, encoding="utf-8")
except (OSError, IOError) as e:
print(f"Error: Failed to create journal file: {e}", file=sys.stderr)
return False
# Create index.md with markers for auto-update
index_file = workspace_dir / "index.md"
if not index_file.exists():
index_content = f"""# Workspace Index - {name}
> Journal tracking for AI development sessions.
---
## Current Status
<!-- @@@auto:current-status -->
- **Active File**: `journal-1.md`
- **Total Sessions**: 0
- **Last Active**: -
<!-- @@@/auto:current-status -->
---
## Active Documents
<!-- @@@auto:active-documents -->
| File | Lines | Status |
|------|-------|--------|
| `journal-1.md` | ~0 | Active |
<!-- @@@/auto:active-documents -->
---
## Session History
<!-- @@@auto:session-history -->
| # | Date | Title | Commits | Branch |
|---|------|-------|---------|--------|
<!-- @@@/auto:session-history -->
---
## Notes
- Sessions are appended to journal files
- New journal file created when current exceeds 2000 lines
- Use `add_session.py` to record sessions
"""
try:
index_file.write_text(index_content, encoding="utf-8")
except (OSError, IOError) as e:
print(f"Error: Failed to create index.md: {e}", file=sys.stderr)
return False
print(f"Developer initialized: {name}")
print(f" .developer file: {dev_file}")
print(f" Workspace dir: {workspace_dir}")
return True
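# Illustrative call (the name is a placeholder; layout as described above):
#
#     init_developer("alice")
#     # -> .trellis/.developer, .trellis/workspace/alice/journal-1.md,
#     #    .trellis/workspace/alice/index.md; returns True on success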
def ensure_developer(repo_root: Path | None = None) -> None:
"""Ensure developer is initialized, exit if not.
Args:
repo_root: Repository root path. Defaults to auto-detected.
"""
if repo_root is None:
repo_root = get_repo_root()
if not check_developer(repo_root):
print("Error: Developer not initialized.", file=sys.stderr)
print(f"Run: python3 ./{DIR_WORKFLOW}/scripts/init_developer.py <your-name>", file=sys.stderr)
sys.exit(1)
def show_developer_info(repo_root: Path | None = None) -> None:
"""Show developer information.
Args:
repo_root: Repository root path. Defaults to auto-detected.
"""
if repo_root is None:
repo_root = get_repo_root()
developer = get_developer(repo_root)
if not developer:
print("Developer: (not initialized)")
else:
print(f"Developer: {developer}")
print(f"Workspace: {DIR_WORKFLOW}/{DIR_WORKSPACE}/{developer}/")
print(f"Tasks: {DIR_WORKFLOW}/{DIR_TASKS}/")
# =============================================================================
# Main Entry (for testing)
# =============================================================================
if __name__ == "__main__":
show_developer_info()
+31
View File
@@ -0,0 +1,31 @@
"""
Git command execution utility.
Single source of truth for running git commands across all Trellis scripts.
"""
from __future__ import annotations
import subprocess
from pathlib import Path
def run_git(args: list[str], cwd: Path | None = None) -> tuple[int, str, str]:
"""Run a git command and return (returncode, stdout, stderr).
Uses UTF-8 encoding with -c i18n.logOutputEncoding=UTF-8 to ensure
consistent output across all platforms (Windows, macOS, Linux).
"""
try:
git_args = ["git", "-c", "i18n.logOutputEncoding=UTF-8"] + args
result = subprocess.run(
git_args,
cwd=cwd,
capture_output=True,
text=True,
encoding="utf-8",
errors="replace",
)
return result.returncode, result.stdout, result.stderr
except Exception as e:
return 1, "", str(e)
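# Usage sketch (works in any git repo; a non-zero code or the exception
# fallback above signals failure):
#
#     rc, out, err = run_git(["status", "--porcelain"])
#     is_clean = rc == 0 and not out.strip()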
+101
View File
@@ -0,0 +1,101 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Git and Session Context utilities.
Entry shim — delegates to session_context and packages_context.
Provides:
output_json - Output context in JSON format
output_text - Output context in text format
"""
from __future__ import annotations
import json
from .git import run_git
from .session_context import (
get_context_json,
get_context_text,
get_context_record_json,
get_context_text_record,
output_json,
output_text,
)
from .packages_context import (
get_context_packages_text,
get_context_packages_json,
)
from .workflow_phase import (
filter_platform,
get_phase_index,
get_step,
)
# Backward-compatible alias — external modules import this name
_run_git_command = run_git
# =============================================================================
# Main Entry
# =============================================================================
def main() -> None:
"""CLI entry point."""
import argparse
parser = argparse.ArgumentParser(description="Get Session Context for AI Agent")
parser.add_argument(
"--json",
"-j",
action="store_true",
help="Output in JSON format (works with any --mode)",
)
parser.add_argument(
"--mode",
"-m",
choices=["default", "record", "packages", "phase"],
default="default",
help="Output mode: default (full context), record (for record-session), packages (package info only), phase (workflow step extraction)",
)
parser.add_argument(
"--step",
help="Step id for --mode phase, e.g. 1.1, 2.2. Omit to get the Phase Index.",
)
parser.add_argument(
"--platform",
help="Platform name for --mode phase, e.g. cursor, claude-code. Filters platform-tagged blocks.",
)
args = parser.parse_args()
if args.mode == "record":
if args.json:
print(json.dumps(get_context_record_json(), indent=2, ensure_ascii=False))
else:
print(get_context_text_record())
elif args.mode == "packages":
if args.json:
print(json.dumps(get_context_packages_json(), indent=2, ensure_ascii=False))
else:
print(get_context_packages_text())
elif args.mode == "phase":
content = get_step(args.step) if args.step else get_phase_index()
if not content.strip():
if args.step:
parser.exit(2, f"Step not found: {args.step}\n")
else:
parser.exit(2, "Phase Index section not found in workflow.md\n")
if args.platform:
content = filter_platform(content, args.platform)
print(content, end="")
else:
if args.json:
output_json()
else:
output_text()
if __name__ == "__main__":
main()
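# Illustrative invocations (assuming the standard .trellis layout):
#
#     python3 .trellis/scripts/get_context.py                       # full text context
#     python3 .trellis/scripts/get_context.py --json                # same, as JSON
#     python3 .trellis/scripts/get_context.py -m packages --json    # package info only
#     python3 .trellis/scripts/get_context.py -m phase --step 1.1   # one workflow step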
+37
View File
@@ -0,0 +1,37 @@
"""
JSON file I/O utilities.
Provides read_json and write_json as the single source of truth
for JSON file operations across all Trellis scripts.
"""
from __future__ import annotations
import json
from pathlib import Path
def read_json(path: Path) -> dict | None:
"""Read and parse a JSON file.
Returns None if the file doesn't exist, is invalid JSON, or can't be read.
"""
try:
return json.loads(path.read_text(encoding="utf-8"))
except (FileNotFoundError, json.JSONDecodeError, OSError):
return None
def write_json(path: Path, data: dict) -> bool:
"""Write dict to JSON file with pretty formatting.
Returns True on success, False on error.
"""
try:
path.write_text(
json.dumps(data, indent=2, ensure_ascii=False),
encoding="utf-8",
)
return True
except (OSError, IOError):
return False
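# Round-trip sketch (path is illustrative):
#
#     p = Path("/tmp/example.json")
#     write_json(p, {"k": "v"})   # -> True
#     read_json(p)                # -> {"k": "v"}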
+45
View File
@@ -0,0 +1,45 @@
"""
Terminal output utilities: colors and structured logging.
Single source of truth for Colors and log_* functions
used across all Trellis scripts.
"""
from __future__ import annotations
class Colors:
"""ANSI color codes for terminal output."""
RED = "\033[0;31m"
GREEN = "\033[0;32m"
YELLOW = "\033[1;33m"
BLUE = "\033[0;34m"
CYAN = "\033[0;36m"
DIM = "\033[2m"
NC = "\033[0m" # No Color / Reset
def colored(text: str, color: str) -> str:
"""Apply ANSI color to text."""
return f"{color}{text}{Colors.NC}"
def log_info(msg: str) -> None:
"""Print info-level message with [INFO] prefix."""
print(f"{Colors.BLUE}[INFO]{Colors.NC} {msg}")
def log_success(msg: str) -> None:
"""Print success message with [SUCCESS] prefix."""
print(f"{Colors.GREEN}[SUCCESS]{Colors.NC} {msg}")
def log_warn(msg: str) -> None:
"""Print warning message with [WARN] prefix."""
print(f"{Colors.YELLOW}[WARN]{Colors.NC} {msg}")
def log_error(msg: str) -> None:
"""Print error message with [ERROR] prefix."""
print(f"{Colors.RED}[ERROR]{Colors.NC} {msg}")
+238
View File
@@ -0,0 +1,238 @@
#!/usr/bin/env python3
"""
Package discovery and context output.
Provides:
get_packages_info - Get structured package info
get_packages_section - Build PACKAGES text section
get_context_packages_text - Full packages text output (--mode packages)
get_context_packages_json - Full packages JSON output (--mode packages --json)
"""
from __future__ import annotations
from pathlib import Path
from .config import _is_true_config_value, get_default_package, get_packages, get_spec_scope
from .paths import (
DIR_SPEC,
DIR_WORKFLOW,
get_current_task,
get_repo_root,
)
from .tasks import load_task
# =============================================================================
# Internal Helpers
# =============================================================================
def _scan_spec_layers(spec_dir: Path, package: str | None = None) -> list[str]:
"""Scan spec directory for available layers (subdirectories).
For monorepo: scans spec/<package>/
For single-repo: scans spec/
"""
target = spec_dir / package if package else spec_dir
if not target.is_dir():
return []
return sorted(
d.name for d in target.iterdir() if d.is_dir() and d.name != "guides"
)
def _get_active_task_package(repo_root: Path) -> str | None:
"""Get the package field from the active task's task.json."""
current = get_current_task(repo_root)
if not current:
return None
ct = load_task(repo_root / current)
return ct.package if ct and ct.package else None
def _resolve_scope_set(
packages: dict,
spec_scope,
task_pkg: str | None,
default_pkg: str | None,
) -> set | None:
"""Resolve spec_scope to a set of allowed package names, or None for full scan."""
if not packages:
return None
if spec_scope is None:
return None
if isinstance(spec_scope, str) and spec_scope == "active_task":
if task_pkg and task_pkg in packages:
return {task_pkg}
if default_pkg and default_pkg in packages:
return {default_pkg}
return None
if isinstance(spec_scope, list):
valid = {e for e in spec_scope if e in packages}
if valid:
return valid
# All invalid: fallback
if task_pkg and task_pkg in packages:
return {task_pkg}
if default_pkg and default_pkg in packages:
return {default_pkg}
return None
return None
# =============================================================================
# Public Functions
# =============================================================================
def get_packages_info(repo_root: Path) -> list[dict]:
"""Get structured package info for monorepo projects.
Returns list of dicts with keys: name, path, type, default, specLayers,
isSubmodule, isGitRepo.
Returns empty list for single-repo projects.
"""
packages = get_packages(repo_root)
if not packages:
return []
default_pkg = get_default_package(repo_root)
spec_dir = repo_root / DIR_WORKFLOW / DIR_SPEC
result = []
for pkg_name, pkg_config in packages.items():
pkg_path = pkg_config.get("path", pkg_name) if isinstance(pkg_config, dict) else str(pkg_config)
pkg_type = pkg_config.get("type", "local") if isinstance(pkg_config, dict) else "local"
pkg_git = pkg_config.get("git", False) if isinstance(pkg_config, dict) else False
layers = _scan_spec_layers(spec_dir, pkg_name)
result.append({
"name": pkg_name,
"path": pkg_path,
"type": pkg_type,
"default": pkg_name == default_pkg,
"specLayers": layers,
"isSubmodule": pkg_type == "submodule",
"isGitRepo": _is_true_config_value(pkg_git),
})
return result
def get_packages_section(repo_root: Path) -> str:
"""Build the PACKAGES section for text output."""
spec_dir = repo_root / DIR_WORKFLOW / DIR_SPEC
pkg_info = get_packages_info(repo_root)
lines: list[str] = []
lines.append("## PACKAGES")
if not pkg_info:
lines.append("(single-repo mode)")
layers = _scan_spec_layers(spec_dir)
if layers:
lines.append(f"Spec layers: {', '.join(layers)}")
return "\n".join(lines)
default_pkg = get_default_package(repo_root)
for pkg in pkg_info:
layers_str = f" [{', '.join(pkg['specLayers'])}]" if pkg["specLayers"] else ""
submodule_tag = " (submodule)" if pkg["isSubmodule"] else ""
git_repo_tag = " (git repo)" if pkg["isGitRepo"] else ""
default_tag = " *" if pkg["default"] else ""
lines.append(
f"- {pkg['name']:<16} {pkg['path']:<20}{layers_str}{submodule_tag}{git_repo_tag}{default_tag}"
)
if default_pkg:
lines.append(f"Default package: {default_pkg}")
return "\n".join(lines)
def get_context_packages_text(repo_root: Path | None = None) -> str:
"""Get packages context as formatted text (for --mode packages)."""
if repo_root is None:
repo_root = get_repo_root()
pkg_info = get_packages_info(repo_root)
lines: list[str] = []
if not pkg_info:
spec_dir = repo_root / DIR_WORKFLOW / DIR_SPEC
lines.append("Single-repo project (no packages configured)")
lines.append("")
layers = _scan_spec_layers(spec_dir)
if layers:
lines.append(f"Spec layers: {', '.join(layers)}")
return "\n".join(lines)
# Resolve scope for annotations
packages_dict = get_packages(repo_root) or {}
default_pkg = get_default_package(repo_root)
spec_scope = get_spec_scope(repo_root)
task_pkg = _get_active_task_package(repo_root)
scope_set = _resolve_scope_set(packages_dict, spec_scope, task_pkg, default_pkg)
lines.append("## PACKAGES")
lines.append("")
for pkg in pkg_info:
default_tag = " (default)" if pkg["default"] else ""
type_tag = f" [{pkg['type']}]" if pkg["type"] != "local" else ""
git_tag = " [git repo]" if pkg["isGitRepo"] else ""
# Scope annotation
scope_tag = ""
if scope_set is not None and pkg["name"] not in scope_set:
scope_tag = " (out of scope)"
lines.append(f"### {pkg['name']}{default_tag}{type_tag}{git_tag}{scope_tag}")
lines.append(f"Path: {pkg['path']}")
if pkg["specLayers"]:
lines.append(f"Spec layers: {', '.join(pkg['specLayers'])}")
for layer in pkg["specLayers"]:
lines.append(f" - .trellis/spec/{pkg['name']}/{layer}/index.md")
else:
lines.append("Spec: not configured")
lines.append("")
# Also show shared guides
guides_dir = repo_root / DIR_WORKFLOW / DIR_SPEC / "guides"
if guides_dir.is_dir():
lines.append("### Shared Guides (always included)")
lines.append("Path: .trellis/spec/guides/index.md")
lines.append("")
return "\n".join(lines)
def get_context_packages_json(repo_root: Path | None = None) -> dict:
"""Get packages context as a dictionary (for --mode packages --json)."""
if repo_root is None:
repo_root = get_repo_root()
pkg_info = get_packages_info(repo_root)
if not pkg_info:
spec_dir = repo_root / DIR_WORKFLOW / DIR_SPEC
layers = _scan_spec_layers(spec_dir)
return {
"mode": "single-repo",
"specLayers": layers,
}
default_pkg = get_default_package(repo_root)
spec_scope = get_spec_scope(repo_root)
task_pkg = _get_active_task_package(repo_root)
return {
"mode": "monorepo",
"packages": pkg_info,
"defaultPackage": default_pkg,
"specScope": spec_scope,
"activeTaskPackage": task_pkg,
}
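# Illustrative --mode packages --json output for a monorepo (names, paths,
# and spec layers hypothetical):
#
#     {
#       "mode": "monorepo",
#       "packages": [{"name": "core", "path": "packages/core", "type": "local",
#                     "default": true, "specLayers": ["backend"],
#                     "isSubmodule": false, "isGitRepo": false}],
#       "defaultPackage": "core",
#       "specScope": "active_task",
#       "activeTaskPackage": "core"
#     }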
+444
View File
@@ -0,0 +1,444 @@
#!/usr/bin/env python3
"""
Common path utilities for Trellis workflow.
Provides:
get_repo_root - Get repository root directory
get_developer - Get developer name
get_workspace_dir - Get developer workspace directory
get_tasks_dir - Get tasks directory
get_active_journal_file - Get current journal file
"""
from __future__ import annotations
import re
from datetime import datetime
from pathlib import Path
# =============================================================================
# Path Constants (change here to rename directories)
# =============================================================================
# Directory names
DIR_WORKFLOW = ".trellis"
DIR_WORKSPACE = "workspace"
DIR_TASKS = "tasks"
DIR_ARCHIVE = "archive"
DIR_SPEC = "spec"
DIR_SCRIPTS = "scripts"
# File names
FILE_DEVELOPER = ".developer"
FILE_CURRENT_TASK = ".current-task"
FILE_TASK_JSON = "task.json"
FILE_JOURNAL_PREFIX = "journal-"
# =============================================================================
# Repository Root
# =============================================================================
def get_repo_root(start_path: Path | None = None) -> Path:
"""Find the nearest directory containing .trellis/ folder.
This handles nested git repos correctly (e.g., test project inside another repo).
Args:
start_path: Starting directory to search from. Defaults to current directory.
Returns:
Path to repository root, or current directory if no .trellis/ found.
"""
current = (start_path or Path.cwd()).resolve()
while current != current.parent:
if (current / DIR_WORKFLOW).is_dir():
return current
current = current.parent
# Fallback to current directory if no .trellis/ found
return Path.cwd().resolve()
# =============================================================================
# Developer
# =============================================================================
def get_developer(repo_root: Path | None = None) -> str | None:
"""Get developer name from .developer file.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Developer name or None if not initialized.
"""
if repo_root is None:
repo_root = get_repo_root()
dev_file = repo_root / DIR_WORKFLOW / FILE_DEVELOPER
if not dev_file.is_file():
return None
try:
content = dev_file.read_text(encoding="utf-8")
for line in content.splitlines():
if line.startswith("name="):
return line.split("=", 1)[1].strip()
except (OSError, IOError):
pass
return None
def check_developer(repo_root: Path | None = None) -> bool:
"""Check if developer is initialized.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
True if developer is initialized.
"""
return get_developer(repo_root) is not None
# =============================================================================
# Tasks Directory
# =============================================================================
def get_tasks_dir(repo_root: Path | None = None) -> Path:
"""Get tasks directory path.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Path to tasks directory.
"""
if repo_root is None:
repo_root = get_repo_root()
return repo_root / DIR_WORKFLOW / DIR_TASKS
# =============================================================================
# Workspace Directory
# =============================================================================
def get_workspace_dir(repo_root: Path | None = None) -> Path | None:
"""Get developer workspace directory.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Path to workspace directory or None if developer not set.
"""
if repo_root is None:
repo_root = get_repo_root()
developer = get_developer(repo_root)
if developer:
return repo_root / DIR_WORKFLOW / DIR_WORKSPACE / developer
return None
# =============================================================================
# Journal File
# =============================================================================
def get_active_journal_file(repo_root: Path | None = None) -> Path | None:
"""Get the current active journal file.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Path to active journal file or None if not found.
"""
if repo_root is None:
repo_root = get_repo_root()
workspace_dir = get_workspace_dir(repo_root)
if workspace_dir is None or not workspace_dir.is_dir():
return None
latest: Path | None = None
highest = 0
for f in workspace_dir.glob(f"{FILE_JOURNAL_PREFIX}*.md"):
if not f.is_file():
continue
# Extract number from filename
name = f.stem # e.g., "journal-1"
match = re.search(r"(\d+)$", name)
if match:
num = int(match.group(1))
if num > highest:
highest = num
latest = f
return latest
def count_lines(file_path: Path) -> int:
"""Count lines in a file.
Args:
file_path: Path to file.
Returns:
Number of lines, or 0 if file doesn't exist.
"""
if not file_path.is_file():
return 0
try:
return len(file_path.read_text(encoding="utf-8").splitlines())
except (OSError, IOError):
return 0
# =============================================================================
# Current Task Management
# =============================================================================
def _get_current_task_file(repo_root: Path | None = None) -> Path:
"""Get .current-task file path.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Path to .current-task file.
"""
if repo_root is None:
repo_root = get_repo_root()
return repo_root / DIR_WORKFLOW / FILE_CURRENT_TASK
def normalize_task_ref(task_ref: str) -> str:
"""Normalize a task ref for stable storage in .current-task.
Stored refs should prefer repo-relative POSIX paths like
`.trellis/tasks/03-27-my-task`, even on Windows. Absolute paths are preserved
as-is; callers may convert them back to repo-relative form later.
"""
normalized = task_ref.strip()
if not normalized:
return ""
path_obj = Path(normalized)
if path_obj.is_absolute():
return str(path_obj)
normalized = normalized.replace("\\", "/")
while normalized.startswith("./"):
normalized = normalized[2:]
if normalized.startswith(f"{DIR_TASKS}/"):
return f"{DIR_WORKFLOW}/{normalized}"
return normalized
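# Normalization examples (task directory name hypothetical):
#
#     normalize_task_ref("tasks/03-27-my-task")      # -> ".trellis/tasks/03-27-my-task"
#     normalize_task_ref("./tasks\\03-27-my-task")   # -> ".trellis/tasks/03-27-my-task"
#     normalize_task_ref("03-27-my-task")            # -> "03-27-my-task" (unchanged)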
def resolve_task_ref(task_ref: str, repo_root: Path | None = None) -> Path | None:
"""Resolve a task ref from .current-task to an absolute task directory path."""
if repo_root is None:
repo_root = get_repo_root()
normalized = normalize_task_ref(task_ref)
if not normalized:
return None
path_obj = Path(normalized)
if path_obj.is_absolute():
return path_obj
if normalized.startswith(f"{DIR_WORKFLOW}/"):
return repo_root / path_obj
return repo_root / DIR_WORKFLOW / DIR_TASKS / path_obj
def get_current_task(repo_root: Path | None = None) -> str | None:
"""Get current task directory path (relative to repo_root).
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Relative path to current task directory or None.
"""
current_file = _get_current_task_file(repo_root)
if not current_file.is_file():
return None
try:
content = current_file.read_text(encoding="utf-8").strip()
return normalize_task_ref(content) if content else None
except (OSError, IOError):
return None
def get_current_task_abs(repo_root: Path | None = None) -> Path | None:
"""Get current task directory absolute path.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Absolute path to current task directory or None.
"""
if repo_root is None:
repo_root = get_repo_root()
relative = get_current_task(repo_root)
if relative:
return resolve_task_ref(relative, repo_root)
return None
def set_current_task(task_path: str, repo_root: Path | None = None) -> bool:
"""Set current task.
Args:
task_path: Task directory path (relative to repo_root).
repo_root: Repository root path. Defaults to auto-detected.
Returns:
True on success, False on error.
"""
if repo_root is None:
repo_root = get_repo_root()
normalized = normalize_task_ref(task_path)
if not normalized:
return False
# Verify task directory exists
full_path = resolve_task_ref(normalized, repo_root)
if full_path is None or not full_path.is_dir():
return False
try:
normalized = full_path.relative_to(repo_root).as_posix()
except ValueError:
normalized = str(full_path)
current_file = _get_current_task_file(repo_root)
try:
current_file.write_text(normalized, encoding="utf-8")
return True
except (OSError, IOError):
return False
def clear_current_task(repo_root: Path | None = None) -> bool:
"""Clear current task.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
True on success.
"""
current_file = _get_current_task_file(repo_root)
try:
if current_file.is_file():
current_file.unlink()
return True
except (OSError, IOError):
return False
def has_current_task(repo_root: Path | None = None) -> bool:
"""Check if has current task.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
True if current task is set.
"""
return get_current_task(repo_root) is not None
# =============================================================================
# Task ID Generation
# =============================================================================
def generate_task_date_prefix() -> str:
"""Generate task ID based on date (MM-DD format).
Returns:
Date prefix string (e.g., "01-21").
"""
return datetime.now().strftime("%m-%d")
# =============================================================================
# Monorepo / Package Paths
# =============================================================================
def get_spec_dir(package: str | None = None, repo_root: Path | None = None) -> Path:
"""Get the spec directory path.
Single-repo: .trellis/spec
Monorepo with package: .trellis/spec/<package>
Uses lazy import to avoid circular dependency with config.py.
"""
if repo_root is None:
repo_root = get_repo_root()
from .config import get_spec_base
base = get_spec_base(package, repo_root)
return repo_root / DIR_WORKFLOW / base
def get_package_path(package: str, repo_root: Path | None = None) -> Path | None:
"""Get a package's source directory absolute path from config.
Returns:
Absolute path to the package directory, or None if not found.
"""
if repo_root is None:
repo_root = get_repo_root()
from .config import get_packages
packages = get_packages(repo_root)
if not packages or package not in packages:
return None
info = packages[package]
if isinstance(info, dict):
rel_path = info.get("path", package)
else:
rel_path = str(info)
return repo_root / rel_path
# =============================================================================
# Main Entry (for testing)
# =============================================================================
if __name__ == "__main__":
repo = get_repo_root()
print(f"Repository root: {repo}")
print(f"Developer: {get_developer(repo)}")
print(f"Tasks dir: {get_tasks_dir(repo)}")
print(f"Workspace dir: {get_workspace_dir(repo)}")
print(f"Journal file: {get_active_journal_file(repo)}")
print(f"Current task: {get_current_task(repo)}")
+562
View File
@@ -0,0 +1,562 @@
#!/usr/bin/env python3
"""
Session context generation (default + record modes).
Provides:
get_context_json - JSON output for default mode
get_context_text - Text output for default mode
get_context_record_json - JSON for record mode
get_context_text_record - Text for record mode
output_json - Print JSON
output_text - Print text
"""
from __future__ import annotations
import json
from pathlib import Path
from .config import get_git_packages
from .git import run_git
from .packages_context import get_packages_section
from .tasks import iter_active_tasks, load_task, get_all_statuses, children_progress
from .paths import (
DIR_SCRIPTS,
DIR_SPEC,
DIR_TASKS,
DIR_WORKFLOW,
DIR_WORKSPACE,
count_lines,
get_active_journal_file,
get_current_task,
get_developer,
get_repo_root,
get_tasks_dir,
)
# =============================================================================
# Helpers
# =============================================================================
def _collect_package_git_info(repo_root: Path) -> list[dict]:
"""Collect git status and recent commits for packages with independent git repos.
Only packages marked with ``git: true`` in config.yaml are included.
Returns:
List of dicts with keys: name, path, branch, isClean,
uncommittedChanges, recentCommits.
Empty list if no git-repo packages are configured.
"""
git_pkgs = get_git_packages(repo_root)
if not git_pkgs:
return []
result = []
for pkg_name, pkg_path in git_pkgs.items():
pkg_dir = repo_root / pkg_path
if not (pkg_dir / ".git").exists():
continue
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=pkg_dir)
branch = branch_out.strip() or "unknown"
_, status_out, _ = run_git(["status", "--porcelain"], cwd=pkg_dir)
changes = len([line for line in status_out.splitlines() if line.strip()])
_, log_out, _ = run_git(["log", "--oneline", "-5"], cwd=pkg_dir)
commits = []
for line in log_out.splitlines():
if line.strip():
parts = line.split(" ", 1)
if len(parts) >= 2:
commits.append({"hash": parts[0], "message": parts[1]})
elif len(parts) == 1:
commits.append({"hash": parts[0], "message": ""})
result.append({
"name": pkg_name,
"path": pkg_path,
"branch": branch,
"isClean": changes == 0,
"uncommittedChanges": changes,
"recentCommits": commits,
})
return result
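# Illustrative return value for one git-enabled package (values hypothetical):
#
#     [{"name": "core", "path": "packages/core", "branch": "main",
#       "isClean": False, "uncommittedChanges": 2,
#       "recentCommits": [{"hash": "abc1234", "message": "fix parser"}]}]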
def _append_package_git_context(lines: list[str], package_git_info: list[dict]) -> None:
"""Append Git status and recent commits for package repositories."""
for pkg in package_git_info:
lines.append(f"## GIT STATUS ({pkg['name']}: {pkg['path']})")
lines.append(f"Branch: {pkg['branch']}")
if pkg["isClean"]:
lines.append("Working directory: Clean")
else:
lines.append(
f"Working directory: {pkg['uncommittedChanges']} uncommitted change(s)"
)
lines.append("")
lines.append(f"## RECENT COMMITS ({pkg['name']}: {pkg['path']})")
if pkg["recentCommits"]:
for commit in pkg["recentCommits"]:
lines.append(f"{commit['hash']} {commit['message']}")
else:
lines.append("(no commits)")
lines.append("")
# =============================================================================
# JSON Output
# =============================================================================
def get_context_json(repo_root: Path | None = None) -> dict:
"""Get context as a dictionary.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Context dictionary.
"""
if repo_root is None:
repo_root = get_repo_root()
developer = get_developer(repo_root)
tasks_dir = get_tasks_dir(repo_root)
journal_file = get_active_journal_file(repo_root)
journal_lines = 0
journal_relative = ""
if journal_file and developer:
journal_lines = count_lines(journal_file)
journal_relative = (
f"{DIR_WORKFLOW}/{DIR_WORKSPACE}/{developer}/{journal_file.name}"
)
# Git info
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=repo_root)
branch = branch_out.strip() or "unknown"
_, status_out, _ = run_git(["status", "--porcelain"], cwd=repo_root)
git_status_count = len([line for line in status_out.splitlines() if line.strip()])
is_clean = git_status_count == 0
# Recent commits
_, log_out, _ = run_git(["log", "--oneline", "-5"], cwd=repo_root)
commits = []
for line in log_out.splitlines():
if line.strip():
parts = line.split(" ", 1)
if len(parts) >= 2:
commits.append({"hash": parts[0], "message": parts[1]})
elif len(parts) == 1:
commits.append({"hash": parts[0], "message": ""})
# Tasks
tasks = [
{
"dir": t.dir_name,
"name": t.name,
"status": t.status,
"children": list(t.children),
"parent": t.parent,
}
for t in iter_active_tasks(tasks_dir)
]
# Package git repos (independent sub-repositories)
pkg_git_info = _collect_package_git_info(repo_root)
result = {
"developer": developer or "",
"git": {
"branch": branch,
"isClean": is_clean,
"uncommittedChanges": git_status_count,
"recentCommits": commits,
},
"tasks": {
"active": tasks,
"directory": f"{DIR_WORKFLOW}/{DIR_TASKS}",
},
"journal": {
"file": journal_relative,
"lines": journal_lines,
"nearLimit": journal_lines > 1800,
},
}
if pkg_git_info:
result["packageGit"] = pkg_git_info
return result
def output_json(repo_root: Path | None = None) -> None:
"""Output context in JSON format.
Args:
repo_root: Repository root path. Defaults to auto-detected.
"""
context = get_context_json(repo_root)
print(json.dumps(context, indent=2, ensure_ascii=False))
# =============================================================================
# Text Output
# =============================================================================
def get_context_text(repo_root: Path | None = None) -> str:
"""Get context as formatted text.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Formatted text output.
"""
if repo_root is None:
repo_root = get_repo_root()
lines = []
lines.append("========================================")
lines.append("SESSION CONTEXT")
lines.append("========================================")
lines.append("")
developer = get_developer(repo_root)
# Developer section
lines.append("## DEVELOPER")
if not developer:
lines.append(
f"ERROR: Not initialized. Run: python3 ./{DIR_WORKFLOW}/{DIR_SCRIPTS}/init_developer.py <name>"
)
return "\n".join(lines)
lines.append(f"Name: {developer}")
lines.append("")
# Git status
lines.append("## GIT STATUS")
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=repo_root)
branch = branch_out.strip() or "unknown"
lines.append(f"Branch: {branch}")
_, status_out, _ = run_git(["status", "--porcelain"], cwd=repo_root)
status_lines = [line for line in status_out.splitlines() if line.strip()]
status_count = len(status_lines)
if status_count == 0:
lines.append("Working directory: Clean")
else:
lines.append(f"Working directory: {status_count} uncommitted change(s)")
lines.append("")
lines.append("Changes:")
_, short_out, _ = run_git(["status", "--short"], cwd=repo_root)
for line in short_out.splitlines()[:10]:
lines.append(line)
lines.append("")
# Recent commits
lines.append("## RECENT COMMITS")
_, log_out, _ = run_git(["log", "--oneline", "-5"], cwd=repo_root)
if log_out.strip():
for line in log_out.splitlines():
lines.append(line)
else:
lines.append("(no commits)")
lines.append("")
# Package git repos — independent sub-repositories
_append_package_git_context(lines, _collect_package_git_info(repo_root))
# Current task
lines.append("## CURRENT TASK")
current_task = get_current_task(repo_root)
if current_task:
current_task_dir = repo_root / current_task
lines.append(f"Path: {current_task}")
ct = load_task(current_task_dir)
if ct:
lines.append(f"Name: {ct.name}")
lines.append(f"Status: {ct.status}")
lines.append(f"Created: {ct.raw.get('createdAt', 'unknown')}")
if ct.description:
lines.append(f"Description: {ct.description}")
# Check for prd.md
prd_file = current_task_dir / "prd.md"
if prd_file.is_file():
lines.append("")
lines.append("[!] This task has prd.md - read it for task details")
else:
lines.append("(none)")
lines.append("")
# Active tasks
lines.append("## ACTIVE TASKS")
tasks_dir = get_tasks_dir(repo_root)
task_count = 0
# Collect all task data for hierarchy display
all_tasks = {t.dir_name: t for t in iter_active_tasks(tasks_dir)}
all_statuses = {name: t.status for name, t in all_tasks.items()}
def _print_task_tree(name: str, indent: int = 0) -> None:
nonlocal task_count
t = all_tasks[name]
progress = children_progress(t.children, all_statuses)
prefix = " " * indent
lines.append(f"{prefix}- {name}/ ({t.status}){progress} @{t.assignee or '-'}")
task_count += 1
for child in t.children:
if child in all_tasks:
_print_task_tree(child, indent + 1)
for dir_name in sorted(all_tasks.keys()):
if not all_tasks[dir_name].parent:
_print_task_tree(dir_name)
if task_count == 0:
lines.append("(no active tasks)")
lines.append(f"Total: {task_count} active task(s)")
lines.append("")
# My tasks
lines.append("## MY TASKS (Assigned to me)")
my_task_count = 0
for t in all_tasks.values():
if t.assignee == developer and t.status != "done":
progress = children_progress(t.children, all_statuses)
lines.append(f"- [{t.priority}] {t.title} ({t.status}){progress}")
my_task_count += 1
if my_task_count == 0:
lines.append("(no tasks assigned to you)")
lines.append("")
# Journal file
lines.append("## JOURNAL FILE")
journal_file = get_active_journal_file(repo_root)
if journal_file:
journal_lines = count_lines(journal_file)
relative = f"{DIR_WORKFLOW}/{DIR_WORKSPACE}/{developer}/{journal_file.name}"
lines.append(f"Active file: {relative}")
lines.append(f"Line count: {journal_lines} / 2000")
if journal_lines > 1800:
lines.append("[!] WARNING: Approaching 2000 line limit!")
else:
lines.append("No journal file found")
lines.append("")
# Packages
packages_text = get_packages_section(repo_root)
if packages_text:
lines.append(packages_text)
lines.append("")
# Paths
lines.append("## PATHS")
lines.append(f"Workspace: {DIR_WORKFLOW}/{DIR_WORKSPACE}/{developer}/")
lines.append(f"Tasks: {DIR_WORKFLOW}/{DIR_TASKS}/")
lines.append(f"Spec: {DIR_WORKFLOW}/{DIR_SPEC}/")
lines.append("")
lines.append("========================================")
return "\n".join(lines)
# =============================================================================
# Record Mode
# =============================================================================
def get_context_record_json(repo_root: Path | None = None) -> dict:
"""Get record-mode context as a dictionary.
Focused on: my active tasks, git status, current task.
"""
if repo_root is None:
repo_root = get_repo_root()
developer = get_developer(repo_root)
tasks_dir = get_tasks_dir(repo_root)
# Git info
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=repo_root)
branch = branch_out.strip() or "unknown"
_, status_out, _ = run_git(["status", "--porcelain"], cwd=repo_root)
git_status_count = len([line for line in status_out.splitlines() if line.strip()])
_, log_out, _ = run_git(["log", "--oneline", "-5"], cwd=repo_root)
commits = []
for line in log_out.splitlines():
if line.strip():
parts = line.split(" ", 1)
if len(parts) >= 2:
commits.append({"hash": parts[0], "message": parts[1]})
# My tasks (single pass — collect statuses and filter by assignee)
all_tasks_list = list(iter_active_tasks(tasks_dir))
all_statuses = {t.dir_name: t.status for t in all_tasks_list}
my_tasks = []
for t in all_tasks_list:
if t.assignee == developer:
done = sum(
1 for c in t.children
if all_statuses.get(c) in ("completed", "done")
)
my_tasks.append({
"dir": t.dir_name,
"title": t.title,
"status": t.status,
"priority": t.priority,
"children": list(t.children),
"childrenDone": done,
"parent": t.parent,
"meta": t.meta,
})
# Current task
current_task_info = None
current_task = get_current_task(repo_root)
if current_task:
ct = load_task(repo_root / current_task)
if ct:
current_task_info = {
"path": current_task,
"name": ct.name,
"status": ct.status,
}
# Package git repos
pkg_git_info = _collect_package_git_info(repo_root)
result = {
"developer": developer or "",
"git": {
"branch": branch,
"isClean": git_status_count == 0,
"uncommittedChanges": git_status_count,
"recentCommits": commits,
},
"myTasks": my_tasks,
"currentTask": current_task_info,
}
if pkg_git_info:
result["packageGit"] = pkg_git_info
return result
def get_context_text_record(repo_root: Path | None = None) -> str:
"""Get context as formatted text for record-session mode.
Focused output: MY ACTIVE TASKS first (with [!!!] emphasis),
then GIT STATUS, RECENT COMMITS, CURRENT TASK.
"""
if repo_root is None:
repo_root = get_repo_root()
lines: list[str] = []
lines.append("========================================")
lines.append("SESSION CONTEXT (RECORD MODE)")
lines.append("========================================")
lines.append("")
developer = get_developer(repo_root)
if not developer:
lines.append(
f"ERROR: Not initialized. Run: python3 ./{DIR_WORKFLOW}/{DIR_SCRIPTS}/init_developer.py <name>"
)
return "\n".join(lines)
# MY ACTIVE TASKS — first and prominent
lines.append(f"## [!!!] MY ACTIVE TASKS (Assigned to {developer})")
lines.append("[!] Review whether any should be archived before recording this session.")
lines.append("")
tasks_dir = get_tasks_dir(repo_root)
my_task_count = 0
# Single pass — collect all tasks and filter by assignee
all_statuses = get_all_statuses(tasks_dir)
for t in iter_active_tasks(tasks_dir):
if t.assignee == developer:
progress = children_progress(t.children, all_statuses)
lines.append(f"- [{t.priority}] {t.title} ({t.status}){progress}{t.dir_name}")
my_task_count += 1
if my_task_count == 0:
lines.append("(no active tasks assigned to you)")
lines.append("")
# GIT STATUS
lines.append("## GIT STATUS")
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=repo_root)
branch = branch_out.strip() or "unknown"
lines.append(f"Branch: {branch}")
_, status_out, _ = run_git(["status", "--porcelain"], cwd=repo_root)
status_lines = [line for line in status_out.splitlines() if line.strip()]
status_count = len(status_lines)
if status_count == 0:
lines.append("Working directory: Clean")
else:
lines.append(f"Working directory: {status_count} uncommitted change(s)")
lines.append("")
lines.append("Changes:")
_, short_out, _ = run_git(["status", "--short"], cwd=repo_root)
for line in short_out.splitlines()[:10]:
lines.append(line)
lines.append("")
# RECENT COMMITS
lines.append("## RECENT COMMITS")
_, log_out, _ = run_git(["log", "--oneline", "-5"], cwd=repo_root)
if log_out.strip():
for line in log_out.splitlines():
lines.append(line)
else:
lines.append("(no commits)")
lines.append("")
# Package git repos — independent sub-repositories
_append_package_git_context(lines, _collect_package_git_info(repo_root))
# CURRENT TASK
lines.append("## CURRENT TASK")
current_task = get_current_task(repo_root)
if current_task:
lines.append(f"Path: {current_task}")
ct = load_task(repo_root / current_task)
if ct:
lines.append(f"Name: {ct.name}")
lines.append(f"Status: {ct.status}")
else:
lines.append("(none)")
lines.append("")
lines.append("========================================")
return "\n".join(lines)
def output_text(repo_root: Path | None = None) -> None:
"""Output context in text format.
Args:
repo_root: Repository root path. Defaults to auto-detected.
"""
print(get_context_text(repo_root))
+223
View File
@@ -0,0 +1,223 @@
#!/usr/bin/env python3
"""
Task JSONL context management.
Provides:
cmd_add_context - Add entry to JSONL context file
cmd_validate - Validate JSONL context files
cmd_list_context - List JSONL context entries
Note:
``cmd_init_context`` was removed in v0.5.0-beta.12. JSONL context files
are now seeded at ``task.py create`` time with a self-describing
``_example`` line; the AI agent curates real entries during Phase 1.3 of
the workflow. See ``.trellis/workflow.md`` Phase 1.3 for the current
instructions.
"""
from __future__ import annotations
import argparse
import json
from pathlib import Path
from .log import Colors, colored
from .paths import get_repo_root
from .task_utils import resolve_task_dir
# =============================================================================
# Command: add-context
# =============================================================================
def cmd_add_context(args: argparse.Namespace) -> int:
"""Add entry to JSONL context file."""
repo_root = get_repo_root()
target_dir = resolve_task_dir(args.dir, repo_root)
jsonl_name = args.file
path = args.path
reason = args.reason or "Added manually"
if not target_dir.is_dir():
print(colored(f"Error: Directory not found: {target_dir}", Colors.RED))
return 1
# Support shorthand names (e.g. "implement" -> "implement.jsonl")
if not jsonl_name.endswith(".jsonl"):
jsonl_name = f"{jsonl_name}.jsonl"
jsonl_file = target_dir / jsonl_name
full_path = repo_root / path
entry_type = "file"
if full_path.is_dir():
entry_type = "directory"
if not path.endswith("/"):
path = f"{path}/"
elif not full_path.is_file():
print(colored(f"Error: Path not found: {path}", Colors.RED))
return 1
# Check if already exists
if jsonl_file.is_file():
content = jsonl_file.read_text(encoding="utf-8")
if f'"{path}"' in content:
print(colored(f"Warning: Entry already exists for {path}", Colors.YELLOW))
return 0
# Add entry
entry: dict
if entry_type == "directory":
entry = {"file": path, "type": "directory", "reason": reason}
else:
entry = {"file": path, "reason": reason}
with jsonl_file.open("a", encoding="utf-8") as f:
f.write(json.dumps(entry, ensure_ascii=False) + "\n")
print(colored(f"Added {entry_type}: {path}", Colors.GREEN))
return 0
# =============================================================================
# Command: validate
# =============================================================================
def cmd_validate(args: argparse.Namespace) -> int:
"""Validate JSONL context files."""
repo_root = get_repo_root()
target_dir = resolve_task_dir(args.dir, repo_root)
if not target_dir.is_dir():
print(colored("Error: task directory required", Colors.RED))
return 1
print(colored("=== Validating Context Files ===", Colors.BLUE))
print(f"Target dir: {target_dir}")
print()
total_errors = 0
for jsonl_name in ["implement.jsonl", "check.jsonl"]:
jsonl_file = target_dir / jsonl_name
errors = _validate_jsonl(jsonl_file, repo_root)
total_errors += errors
print()
if total_errors == 0:
print(colored("✓ All validations passed", Colors.GREEN))
return 0
else:
print(colored(f"✗ Validation failed ({total_errors} errors)", Colors.RED))
return 1
def _validate_jsonl(jsonl_file: Path, repo_root: Path) -> int:
"""Validate a single JSONL file.
Seed rows (no ``file`` field — typically ``{"_example": "..."}``) are
skipped silently; they are self-describing comments, not real entries.
"""
file_name = jsonl_file.name
errors = 0
if not jsonl_file.is_file():
print(f" {colored(f'{file_name}: not found (skipped)', Colors.YELLOW)}")
return 0
line_num = 0
real_entries = 0
for line in jsonl_file.read_text(encoding="utf-8").splitlines():
line_num += 1
if not line.strip():
continue
try:
data = json.loads(line)
except json.JSONDecodeError:
print(f" {colored(f'{file_name}:{line_num}: Invalid JSON', Colors.RED)}")
errors += 1
continue
file_path = data.get("file")
entry_type = data.get("type", "file")
if not file_path:
# Seed / comment row — skip silently
continue
real_entries += 1
full_path = repo_root / file_path
if entry_type == "directory":
if not full_path.is_dir():
print(f" {colored(f'{file_name}:{line_num}: Directory not found: {file_path}', Colors.RED)}")
errors += 1
else:
if not full_path.is_file():
print(f" {colored(f'{file_name}:{line_num}: File not found: {file_path}', Colors.RED)}")
errors += 1
if errors == 0:
print(f" {colored(f'{file_name}: ✓ ({real_entries} entries)', Colors.GREEN)}")
else:
print(f" {colored(f'{file_name}: ✗ ({errors} errors)', Colors.RED)}")
return errors
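# Example rows (paths hypothetical) and how validation treats them:
#
#     {"_example": "seed row"}                                             # no "file" -> skipped
#     {"file": ".trellis/spec/core/index.md", "reason": "API spec"}        # must exist as a file
#     {"file": "docs/research/", "type": "directory", "reason": "notes"}   # must exist as a directory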
# =============================================================================
# Command: list-context
# =============================================================================
def cmd_list_context(args: argparse.Namespace) -> int:
"""List JSONL context entries."""
repo_root = get_repo_root()
target_dir = resolve_task_dir(args.dir, repo_root)
if not target_dir.is_dir():
print(colored("Error: task directory required", Colors.RED))
return 1
print(colored("=== Context Files ===", Colors.BLUE))
print()
for jsonl_name in ["implement.jsonl", "check.jsonl"]:
jsonl_file = target_dir / jsonl_name
if not jsonl_file.is_file():
continue
print(colored(f"[{jsonl_name}]", Colors.CYAN))
count = 0
seed_only = True
for line in jsonl_file.read_text(encoding="utf-8").splitlines():
if not line.strip():
continue
try:
data = json.loads(line)
except json.JSONDecodeError:
continue
file_path = data.get("file")
if not file_path:
# Seed / comment row — don't count as a real entry
continue
seed_only = False
count += 1
entry_type = data.get("type", "file")
reason = data.get("reason", "-")
if entry_type == "directory":
print(f" {colored(f'{count}.', Colors.GREEN)} [DIR] {file_path}")
else:
print(f" {colored(f'{count}.', Colors.GREEN)} {file_path}")
print(f" {colored('', Colors.YELLOW)} {reason}")
if seed_only:
print(f" {colored('(no curated entries yet — only seed row)', Colors.YELLOW)}")
print()
return 0
+188
View File
@@ -0,0 +1,188 @@
#!/usr/bin/env python3
"""
Task queue utility functions.
Provides:
list_tasks_by_status - List tasks by status
list_pending_tasks - List tasks still in "planning" status
list_tasks_by_assignee - List tasks by assignee
list_my_tasks - List tasks assigned to current developer
get_task_stats - Get P0/P1/P2/P3 counts
"""
from __future__ import annotations
from pathlib import Path
from .paths import (
get_repo_root,
get_developer,
get_tasks_dir,
)
from .tasks import iter_active_tasks
# =============================================================================
# Internal helper
# =============================================================================
def _task_to_dict(t) -> dict:
"""Convert TaskInfo to the dict format callers expect."""
return {
"priority": t.priority,
"id": t.raw.get("id", ""),
"title": t.title,
"status": t.status,
"assignee": t.assignee or "-",
"dir": t.dir_name,
"children": list(t.children),
"parent": t.parent,
}
# =============================================================================
# Public Functions
# =============================================================================
def list_tasks_by_status(
filter_status: str | None = None,
repo_root: Path | None = None
) -> list[dict]:
"""List tasks by status.
Args:
filter_status: Optional status filter.
repo_root: Repository root path. Defaults to auto-detected.
Returns:
List of task info dicts with keys: priority, id, title, status, assignee.
"""
if repo_root is None:
repo_root = get_repo_root()
tasks_dir = get_tasks_dir(repo_root)
results = []
for t in iter_active_tasks(tasks_dir):
if filter_status and t.status != filter_status:
continue
results.append(_task_to_dict(t))
return results
def list_pending_tasks(repo_root: Path | None = None) -> list[dict]:
"""List pending tasks.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
List of task info dicts.
"""
return list_tasks_by_status("planning", repo_root)
def list_tasks_by_assignee(
assignee: str,
filter_status: str | None = None,
repo_root: Path | None = None
) -> list[dict]:
"""List tasks assigned to a specific developer.
Args:
assignee: Developer name.
filter_status: Optional status filter.
repo_root: Repository root path. Defaults to auto-detected.
Returns:
List of task info dicts.
"""
if repo_root is None:
repo_root = get_repo_root()
tasks_dir = get_tasks_dir(repo_root)
results = []
for t in iter_active_tasks(tasks_dir):
if (t.assignee or "-") != assignee:
continue
if filter_status and t.status != filter_status:
continue
results.append(_task_to_dict(t))
return results
def list_my_tasks(
filter_status: str | None = None,
repo_root: Path | None = None
) -> list[dict]:
"""List tasks assigned to current developer.
Args:
filter_status: Optional status filter.
repo_root: Repository root path. Defaults to auto-detected.
Returns:
List of task info dicts.
Raises:
ValueError: If developer not set.
"""
if repo_root is None:
repo_root = get_repo_root()
developer = get_developer(repo_root)
if not developer:
raise ValueError("Developer not set")
return list_tasks_by_assignee(developer, filter_status, repo_root)
def get_task_stats(repo_root: Path | None = None) -> dict[str, int]:
"""Get task statistics.
Args:
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Dict with keys: P0, P1, P2, P3, Total.
"""
if repo_root is None:
repo_root = get_repo_root()
tasks_dir = get_tasks_dir(repo_root)
stats = {"P0": 0, "P1": 0, "P2": 0, "P3": 0, "Total": 0}
for t in iter_active_tasks(tasks_dir):
if t.priority in stats:
stats[t.priority] += 1
stats["Total"] += 1
return stats
def format_task_stats(stats: dict[str, int]) -> str:
"""Format task stats as string.
Args:
stats: Stats dict from get_task_stats.
Returns:
Formatted string like "P0:0 P1:1 P2:2 P3:0 Total:3".
"""
return f"P0:{stats['P0']} P1:{stats['P1']} P2:{stats['P2']} P3:{stats['P3']} Total:{stats['Total']}"
# =============================================================================
# Main Entry (for testing)
# =============================================================================
if __name__ == "__main__":
stats = get_task_stats()
print(format_task_stats(stats))
print()
print("Pending tasks:")
for task in list_pending_tasks():
print(f" {task['priority']}|{task['id']}|{task['title']}|{task['status']}|{task['assignee']}")
+598
View File
@@ -0,0 +1,598 @@
#!/usr/bin/env python3
"""
Task CRUD operations.
Provides:
ensure_tasks_dir - Ensure tasks directory exists
cmd_create - Create a new task
cmd_archive - Archive completed task
cmd_set_branch - Set git branch for task
cmd_set_base_branch - Set PR target branch
cmd_set_scope - Set scope for PR title
cmd_add_subtask - Link child task to parent
cmd_remove_subtask - Unlink child task from parent
"""
from __future__ import annotations
import argparse
import json
import re
import sys
from datetime import datetime
from pathlib import Path
from .config import (
get_packages,
is_monorepo,
resolve_package,
validate_package,
)
from .git import run_git
from .io import read_json, write_json
from .log import Colors, colored
from .paths import (
DIR_ARCHIVE,
DIR_TASKS,
DIR_WORKFLOW,
FILE_TASK_JSON,
clear_current_task,
generate_task_date_prefix,
get_current_task,
get_developer,
get_repo_root,
get_tasks_dir,
)
from .task_utils import (
archive_task_complete,
find_task_by_name,
resolve_task_dir,
run_task_hooks,
)
# =============================================================================
# Helper Functions
# =============================================================================
def _slugify(title: str) -> str:
"""Convert title to slug (only works with ASCII)."""
result = title.lower()
result = re.sub(r"[^a-z0-9]", "-", result)
result = re.sub(r"-+", "-", result)
result = result.strip("-")
return result
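# Slug examples:
#
#     _slugify("Fix: Login Bug!")   # -> "fix-login-bug"
#     _slugify("修复登录")           # -> "" (non-ASCII collapses to empty)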
def ensure_tasks_dir(repo_root: Path) -> Path:
"""Ensure tasks directory exists."""
tasks_dir = get_tasks_dir(repo_root)
archive_dir = tasks_dir / "archive"
if not tasks_dir.exists():
tasks_dir.mkdir(parents=True)
print(colored(f"Created tasks directory: {tasks_dir}", Colors.GREEN), file=sys.stderr)
if not archive_dir.exists():
archive_dir.mkdir(parents=True)
return tasks_dir
# =============================================================================
# Sub-agent platform detection + JSONL seeding
# =============================================================================
# Config directories of platforms that consume implement.jsonl / check.jsonl.
# Keep in sync with src/types/ai-tools.ts AI_TOOLS entries — these are the
# platforms listed in workflow.md's "agent-capable" Skill Routing block
# (Class-1 hook-inject + Class-2 pull-based preludes). Kilo / Antigravity /
# Windsurf are NOT in this list: they do not consume JSONL.
_SUBAGENT_CONFIG_DIRS: tuple[str, ...] = (
".claude",
".cursor",
".codex",
".kiro",
".gemini",
".opencode",
".qoder",
".codebuddy",
".factory", # Factory Droid
".github/copilot",
)
_SEED_EXAMPLE = (
"Fill with {\"file\": \"<path>\", \"reason\": \"<why>\"}. "
"Put spec/research files only — no code paths. "
"Run `python3 .trellis/scripts/get_context.py --mode packages` to list available specs. "
"Delete this line once real entries are added."
)
def _has_subagent_platform(repo_root: Path) -> bool:
"""Return True if any sub-agent-capable platform is configured.
Detected by probing well-known config directories at the repo root. Used
only to decide whether ``task.py create`` should seed empty
``implement.jsonl`` / ``check.jsonl`` files.
"""
for config_dir in _SUBAGENT_CONFIG_DIRS:
if (repo_root / config_dir).is_dir():
return True
return False
def _write_seed_jsonl(path: Path) -> None:
"""Write a one-line seed JSONL file with a self-describing ``_example``.
The seed row has no ``file`` field, so downstream consumers (hooks +
preludes) that iterate entries via ``item.get("file")`` naturally skip
it. The row exists purely as an in-file prompt for the AI curator.
"""
seed = {"_example": _SEED_EXAMPLE}
path.write_text(json.dumps(seed, ensure_ascii=False) + "\n", encoding="utf-8")
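# Downstream consumers skip the seed with the pattern the docstring describes
# (sketch only, not taken from the hooks themselves; `use` is a placeholder):
#
#     for line in jsonl_path.read_text(encoding="utf-8").splitlines():
#         item = json.loads(line)
#         if not item.get("file"):    # seed row has no "file" field
#             continue
#         use(item["file"], item.get("reason", ""))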
# =============================================================================
# Command: create
# =============================================================================
def cmd_create(args: argparse.Namespace) -> int:
"""Create a new task."""
repo_root = get_repo_root()
if not args.title:
print(colored("Error: title is required", Colors.RED), file=sys.stderr)
return 1
# Validate --package (CLI source: fail-fast)
package: str | None = getattr(args, "package", None)
if not is_monorepo(repo_root):
# Single-repo: ignore --package, no package prefix
if package:
print(colored(f"Warning: --package ignored in single-repo project", Colors.YELLOW), file=sys.stderr)
package = None
elif package:
if not validate_package(package, repo_root):
packages = get_packages(repo_root)
available = ", ".join(sorted(packages.keys())) if packages else "(none)"
print(colored(f"Error: unknown package '{package}'. Available: {available}", Colors.RED), file=sys.stderr)
return 1
else:
# Inferred source: default_package only (task.json does not exist yet at create time)
package = resolve_package(repo_root=repo_root)
# Default assignee to current developer
assignee = args.assignee
if not assignee:
assignee = get_developer(repo_root)
if not assignee:
print(colored("Error: No developer set. Run init_developer.py first or use --assignee", Colors.RED), file=sys.stderr)
return 1
ensure_tasks_dir(repo_root)
# Get current developer as creator
creator = get_developer(repo_root) or assignee
# Generate slug if not provided
slug = args.slug or _slugify(args.title)
if not slug:
print(colored("Error: could not generate slug from title", Colors.RED), file=sys.stderr)
return 1
# Create task directory with MM-DD-slug format
tasks_dir = get_tasks_dir(repo_root)
date_prefix = generate_task_date_prefix()
dir_name = f"{date_prefix}-{slug}"
task_dir = tasks_dir / dir_name
task_json_path = task_dir / FILE_TASK_JSON
if task_dir.exists():
print(colored(f"Warning: Task directory already exists: {dir_name}", Colors.YELLOW), file=sys.stderr)
else:
task_dir.mkdir(parents=True)
today = datetime.now().strftime("%Y-%m-%d")
# Record current branch as base_branch (PR target)
_, branch_out, _ = run_git(["branch", "--show-current"], cwd=repo_root)
current_branch = branch_out.strip() or "main"
task_data = {
"id": slug,
"name": slug,
"title": args.title,
"description": args.description or "",
"status": "planning",
"dev_type": None,
"scope": None,
"package": package,
"priority": args.priority,
"creator": creator,
"assignee": assignee,
"createdAt": today,
"completedAt": None,
"branch": None,
"base_branch": current_branch,
"worktree_path": None,
"commit": None,
"pr_url": None,
"subtasks": [],
"children": [],
"parent": None,
"relatedFiles": [],
"notes": "",
"meta": {},
}
write_json(task_json_path, task_data)
# Seed implement.jsonl / check.jsonl for sub-agent-capable platforms.
# Agent curates real entries in Phase 1.3 (see .trellis/workflow.md).
# Agent-less platforms (Kilo / Antigravity / Windsurf) skip this — they
# load specs via the trellis-before-dev skill instead of JSONL.
seeded_jsonl = False
if _has_subagent_platform(repo_root):
for jsonl_name in ("implement.jsonl", "check.jsonl"):
jsonl_path = task_dir / jsonl_name
if not jsonl_path.exists():
_write_seed_jsonl(jsonl_path)
seeded_jsonl = True
# Handle --parent: establish bidirectional link
if args.parent:
parent_dir = resolve_task_dir(args.parent, repo_root)
parent_json_path = parent_dir / FILE_TASK_JSON
if not parent_json_path.is_file():
print(colored(f"Warning: Parent task.json not found: {args.parent}", Colors.YELLOW), file=sys.stderr)
else:
parent_data = read_json(parent_json_path)
if parent_data:
# Add child to parent's children list
parent_children = parent_data.get("children", [])
if dir_name not in parent_children:
parent_children.append(dir_name)
parent_data["children"] = parent_children
write_json(parent_json_path, parent_data)
# Set parent in child's task.json
task_data["parent"] = parent_dir.name
write_json(task_json_path, task_data)
print(colored(f"Linked as child of: {parent_dir.name}", Colors.GREEN), file=sys.stderr)
print(colored(f"Created task: {dir_name}", Colors.GREEN), file=sys.stderr)
print("", file=sys.stderr)
print(colored("Next steps:", Colors.BLUE), file=sys.stderr)
print(" 1. Create prd.md with requirements", file=sys.stderr)
if seeded_jsonl:
print(
" 2. Curate implement.jsonl / check.jsonl (spec + research files only — "
"see .trellis/workflow.md Phase 1.3)",
file=sys.stderr,
)
print(" 3. Run: python3 task.py start <dir>", file=sys.stderr)
else:
print(" 2. Run: python3 task.py start <dir>", file=sys.stderr)
print("", file=sys.stderr)
# Output relative path for script chaining
print(f"{DIR_WORKFLOW}/{DIR_TASKS}/{dir_name}")
run_task_hooks("after_create", task_json_path, repo_root)
return 0
# =============================================================================
# Command: archive
# =============================================================================
def cmd_archive(args: argparse.Namespace) -> int:
"""Archive completed task."""
repo_root = get_repo_root()
task_name = args.name
if not task_name:
print(colored("Error: Task name is required", Colors.RED), file=sys.stderr)
return 1
tasks_dir = get_tasks_dir(repo_root)
# Find task directory
task_dir = find_task_by_name(task_name, tasks_dir)
if not task_dir or not task_dir.is_dir():
print(colored(f"Error: Task not found: {task_name}", Colors.RED), file=sys.stderr)
print("Active tasks:", file=sys.stderr)
# Import lazily to avoid circular dependency
from .tasks import iter_active_tasks
for t in iter_active_tasks(tasks_dir):
print(f" - {t.dir_name}/", file=sys.stderr)
return 1
dir_name = task_dir.name
task_json_path = task_dir / FILE_TASK_JSON
# Update status before archiving
today = datetime.now().strftime("%Y-%m-%d")
if task_json_path.is_file():
data = read_json(task_json_path)
if data:
data["status"] = "completed"
data["completedAt"] = today
write_json(task_json_path, data)
# Handle subtask relationships on archive
task_parent = data.get("parent")
task_children = data.get("children", [])
# If this is a child, remove from parent's children list
if task_parent:
parent_dir = find_task_by_name(task_parent, tasks_dir)
if parent_dir:
parent_json = parent_dir / FILE_TASK_JSON
if parent_json.is_file():
parent_data = read_json(parent_json)
if parent_data:
parent_children = parent_data.get("children", [])
if dir_name in parent_children:
parent_children.remove(dir_name)
parent_data["children"] = parent_children
write_json(parent_json, parent_data)
# If this is a parent, clear parent field in all children
if task_children:
for child_name in task_children:
child_dir_path = find_task_by_name(child_name, tasks_dir)
if child_dir_path:
child_json = child_dir_path / FILE_TASK_JSON
if child_json.is_file():
child_data = read_json(child_json)
if child_data:
child_data["parent"] = None
write_json(child_json, child_data)
# Clear if current task
current = get_current_task(repo_root)
if current and dir_name in current:
clear_current_task(repo_root)
# Archive
result = archive_task_complete(task_dir, repo_root)
if "archived_to" in result:
archive_dest = Path(result["archived_to"])
year_month = archive_dest.parent.name
print(colored(f"Archived: {dir_name} -> archive/{year_month}/", Colors.GREEN), file=sys.stderr)
# Auto-commit unless --no-commit
if not getattr(args, "no_commit", False):
_auto_commit_archive(dir_name, repo_root)
# Return the archive path
print(f"{DIR_WORKFLOW}/{DIR_TASKS}/{DIR_ARCHIVE}/{year_month}/{dir_name}")
# Run hooks with the archived path
archived_json = archive_dest / FILE_TASK_JSON
run_task_hooks("after_archive", archived_json, repo_root)
return 0
return 1
def _auto_commit_archive(task_name: str, repo_root: Path) -> None:
"""Stage .trellis/tasks/ changes and commit after archive."""
tasks_rel = f"{DIR_WORKFLOW}/{DIR_TASKS}"
run_git(["add", "-A", tasks_rel], cwd=repo_root)
# Check if there are staged changes
rc, _, _ = run_git(
["diff", "--cached", "--quiet", "--", tasks_rel], cwd=repo_root
)
if rc == 0:
print("[OK] No task changes to commit.", file=sys.stderr)
return
commit_msg = f"chore(task): archive {task_name}"
rc, _, err = run_git(["commit", "-m", commit_msg], cwd=repo_root)
if rc == 0:
print(f"[OK] Auto-committed: {commit_msg}", file=sys.stderr)
else:
print(f"[WARN] Auto-commit failed: {err.strip()}", file=sys.stderr)
# =============================================================================
# Command: add-subtask
# =============================================================================
def cmd_add_subtask(args: argparse.Namespace) -> int:
"""Link a child task to a parent task."""
repo_root = get_repo_root()
parent_dir = resolve_task_dir(args.parent_dir, repo_root)
child_dir = resolve_task_dir(args.child_dir, repo_root)
parent_json_path = parent_dir / FILE_TASK_JSON
child_json_path = child_dir / FILE_TASK_JSON
if not parent_json_path.is_file():
print(colored(f"Error: Parent task.json not found: {args.parent_dir}", Colors.RED), file=sys.stderr)
return 1
if not child_json_path.is_file():
print(colored(f"Error: Child task.json not found: {args.child_dir}", Colors.RED), file=sys.stderr)
return 1
parent_data = read_json(parent_json_path)
child_data = read_json(child_json_path)
if not parent_data or not child_data:
print(colored("Error: Failed to read task.json", Colors.RED), file=sys.stderr)
return 1
# Check if child already has a parent
existing_parent = child_data.get("parent")
if existing_parent:
print(colored(f"Error: Child task already has a parent: {existing_parent}", Colors.RED), file=sys.stderr)
return 1
# Add child to parent's children list
parent_children = parent_data.get("children", [])
child_dir_name = child_dir.name
if child_dir_name not in parent_children:
parent_children.append(child_dir_name)
parent_data["children"] = parent_children
# Set parent in child's task.json
child_data["parent"] = parent_dir.name
# Write both
write_json(parent_json_path, parent_data)
write_json(child_json_path, child_data)
print(colored(f"Linked: {child_dir.name} -> {parent_dir.name}", Colors.GREEN), file=sys.stderr)
return 0
# =============================================================================
# Command: remove-subtask
# =============================================================================
def cmd_remove_subtask(args: argparse.Namespace) -> int:
"""Unlink a child task from a parent task."""
repo_root = get_repo_root()
parent_dir = resolve_task_dir(args.parent_dir, repo_root)
child_dir = resolve_task_dir(args.child_dir, repo_root)
parent_json_path = parent_dir / FILE_TASK_JSON
child_json_path = child_dir / FILE_TASK_JSON
if not parent_json_path.is_file():
print(colored(f"Error: Parent task.json not found: {args.parent_dir}", Colors.RED), file=sys.stderr)
return 1
if not child_json_path.is_file():
print(colored(f"Error: Child task.json not found: {args.child_dir}", Colors.RED), file=sys.stderr)
return 1
parent_data = read_json(parent_json_path)
child_data = read_json(child_json_path)
if not parent_data or not child_data:
print(colored("Error: Failed to read task.json", Colors.RED), file=sys.stderr)
return 1
# Remove child from parent's children list
parent_children = parent_data.get("children", [])
child_dir_name = child_dir.name
if child_dir_name in parent_children:
parent_children.remove(child_dir_name)
parent_data["children"] = parent_children
# Clear parent in child's task.json
child_data["parent"] = None
# Write both
write_json(parent_json_path, parent_data)
write_json(child_json_path, child_data)
print(colored(f"Unlinked: {child_dir.name} from {parent_dir.name}", Colors.GREEN), file=sys.stderr)
return 0
# =============================================================================
# Command: set-branch
# =============================================================================
def cmd_set_branch(args: argparse.Namespace) -> int:
"""Set git branch for task."""
repo_root = get_repo_root()
target_dir = resolve_task_dir(args.dir, repo_root)
branch = args.branch
if not branch:
print(colored("Error: Missing arguments", Colors.RED))
print("Usage: python3 task.py set-branch <task-dir> <branch-name>")
return 1
task_json = target_dir / FILE_TASK_JSON
if not task_json.is_file():
print(colored(f"Error: task.json not found at {target_dir}", Colors.RED))
return 1
data = read_json(task_json)
if not data:
return 1
data["branch"] = branch
write_json(task_json, data)
print(colored(f"✓ Branch set to: {branch}", Colors.GREEN))
return 0
# =============================================================================
# Command: set-base-branch
# =============================================================================
def cmd_set_base_branch(args: argparse.Namespace) -> int:
"""Set the base branch (PR target) for task."""
repo_root = get_repo_root()
target_dir = resolve_task_dir(args.dir, repo_root)
base_branch = args.base_branch
if not base_branch:
print(colored("Error: Missing arguments", Colors.RED))
print("Usage: python3 task.py set-base-branch <task-dir> <base-branch>")
print("Example: python3 task.py set-base-branch <dir> develop")
print()
print("This sets the target branch for PR (the branch your feature will merge into).")
return 1
task_json = target_dir / FILE_TASK_JSON
if not task_json.is_file():
print(colored(f"Error: task.json not found at {target_dir}", Colors.RED))
return 1
data = read_json(task_json)
if not data:
return 1
data["base_branch"] = base_branch
write_json(task_json, data)
print(colored(f"✓ Base branch set to: {base_branch}", Colors.GREEN))
print(f" PR will target: {base_branch}")
return 0
# =============================================================================
# Command: set-scope
# =============================================================================
def cmd_set_scope(args: argparse.Namespace) -> int:
"""Set scope for PR title."""
repo_root = get_repo_root()
target_dir = resolve_task_dir(args.dir, repo_root)
scope = args.scope
if not scope:
print(colored("Error: Missing arguments", Colors.RED))
print("Usage: python3 task.py set-scope <task-dir> <scope>")
return 1
task_json = target_dir / FILE_TASK_JSON
if not task_json.is_file():
print(colored(f"Error: task.json not found at {target_dir}", Colors.RED))
return 1
data = read_json(task_json)
if not data:
return 1
data["scope"] = scope
write_json(task_json, data)
print(colored(f"✓ Scope set to: {scope}", Colors.GREEN))
return 0
+274
View File
@@ -0,0 +1,274 @@
#!/usr/bin/env python3
"""
Task utility functions.
Provides:
is_safe_task_path - Validate task path is safe to operate on
find_task_by_name - Find task directory by name
resolve_task_dir - Resolve task directory from name, relative, or absolute path
archive_task_dir - Archive task to monthly directory
run_task_hooks - Run lifecycle hooks for task events
"""
from __future__ import annotations
import shutil
import sys
from datetime import datetime
from pathlib import Path
from .paths import get_repo_root, get_tasks_dir
# =============================================================================
# Path Safety
# =============================================================================
def is_safe_task_path(task_path: str, repo_root: Path | None = None) -> bool:
"""Check if a relative task path is safe to operate on.
Args:
task_path: Task path (relative to repo_root).
repo_root: Repository root path. Defaults to auto-detected.
Returns:
True if safe, False if dangerous.
"""
if repo_root is None:
repo_root = get_repo_root()
normalized = task_path.replace("\\", "/")
# Check empty or null
if not normalized or normalized == "null":
print("Error: empty or null task path", file=sys.stderr)
return False
# Reject absolute paths
if Path(task_path).is_absolute():
print(f"Error: absolute path not allowed: {task_path}", file=sys.stderr)
return False
# Reject ".", "..", paths starting with "./" or "../", or containing ".."
if normalized in (".", "..") or normalized.startswith("./") or normalized.startswith("../") or ".." in normalized:
print(f"Error: path traversal not allowed: {task_path}", file=sys.stderr)
return False
# Final check: ensure resolved path is not the repo root
abs_path = repo_root / Path(normalized)
if abs_path.exists():
try:
resolved = abs_path.resolve()
root_resolved = repo_root.resolve()
if resolved == root_resolved:
print(f"Error: path resolves to repo root: {task_path}", file=sys.stderr)
return False
except (OSError, IOError):
pass
return True
# =============================================================================
# Task Lookup
# =============================================================================
def find_task_by_name(task_name: str, tasks_dir: Path) -> Path | None:
"""Find task directory by name (exact or suffix match).
Args:
task_name: Task name to find.
tasks_dir: Tasks directory path.
Returns:
Absolute path to task directory, or None if not found.
"""
if not task_name or not tasks_dir or not tasks_dir.is_dir():
return None
# Try exact match first
exact_match = tasks_dir / task_name
if exact_match.is_dir():
return exact_match
# Try suffix match (e.g., "my-task" matches "01-21-my-task")
for d in tasks_dir.iterdir():
if d.is_dir() and d.name.endswith(f"-{task_name}"):
return d
return None
# =============================================================================
# Archive Operations
# =============================================================================
def archive_task_dir(task_dir_abs: Path, repo_root: Path | None = None) -> Path | None:
"""Archive a task directory to archive/{YYYY-MM}/.
Args:
task_dir_abs: Absolute path to task directory.
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Path to archived directory, or None on error.
"""
if not task_dir_abs.is_dir():
print(f"Error: task directory not found: {task_dir_abs}", file=sys.stderr)
return None
# Get tasks directory (parent of the task)
tasks_dir = task_dir_abs.parent
archive_dir = tasks_dir / "archive"
year_month = datetime.now().strftime("%Y-%m")
month_dir = archive_dir / year_month
# Create archive directory
try:
month_dir.mkdir(parents=True, exist_ok=True)
except (OSError, IOError) as e:
print(f"Error: Failed to create archive directory: {e}", file=sys.stderr)
return None
# Move task to archive
task_name = task_dir_abs.name
dest = month_dir / task_name
try:
shutil.move(str(task_dir_abs), str(dest))
except (OSError, IOError, shutil.Error) as e:
print(f"Error: Failed to move task to archive: {e}", file=sys.stderr)
return None
return dest
def archive_task_complete(
task_dir_abs: Path,
repo_root: Path | None = None
) -> dict[str, str]:
"""Complete archive workflow: archive directory.
Args:
task_dir_abs: Absolute path to task directory.
repo_root: Repository root path. Defaults to auto-detected.
Returns:
Dict with archive result info.
"""
if not task_dir_abs.is_dir():
print(f"Error: task directory not found: {task_dir_abs}", file=sys.stderr)
return {}
archive_dest = archive_task_dir(task_dir_abs, repo_root)
if archive_dest:
return {"archived_to": str(archive_dest)}
return {}
# =============================================================================
# Task Directory Resolution
# =============================================================================
def resolve_task_dir(target_dir: str, repo_root: Path) -> Path:
"""Resolve task directory to absolute path.
Supports:
- Absolute path: /path/to/task
- Relative path: .trellis/tasks/01-31-my-task
- Task name: my-task (uses find_task_by_name for lookup)
Args:
target_dir: Task directory specification.
repo_root: Repository root path.
Returns:
Resolved absolute path.
"""
if not target_dir:
return Path()
normalized = target_dir.replace("\\", "/")
while normalized.startswith("./"):
normalized = normalized[2:]
# Absolute path
if Path(target_dir).is_absolute():
return Path(target_dir)
# Relative path (contains path separator or starts with .trellis)
if "/" in normalized or normalized.startswith(".trellis"):
return repo_root / Path(normalized)
# Task name - try to find in tasks directory
tasks_dir = get_tasks_dir(repo_root)
found = find_task_by_name(target_dir, tasks_dir)
if found:
return found
# Fallback to treating as relative path
return repo_root / Path(normalized)
# =============================================================================
# Lifecycle Hooks
# =============================================================================
def run_task_hooks(event: str, task_json_path: Path, repo_root: Path) -> None:
"""Run lifecycle hooks for a task event.
Args:
event: Event name (e.g. "after_create").
task_json_path: Absolute path to the task's task.json.
repo_root: Repository root for cwd and config lookup.
"""
import os
import subprocess
from .config import get_hooks
from .log import Colors, colored
commands = get_hooks(event, repo_root)
if not commands:
return
env = {**os.environ, "TASK_JSON_PATH": str(task_json_path)}
for cmd in commands:
try:
result = subprocess.run(
cmd,
shell=True,
cwd=repo_root,
env=env,
capture_output=True,
text=True,
encoding="utf-8",
errors="replace",
)
if result.returncode != 0:
print(
colored(f"[WARN] Hook failed ({event}): {cmd}", Colors.YELLOW),
file=sys.stderr,
)
if result.stderr.strip():
print(f" {result.stderr.strip()}", file=sys.stderr)
except Exception as e:
print(
colored(f"[WARN] Hook error ({event}): {cmd}{e}", Colors.YELLOW),
file=sys.stderr,
)
# =============================================================================
# Main Entry (for testing)
# =============================================================================
if __name__ == "__main__":
repo = get_repo_root()
tasks = get_tasks_dir(repo)
print(f"Tasks dir: {tasks}")
print(f"is_safe_task_path('.trellis/tasks/test'): {is_safe_task_path('.trellis/tasks/test', repo)}")
print(f"is_safe_task_path('../test'): {is_safe_task_path('../test', repo)}")
+109
View File
@@ -0,0 +1,109 @@
"""
Task data access layer.
Single source of truth for loading and iterating task directories.
Replaces scattered task.json parsing across 9+ files.
Provides:
load_task — Load a single task by directory path
iter_active_tasks — Iterate all non-archived tasks (sorted)
get_all_statuses — Get {dir_name: status} map for children progress
"""
from __future__ import annotations
from collections.abc import Iterator
from pathlib import Path
from .io import read_json
from .paths import FILE_TASK_JSON
from .types import TaskInfo
def load_task(task_dir: Path) -> TaskInfo | None:
"""Load task from a directory containing task.json.
Args:
task_dir: Absolute path to the task directory.
Returns:
TaskInfo if task.json exists and is valid, None otherwise.
"""
task_json = task_dir / FILE_TASK_JSON
if not task_json.is_file():
return None
data = read_json(task_json)
if not data:
return None
return TaskInfo(
dir_name=task_dir.name,
directory=task_dir,
title=data.get("title") or data.get("name") or "unknown",
status=data.get("status", "unknown"),
assignee=data.get("assignee", ""),
priority=data.get("priority", "P2"),
children=tuple(data.get("children", [])),
parent=data.get("parent"),
package=data.get("package"),
raw=data,
)
def iter_active_tasks(tasks_dir: Path) -> Iterator[TaskInfo]:
"""Iterate all active (non-archived) tasks, sorted by directory name.
Skips the "archive" directory and directories without valid task.json.
Args:
tasks_dir: Path to the tasks directory.
Yields:
TaskInfo for each valid task.
"""
if not tasks_dir.is_dir():
return
for d in sorted(tasks_dir.iterdir()):
if not d.is_dir() or d.name == "archive":
continue
info = load_task(d)
if info is not None:
yield info
def get_all_statuses(tasks_dir: Path) -> dict[str, str]:
"""Get a {dir_name: status} mapping for all active tasks.
Useful for computing children progress without loading full TaskInfo.
Args:
tasks_dir: Path to the tasks directory.
Returns:
Dict mapping directory names to status strings.
"""
return {t.dir_name: t.status for t in iter_active_tasks(tasks_dir)}
def children_progress(
children: tuple[str, ...] | list[str],
all_statuses: dict[str, str],
) -> str:
"""Format children progress string like " [2/3 done]".
Args:
children: List of child directory names.
all_statuses: Status map from get_all_statuses().
Returns:
Formatted string, or "" if no children.
"""
if not children:
return ""
done = sum(
1 for c in children
if all_statuses.get(c) in ("completed", "done")
)
return f" [{done}/{len(children)} done]"
+110
View File
@@ -0,0 +1,110 @@
"""
Core type definitions for Trellis task data.
Provides:
TaskData — TypedDict for task.json shape (read-path type hints only)
TaskInfo — Frozen dataclass for loaded task (the public API type)
AgentRecord — TypedDict for registry.json agent entries
"""
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import TypedDict
# =============================================================================
# task.json shape (TypedDict — used only for read-path type hints)
# =============================================================================
class TaskData(TypedDict, total=False):
"""Shape of task.json on disk.
Used only for type annotations when reading task.json.
Writes must use the original dict to avoid losing unknown fields.
"""
id: str
name: str
title: str
description: str
status: str
    dev_type: str | None
scope: str | None
package: str | None
priority: str
creator: str
assignee: str
createdAt: str
completedAt: str | None
branch: str | None
base_branch: str | None
worktree_path: str | None
commit: str | None
pr_url: str | None
subtasks: list[str]
children: list[str]
parent: str | None
relatedFiles: list[str]
notes: str
meta: dict
# =============================================================================
# Loaded task object (frozen dataclass — the public API type)
# =============================================================================
@dataclass(frozen=True)
class TaskInfo:
"""Immutable view of a loaded task.
Created by load_task() / iter_active_tasks().
Contains the commonly accessed fields; the original dict
is preserved in `raw` for write-back and uncommon field access.
"""
dir_name: str
directory: Path
title: str
status: str
assignee: str
priority: str
children: tuple[str, ...]
parent: str | None
package: str | None
raw: dict # original dict — use for writes and uncommon fields
@property
def name(self) -> str:
"""Task name (id or name field)."""
return self.raw.get("name") or self.raw.get("id") or self.dir_name
@property
def description(self) -> str:
return self.raw.get("description", "")
@property
def branch(self) -> str | None:
return self.raw.get("branch")
@property
def meta(self) -> dict:
return self.raw.get("meta", {})
# =============================================================================
# registry.json agent entry
# =============================================================================
class AgentRecord(TypedDict, total=False):
"""Shape of an agent entry in registry.json."""
id: str
pid: int
task_dir: str
worktree_path: str
branch: str
platform: str
started_at: str
status: str
+176
View File
@@ -0,0 +1,176 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Workflow Phase Extraction.
Extracts step-level content from .trellis/workflow.md and optionally filters
platform-specific blocks.
Platform marker syntax in workflow.md:
[Claude Code, Cursor, ...]
agent-capable content
[/Claude Code, Cursor, ...]
Provides:
get_phase_index - Extract the Phase Index section (no --step)
get_step - Extract a single step (#### X.X) section
filter_platform - Strip platform blocks that don't include the given name
"""
from __future__ import annotations
import re
from .paths import DIR_WORKFLOW, get_repo_root
def _workflow_md_path():
return get_repo_root() / DIR_WORKFLOW / "workflow.md"
# Match a line that *is* a platform marker: "[A, B, C]" or "[/A, B, C]"
_MARKER_RE = re.compile(r"^\[(/?)([A-Za-z][^\[\]]*)\]\s*$")
# Step heading: "#### 1.0 Title" or "#### 1.0 ..."
_STEP_HEADING_RE = re.compile(r"^####\s+(\d+\.\d+)\b.*$")
# Phase Index starts here; Phase 1/2/3 step bodies follow; ends at Breadcrumbs.
_PHASE_INDEX_HEADING = "## Phase Index"
def _read_workflow() -> str:
path = _workflow_md_path()
if not path.exists():
raise FileNotFoundError(f"workflow.md not found: {path}")
return path.read_text(encoding="utf-8")
def _parse_marker(line: str) -> tuple[bool, list[str]] | None:
"""Parse a platform marker line.
Returns:
(is_closing, [platform_names]) if line is a marker, else None.
"""
m = _MARKER_RE.match(line)
if not m:
return None
is_closing = m.group(1) == "/"
names = [p.strip() for p in m.group(2).split(",") if p.strip()]
return is_closing, names
def get_phase_index() -> str:
"""Return Phase Index + Phase 1/2/3 step bodies from workflow.md.
Matches what the SessionStart hook injects into the `<workflow>` block:
starts at `## Phase Index`, continues through `## Phase 1: Plan`,
`## Phase 2: Execute`, `## Phase 3: Finish`, stops at
`## Workflow State Breadcrumbs` (consumed by UserPromptSubmit hook).
"""
text = _read_workflow()
lines = text.splitlines()
start: int | None = None
end: int | None = None
for i, line in enumerate(lines):
stripped = line.strip()
if start is None and stripped == _PHASE_INDEX_HEADING:
start = i
continue
if start is not None and stripped == "## Workflow State Breadcrumbs":
end = i
break
if start is None:
return ""
if end is None:
end = len(lines)
return "\n".join(lines[start:end]).rstrip() + "\n"
def get_step(step_id: str) -> str:
"""Return the `#### X.X` section matching step_id (header + body).
Body ends at the next `####` or `---` or `##` heading (whichever comes first).
"""
text = _read_workflow()
lines = text.splitlines()
start: int | None = None
for i, line in enumerate(lines):
m = _STEP_HEADING_RE.match(line)
if m and m.group(1) == step_id:
start = i
break
if start is None:
return ""
end: int = len(lines)
for j in range(start + 1, len(lines)):
line = lines[j]
if line.startswith("#### "):
end = j
break
if line.startswith("## "):
end = j
break
# Horizontal rule at column 0
if line.strip() == "---":
end = j
break
return "\n".join(lines[start:end]).rstrip() + "\n"
def _platform_matches(platform: str, block_names: list[str]) -> bool:
"""Case-insensitive fuzzy match: accept 'cursor', 'Cursor', 'claude-code', 'Claude Code'."""
needle = platform.lower().replace("-", "").replace("_", "").replace(" ", "")
for name in block_names:
hay = name.lower().replace("-", "").replace("_", "").replace(" ", "")
if needle == hay:
return True
return False
def filter_platform(content: str, platform: str) -> str:
"""Keep lines outside any `[...]` block + lines inside blocks that include platform.
Marker lines themselves are dropped from the output.
"""
lines = content.splitlines()
out: list[str] = []
in_block = False
keep_block = False
for line in lines:
marker = _parse_marker(line)
if marker is not None:
is_closing, names = marker
if not is_closing:
in_block = True
keep_block = _platform_matches(platform, names)
else:
in_block = False
keep_block = False
continue # drop the marker line itself
if in_block:
if keep_block:
out.append(line)
continue
out.append(line)
# Collapse runs of 3+ blank lines that may arise from dropped markers
collapsed: list[str] = []
blank_run = 0
for line in out:
if line.strip() == "":
blank_run += 1
if blank_run <= 2:
collapsed.append(line)
else:
blank_run = 0
collapsed.append(line)
return "\n".join(collapsed).rstrip() + "\n"
+16
View File
@@ -0,0 +1,16 @@
#!/usr/bin/env python3
"""
Get Session Context for AI Agent.
Usage:
python3 get_context.py Output context in text format
python3 get_context.py --json Output context in JSON format
"""
from __future__ import annotations
from common.git_context import main
if __name__ == "__main__":
main()
+26
View File
@@ -0,0 +1,26 @@
#!/usr/bin/env python3
"""
Get current developer name.
This is a thin wrapper around common/paths.py.
"""
from __future__ import annotations
import sys
from common.paths import get_developer
def main() -> None:
"""CLI entry point."""
developer = get_developer()
if developer:
print(developer)
else:
print("Developer not initialized", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
+243
View File
@@ -0,0 +1,243 @@
#!/usr/bin/env python3
"""Linear sync hook for Trellis task lifecycle.
Syncs task events to Linear via the `linearis` CLI.
Usage (called automatically by task.py hooks):
python3 .trellis/scripts/hooks/linear_sync.py create
python3 .trellis/scripts/hooks/linear_sync.py start
python3 .trellis/scripts/hooks/linear_sync.py archive
Manual usage:
TASK_JSON_PATH=.trellis/tasks/<name>/task.json python3 .trellis/scripts/hooks/linear_sync.py sync
Environment:
TASK_JSON_PATH - Absolute path to task.json (set by task.py)
Configuration:
.trellis/hooks.local.json - Local config (gitignored), example:
{
"linear": {
"team": "TEAM_KEY",
"project": "Project Name",
"assignees": {
"dev-name": "linear-user-id"
}
}
}
"""
from __future__ import annotations
import json
import os
import subprocess
import sys
from pathlib import Path
# ─── Configuration ────────────────────────────────────────────────────────────
# Trellis priority → Linear priority (1=Urgent, 2=High, 3=Medium, 4=Low)
PRIORITY_MAP = {"P0": 1, "P1": 2, "P2": 3, "P3": 4}
# Linear status names (must match your team's workflow)
STATUS_IN_PROGRESS = "In Progress"
STATUS_DONE = "Done"
def _load_config() -> dict:
"""Load local hook config from .trellis/hooks.local.json."""
task_json_path = os.environ.get("TASK_JSON_PATH", "")
if task_json_path:
# Walk up from task.json to find .trellis/
trellis_dir = Path(task_json_path).parent.parent.parent
else:
trellis_dir = Path(".trellis")
config_path = trellis_dir / "hooks.local.json"
try:
with open(config_path, encoding="utf-8") as f:
return json.load(f)
except (OSError, json.JSONDecodeError):
return {}
CONFIG = _load_config()
LINEAR_CFG = CONFIG.get("linear", {})
TEAM = LINEAR_CFG.get("team", "")
PROJECT = LINEAR_CFG.get("project", "")
ASSIGNEE_MAP = LINEAR_CFG.get("assignees", {})
# ─── Helpers ──────────────────────────────────────────────────────────────────
def _read_task() -> tuple[dict, str]:
path = os.environ.get("TASK_JSON_PATH", "")
if not path:
print("TASK_JSON_PATH not set", file=sys.stderr)
sys.exit(1)
with open(path, encoding="utf-8") as f:
return json.load(f), path
def _write_task(data: dict, path: str) -> None:
with open(path, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2, ensure_ascii=False)
f.write("\n")
def _linearis(*args: str) -> dict | None:
result = subprocess.run(
["linearis", *args],
capture_output=True,
text=True,
encoding="utf-8",
errors="replace",
)
if result.returncode != 0:
print(f"linearis error: {result.stderr.strip()}", file=sys.stderr)
sys.exit(1)
stdout = result.stdout.strip()
if stdout:
return json.loads(stdout)
return None
def _get_linear_issue(task: dict) -> str | None:
meta = task.get("meta")
if isinstance(meta, dict):
return meta.get("linear_issue")
return None
# ─── Actions ──────────────────────────────────────────────────────────────────
def cmd_create() -> None:
if not TEAM:
print("No linear.team configured in hooks.local.json", file=sys.stderr)
sys.exit(1)
task, path = _read_task()
# Skip if already linked
if _get_linear_issue(task):
print(f"Already linked: {_get_linear_issue(task)}")
return
title = task.get("title") or task.get("name") or "Untitled"
args = ["issues", "create", title, "--team", TEAM]
# Map priority
priority = PRIORITY_MAP.get(task.get("priority", ""), 0)
if priority:
args.extend(["-p", str(priority)])
# Set project
if PROJECT:
args.extend(["--project", PROJECT])
# Assign to Linear user
assignee = task.get("assignee", "")
linear_user_id = ASSIGNEE_MAP.get(assignee)
if linear_user_id:
args.extend(["--assignee", linear_user_id])
# Link to parent's Linear issue if available
parent_issue = _resolve_parent_linear_issue(task)
if parent_issue:
args.extend(["--parent-ticket", parent_issue])
result = _linearis(*args)
if result and "identifier" in result:
if not isinstance(task.get("meta"), dict):
task["meta"] = {}
task["meta"]["linear_issue"] = result["identifier"]
_write_task(task, path)
print(f"Created Linear issue: {result['identifier']}")
def cmd_start() -> None:
task, _ = _read_task()
issue = _get_linear_issue(task)
if not issue:
return
_linearis("issues", "update", issue, "-s", STATUS_IN_PROGRESS)
print(f"Updated {issue} -> {STATUS_IN_PROGRESS}")
cmd_sync()
def cmd_archive() -> None:
task, _ = _read_task()
issue = _get_linear_issue(task)
if not issue:
return
_linearis("issues", "update", issue, "-s", STATUS_DONE)
print(f"Updated {issue} -> {STATUS_DONE}")
def cmd_sync() -> None:
"""Sync prd.md content to Linear issue description."""
task, _ = _read_task()
issue = _get_linear_issue(task)
if not issue:
print("No linear_issue in meta, run create first", file=sys.stderr)
sys.exit(1)
# Find prd.md next to task.json
task_json_path = os.environ.get("TASK_JSON_PATH", "")
prd_path = Path(task_json_path).parent / "prd.md"
if not prd_path.is_file():
print(f"No prd.md found at {prd_path}", file=sys.stderr)
sys.exit(1)
description = prd_path.read_text(encoding="utf-8").strip()
_linearis("issues", "update", issue, "-d", description)
print(f"Synced prd.md to {issue} description")
# ─── Parent Issue Resolution ─────────────────────────────────────────────────
def _resolve_parent_linear_issue(task: dict) -> str | None:
"""Find parent task's Linear issue identifier."""
parent_name = task.get("parent")
if not parent_name:
return None
task_json_path = os.environ.get("TASK_JSON_PATH", "")
if not task_json_path:
return None
current_task_dir = Path(task_json_path).parent
tasks_dir = current_task_dir.parent
parent_json = tasks_dir / parent_name / "task.json"
if parent_json.exists():
try:
with open(parent_json, encoding="utf-8") as f:
parent_task = json.load(f)
return _get_linear_issue(parent_task)
except (json.JSONDecodeError, OSError):
pass
return None
# ─── Main ─────────────────────────────────────────────────────────────────────
if __name__ == "__main__":
action = sys.argv[1] if len(sys.argv) > 1 else ""
actions = {
"create": cmd_create,
"start": cmd_start,
"archive": cmd_archive,
"sync": cmd_sync,
}
fn = actions.get(action)
if fn:
fn()
else:
print(f"Unknown action: {action}", file=sys.stderr)
print(f"Valid actions: {', '.join(actions)}", file=sys.stderr)
sys.exit(1)
+51
View File
@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""
Initialize developer for workflow.
Usage:
python3 init_developer.py <developer-name>
This creates:
- .trellis/.developer file with developer info
- .trellis/workspace/<name>/ directory structure
"""
from __future__ import annotations
import sys
from common.paths import (
DIR_WORKFLOW,
FILE_DEVELOPER,
get_developer,
)
from common.developer import init_developer
def main() -> None:
"""CLI entry point."""
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} <developer-name>")
print()
print("Example:")
print(f" {sys.argv[0]} john")
sys.exit(1)
name = sys.argv[1]
# Check if already initialized
existing = get_developer()
if existing:
print(f"Developer already initialized: {existing}")
print()
print(f"To reinitialize, remove {DIR_WORKFLOW}/{FILE_DEVELOPER} first")
sys.exit(0)
if init_developer(name):
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main()
+439
View File
@@ -0,0 +1,439 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Task Management Script.
Usage:
python3 task.py create "<title>" [--slug <name>] [--assignee <dev>] [--priority P0|P1|P2|P3] [--parent <dir>] [--package <pkg>]
python3 task.py add-context <dir> <file> <path> [reason] # Add jsonl entry
python3 task.py validate <dir> # Validate jsonl files
python3 task.py list-context <dir> # List jsonl entries
python3 task.py start <dir> # Set as current task
python3 task.py finish # Clear current task
python3 task.py set-branch <dir> <branch> # Set git branch
python3 task.py set-base-branch <dir> <branch> # Set PR target branch
python3 task.py set-scope <dir> <scope> # Set scope for PR title
python3 task.py archive <task-name> # Archive completed task
python3 task.py list # List active tasks
python3 task.py list-archive [month] # List archived tasks
python3 task.py add-subtask <parent-dir> <child-dir> # Link child to parent
python3 task.py remove-subtask <parent-dir> <child-dir> # Unlink child from parent
"""
from __future__ import annotations
import argparse
import sys
from common.log import Colors, colored
from common.paths import (
DIR_WORKFLOW,
DIR_TASKS,
FILE_TASK_JSON,
get_repo_root,
get_developer,
get_tasks_dir,
get_current_task,
set_current_task,
clear_current_task,
)
from common.io import read_json, write_json
from common.task_utils import resolve_task_dir, run_task_hooks
from common.tasks import iter_active_tasks, children_progress
# Import command handlers from split modules (also re-exports for plan.py compatibility)
from common.task_store import (
cmd_create,
cmd_archive,
cmd_set_branch,
cmd_set_base_branch,
cmd_set_scope,
cmd_add_subtask,
cmd_remove_subtask,
)
from common.task_context import (
cmd_add_context,
cmd_validate,
cmd_list_context,
)
# =============================================================================
# Command: start / finish
# =============================================================================
def cmd_start(args: argparse.Namespace) -> int:
"""Set current task."""
repo_root = get_repo_root()
task_input = args.dir
if not task_input:
print(colored("Error: task directory or name required", Colors.RED))
return 1
# Resolve task directory (supports task name, relative path, or absolute path)
full_path = resolve_task_dir(task_input, repo_root)
if not full_path.is_dir():
print(colored(f"Error: Task not found: {task_input}", Colors.RED))
print("Hint: Use task name (e.g., 'my-task') or full path (e.g., '.trellis/tasks/01-31-my-task')")
return 1
# Convert to relative path for storage
try:
task_dir = full_path.relative_to(repo_root).as_posix()
except ValueError:
task_dir = str(full_path)
if set_current_task(task_dir, repo_root):
print(colored(f"✓ Current task set to: {task_dir}", Colors.GREEN))
task_json_path = full_path / FILE_TASK_JSON
if task_json_path.is_file():
data = read_json(task_json_path)
if data and data.get("status") == "planning":
data["status"] = "in_progress"
if write_json(task_json_path, data):
print(colored("✓ Status: planning → in_progress", Colors.GREEN))
print()
print(colored("The hook will now inject context from this task's jsonl files.", Colors.BLUE))
run_task_hooks("after_start", task_json_path, repo_root)
return 0
else:
print(colored("Error: Failed to set current task", Colors.RED))
return 1
def cmd_finish(args: argparse.Namespace) -> int:
"""Clear current task."""
_ = args # signature required by argparse dispatcher
repo_root = get_repo_root()
current = get_current_task(repo_root)
if not current:
print(colored("No current task set", Colors.YELLOW))
return 0
# Resolve task.json path before clearing
task_json_path = repo_root / current / FILE_TASK_JSON
clear_current_task(repo_root)
print(colored(f"✓ Cleared current task (was: {current})", Colors.GREEN))
if task_json_path.is_file():
run_task_hooks("after_finish", task_json_path, repo_root)
return 0
# =============================================================================
# Command: list
# =============================================================================
def cmd_list(args: argparse.Namespace) -> int:
"""List active tasks."""
repo_root = get_repo_root()
tasks_dir = get_tasks_dir(repo_root)
current_task = get_current_task(repo_root)
developer = get_developer(repo_root)
filter_mine = args.mine
filter_status = args.status
if filter_mine:
if not developer:
print(colored("Error: No developer set. Run init_developer.py first", Colors.RED), file=sys.stderr)
return 1
print(colored(f"My tasks (assignee: {developer}):", Colors.BLUE))
else:
print(colored("All active tasks:", Colors.BLUE))
print()
# Single pass: collect all tasks via shared iterator
all_tasks = {t.dir_name: t for t in iter_active_tasks(tasks_dir)}
all_statuses = {name: t.status for name, t in all_tasks.items()}
# Display tasks hierarchically
count = 0
def _print_task(dir_name: str, indent: int = 0) -> None:
nonlocal count
t = all_tasks[dir_name]
# Apply --mine filter
if filter_mine and (t.assignee or "-") != developer:
return
# Apply --status filter
if filter_status and t.status != filter_status:
return
relative_path = f"{DIR_WORKFLOW}/{DIR_TASKS}/{dir_name}"
marker = ""
if relative_path == current_task:
marker = f" {colored('<- current', Colors.GREEN)}"
# Children progress
progress = children_progress(t.children, all_statuses)
# Package tag
pkg_tag = f" @{t.package}" if t.package else ""
prefix = " " * indent + " - "
if filter_mine:
print(f"{prefix}{dir_name}/ ({t.status}){pkg_tag}{progress}{marker}")
else:
print(f"{prefix}{dir_name}/ ({t.status}){pkg_tag}{progress} [{colored(t.assignee or '-', Colors.CYAN)}]{marker}")
count += 1
# Print children indented
for child_name in t.children:
if child_name in all_tasks:
_print_task(child_name, indent + 1)
# Display only top-level tasks (those without a parent)
for dir_name in sorted(all_tasks.keys()):
if not all_tasks[dir_name].parent:
_print_task(dir_name)
if count == 0:
if filter_mine:
print(" (no tasks assigned to you)")
else:
print(" (no active tasks)")
print()
print(f"Total: {count} task(s)")
return 0
# =============================================================================
# Command: list-archive
# =============================================================================
def cmd_list_archive(args: argparse.Namespace) -> int:
"""List archived tasks."""
repo_root = get_repo_root()
tasks_dir = get_tasks_dir(repo_root)
archive_dir = tasks_dir / "archive"
month = args.month
print(colored("Archived tasks:", Colors.BLUE))
print()
if month:
month_dir = archive_dir / month
if month_dir.is_dir():
print(f"[{month}]")
for d in sorted(month_dir.iterdir()):
if d.is_dir():
print(f" - {d.name}/")
else:
print(f" No archives for {month}")
else:
if archive_dir.is_dir():
for month_dir in sorted(archive_dir.iterdir()):
if month_dir.is_dir():
month_name = month_dir.name
count = sum(1 for d in month_dir.iterdir() if d.is_dir())
print(f"[{month_name}] - {count} task(s)")
return 0
# =============================================================================
# Help
# =============================================================================
def show_usage() -> None:
"""Show usage help."""
print("""Task Management Script
Usage:
python3 task.py create <title> Create new task directory
python3 task.py create <title> --package <pkg> Create task for a specific package
python3 task.py create <title> --parent <dir> Create task as child of parent
python3 task.py add-context <dir> <jsonl> <path> [reason] Add entry to jsonl
python3 task.py validate <dir> Validate jsonl files
python3 task.py list-context <dir> List jsonl entries
python3 task.py start <dir> Set as current task
python3 task.py finish Clear current task
python3 task.py set-branch <dir> <branch> Set git branch
python3 task.py set-base-branch <dir> <branch> Set PR target branch
python3 task.py set-scope <dir> <scope> Set scope for PR title
python3 task.py archive <task-name> Archive completed task
python3 task.py add-subtask <parent> <child> Link child task to parent
python3 task.py remove-subtask <parent> <child> Unlink child from parent
python3 task.py list [--mine] [--status <status>] List tasks
python3 task.py list-archive [YYYY-MM] List archived tasks
Monorepo options:
--package <pkg> Package name (validated against config.yaml packages)
List options:
--mine, -m Show only tasks assigned to current developer
--status, -s <s> Filter by status (planning, in_progress, review, completed)
Examples:
python3 task.py create "Add login feature" --slug add-login
python3 task.py create "Add login feature" --slug add-login --package cli
python3 task.py create "Child task" --slug child --parent .trellis/tasks/01-21-parent
python3 task.py add-context <dir> implement .trellis/spec/cli/backend/auth.md "Auth guidelines"
python3 task.py set-branch <dir> task/add-login
python3 task.py start .trellis/tasks/01-21-add-login
python3 task.py finish
python3 task.py archive add-login
python3 task.py add-subtask parent-task child-task # Link existing tasks
python3 task.py remove-subtask parent-task child-task
python3 task.py list # List all active tasks
python3 task.py list --mine # List my tasks only
python3 task.py list --mine --status in_progress # List my in-progress tasks
""")
# =============================================================================
# Main Entry
# =============================================================================
def main() -> int:
"""CLI entry point."""
# Deprecation guard: `init-context` was removed in v0.5.0-beta.12.
# Detect early so argparse doesn't mask the real reason with a generic
# "invalid choice" error.
if len(sys.argv) >= 2 and sys.argv[1] == "init-context":
print(
colored(
"Error: `task.py init-context` was removed in v0.5.0-beta.12.",
Colors.RED,
),
file=sys.stderr,
)
print(
"implement.jsonl / check.jsonl are now seeded on `task.py create` for",
file=sys.stderr,
)
print(
"sub-agent-capable platforms and curated by the AI during Phase 1.3.",
file=sys.stderr,
)
print("See .trellis/workflow.md Phase 1.3 or run:", file=sys.stderr)
print(
" python3 ./.trellis/scripts/get_context.py --mode phase --step 1.3",
file=sys.stderr,
)
print(
"Use `task.py add-context <dir> implement|check <path> <reason>` to append entries.",
file=sys.stderr,
)
return 2
parser = argparse.ArgumentParser(
description="Task Management Script",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
subparsers = parser.add_subparsers(dest="command", help="Commands")
# create
p_create = subparsers.add_parser("create", help="Create new task")
p_create.add_argument("title", help="Task title")
p_create.add_argument("--slug", "-s", help="Task slug")
p_create.add_argument("--assignee", "-a", help="Assignee developer")
p_create.add_argument("--priority", "-p", default="P2", help="Priority (P0-P3)")
p_create.add_argument("--description", "-d", help="Task description")
p_create.add_argument("--parent", help="Parent task directory (establishes subtask link)")
p_create.add_argument("--package", help="Package name for monorepo projects")
# add-context
p_add = subparsers.add_parser("add-context", help="Add context entry")
p_add.add_argument("dir", help="Task directory")
p_add.add_argument("file", help="JSONL file (implement|check)")
p_add.add_argument("path", help="File path to add")
p_add.add_argument("reason", nargs="?", help="Reason for adding")
# validate
p_validate = subparsers.add_parser("validate", help="Validate context files")
p_validate.add_argument("dir", help="Task directory")
# list-context
p_listctx = subparsers.add_parser("list-context", help="List context entries")
p_listctx.add_argument("dir", help="Task directory")
# start
p_start = subparsers.add_parser("start", help="Set current task")
p_start.add_argument("dir", help="Task directory")
# finish
subparsers.add_parser("finish", help="Clear current task")
# set-branch
p_branch = subparsers.add_parser("set-branch", help="Set git branch")
p_branch.add_argument("dir", help="Task directory")
p_branch.add_argument("branch", help="Branch name")
# set-base-branch
p_base = subparsers.add_parser("set-base-branch", help="Set PR target branch")
p_base.add_argument("dir", help="Task directory")
p_base.add_argument("base_branch", help="Base branch name (PR target)")
# set-scope
p_scope = subparsers.add_parser("set-scope", help="Set scope")
p_scope.add_argument("dir", help="Task directory")
p_scope.add_argument("scope", help="Scope name")
# archive
p_archive = subparsers.add_parser("archive", help="Archive task")
p_archive.add_argument("name", help="Task name")
p_archive.add_argument("--no-commit", action="store_true", help="Skip auto git commit after archive")
# list
p_list = subparsers.add_parser("list", help="List tasks")
p_list.add_argument("--mine", "-m", action="store_true", help="My tasks only")
p_list.add_argument("--status", "-s", help="Filter by status")
# add-subtask
p_addsub = subparsers.add_parser("add-subtask", help="Link child task to parent")
p_addsub.add_argument("parent_dir", help="Parent task directory")
p_addsub.add_argument("child_dir", help="Child task directory")
# remove-subtask
p_rmsub = subparsers.add_parser("remove-subtask", help="Unlink child task from parent")
p_rmsub.add_argument("parent_dir", help="Parent task directory")
p_rmsub.add_argument("child_dir", help="Child task directory")
# list-archive
p_listarch = subparsers.add_parser("list-archive", help="List archived tasks")
p_listarch.add_argument("month", nargs="?", help="Month (YYYY-MM)")
args = parser.parse_args()
if not args.command:
show_usage()
return 1
commands = {
"create": cmd_create,
"add-context": cmd_add_context,
"validate": cmd_validate,
"list-context": cmd_list_context,
"start": cmd_start,
"finish": cmd_finish,
"set-branch": cmd_set_branch,
"set-base-branch": cmd_set_base_branch,
"set-scope": cmd_set_scope,
"archive": cmd_archive,
"add-subtask": cmd_add_subtask,
"remove-subtask": cmd_remove_subtask,
"list": cmd_list,
"list-archive": cmd_list_archive,
}
if args.command in commands:
return commands[args.command](args)
else:
show_usage()
return 1
if __name__ == "__main__":
sys.exit(main())
@@ -0,0 +1,141 @@
# Database Guidelines
> MySQL data and DAO conventions for the library-management system.
---
## Overview
MySQL is the project's data layer. DAO classes perform CRUD and query operations
against MySQL. Application source and schema files are not present yet, so table
and class names here are illustrative conventions for future implementation.
---
## Core Tables
Use primary keys for every table and foreign keys for cross-entity integrity.
Illustrative table names:
- `books`: book information, inventory count/status, category reference.
- `book_categories`: category names and descriptions.
- `readers`: reader profiles, borrowing eligibility, contact information.
- `borrow_records`: book-reader borrowing, return, renew, and overdue data.
- `administrators`: administrator/librarian login and profile data.
- `roles`: administrator, librarian, reader, and future role definitions.
- `permissions`: permission definitions for protected actions.
- `role_permissions`: role-to-permission mapping.
- `system_logs`: key operation logs, backup events, and exception traces.
When schema files are introduced, record the actual path, DDL style, and exact
table names here.
---
## DAO Responsibilities
- DAOs own database CRUD and query details.
- Use parameterized SQL or prepared-statement style access; never concatenate
  raw request parameters into SQL (see the sketch after this list).
- Keep transaction boundaries in the service layer for workflows that span
multiple DAO calls, such as borrow/return operations that also update
inventory status.
- Return entities or small query result objects to services, not HTML or
servlet response objects.
- Keep MySQL connection details in a shared configuration/helper once one
exists, for example `JdbcUtil` plus `db.properties`.
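A minimal sketch of this access style, assuming the illustrative `Book`,
`BookDao`, and `JdbcUtil` names from this spec (none of this code exists yet):
```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Illustrative only: Book, BookDao, JdbcUtil, and the books table are the
// naming conventions from this spec, not existing project code.
public class BookDaoImpl implements BookDao {
    @Override
    public Book findById(long id) throws SQLException {
        String sql = "SELECT id, title, author, category_id, status"
                + " FROM books WHERE id = ?";
        try (Connection conn = JdbcUtil.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, id); // value is bound, never concatenated into SQL
            try (ResultSet rs = ps.executeQuery()) {
                if (!rs.next()) {
                    return null;
                }
                Book book = new Book();
                book.setId(rs.getLong("id"));
                book.setTitle(rs.getString("title"));
                book.setAuthor(rs.getString("author"));
                book.setCategoryId(rs.getLong("category_id"));
                book.setStatus(rs.getString("status"));
                return book; // an entity, not HTML or a servlet response
            }
        }
    }
}
```
Whether DAOs rethrow `SQLException` or wrap it in a project exception is a
decision for the first implementation; the parameter binding is the fixed rule.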
---
## Query Guidance
- Book search must support combined lookup by title, author, category, and ID
  (sketched after this list).
- Statistics queries should cover borrowing rankings, inventory reports, and
overdue reports.
- Borrowing records should preserve enough dates/status fields for borrow,
return, renew, overdue calculation, and automatic collection status updates.
- Permission queries should support role-based checks for administrator,
librarian, and reader workflows.
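A sketch of the combined lookup, inside the same illustrative `BookDaoImpl` as
above (ID lookup is the earlier `findById` sketch). The SQL text is assembled
from fixed fragments only; user values are always bound as parameters:
```java
// Any subset of the criteria may be null or empty.
public List<Book> search(String title, String author, Long categoryId)
        throws SQLException {
    StringBuilder sql = new StringBuilder(
            "SELECT id, title, author, category_id, status FROM books WHERE 1=1");
    List<Object> params = new ArrayList<>();
    if (title != null && !title.trim().isEmpty()) {
        sql.append(" AND title LIKE ?");
        params.add("%" + title.trim() + "%");
    }
    if (author != null && !author.trim().isEmpty()) {
        sql.append(" AND author LIKE ?");
        params.add("%" + author.trim() + "%");
    }
    if (categoryId != null) {
        sql.append(" AND category_id = ?");
        params.add(categoryId);
    }
    try (Connection conn = JdbcUtil.getConnection();
         PreparedStatement ps = conn.prepareStatement(sql.toString())) {
        for (int i = 0; i < params.size(); i++) {
            ps.setObject(i + 1, params.get(i)); // values bound, SQL text fixed
        }
        try (ResultSet rs = ps.executeQuery()) {
            List<Book> books = new ArrayList<>();
            while (rs.next()) {
                books.add(mapRow(rs)); // row-mapping helper as in findById
            }
            return books;
        }
    }
}
```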
---
## Integrity Constraints
- `books.category_id` should reference `book_categories`.
- `borrow_records.book_id` should reference `books`.
- `borrow_records.reader_id` should reference `readers`.
- Administrator-role and role-permission mapping tables should use foreign keys
to preserve authorization integrity.
- Prefer explicit status columns/enums for inventory and borrowing states, then
document the chosen values once code exists.
## Scenario: Login And Permission Scaffold Schema
### 1. Scope / Trigger
- Trigger: the initial Java Web scaffold introduced a concrete MySQL schema and
login contract.
- Schema path: `src/main/resources/db/schema.sql`.
- Example configuration path: `src/main/resources/db.properties.example`.
### 2. Signatures
- DAO signature: `UserDao.findActiveByUsername(String username)`.
- Service signature: `AuthService.authenticate(String username, String password)`.
- Permission signature: `AuthService.hasPermission(AuthenticatedUser user, Permission permission)`.
- Login tables: `roles`, `permissions`, `role_permissions`, `users`, and
`system_logs`.
### 3. Contracts
- `users.username`: unique login identifier submitted by `LoginServlet`.
- `users.password_hash`: PBKDF2 hash in
  `pbkdf2_sha256$iterations$saltBase64$hashBase64` format (see the
  verification sketch after this list).
- `users.role_code`: foreign key to `roles.code`; supported scaffold values
are `administrator`, `librarian`, and `reader`.
- Session state stores an `AuthenticatedUser` snapshot, role code, and
permission-code set. It must not store raw passwords or DAO result objects
with password hashes.
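A sketch of verifying the stored hash format; `PasswordHasher` is a
hypothetical helper name, and only the
`pbkdf2_sha256$iterations$saltBase64$hashBase64` layout comes from this
contract:
```java
import java.security.GeneralSecurityException;
import java.security.MessageDigest;
import java.security.spec.KeySpec;
import java.util.Base64;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;

// Hypothetical helper; only the stored format comes from this spec.
final class PasswordHasher {
    static boolean verify(String password, String stored)
            throws GeneralSecurityException {
        String[] parts = stored.split("\\$");
        if (parts.length != 4 || !"pbkdf2_sha256".equals(parts[0])) {
            return false; // unknown or malformed format
        }
        int iterations = Integer.parseInt(parts[1]);
        byte[] salt = Base64.getDecoder().decode(parts[2]);
        byte[] expected = Base64.getDecoder().decode(parts[3]);
        KeySpec spec = new PBEKeySpec(
                password.toCharArray(), salt, iterations, expected.length * 8);
        byte[] actual = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256")
                .generateSecret(spec).getEncoded();
        // Constant-time comparison avoids leaking match position via timing.
        return MessageDigest.isEqual(expected, actual);
    }
}
```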
### 4. Validation & Error Matrix
- Missing username or password -> request returns to login JSP with
`Username and password are required.`
- Unknown user, inactive user, or hash mismatch -> request returns to login JSP
with `Invalid username or password.`
- Missing `db.properties`, JDBC failure, or unsupported role code -> request
returns a generic service-unavailable message and logs server-side details.
- Authenticated user missing a required permission -> HTTP 403 and
`WEB-INF/jsp/auth/unauthorized.jsp`.
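A sketch of the 403 path as a filter, assuming Servlet 4.0+ (default
`init`/`destroy`) and a hypothetical session attribute name; only the status
code and the JSP path come from the matrix above:
```java
import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Hypothetical filter and session key; real checks would delegate to
// AuthService.hasPermission(...) from the contracts above.
public class PermissionGuardFilter implements Filter {
    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest request = (HttpServletRequest) req;
        HttpServletResponse response = (HttpServletResponse) res;
        Object user = request.getSession().getAttribute("authenticatedUser");
        if (user == null) {
            // Not logged in: send back to the login page.
            response.sendRedirect(request.getContextPath() + "/login");
            return;
        }
        if (!hasRequiredPermission(user)) {
            // Authenticated but missing the permission -> 403 + JSP.
            response.setStatus(HttpServletResponse.SC_FORBIDDEN);
            request.getRequestDispatcher("/WEB-INF/jsp/auth/unauthorized.jsp")
                   .forward(request, response);
            return;
        }
        chain.doFilter(req, res);
    }

    private boolean hasRequiredPermission(Object user) {
        return false; // placeholder: delegate to AuthService in real code
    }
}
```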
### 5. Good/Base/Bad Cases
- Good: `admin` resolves to `administrator`, receives all scaffold
permissions, and can access `/admin/home`.
- Base: `reader` resolves to `reader`, can access `/reader/home`, and cannot
access `/admin/home`.
- Bad: a JSP reads SQL or password hashes directly from the database. Keep that
logic in DAO/service code.
### 6. Tests Required
- Compile service/DAO/entity/util classes with `javac` when Maven is
unavailable.
- Run `PermissionPolicyCheck` or equivalent assertions for administrator,
librarian, and reader permissions.
- When Maven/Tomcat dependencies are installed, run `mvn test` or
`mvn clean package` to compile Servlet and JSP integration.
### 7. Wrong vs Correct
#### Wrong
```java
// JSP or Servlet opens JDBC and checks passwords directly.
```
#### Correct
```text
login.jsp -> LoginServlet -> AuthService -> UserDao -> users/roles tables
```
@@ -0,0 +1,73 @@
# Directory Structure
> Backend directory organization for the JSP + Servlet + MySQL application.
---
## Overview
Future backend code should follow the established B/S layered architecture.
There is no application source tree yet; the paths and package names below are
illustrative conventions to use when creating the first application code.
---
## Suggested Layout
```text
src/main/java/com/mzh/library/
controller/ Servlet controllers such as BookServlet
service/ Service interfaces such as BookService
service/impl/ Business implementations such as BookServiceImpl
dao/ DAO interfaces such as BookDao
dao/impl/ MySQL DAO implementations such as BookDaoImpl
entity/ JavaBeans/entities such as Book, Reader, BorrowRecord
filter/ Authentication, authorization, encoding filters
util/ Shared utilities such as JdbcUtil or DateUtil
src/main/resources/
db.properties MySQL connection configuration when introduced
src/main/webapp/
WEB-INF/web.xml Servlet/filter mappings when annotations are not used
```
Use the package root consistently once the first code is created. If a
different root package is chosen in IDEA, update this spec in the same change.
---
## Layer Responsibilities
- `controller`: Servlet classes dispatch requests, validate parameters, call
services, choose JSP forwards or redirects, and return results.
- `service`: Business workflows including book warehousing/intake, borrowing,
returning, renewals, inventory status updates, overdue statistics, and
permission checks.
- `dao`: CRUD and query access to MySQL. DAOs should not perform presentation
rendering or multi-step business workflows.
- `entity`: Plain data objects representing books, categories, readers,
borrowing records, administrators, roles/permissions, and system logs.
- `filter`: Cross-cutting web concerns such as login checks, role guards, and
request encoding.
- `util`: Small shared helpers; add a new one only after searching for an
  existing helper first.
---
## Naming Conventions
- Servlet controllers use names such as `BookServlet`, `ReaderServlet`,
`BorrowServlet`, and `LoginServlet`.
- Service interfaces and implementations use names such as `BookService` and
`BookServiceImpl`.
- DAO interfaces and implementations use names such as `BookDao` and
`BookDaoImpl`.
- Entity names should be singular Java nouns such as `Book`, `BookCategory`,
`Reader`, `BorrowRecord`, `Administrator`, `RolePermission`, and
`SystemLog`.
---
## Boundaries
Do not put SQL in JSPs or Servlets. Do not put HTML generation in DAOs or
services. Keep permission checks in filters/services and keep request parameter
parsing in controllers.
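As one way to keep permission checks out of controllers, a role-guard filter
might look like the sketch below; the class name, session attribute, role
value, and URL pattern are assumptions, not existing files (Servlet 4.0
default `init`/`destroy` methods are assumed):
```java
import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;

// Illustrative role guard for /admin/*; the class name, session key, and
// role value are assumptions.
public class AdminGuardFilter implements Filter {
    @Override
    public void doFilter(ServletRequest request, ServletResponse response,
                         FilterChain chain) throws IOException, ServletException {
        HttpServletRequest req = (HttpServletRequest) request;
        HttpServletResponse resp = (HttpServletResponse) response;
        HttpSession session = req.getSession(false);
        Object role = session == null ? null : session.getAttribute("roleCode");
        if (!"administrator".equals(role)) {
            resp.sendError(HttpServletResponse.SC_FORBIDDEN); // deny before controllers run
            return;
        }
        chain.doFilter(request, response);
    }
}
```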
+54
View File
@@ -0,0 +1,54 @@
# Error Handling
> Backend error handling conventions for Servlet, service, DAO, and MySQL code.
---
## Overview
Servlet controllers are responsible for validating parameters and returning
results safely. Services report business failures such as ineligible readers,
unavailable inventory, failed permission checks, and overdue-rule violations.
DAOs report database failures without leaking SQL details to JSP pages.
---
## Controller Behavior
- Validate required parameters, numeric IDs, dates, and operation names before
calling services.
- On validation failure, return to the relevant JSP with field-level messages
or redirect with a short flash-style message.
- Do not print stack traces or database details into JSP output.
- Use forwards for rendering request-scoped form errors and redirects after
successful mutating operations to avoid duplicate submissions.
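To make the forward-vs-redirect rule above concrete, here is a minimal sketch;
`BookService.createBook`, the implementation classes, and the JSP/redirect
paths are assumptions:
```java
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Sketch of forward-on-error, redirect-on-success; BookService and the
// JSP/redirect paths are illustrative names.
public class BookServlet extends HttpServlet {
    private final BookService bookService = new BookServiceImpl(new BookDaoImpl());

    @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        String title = req.getParameter("title");
        if (title == null || title.trim().isEmpty()) {
            // Forward: the request-scoped error survives for JSP rendering.
            req.setAttribute("errors", "Title is required.");
            req.getRequestDispatcher("/WEB-INF/jsp/books/form.jsp").forward(req, resp);
            return;
        }
        bookService.createBook(title.trim());
        // Redirect after a successful mutation to avoid duplicate submissions.
        resp.sendRedirect(req.getContextPath() + "/books");
    }
}
```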
---
## Service Behavior
- Centralize business rule failures in the service layer: permission denied,
reader not eligible to borrow, book not available, renewal rejected, overdue
status conflicts, and inventory update failures.
- Keep transaction rollback decisions close to multi-step workflows such as
borrow, return, renew, and book intake.
- Return clear success/failure results or throw project-specific exceptions
once the application defines an exception model.
---
## DAO Behavior
- Wrap low-level SQL/MySQL exceptions with enough context for logs, but do not
expose credentials, raw SQL with user values, or stack traces to users.
- Close JDBC resources reliably using the project helper or language construct
chosen by the first implementation.
- Let services decide whether a DAO failure should abort a transaction or map
to a user-facing message.
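A minimal sketch of reliable cleanup with try-with-resources, assuming the
`JdbcUtil` helper suggested in the directory-structure spec and an
illustrative `Book` entity and `BookDao` interface:
```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Sketch only: JdbcUtil, BookDao, and the Book entity are illustrative names.
public class BookDaoImpl implements BookDao {
    @Override
    public Book findById(long id) {
        String sql = "SELECT id, title FROM books WHERE id = ?";
        try (Connection conn = JdbcUtil.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, id);
            try (ResultSet rs = ps.executeQuery()) {
                if (!rs.next()) {
                    return null;
                }
                Book book = new Book();
                book.setId(rs.getLong("id"));
                book.setTitle(rs.getString("title"));
                return book;
            }
        } catch (SQLException e) {
            // Wrap with context; a project-specific exception can replace this
            // once the application defines its exception model.
            throw new RuntimeException("books lookup failed for id=" + id, e);
        }
    }
}
```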
---
## User-Facing Results
Use concise messages suitable for JSP rendering. For protected operations,
prefer generic denial messages over exposing permission internals.
+77
View File
@@ -0,0 +1,77 @@
# Backend Development Guidelines
> Backend conventions for the JSP + Servlet + MySQL library-management system.
---
## Overview
The developer has established the backend architecture as a B/S Java web
application using JSP + Servlet, MySQL, Tomcat, and IDEA. Application source
code does not exist in this workspace yet, so package names, class names, and
table names below are illustrative project conventions for future code rather
than references to existing files.
Use a layered design:
```text
JSP/CSS presentation -> Servlet controller -> Service/business -> DAO -> MySQL
```
Controllers handle request dispatch, parameter validation, and result return.
Services handle business workflows and permission checks. DAOs perform database
CRUD. MySQL stores books, categories, readers, borrowing records,
administrators, roles/permissions, and system logs.
---
## Guidelines Index
| Guide | Description | Status |
|-------|-------------|--------|
| [Directory Structure](./directory-structure.md) | Servlet, service, DAO, entity, and config organization | Project decision documented |
| [Database Guidelines](./database-guidelines.md) | MySQL tables, DAO CRUD, keys, and transactions | Project decision documented |
| [Error Handling](./error-handling.md) | Servlet validation, service failures, and safe responses | Project decision documented |
| [Quality Guidelines](./quality-guidelines.md) | Layer boundaries and review constraints | Project decision documented |
| [Logging Guidelines](./logging-guidelines.md) | System logs, key operations, and exception tracing | Project decision documented |
---
## Pre-Development Checklist
Before backend implementation, read:
- `.trellis/spec/backend/directory-structure.md`
- `.trellis/spec/backend/database-guidelines.md`
- `.trellis/spec/backend/error-handling.md`
- `.trellis/spec/backend/logging-guidelines.md`
- `.trellis/spec/backend/quality-guidelines.md`
- `.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`
---
## Core Backend Modules
- Login and permission management for administrator, librarian, and reader
roles.
- Book information management for create, update, delete, category maintenance,
and inventory status.
- Reader information management for profiles, borrowing eligibility, and
contact information.
- Borrowing and return management for borrow, return, renew, overdue handling,
and automatic collection status updates.
- Book search and statistics for combined title, author, category, and ID
search, borrowing rankings, inventory reports, and overdue reports.
- System maintenance and logs for key operation logs, data backup support, and
exception tracing.
## Evidence
- `.trellis/tasks/00-bootstrap-guidelines/research/repo-scan.md` records that
no application source code exists yet.
- `.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`
records the developer-provided stack, architecture, modules, and data model.
---
**Language**: All documentation should be written in **English**.
@@ -0,0 +1,53 @@
# Logging Guidelines
> Logging and system-log conventions for the library-management backend.
---
## Overview
The project requires system maintenance and logging support covering key
operation logs, data backups, and exception tracing. MySQL should include a
system log table,
illustratively `system_logs`, for durable audit and troubleshooting records.
---
## What to Log
Record key operations that affect security, data integrity, or inventory:
- Login success/failure and logout for administrator, librarian, and reader
roles when applicable.
- Permission changes, role changes, and denied protected operations.
- Book create, update, delete, category maintenance, and warehousing/intake.
- Borrow, return, renew, overdue handling, and automatic inventory status
updates.
- Reader profile, eligibility, and contact information changes.
- Data backup events and restore-related maintenance actions.
- Unhandled exceptions and failed database operations with safe context.
---
## System Log Fields
When the schema is introduced, `system_logs` should preserve enough information
to trace actions without exposing sensitive data. Useful fields include an ID,
operator ID, operator role, operation type, target entity/table, target ID,
result status, message, timestamp, and request IP when available.
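A hedged sketch of persisting those fields, assuming illustrative column names
and the `JdbcUtil` helper from the backend spec; the real schema may differ:
```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.time.Instant;

// Sketch only: column names and JdbcUtil are illustrative, not a fixed schema.
public class SystemLogDao {
    public void record(long operatorId, String operatorRole, String operationType,
                       String targetTable, Long targetId, String resultStatus,
                       String message, String requestIp) {
        String sql = "INSERT INTO system_logs (operator_id, operator_role,"
                + " operation_type, target_table, target_id, result_status,"
                + " message, created_at, request_ip)"
                + " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
        try (Connection conn = JdbcUtil.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, operatorId);
            ps.setString(2, operatorRole);
            ps.setString(3, operationType);
            ps.setString(4, targetTable);
            if (targetId == null) {
                ps.setNull(5, Types.BIGINT);
            } else {
                ps.setLong(5, targetId);
            }
            ps.setString(6, resultStatus);
            ps.setString(7, message);
            ps.setTimestamp(8, Timestamp.from(Instant.now()));
            ps.setString(9, requestIp);
            ps.executeUpdate();
        } catch (SQLException e) {
            // A failed audit write should be reported server-side without
            // breaking the business operation that triggered it.
            e.printStackTrace();
        }
    }
}
```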
---
## Sensitive Data
Do not log passwords, raw credentials, full request bodies, database connection
strings, or unnecessary personal data. Prefer IDs and operation summaries over
large before/after payloads.
---
## Exception Tracing
Server logs may contain technical stack traces for developers. User-facing JSP
pages should receive concise messages. Durable system logs should record the
operation, actor, failure category, and correlation details needed to locate
the server-side exception.
@@ -0,0 +1,58 @@
# Quality Guidelines
> Backend quality constraints for the JSP + Servlet + MySQL application.
---
## Overview
No application source code exists yet, but the developer has established the
backend stack and layered design. Future backend work should preserve those
decisions and update these specs with real file paths once code exists.
---
## Required Patterns
- Keep strict layer boundaries: Servlet -> Service -> DAO -> MySQL.
- Use JSP/CSS only for presentation; JSPs must not contain SQL or business
workflow logic.
- Validate request parameters in Servlet controllers before calling services.
- Enforce permission checks in filters/services for administrator, librarian,
and reader roles.
- Keep inventory status updates inside service workflows so borrow, return,
renew, overdue handling, and book intake remain consistent.
- Use primary keys and foreign keys for core entity relationships.
- Record key operations and exceptions according to the logging spec.
---
## Forbidden Patterns
- Do not introduce React, Vue, SPA routing, ORM conventions, or non-Servlet
backend frameworks unless the developer explicitly changes the stack.
- Do not place SQL in JSP files or directly in presentation helpers.
- Do not put business workflows in DAO classes.
- Do not rely on client-side validation as the only validation for protected
operations.
- Do not expose stack traces, raw SQL errors, or sensitive personal data to end
users.
---
## Checks And Testing
When Java source exists, document and run the actual compile/test commands for
the chosen IDEA/Tomcat project structure. Until then, documentation-only
changes should run Trellis validation, Python compile checks for Trellis
scripts when relevant, and placeholder scans for scaffold markers.
---
## Review Checklist
- Does the change preserve JSP + Servlet + MySQL + Tomcat assumptions?
- Are Servlet, service, DAO, and JSP responsibilities separated?
- Are book, category, reader, borrowing, administrator, permission, and log
data flows covered where relevant?
- Are role permissions and operation logs handled for protected workflows?
@@ -0,0 +1,52 @@
# Component Guidelines
> JSP fragment, form, table, and reusable UI conventions.
---
## Overview
This project uses JSP-based presentation, not a component framework. Treat JSP
includes, fragments, tag files, form layouts, tables, and shared CSS classes as
the reusable UI units.
---
## JSP Includes And Fragments
- Use shared fragments for repeated layout pieces such as header, navigation,
sidebar, footer, pagination, and message banners.
- Prefer `.jspf` includes or JSP tag files once the project chooses one
pattern; document the actual paths after implementation.
- Keep fragments presentation-focused. They should not open database
connections or call DAOs.
---
## Forms
- Forms should post to Servlet controller endpoints, not directly to DAOs or
JSP-only handlers.
- Render validation messages from request attributes set by controllers.
- Preserve user-entered values on validation failure where practical.
- Use clear labels, required-field indicators, and server-side validation for
book, reader, borrowing, login, and permission forms.
---
## Tables And Reports
- Use consistent table patterns for book lists, reader lists, borrowing
records, rankings, inventory reports, overdue reports, and system logs.
- Include stable empty states and pagination or filtering controls when lists
can grow.
- Keep search forms aligned with supported filters: title, author, category,
and book ID.
---
## Styling
Implement JSP/CSS pages to faithfully restore the approved image design. Prefer
semantic class names tied to page structure or reusable UI roles. Avoid adding a
frontend component framework unless explicitly introduced later.
@@ -0,0 +1,52 @@
# Directory Structure
> JSP page and static asset organization for the presentation layer.
---
## Overview
Future frontend code should be JSP/CSS rendered by the Servlet/Tomcat
application. There is no application source tree yet; the paths below are
illustrative conventions for the first implementation.
---
## Suggested Layout
```text
src/main/webapp/
WEB-INF/jsp/
common/ Shared JSP fragments such as header.jspf
auth/ Login and permission pages
books/ Book list, form, detail, category pages
readers/ Reader list, form, detail pages
borrowing/ Borrow, return, renew, overdue pages
statistics/ Search, ranking, inventory, overdue reports
maintenance/ System logs, backup, exception trace pages
static/
css/ Page and shared styles
js/ Small page scripts when needed
images/ Designed/generated UI images and static assets
```
JSPs that should not be accessed directly belong under `WEB-INF/jsp/` and are
rendered through Servlet forwards. Public static files belong under `static/`.
---
## Page Naming
Use module-oriented JSP names such as `books/list.jsp`, `books/form.jsp`,
`readers/detail.jsp`, `borrowing/overdue.jsp`, and
`maintenance/system-logs.jsp`. Match Servlet dispatch paths once controllers
exist.
---
## Static Assets
Keep CSS in `static/css/`, small browser scripts in `static/js/`, and
image-first design references or exported assets in `static/images/` once the
application tree exists. Do not create React/Vue component directories or SPA
asset conventions unless the stack changes.
+31
View File
@@ -0,0 +1,31 @@
# Hook Guidelines
> Hook conventions for this frontend.
---
## Decision
There are no React, Vue, or hook-based frontend conventions in this project.
The established frontend approach is JSP/CSS rendered by Servlets on Tomcat.
Do not introduce React hooks, Vue composables, SPA lifecycle hooks, client-side
state hooks, or hook-style data fetching unless the developer explicitly changes
the stack later.
---
## JSP Alternative
For reusable presentation behavior, use JSP includes/fragments, tag files if
introduced, request attributes, session attributes for authenticated identity,
and small page scripts only when server-rendered JSP cannot handle the
interaction cleanly.
---
## Future Updates
If a JavaScript framework is explicitly introduced later, replace this file
with concrete conventions from the new source code and update the frontend
index status at the same time.
+57
View File
@@ -0,0 +1,57 @@
# Frontend Development Guidelines
> Frontend conventions for the JSP/CSS presentation layer.
---
## Overview
The developer has established a JSP-based presentation layer served by a
Servlet/Tomcat application. Frontend work should use JSP, JSP includes or
fragments, CSS, static images, and small page scripts when needed. Do not assume
React, Vue, frontend hooks, SPA state libraries, or TypeScript conventions
unless the developer explicitly introduces them later.
The frontend workflow is image-first: UI should be designed or generated as
images first, then JSP/CSS pages should restore the design faithfully.
---
## Guidelines Index
| Guide | Description | Status |
|-------|-------------|--------|
| [Directory Structure](./directory-structure.md) | JSP pages, includes, and static assets | Project decision documented |
| [Component Guidelines](./component-guidelines.md) | JSP fragments, forms, tables, and reusable UI | Project decision documented |
| [Hook Guidelines](./hook-guidelines.md) | No React/Vue hook conventions unless introduced later | Project decision documented |
| [State Management](./state-management.md) | Server-rendered request/session/form state | Project decision documented |
| [Quality Guidelines](./quality-guidelines.md) | Image-to-JSP restoration and UI checks | Project decision documented |
| [Type Safety](./type-safety.md) | JSP/Servlet validation and JavaBean display contracts | Project decision documented |
---
## Pre-Development Checklist
Before frontend implementation, read:
- `.trellis/spec/frontend/directory-structure.md`
- `.trellis/spec/frontend/component-guidelines.md`
- `.trellis/spec/frontend/hook-guidelines.md`
- `.trellis/spec/frontend/state-management.md`
- `.trellis/spec/frontend/type-safety.md`
- `.trellis/spec/frontend/quality-guidelines.md`
- `.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`
---
## Evidence
- `.trellis/tasks/00-bootstrap-guidelines/research/repo-scan.md` records that
no application source code exists yet.
- `.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`
records the developer-provided JSP presentation approach and image-to-JSP
workflow.
---
**Language**: All documentation should be written in **English**.
@@ -0,0 +1,60 @@
# Quality Guidelines
> Frontend quality constraints for JSP/CSS pages.
---
## Overview
Frontend work should implement JSP/CSS pages that match the approved
image-first design and preserve the Servlet/JSP layered architecture.
---
## Image-To-JSP Workflow
1. Design or generate the UI as images first.
2. Use the approved image as the visual reference for JSP/CSS implementation.
3. Restore layout, spacing, typography, color, table density, and form states
faithfully in JSP/CSS.
4. Preserve the image assets or references in the application assets area once
the source tree exists.
5. Compare the implemented page against the source image before considering UI
work complete.
---
## Required Patterns
- JSP pages focus on display and user interaction.
- Forms submit to Servlet controllers and render server-provided validation
messages.
- Tables and reports support scanning for books, readers, borrowing records,
rankings, inventory, overdue data, and system logs.
- Navigation should reflect role permissions for administrator, librarian, and
reader users.
- Keep CSS and small scripts in static assets rather than inline unless there
  is a page-specific reason.
---
## Forbidden Patterns
- Do not introduce React, Vue, SPA routing, hook/state conventions, or
TypeScript tooling without an explicit stack change.
- Do not implement UI only from text descriptions when an approved image
reference exists.
- Do not put SQL, DAO calls, or business workflows in JSP pages.
- Do not rely only on browser validation for protected workflows.
---
## Review Checklist
- Does the JSP/CSS page visibly match the approved image design?
- Are forms, tables, empty states, errors, and permission-specific navigation
handled?
- Are JSPs rendered through Servlet controllers where access control or page
data is required?
- Are accessibility basics preserved with labels, headings, focus order, and
readable contrast?
@@ -0,0 +1,42 @@
# State Management
> State conventions for server-rendered JSP pages.
---
## Overview
This project does not use React/Vue state libraries or SPA stores. State should
flow through the Servlet/JSP request cycle unless a future explicit decision
changes the frontend architecture.
---
## State Categories
- Request attributes: page data, validation errors, form echoes, table rows,
reports, and short result messages for a single render.
- Session attributes: authenticated user identity, role, permission summary,
and short-lived flash-style messages when redirects are used.
- Database state: books, categories, readers, borrowing records,
administrators, permissions, and logs stored in MySQL through services/DAOs.
- Form state: submitted by browser forms to Servlets and re-rendered from
validated request attributes on failure.
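A minimal sketch of the request/session split above, assuming illustrative
service, entity, and attribute names:
```java
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Illustrative only: request scope carries one render's data, the session
// carries the authenticated identity set at login.
public class BookListServlet extends HttpServlet {
    private final BookService bookService = new BookServiceImpl(new BookDaoImpl());

    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        // Request attribute: discarded after this render.
        req.setAttribute("books", bookService.listBooks());
        // Session attribute: survives across requests for this user.
        Object user = req.getSession().getAttribute("authenticatedUser");
        req.setAttribute("currentUser", user);
        req.getRequestDispatcher("/WEB-INF/jsp/books/list.jsp").forward(req, resp);
    }
}
```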
---
## Rules
- Keep business state in MySQL, not hidden fields or long-lived browser state.
- Keep permission decisions server-side.
- Use redirects after successful mutations such as create/update/delete,
borrow, return, renew, and permission changes.
- Do not add Redux, Pinia, client caches, or SPA routing state unless the
developer explicitly introduces that stack later.
---
## Page Scripts
Small JavaScript can improve interaction, such as confirm dialogs or local form
helpers, but server-side validation and service-layer rules remain mandatory.
+42
View File
@@ -0,0 +1,42 @@
# Type Safety
> Display and validation contracts for JSP/Servlet frontend work.
---
## Overview
The frontend is JSP/CSS, not TypeScript. Type safety comes from Java entities,
Servlet parameter parsing, service contracts, DAO results, and careful JSP
rendering.
---
## JSP Data Contracts
- Servlets should set clearly named request attributes before forwarding to
JSPs, such as `books`, `reader`, `borrowRecords`, `errors`, and `message`.
- JSP pages should render JavaBean/entity properties through JSP EL or JSTL
rather than embedding business Java code.
- Keep attribute names stable between Servlet controllers and JSP pages; update
both sides together when a contract changes.
---
## Validation
- Parse and validate IDs, dates, enum/status values, and required strings in
Servlet controllers before calling services.
- Services should re-check business rules such as borrowing eligibility,
inventory availability, overdue status, and permission access.
- JSP pages should display validation messages but should not be the only place
validation occurs.
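A minimal sketch of controller-side ID parsing, assuming a hypothetical
`RequestParams` helper; services still re-check business rules afterward:
```java
import javax.servlet.http.HttpServletRequest;

// Hypothetical parsing helper; the project may choose a different home for it.
public final class RequestParams {
    private RequestParams() {
    }

    /** Returns the parsed positive ID, or null when missing or malformed. */
    public static Long positiveId(HttpServletRequest req, String name) {
        String raw = req.getParameter(name);
        if (raw == null || raw.trim().isEmpty()) {
            return null;
        }
        try {
            long id = Long.parseLong(raw.trim());
            return id > 0 ? id : null;
        } catch (NumberFormatException e) {
            return null;
        }
    }
}
```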
---
## Forbidden Patterns
- Do not claim TypeScript, generated API types, React prop types, or Vue type
conventions exist unless the stack changes.
- Do not use JSP scriptlets for business logic.
- Do not let JSP pages depend directly on database schema details.
@@ -0,0 +1,105 @@
# Code Reuse Thinking Guide
> **Purpose**: Stop and think before creating new code - does it already exist?
---
## The Problem
**Duplicated code is the #1 source of inconsistency bugs.**
When you copy-paste or rewrite existing logic:
- Bug fixes don't propagate
- Behavior diverges over time
- Codebase becomes harder to understand
---
## Before Writing New Code
### Step 1: Search First
```bash
# Search for similar function names
grep -r "functionName" .
# Search for similar logic
grep -r "keyword" .
```
### Step 2: Ask These Questions
| Question | If Yes... |
|----------|-----------|
| Does a similar function exist? | Use or extend it |
| Is this pattern used elsewhere? | Follow the existing pattern |
| Could this be a shared utility? | Create it in the right place |
| Am I copying code from another file? | **STOP** - extract to shared |
---
## Common Duplication Patterns
### Pattern 1: Copy-Paste Functions
**Bad**: Copying a validation function to another file
**Good**: Extract to shared utilities, import where needed
### Pattern 2: Similar Components
**Bad**: Creating a new component that's 80% similar to existing
**Good**: Extend existing component with props/variants
### Pattern 3: Repeated Constants
**Bad**: Defining the same constant in multiple files
**Good**: Single source of truth, import everywhere
---
## When to Abstract
**Abstract when**:
- Same code appears 3+ times
- Logic is complex enough to have bugs
- Multiple people might need this
**Don't abstract when**:
- Only used once
- Trivial one-liner
- Abstraction would be more complex than duplication
---
## After Batch Modifications
When you've made similar changes to multiple files:
1. **Review**: Did you catch all instances?
2. **Search**: Run grep to find any missed
3. **Consider**: Should this be abstracted?
---
## Gotcha: Asymmetric Mechanisms Producing Same Output
**Problem**: When two different mechanisms must produce the same file set (e.g., recursive directory copy for init vs. manual `files.set()` for update), structural changes (renaming, moving, adding subdirectories) only propagate through the automatic mechanism. The manual one silently drifts.
**Symptom**: Init works perfectly, but update creates files at wrong paths or misses files entirely.
**Prevention checklist**:
- [ ] When migrating directory structures, search for ALL code paths that reference the old structure
- [ ] If one path is auto-derived (glob/copy) and another is manually listed, the manual one needs updating
- [ ] Add a regression test that compares outputs from both mechanisms
---
## Checklist Before Commit
- [ ] Searched for existing similar code
- [ ] No copy-pasted logic that should be shared
- [ ] Constants defined in one place
- [ ] Similar patterns follow same structure
@@ -0,0 +1,94 @@
# Cross-Layer Thinking Guide
> **Purpose**: Think through data flow across layers before implementing.
---
## The Problem
**Most bugs happen at layer boundaries**, not within layers.
Common cross-layer bugs:
- API returns format A, frontend expects format B
- Database stores X, service transforms to Y, but loses data
- Multiple layers implement the same logic differently
---
## Before Implementing Cross-Layer Features
### Step 1: Map the Data Flow
Draw out how data moves:
```
Source → Transform → Store → Retrieve → Transform → Display
```
For each arrow, ask:
- What format is the data in?
- What could go wrong?
- Who is responsible for validation?
### Step 2: Identify Boundaries
| Boundary | Common Issues |
|----------|---------------|
| API ↔ Service | Type mismatches, missing fields |
| Service ↔ Database | Format conversions, null handling |
| Backend ↔ Frontend | Serialization, date formats |
| Component ↔ Component | Props shape changes |
### Step 3: Define Contracts
For each boundary:
- What is the exact input format?
- What is the exact output format?
- What errors can occur?
---
## Common Cross-Layer Mistakes
### Mistake 1: Implicit Format Assumptions
**Bad**: Assuming date format without checking
**Good**: Explicit format conversion at boundaries
### Mistake 2: Scattered Validation
**Bad**: Validating the same thing in multiple layers
**Good**: Validate once at the entry point
### Mistake 3: Leaky Abstractions
**Bad**: Component knows about database schema
**Good**: Each layer only knows its neighbors
---
## Checklist for Cross-Layer Features
Before implementation:
- [ ] Mapped the complete data flow
- [ ] Identified all layer boundaries
- [ ] Defined format at each boundary
- [ ] Decided where validation happens
After implementation:
- [ ] Tested with edge cases (null, empty, invalid)
- [ ] Verified error handling at each boundary
- [ ] Checked data survives round-trip
---
## When to Create Flow Documentation
Create detailed flow docs when:
- Feature spans 3+ layers
- Multiple teams are involved
- Data format is complex
- Feature has caused bugs before
+79
View File
@@ -0,0 +1,79 @@
# Thinking Guides
> **Purpose**: Expand your thinking to catch things you might not have considered.
---
## Why Thinking Guides?
**Most bugs and tech debt come from "didn't think of that"**, not from lack of skill:
- Didn't think about what happens at layer boundaries → cross-layer bugs
- Didn't think about code patterns repeating → duplicated code everywhere
- Didn't think about edge cases → runtime errors
- Didn't think about future maintainers → unreadable code
These guides help you **ask the right questions before coding**.
---
## Available Guides
| Guide | Purpose | When to Use |
|-------|---------|-------------|
| [Code Reuse Thinking Guide](./code-reuse-thinking-guide.md) | Identify patterns and reduce duplication | When you notice repeated patterns |
| [Cross-Layer Thinking Guide](./cross-layer-thinking-guide.md) | Think through data flow across layers | Features spanning multiple layers |
---
## Quick Reference: Thinking Triggers
### When to Think About Cross-Layer Issues
- [ ] Feature touches 3+ layers (API, Service, Component, Database)
- [ ] Data format changes between layers
- [ ] Multiple consumers need the same data
- [ ] You're not sure where to put some logic
→ Read [Cross-Layer Thinking Guide](./cross-layer-thinking-guide.md)
### When to Think About Code Reuse
- [ ] You're writing similar code to something that exists
- [ ] You see the same pattern repeated 3+ times
- [ ] You're adding a new field to multiple places
- [ ] **You're modifying any constant or config**
- [ ] **You're creating a new utility/helper function** ← Search first!
→ Read [Code Reuse Thinking Guide](./code-reuse-thinking-guide.md)
---
## Pre-Modification Rule (CRITICAL)
> **Before changing ANY value, ALWAYS search first!**
```bash
# Search for the value you're about to change
grep -r "value_to_change" .
```
This single habit prevents most "forgot to update X" bugs.
---
## How to Use This Directory
1. **Before coding**: Skim the relevant thinking guide
2. **During coding**: If something feels repetitive or complex, check the guides
3. **After bugs**: Add new insights to the relevant guide (learn from mistakes)
---
## Contributing
Found a new "didn't think of that" moment? Add it to the relevant guide.
---
**Core Principle**: 30 minutes of thinking saves 3 hours of debugging.
@@ -0,0 +1,13 @@
{"file": ".trellis/spec/backend/index.md", "reason": "Check implementation against backend architecture and expected core modules."}
{"file": ".trellis/spec/backend/directory-structure.md", "reason": "Verify backend file placement and layering."}
{"file": ".trellis/spec/backend/database-guidelines.md", "reason": "Verify schema/DAO choices for login and roles."}
{"file": ".trellis/spec/backend/error-handling.md", "reason": "Verify validation, authentication failure, and unauthorized access handling."}
{"file": ".trellis/spec/backend/logging-guidelines.md", "reason": "Verify logging/error tracing expectations where authentication code warrants it."}
{"file": ".trellis/spec/backend/quality-guidelines.md", "reason": "Verify backend quality and layer boundary constraints."}
{"file": ".trellis/spec/frontend/index.md", "reason": "Check JSP/CSS frontend stack alignment."}
{"file": ".trellis/spec/frontend/directory-structure.md", "reason": "Verify JSP/static asset placement."}
{"file": ".trellis/spec/frontend/component-guidelines.md", "reason": "Verify JSP form and fragment conventions."}
{"file": ".trellis/spec/frontend/hook-guidelines.md", "reason": "Ensure SPA/hook conventions were not introduced."}
{"file": ".trellis/spec/frontend/state-management.md", "reason": "Verify request/session/form-state handling."}
{"file": ".trellis/spec/frontend/type-safety.md", "reason": "Verify display contracts and validation boundaries."}
{"file": ".trellis/spec/frontend/quality-guidelines.md", "reason": "Verify frontend quality expectations."}
@@ -0,0 +1,13 @@
{"file": ".trellis/spec/backend/index.md", "reason": "Backend architecture overview and pre-development checklist for the JSP + Servlet + MySQL system."}
{"file": ".trellis/spec/backend/directory-structure.md", "reason": "Backend source layout conventions for Servlet, service, DAO, entity, and config files."}
{"file": ".trellis/spec/backend/database-guidelines.md", "reason": "MySQL schema, DAO CRUD, keys, and transaction guidance for login and role persistence."}
{"file": ".trellis/spec/backend/error-handling.md", "reason": "Validation, authentication failure, service failure, and safe Servlet response handling."}
{"file": ".trellis/spec/backend/logging-guidelines.md", "reason": "Guidance for operation logs and exception tracing in authentication-related code."}
{"file": ".trellis/spec/backend/quality-guidelines.md", "reason": "Layer boundary and backend quality constraints for implementation."}
{"file": ".trellis/spec/frontend/index.md", "reason": "Frontend overview for JSP/CSS presentation served by Servlet/Tomcat."}
{"file": ".trellis/spec/frontend/directory-structure.md", "reason": "JSP, include, static CSS/JS/image placement conventions."}
{"file": ".trellis/spec/frontend/component-guidelines.md", "reason": "JSP fragment, form, table, and reusable UI conventions for login views."}
{"file": ".trellis/spec/frontend/hook-guidelines.md", "reason": "Constraint that React/Vue hooks and SPA conventions should not be introduced."}
{"file": ".trellis/spec/frontend/state-management.md", "reason": "Server-rendered request/session/form state conventions for login flow."}
{"file": ".trellis/spec/frontend/type-safety.md", "reason": "JSP/Servlet validation and JavaBean display contract guidance."}
{"file": ".trellis/spec/frontend/quality-guidelines.md", "reason": "JSP/CSS quality and UI verification guidance."}
+70
View File
@@ -0,0 +1,70 @@
# brainstorm: start coding
## Goal
Build the initial Java Web project scaffold and a login/permission skeleton for this JSP + Servlet + MySQL library-management system.
## What I already know
* The user asked to start writing code.
* There is no active task before this one.
* The repository currently has Trellis project guidance but no application source code files.
* Existing project specs describe a B/S Java web application using JSP, Servlet, MySQL, Tomcat, and IDEA.
* Backend guidance expects a layered structure: JSP/CSS presentation -> Servlet controller -> Service/business -> DAO -> MySQL.
* Frontend guidance expects JSP/CSS pages, JSP includes or fragments, static assets, and small page scripts when needed.
* The user selected option A: build the Java Web foundation and login/permission skeleton first.
## Assumptions (temporary)
* The desired work is to start building the library-management system rather than editing an existing application.
* Maven is a reasonable default for Java web project scaffolding unless the user prefers plain IDEA/Tomcat project files.
* The first implementation should establish a compilable structure and a minimal login flow rather than complete every business module.
## Open Questions
* None currently blocking.
## Requirements (evolving)
* Follow the existing Trellis backend and frontend specs.
* Keep backend and frontend conventions aligned with JSP + Servlet + MySQL, not SPA frameworks.
* Create or update tests/checks where practical for the selected implementation scope.
* Create the base Java Web project structure for Tomcat deployment.
* Add a login/permission skeleton covering administrator, librarian, and reader roles.
* Add model/entity, DAO, service, servlet/controller, JSP, and static asset structure needed for the login slice.
* Include database initialization or schema notes for user/role data needed by the skeleton.
* Keep business logic layered: Servlet parameter/session handling, Service authentication and role checks, DAO persistence access.
* Include safe error handling for invalid credentials, missing parameters, and unauthorized access.
## Acceptance Criteria (evolving)
* [x] The selected first module or feature is explicitly confirmed.
* [ ] The implementation follows the documented JSP + Servlet + MySQL layered architecture.
* [ ] A fresh checkout has recognizable Java Web/Tomcat project structure and build configuration.
* [ ] Login page submits credentials to a Servlet controller and displays validation/authentication failures safely.
* [ ] Authentication logic is represented through service and DAO boundaries rather than embedded in JSP.
* [ ] Session state stores the authenticated user and role in a controlled way.
* [ ] Basic role/permission constants or helpers exist for administrator, librarian, and reader.
* [ ] SQL/schema guidance exists for the minimal account/role tables needed by login.
* [ ] Lint, type-check, compile, or equivalent project validation is run where available.
## Definition of Done (team quality bar)
* Tests added/updated where appropriate.
* Lint / typecheck / compile / CI-equivalent checks are green where available.
* Docs/notes updated if behavior changes.
* Rollout/rollback considered if risky.
## Out of Scope (explicit)
* Choosing React, Vue, TypeScript, SPA routing, or frontend hook/state-library conventions unless the user explicitly changes the stack.
* Implementing full book, reader, borrowing, statistics, backup, or logging modules beyond placeholders needed for navigation or role skeleton.
* Implementing production-grade password reset, remember-me, MFA, or external identity provider integration.
## Technical Notes
* Relevant spec indexes: `.trellis/spec/backend/index.md`, `.trellis/spec/frontend/index.md`.
* Backend pre-development checklist includes directory structure, database, error handling, logging, and quality guidelines.
* Frontend pre-development checklist includes directory structure, JSP component guidelines, state management, type safety, and quality guidelines.
* Codebase retrieval on 2026-04-27 found no application source code and surfaced the project specs as the main implementation context.
* Spec indexes reference `.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`, but that file is absent in the current workspace, so context curation uses only present spec files.
@@ -0,0 +1,26 @@
{
"id": "start-coding",
"name": "start-coding",
"title": "brainstorm: start coding",
"description": "",
"status": "in_progress",
"dev_type": null,
"scope": null,
"package": null,
"priority": "P2",
"creator": "Zzzz",
"assignee": "Zzzz",
"createdAt": "2026-04-27",
"completedAt": null,
"branch": null,
"base_branch": "main",
"worktree_path": null,
"commit": null,
"pr_url": null,
"subtasks": [],
"children": [],
"parent": null,
"relatedFiles": [],
"notes": "",
"meta": {}
}
@@ -0,0 +1,5 @@
{"file": ".trellis/spec/guides/index.md", "reason": "Shared Trellis thinking guide index for reviewing bootstrap spec quality"}
{"file": ".trellis/spec/guides/code-reuse-thinking-guide.md", "reason": "Shared guidance for checking that specs avoid duplicated or invented conventions"}
{"file": ".trellis/spec/guides/cross-layer-thinking-guide.md", "reason": "Shared guidance for reviewing future backend/frontend boundary notes"}
{"file": ".trellis/tasks/00-bootstrap-guidelines/research/repo-scan.md", "reason": "Repository scan findings to verify specs reflect the actual empty app workspace"}
{"file": ".trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md", "reason": "Developer-provided architecture and module decisions to verify against backend/frontend specs"}
@@ -0,0 +1,5 @@
{"file": ".trellis/spec/guides/index.md", "reason": "Shared Trellis thinking guide index for documenting current reality without inventing conventions"}
{"file": ".trellis/spec/guides/code-reuse-thinking-guide.md", "reason": "Shared guidance for avoiding duplicated or invented conventions while bootstrapping specs"}
{"file": ".trellis/spec/guides/cross-layer-thinking-guide.md", "reason": "Shared guidance for future backend/frontend cross-layer decisions"}
{"file": ".trellis/tasks/00-bootstrap-guidelines/research/repo-scan.md", "reason": "Repository scan findings showing no application source conventions currently exist"}
{"file": ".trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md", "reason": "Developer-provided JSP/Servlet/MySQL/Tomcat architecture, modules, and frontend workflow decisions"}
@@ -0,0 +1,140 @@
# Bootstrap Task: Fill Project Development Guidelines
**You (the AI) are running this task. The developer does not read this file.**
The developer just ran `trellis init` on this project for the first time.
`.trellis/` now exists with empty spec scaffolding, and this task has been
set as their current task. They'll open their AI tool, run `/trellis:continue`,
and you'll land here.
**Your job**: help them populate `.trellis/spec/` with the team's real
coding conventions. Every future AI session — this project's
`trellis-implement` and `trellis-check` sub-agents — auto-loads spec files
listed in per-task jsonl manifests. Empty spec = sub-agents write generic
code. Real spec = sub-agents match the team's actual patterns.
Don't dump instructions. Open with a short greeting, figure out if the repo
has any existing convention docs (CLAUDE.md, .cursorrules, etc.), and drive
the rest conversationally.
---
## Status (update the checkboxes as you complete each item)
- [x] Record developer-provided JSP + Servlet + MySQL + Tomcat project decisions
- [x] Fill backend guidelines with the layered B/S architecture, DAO, MySQL, service, controller, logging, and permission conventions
- [x] Fill frontend guidelines with JSP/CSS presentation conventions and the image-to-JSP restoration workflow
- [x] Preserve the note that no application source code exists yet; examples are illustrative conventions until real files are introduced
---
## Spec files to populate
### Backend guidelines
| File | What to document |
|------|------------------|
| `.trellis/spec/backend/directory-structure.md` | JSP/Servlet Java web app layering: controller, service, DAO, entity, filter, util |
| `.trellis/spec/backend/database-guidelines.md` | MySQL core tables, DAO CRUD, keys, query patterns, integrity constraints |
| `.trellis/spec/backend/error-handling.md` | Servlet validation, service failures, DAO/database exceptions, safe user results |
| `.trellis/spec/backend/logging-guidelines.md` | System logs, key operations, backup events, exception tracing |
| `.trellis/spec/backend/quality-guidelines.md` | Layer boundaries, permission checks, validation, documentation-only checks |
### Frontend guidelines
| File | What to document |
|------|------------------|
| `.trellis/spec/frontend/directory-structure.md` | JSP pages, WEB-INF organization, shared fragments, static CSS/JS/images |
| `.trellis/spec/frontend/component-guidelines.md` | JSP includes/fragments, forms, tables, reports, shared CSS conventions |
| `.trellis/spec/frontend/hook-guidelines.md` | Explicit absence of React/Vue hook conventions unless introduced later |
| `.trellis/spec/frontend/state-management.md` | Servlet/JSP request, session, database, and form state |
| `.trellis/spec/frontend/type-safety.md` | JSP/Servlet data contracts, JavaBean display, server-side validation |
| `.trellis/spec/frontend/quality-guidelines.md` | Image-first design and faithful JSP/CSS restoration workflow |
### Thinking guides (already populated)
`.trellis/spec/guides/` contains general thinking guides pre-filled with
best practices. Customize only if something clearly doesn't fit this project.
---
## How to fill the spec
### Step 1: Import from existing convention files first (preferred)
Search the repo for existing convention docs. If any exist, read them and
extract the relevant rules into the matching `.trellis/spec/` files —
usually much faster than documenting from scratch.
| File / Directory | Tool |
|------|------|
| `CLAUDE.md` / `CLAUDE.local.md` | Claude Code |
| `AGENTS.md` | Codex / Claude Code / agent-compatible tools |
| `.cursorrules` | Cursor |
| `.cursor/rules/*.mdc` | Cursor (rules directory) |
| `.windsurfrules` | Windsurf |
| `.clinerules` | Cline |
| `.roomodes` | Roo Code |
| `.github/copilot-instructions.md` | GitHub Copilot |
| `.vscode/settings.json` (`github.copilot.chat.codeGeneration.instructions`) | VS Code Copilot |
| `CONVENTIONS.md` / `.aider.conf.yml` | aider |
| `CONTRIBUTING.md` | General project conventions |
| `.editorconfig` | Editor formatting rules |
### Step 2: Analyze the codebase for anything not covered by existing docs
Scan real code to discover patterns. Before writing each spec file:
- Find 2-3 real examples of each pattern in the codebase.
- Reference real file paths (not hypothetical ones).
- Document anti-patterns the team clearly avoids.
### Step 3: Document reality, not ideals
**Critical**: write what the code *actually does*, not what it should do.
Sub-agents match the spec, so aspirational patterns that don't exist in the
codebase will cause sub-agents to write code that looks out of place.
If the team has known tech debt, document the current state — improvement
is a separate conversation, not a bootstrap concern.
---
## Quick explainer of the runtime (share when they ask "why do we need spec at all")
- Every AI coding task spawns two sub-agents: `trellis-implement` (writes
code) and `trellis-check` (verifies quality).
- Each task has `implement.jsonl` / `check.jsonl` manifests listing which
spec files to load.
- The platform hook auto-injects those spec files + the task's `prd.md`
into every sub-agent prompt, so the sub-agent codes/reviews per team
conventions without anyone pasting them manually.
- Source of truth: `.trellis/spec/`. That's why filling it well now pays
off forever.
---
## Completion
When the developer confirms the checklist items above are done with real
examples (not placeholders), guide them to run:
```bash
python3 ./.trellis/scripts/task.py finish
python3 ./.trellis/scripts/task.py archive 00-bootstrap-guidelines
```
After archive, every new developer who joins this project will get a
`00-join-<slug>` onboarding task instead of this bootstrap task.
---
## Suggested opening line
"Welcome to Trellis! Your init just set me up to help you fill the project
spec — a one-time setup so every future AI session follows the team's
conventions instead of writing generic code. Before we start, do you have
any existing convention docs (CLAUDE.md, .cursorrules, CONTRIBUTING.md,
etc.) I can pull from, or should I scan the codebase from scratch?"
@@ -0,0 +1,73 @@
# Project Requirements From Developer
## Source
Captured on 2026-04-27 during the Trellis bootstrap-guidelines task from
developer-provided project facts.
## Architecture And Stack
- The system uses B/S architecture with a typical layered design.
- The application stack is JSP + Servlet, MySQL, Tomcat, developed in IDEA.
- The layers are presentation, controller, business/service, DAO, and data.
## Data Layer
MySQL is the data layer and stores:
- Book information.
- Book category information.
- Reader information.
- Borrowing records.
- Administrator information.
- Role and permission information.
- System logs.
Core entity tables should include book information, book categories, reader
information, borrowing records, administrators, role permissions, and system
logs. Use primary keys and foreign keys to preserve data integrity.
## DAO Layer
The data access layer uses DAO classes to perform database CRUD. DAOs should own
SQL/database access concerns and should not contain presentation or business
workflow logic.
## Business And Service Layer
The service layer handles:
- Book warehousing/intake.
- Borrowing and returning.
- Inventory status updates.
- Overdue statistics.
- Permission checks.
## Controller Layer
Servlet controllers handle request dispatch, parameter validation, and result
return. Controllers should coordinate request/response flow and call services
for business behavior.
## Presentation Layer
JSP/CSS pages handle page display and user interaction.
The frontend approach is image-first: UI should be designed or generated as
images first, then JSP/CSS pages should restore and implement the image
faithfully.
## Core Modules
- Login and permission management for administrator, librarian, and reader
roles.
- Book information management for create, update, delete, category
maintenance, and inventory status.
- Reader information management for profiles, borrowing eligibility, and
contact information.
- Borrowing and return management for borrow, return, renew, overdue handling,
and automatic collection status updates.
- Book search and statistics for combined search by title, author, category,
and ID, plus borrowing rankings, inventory reports, and overdue reports.
- System maintenance and logs for key operation logs, data backup support, and
exception tracing.
@@ -0,0 +1,42 @@
# Repository Scan
## Scope
Scanned the workspace on 2026-04-27 while bootstrapping Trellis project guidelines.
## Findings
- The workspace root is `/mnt/d/document/mzh`.
- `git status --short --branch` from the workspace root fails with `fatal: not a git repository`; no git metadata is available at this level.
- The only root-level convention file discovered by the bootstrap search is `AGENTS.md`.
- `AGENTS.md` contains Trellis-managed instructions only. It does not describe backend or frontend implementation conventions.
- No app package manifests were found at the workspace root, including `package.json`, `pyproject.toml`, `go.mod`, `Cargo.toml`, or common lockfiles.
- No application source files were found outside Trellis/Codex scaffolding at shallow workspace depth. The current files are Trellis workflow/config/scripts/specs/tasks and Codex agent configuration.
- At initial scan time, existing `.trellis/spec/backend/*.md` and
`.trellis/spec/frontend/*.md` files were unpopulated scaffold templates; they
have since been populated during this bootstrap task.
## Later Developer Decision
The scan above remains accurate as a record of files on disk, but it no longer
implies the project stack is undecided. The developer later provided explicit
project requirements in
`.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`.
Future agents should combine both facts: application source code does not exist
yet, and the intended stack is JSP + Servlet + MySQL + Tomcat with a layered
B/S architecture.
## Source-Code Evidence Boundary
Because there is no backend or frontend application code in this workspace yet,
the bootstrap specs should distinguish source-code evidence from
developer-provided decisions:
- Backend and frontend conventions are not evidenced by source files yet.
- The project stack and intended architecture are established by the developer:
B/S layered JSP + Servlet + MySQL + Tomcat development in IDEA.
- Future agents should follow the populated `.trellis/spec/` files and
`.trellis/tasks/00-bootstrap-guidelines/research/project-requirements.md`
for stack, module, table, and workflow decisions.
- When new backend/frontend code is introduced, agents should update the
matching `.trellis/spec/` files with concrete examples from that code.
@@ -0,0 +1,29 @@
{
"id": "00-bootstrap-guidelines",
"name": "00-bootstrap-guidelines",
"title": "Bootstrap Guidelines",
"description": "Fill in project development guidelines for AI agents",
"status": "completed",
"dev_type": "docs",
"scope": null,
"package": null,
"priority": "P1",
"creator": "Zzzz",
"assignee": "Zzzz",
"createdAt": "2026-04-27",
"completedAt": "2026-04-27",
"branch": null,
"base_branch": null,
"worktree_path": null,
"commit": null,
"pr_url": null,
"subtasks": [],
"children": [],
"parent": null,
"relatedFiles": [
".trellis/spec/backend/",
".trellis/spec/frontend/"
],
"notes": "First-time setup task created by trellis init (fullstack project)",
"meta": {}
}
+480
View File
@@ -0,0 +1,480 @@
# Development Workflow
---
## Core Principles
1. **Plan before code** — figure out what to do before you start
2. **Specs injected, not remembered** — guidelines are injected via hook/skill, not recalled from memory
3. **Persist everything** — research, decisions, and lessons all go to files; conversations get compacted, files don't
4. **Incremental development** — one task at a time
5. **Capture learnings** — after each task, review and write new knowledge back to spec
---
## Trellis System
### Developer Identity
On first use, initialize your identity:
```bash
python3 ./.trellis/scripts/init_developer.py <your-name>
```
Creates `.trellis/.developer` (gitignored) + `.trellis/workspace/<your-name>/`.
### Spec System
`.trellis/spec/` holds coding guidelines organized by package and layer.
- `.trellis/spec/<package>/<layer>/index.md` — entry point with **Pre-Development Checklist** + **Quality Check**. Actual guidelines live in the `.md` files it points to.
- `.trellis/spec/guides/index.md` — cross-package thinking guides.
```bash
python3 ./.trellis/scripts/get_context.py --mode packages # list packages / layers
```
**When to update spec**: new pattern/convention found · bug-fix prevention to codify · new technical decision.
### Task System
Every task has its own directory under `.trellis/tasks/{MM-DD-name}/` holding `prd.md`, `implement.jsonl`, `check.jsonl`, `task.json`, optional `research/`, `info.md`.
```bash
# Task lifecycle
python3 ./.trellis/scripts/task.py create "<title>" [--slug <name>] [--parent <dir>]
python3 ./.trellis/scripts/task.py start <name> # set as current (writes .current-task, triggers after_start hooks)
python3 ./.trellis/scripts/task.py finish # clear current task (triggers after_finish hooks)
python3 ./.trellis/scripts/task.py archive <name> # move to archive/{year-month}/
python3 ./.trellis/scripts/task.py list [--mine] [--status <s>]
python3 ./.trellis/scripts/task.py list-archive
# Code-spec context (injected into implement/check agents via JSONL).
# `implement.jsonl` / `check.jsonl` are seeded on `task create` for sub-agent-capable
# platforms; the AI curates real spec + research entries during Phase 1.3.
python3 ./.trellis/scripts/task.py add-context <name> <action> <file> <reason>
python3 ./.trellis/scripts/task.py list-context <name> [action]
python3 ./.trellis/scripts/task.py validate <name>
# Task metadata
python3 ./.trellis/scripts/task.py set-branch <name> <branch>
python3 ./.trellis/scripts/task.py set-base-branch <name> <branch> # PR target
python3 ./.trellis/scripts/task.py set-scope <name> <scope>
# Hierarchy (parent/child)
python3 ./.trellis/scripts/task.py add-subtask <parent> <child>
python3 ./.trellis/scripts/task.py remove-subtask <parent> <child>
# PR creation
python3 ./.trellis/scripts/task.py create-pr [name] [--dry-run]
```
> Run `python3 ./.trellis/scripts/task.py --help` to see the authoritative, up-to-date list.
**Current-task mechanism**: `task.py start` writes the task path into `.trellis/.current-task`. Hook-capable platforms auto-inject this at session start, so the AI knows what you're working on without being told.
### Workspace System
Records every AI session for cross-session tracking under `.trellis/workspace/<developer>/`.
- `journal-N.md` — session log. **Max 2000 lines per file**; a new `journal-(N+1).md` is auto-created when exceeded.
- `index.md` — personal index (total sessions, last active).
```bash
python3 ./.trellis/scripts/add_session.py --title "Title" --commit "hash" --summary "Summary"
```
### Context Script
```bash
python3 ./.trellis/scripts/get_context.py # full session context
python3 ./.trellis/scripts/get_context.py --mode packages # available packages + spec layers
python3 ./.trellis/scripts/get_context.py --mode phase --step <X.Y> # detailed guide for a workflow step
```
---
## Phase Index
```
Phase 1: Plan → figure out what to do (brainstorm + research → prd.md)
Phase 2: Execute → write code and pass quality checks
Phase 3: Finish → distill lessons + wrap-up
```
### Phase 1: Plan
- 1.0 Create task `[required · once]`
- 1.1 Requirement exploration `[required · repeatable]`
- 1.2 Research `[optional · repeatable]`
- 1.3 Configure context `[required · once]` — Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid
- 1.4 Completion criteria
### Phase 2: Execute
- 2.1 Implement `[required · repeatable]`
- 2.2 Quality check `[required · repeatable]`
- 2.3 Rollback `[on demand]`
### Phase 3: Finish
- 3.1 Quality verification `[required · repeatable]`
- 3.2 Debug retrospective `[on demand]`
- 3.3 Spec update `[required · once]`
- 3.4 Wrap-up reminder
### Rules
1. Identify which Phase you're in, then continue from the next step there
2. Run steps in order inside each Phase; `[required]` steps can't be skipped
3. Phases can roll back (e.g., Execute reveals a prd defect → return to Plan to fix, then re-enter Execute)
4. Steps tagged `[once]` are skipped if already done; don't re-run
### Skill Routing
When a user request matches one of these intents, load the corresponding skill (or dispatch the corresponding sub-agent) first — do not skip skills.
[Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
| User intent | Route |
|---|---|
| Wants a new feature / requirement unclear | `trellis-brainstorm` |
| About to write code / start implementing | Dispatch the `trellis-implement` sub-agent per Phase 2.1 |
| Finished writing / want to verify | Dispatch the `trellis-check` sub-agent per Phase 2.2 |
| Stuck / fixed same bug several times | `trellis-break-loop` |
| Spec needs update | `trellis-update-spec` |
**Why `trellis-before-dev` is NOT in this table:** you are not the one writing code — the `trellis-implement` sub-agent is. Sub-agent platforms get spec context via `implement.jsonl` injection / prelude, not via the main thread loading `trellis-before-dev`.
[/Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
[Kilo, Antigravity, Windsurf]
| User intent | Skill |
|---|---|
| Wants a new feature / requirement unclear | `trellis-brainstorm` |
| About to write code / start implementing | `trellis-before-dev` (then implement directly in the main session) |
| Finished writing / want to verify | `trellis-check` |
| Stuck / fixed same bug several times | `trellis-break-loop` |
| Spec needs update | `trellis-update-spec` |
[/Kilo, Antigravity, Windsurf]
### DO NOT skip skills
[Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
| What you're thinking | Why it's wrong |
|---|---|
| "This is simple, I'll just code it in the main thread" | Dispatching `trellis-implement` is the cheap path; skipping it tempts you to write code in the main thread and lose spec context — sub-agents get `implement.jsonl` injected, you don't |
| "I already thought it through in plan mode" | Plan-mode output lives in memory — sub-agents can't see it; must be persisted to prd.md |
| "I already know the spec" | The spec may have been updated since you last read it; the sub-agent gets the fresh copy, you may not |
| "Code first, check later" | `trellis-check` surfaces issues you won't notice yourself; earlier is cheaper |
[/Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
[Kilo, Antigravity, Windsurf]
| What you're thinking | Why it's wrong |
|---|---|
| "This is simple, just code it" | Simple tasks often grow complex; `trellis-before-dev` takes under a minute and loads the spec context you'll need |
| "I already thought it through in plan mode" | Plan-mode output lives in memory — must be persisted to prd.md before code |
| "I already know the spec" | The spec may have been updated since you last read it; read again |
| "Code first, check later" | `trellis-check` surfaces issues you won't notice yourself; earlier is cheaper |
[/Kilo, Antigravity, Windsurf]
### Loading Step Detail
At each step, run this to fetch detailed guidance:
```bash
python3 ./.trellis/scripts/get_context.py --mode phase --step <step>
# e.g. python3 ./.trellis/scripts/get_context.py --mode phase --step 1.1
```
---
## Phase 1: Plan
Goal: figure out what to build, produce a clear requirements doc and the context needed to implement it.
#### 1.0 Create task `[required · once]`
Create the task directory and set it as current:
```bash
python3 ./.trellis/scripts/task.py create "<task title>" --slug <name>
python3 ./.trellis/scripts/task.py start <task-dir>
```
Skip when: `.trellis/.current-task` already points to a task.
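A quick way to check the skip condition, as a minimal sketch (assuming `.trellis/.current-task` holds the current task directory path):
```bash
# Pointer-file semantics are an assumption; adjust if your layout differs.
if [ -s .trellis/.current-task ]; then
  echo "Active task: $(cat .trellis/.current-task)"   # 1.0 can be skipped
else
  echo "No active task; run task.py create"
fi
```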
#### 1.1 Requirement exploration `[required · repeatable]`
Load the `trellis-brainstorm` skill and explore requirements interactively with the user per the skill's guidance.
The brainstorm skill will guide you to:
- Ask one question at a time
- Prefer researching over asking the user
- Prefer offering options over open-ended questions
- Update `prd.md` immediately after each user answer
Return to this step whenever requirements change and revise `prd.md`.
#### 1.2 Research `[optional · repeatable]`
Research can happen at any time during requirement exploration. It isn't limited to local code — you can use any available tool (MCP servers, skills, web search) to look up external information such as third-party library docs, industry practices, and API references.
[Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
Spawn the research sub-agent:
- **Agent type**: `trellis-research`
- **Task description**: Research <specific question>
- **Key requirement**: Research output MUST be persisted to `{TASK_DIR}/research/`
[/Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
[Kilo, Antigravity, Windsurf]
Do the research in the main session directly and write findings into `{TASK_DIR}/research/`.
[/Kilo, Antigravity, Windsurf]
**Research artifact conventions**:
- One file per research topic (e.g. `research/auth-library-comparison.md`)
- Record third-party library usage examples, API references, version constraints in files
- Note relevant spec file paths you discovered for later reference
Brainstorm and research can interleave freely — pause to research a technical question, then return to talk with the user.
**Key principle**: Research output must be written to files, not left only in the chat. Conversations get compacted; files don't.
#### 1.3 Configure context `[required · once]`
[Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
Curate `implement.jsonl` and `check.jsonl` so the Phase 2 sub-agents get the right spec context. These files were seeded on `task create` with a single self-describing `_example` line; your job here is to fill in real entries.
**Location**: `{TASK_DIR}/implement.jsonl` and `{TASK_DIR}/check.jsonl` (already exist).
**Format**: one JSON object per line — `{"file": "<path>", "reason": "<why>"}`. Paths are repo-root relative.
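For illustration, two hypothetical entries (these exact spec paths are assumptions, not files guaranteed to exist in this repo; the `task.py add-context` commands shown below work equally well):
```bash
# Illustrative only: the registered paths are hypothetical examples.
cat >> "$TASK_DIR/implement.jsonl" <<'EOF'
{"file": ".trellis/spec/backend/service/index.md", "reason": "service-layer conventions this task touches"}
{"file": ".trellis/spec/backend/dao/error-handling.md", "reason": "DAO error-handling rules for the new queries"}
EOF
```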
**What to put in**:
- **Spec files** — `.trellis/spec/<package>/<layer>/index.md` and any specific guideline files (`error-handling.md`, `conventions.md`, etc.) relevant to this task
- **Research files** — `{TASK_DIR}/research/*.md` that the sub-agent will need to consult
**What NOT to put in**:
- Code files (`src/**`, `packages/**/*.ts`, etc.) — those are read by the sub-agent during implementation, not pre-registered here
- Files you're about to modify — same reason
**Split between the two files**:
- `implement.jsonl` → specs + research the implement sub-agent needs to write code correctly
- `check.jsonl` → specs for the check sub-agent (quality guidelines, check conventions, same research if needed)
**How to discover relevant specs**:
```bash
python3 ./.trellis/scripts/get_context.py --mode packages
```
Lists every package + its spec layers with paths. Pick the entries that match this task's domain.
**How to append entries**:
Either edit the jsonl file directly in your editor, or use:
```bash
python3 ./.trellis/scripts/task.py add-context "$TASK_DIR" implement "<path>" "<reason>"
python3 ./.trellis/scripts/task.py add-context "$TASK_DIR" check "<path>" "<reason>"
```
Delete the seed `_example` line once real entries exist (optional — it's skipped automatically by consumers).
Skip when: `implement.jsonl` has agent-curated entries (the seed row alone doesn't count).
[/Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
[Kilo, Antigravity, Windsurf]
Skip this step. Context is loaded directly by the `trellis-before-dev` skill in Phase 2.
[/Kilo, Antigravity, Windsurf]
#### 1.4 Completion criteria
| Condition | Required |
|------|:---:|
| `prd.md` exists | ✅ |
| User confirms requirements | ✅ |
| `research/` has artifacts (complex tasks) | recommended |
| `info.md` technical design (complex tasks) | optional |
[Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
| `implement.jsonl` has agent-curated entries (not just the seed row) | ✅ |
[/Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
---
## Phase 2: Execute
Goal: turn the prd into code that passes quality checks.
#### 2.1 Implement `[required · repeatable]`
[Claude Code, Cursor, OpenCode, Gemini, Qoder, CodeBuddy, Copilot, Droid]
Spawn the implement sub-agent:
- **Agent type**: `trellis-implement`
- **Task description**: Implement the requirements per prd.md, consulting materials under `{TASK_DIR}/research/`; finish by running project lint and type-check
The platform hook/plugin auto-handles (a sketch of the idea follows this list):
- Reads `implement.jsonl` and injects the referenced spec files into the agent prompt
- Injects prd.md content
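For intuition only, a minimal sketch of what such an injection step could do; the real hook is platform-specific, and the seed-row shape is an assumption:
```bash
# Sketch: inline each registered spec file into the sub-agent prompt.
while IFS= read -r entry; do
  case "$entry" in *_example*) continue ;; esac   # skip the seed row (shape assumed)
  file=$(printf '%s' "$entry" | python3 -c 'import json,sys; print(json.load(sys.stdin)["file"])')
  printf '=== %s ===\n' "$file"
  cat "$file"
done < "$TASK_DIR/implement.jsonl"
```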
[/Claude Code, Cursor, OpenCode, Gemini, Qoder, CodeBuddy, Copilot, Droid]
[Codex]
Spawn the implement sub-agent:
- **Agent type**: `trellis-implement`
- **Task description**: Implement the requirements per prd.md, consulting materials under `{TASK_DIR}/research/`; finish by running project lint and type-check
The Codex sub-agent definition auto-handles the context load requirement:
- Reads `.trellis/.current-task`, `prd.md`, and `info.md` if present
- Reads `implement.jsonl` and requires the agent to load each referenced spec file before coding
[/Codex]
[Kiro]
Spawn the implement sub-agent:
- **Agent type**: `trellis-implement`
- **Task description**: Implement the requirements per prd.md, consulting materials under `{TASK_DIR}/research/`; finish by running project lint and type-check
The platform prelude auto-handles the context load requirement:
- Reads `implement.jsonl` and injects the referenced spec files into the agent prompt
- Injects prd.md content
[/Kiro]
[Kilo, Antigravity, Windsurf]
1. Load the `trellis-before-dev` skill to read project guidelines
2. Read `{TASK_DIR}/prd.md` for requirements
3. Consult materials under `{TASK_DIR}/research/`
4. Implement the code per requirements
5. Run project lint and type-check
[/Kilo, Antigravity, Windsurf]
#### 2.2 Quality check `[required · repeatable]`
[Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
Spawn the check sub-agent:
- **Agent type**: `trellis-check`
- **Task description**: Review all code changes against spec and prd; fix any findings directly; ensure lint and type-check pass
The check agent's job:
- Review code changes against specs
- Auto-fix issues it finds
- Run lint and type-check to verify
[/Claude Code, Cursor, OpenCode, Codex, Kiro, Gemini, Qoder, CodeBuddy, Copilot, Droid]
[Kilo, Antigravity, Windsurf]
Load the `trellis-check` skill and verify the code per its guidance:
- Spec compliance
- lint / type-check / tests
- Cross-layer consistency (when changes span layers)
If issues are found → fix → re-check, until green.
[/Kilo, Antigravity, Windsurf]
#### 2.3 Rollback `[on demand]`
- `check` reveals a prd defect → return to Phase 1, fix `prd.md`, then redo 2.1
- Implementation went wrong → revert code (one way is sketched below), redo 2.1
- Need more research → research (same as Phase 1.2), write findings into `research/`
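For the revert case, one conservative sketch (it assumes the broken work was never committed; adapt to your own git habits):
```bash
git restore --staged .   # unstage everything
git restore .            # discard edits to tracked files
git clean -nd            # dry-run list of untracked leftovers; re-run with -fd to delete
```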
---
## Phase 3: Finish
Goal: ensure code quality, capture lessons, record the work.
#### 3.1 Quality verification `[required · repeatable]`
Load the `trellis-check` skill and do a final verification:
- Spec compliance
- lint / type-check / tests
- Cross-layer consistency (when changes span layers)
If issues are found → fix → re-check, until green.
#### 3.2 Debug retrospective `[on demand]`
If this task involved repeated debugging (the same issue was fixed multiple times), load the `trellis-break-loop` skill to:
- Classify the root cause
- Explain why earlier fixes failed
- Propose prevention
The goal is to capture debugging lessons so the same class of issue doesn't recur.
#### 3.3 Spec update `[required · once]`
Load the `trellis-update-spec` skill and review whether this task produced new knowledge worth recording:
- Newly discovered patterns or conventions
- Pitfalls you hit
- New technical decisions
Update the docs under `.trellis/spec/` accordingly. Even if the conclusion is "nothing to update", walk through the judgment.
#### 3.4 Wrap-up reminder
After the above, remind the user they can run `/finish-work` to wrap up (archive the task, record the session).
---
## Workflow State Breadcrumbs
<!-- Injected per-turn by UserPromptSubmit hook (inject-workflow-state.py).
Edit the text inside each [workflow-state:STATUS]...[/workflow-state:STATUS]
block to customize per-task-status flow reminders. Users who fork the
Trellis workflow only need to edit this file, not the hook script.
Tag STATUS matches task.json.status. Default statuses: planning /
in_progress / completed. Add custom status blocks as needed (hyphens
and underscores allowed). Hook falls back to built-in defaults when
a status has no tag block. -->
[workflow-state:no_task]
No active task.
Trigger words in the user message that REQUIRE creating a task (non-negotiable, do NOT self-exempt): refactor / rewrite / extract / split out / make standalone / distribute / integrate / make into / productize / publish / build X / design Y.
Task is NOT required ONLY if ALL three hold: (a) zero file writes this turn, (b) answer fits in one reply with no multi-round plan, (c) no research beyond reading 1-2 repo files.
When in doubt: create task. Over-tasking is cheap; under-tasking leaks plans and research into main context.
Flow: load `trellis-brainstorm` skill → it creates the task via `python3 ./.trellis/scripts/task.py create` and drives requirements Q&A. For research-heavy work (tool comparison, docs, cross-platform survey), spawn `trellis-research` sub-agents via Task tool — NEVER do 3+ inline WebFetch/WebSearch/`gh api` calls in the main conversation.
[/workflow-state:no_task]
[workflow-state:planning]
Complete prd.md via trellis-brainstorm skill; then run task.py start.
Research belongs in `{task_dir}/research/*.md`, written by `trellis-research` sub-agents. Do NOT inline WebFetch/WebSearch in main session — PRD only links to research files.
[/workflow-state:planning]
[workflow-state:in_progress]
Flow: trellis-implement → trellis-check → trellis-update-spec → finish
Next required action: inspect conversation history + git status, then execute the next uncompleted step in that sequence.
For agent-capable platforms, do NOT edit code in the main session; dispatch `trellis-implement` for implementation and dispatch `trellis-check` before reporting completion.
[/workflow-state:in_progress]
[workflow-state:completed]
User commits changes; then run task.py archive.
[/workflow-state:completed]
@@ -0,0 +1,41 @@
# Workspace Index - Zzzz
> Journal tracking for AI development sessions.
---
## Current Status
<!-- @@@auto:current-status -->
- **Active File**: `journal-1.md`
- **Total Sessions**: 1
- **Last Active**: 2026-04-27
<!-- @@@/auto:current-status -->
---
## Active Documents
<!-- @@@auto:active-documents -->
| File | Lines | Status |
|------|-------|--------|
| `journal-1.md` | ~110 | Active |
<!-- @@@/auto:active-documents -->
---
## Session History
<!-- @@@auto:session-history -->
| # | Date | Title | Commits | Branch |
|---|------|-------|---------|--------|
| 1 | 2026-04-27 | Bootstrap Guidelines | - | `-` |
<!-- @@@/auto:session-history -->
---
## Notes
- Sessions are appended to journal files
- New journal file created when current exceeds 2000 lines
- Use `add_session.py` to record sessions
@@ -0,0 +1,110 @@
# Journal - Zzzz (Part 1)
> AI development session journal
> Started: 2026-04-27
---
## Session 1: Bootstrap Guidelines
**Date**: 2026-04-27
**Task**: Bootstrap Guidelines
### Summary
Populated Trellis backend/frontend specs for the JSP + Servlet + MySQL + Tomcat library management system and archived the bootstrap task.
### Main Changes
Populated the spec layers from the developer-provided project requirements, reproduced below.
#### Project Requirements From Developer
##### Source
Captured on 2026-04-27 during the Trellis bootstrap-guidelines task from
developer-provided project facts.
##### Architecture And Stack
- The system uses a B/S (Browser/Server) architecture with a typical layered design.
- The application stack is JSP + Servlet, MySQL, and Tomcat, developed in IntelliJ IDEA.
- The layers are presentation, controller, business/service, DAO, and data.
##### Data Layer
MySQL is the data layer and stores:
- Book information.
- Book category information.
- Reader information.
- Borrowing records.
- Administrator information.
- Role and permission information.
- System logs.
Core entity tables should include book information, book categories, reader
information, borrowing records, administrators, role permissions, and system
logs. Use primary keys and foreign keys to preserve data integrity.
##### DAO Layer
The data access layer uses DAO classes to perform database CRUD. DAOs should own
SQL/database access concerns and should not contain presentation or business
workflow logic.
##### Business And Service Layer
The service layer handles:
- Book warehousing/intake.
- Borrowing and returning.
- Inventory status updates.
- Overdue statistics.
- Permission checks.
##### Controller Layer
Servlet controllers handle request dispatch, parameter validation, and result
return. Controllers should coordinate request/response flow and call services
for business behavior.
##### Presentation Layer
JSP/CSS pages handle page display and user interaction.
The frontend approach is image-first: the UI should be designed or generated as
images first, and the JSP/CSS pages should then reproduce those images
faithfully.
##### Core Modules
- Login and permission management for administrator, librarian, and reader
roles.
- Book information management for create, update, delete, category
maintenance, and inventory status.
- Reader information management for profiles, borrowing eligibility, and
contact information.
- Borrowing and return management for borrow, return, renew, overdue handling,
and automatic collection status updates.
- Book search and statistics for combined search by title, author, category,
and ID, plus borrowing rankings, inventory reports, and overdue reports.
- System maintenance and logs for key operation logs, data backup support, and
exception tracing.
### Git Commits
(No commits - planning session)
### Testing
- [OK] (Add test results)
### Status
[OK] **Completed**
### Next Steps
- None - task complete
@@ -0,0 +1,125 @@
# Workspace Index
> Records of all AI Agent work across all developers
---
## Overview
This directory tracks records for all developers working with AI Agents on this project.
### File Structure
```
workspace/
|-- index.md              # This file - main index
+-- {developer}/          # Per-developer directory
    |-- index.md          # Personal index with session history
    |-- tasks/            # Task files
    |   |-- *.json        # Active tasks
    |   +-- archive/      # Archived tasks by month
    +-- journal-N.md      # Journal files (sequential: 1, 2, 3...)
```
---
## Active Developers
| Developer | Last Active | Sessions | Active File |
|-----------|-------------|----------|-------------|
| (none yet) | - | - | - |
---
## Getting Started
### For New Developers
Run the initialization script:
```bash
python3 ./.trellis/scripts/init_developer.py <your-name>
```
This will:
1. Create your identity file (gitignored)
2. Create your progress directory
3. Create your personal index
4. Create initial journal file
### For Returning Developers
1. Get your developer name:
```bash
python3 ./.trellis/scripts/get_developer.py
```
2. Read your personal index:
```bash
cat .trellis/workspace/$(python3 ./.trellis/scripts/get_developer.py)/index.md
```
---
## Guidelines
### Journal File Rules
- **Max 2000 lines** per journal file
- When limit is reached, create `journal-{N+1}.md` (a quick check is sketched below)
- Update your personal `index.md` when creating new files
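A minimal rotation check, assuming the active journal is `journal-1.md`:
```bash
# The filename is an assumption; the 2000-line cap comes from the rule above.
lines=$(wc -l < journal-1.md)
if [ "$lines" -ge 2000 ]; then
  echo "journal-1.md is at $lines lines; start journal-2.md and update index.md"
fi
```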
### Session Record Format
Each session should include:
- Summary: One-line description
- Branch: Which branch the work was done on
- Main Changes: What was modified
- Git Commits: Commit hashes and messages
- Next Steps: What to do next
---
## Session Template
Use this template when recording sessions:
```markdown
## Session {N}: {Title}
**Date**: YYYY-MM-DD
**Task**: {task-name}
**Branch**: `{branch-name}`
### Summary
{One-line summary}
### Main Changes
- {Change 1}
- {Change 2}
### Git Commits
| Hash | Message |
|------|---------|
| `abc1234` | {commit message} |
### Testing
- [OK] {Test result}
### Status
[OK] **Completed** / [WIP] **In Progress** / [BLOCKED] **Blocked**
### Next Steps
- {Next step 1}
- {Next step 2}
```
---
**Language**: All documentation must be written in **English**.