chore: clean .gitignore cache
This commit is contained in:
202
.github/skills/scripts/_environment_helpers.sh
vendored
202
.github/skills/scripts/_environment_helpers.sh
vendored
@@ -1,202 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Agent Skills - Environment Helpers
|
||||
#
|
||||
# Provides environment validation and setup utilities.
|
||||
|
||||
# validate_go_environment: Check Go installation and version.
#   $1 - minimum required version as MAJOR.MINOR (default: 1.23)
# Returns 0 when `go` is on PATH and at least the minimum version,
# 1 otherwise. Uses the log_* helpers when loaded, else plain stderr.
validate_go_environment() {
  local min_version="${1:-1.23}"

  if ! command -v go >/dev/null 2>&1; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Go is not installed or not in PATH"
    else
      echo "[ERROR] Go is not installed or not in PATH" >&2
    fi
    return 1
  fi

  # Parse MAJOR.MINOR with a bash regex instead of `grep -oP`:
  # the -P (PCRE) flag is GNU-only and fails on BSD/macOS grep.
  local go_version="0.0"
  if [[ "$(go version)" =~ go([0-9]+\.[0-9]+) ]]; then
    go_version="${BASH_REMATCH[1]}"
  fi

  if declare -f log_debug >/dev/null 2>&1; then
    log_debug "Go version: ${go_version} (required: >=${min_version})"
  fi

  # Simple version comparison via `sort -V`: if min_version does not
  # sort first, the detected version is older than the minimum.
  if [[ "$(printf '%s\n' "${min_version}" "${go_version}" | sort -V | head -n1)" != "${min_version}" ]]; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Go version ${go_version} is below minimum required version ${min_version}"
    else
      echo "[ERROR] Go version ${go_version} is below minimum required version ${min_version}" >&2
    fi
    return 1
  fi

  return 0
}
|
||||
|
||||
# validate_python_environment: Check Python installation and version.
#   $1 - minimum required version as MAJOR.MINOR (default: 3.8)
# Returns 0 when `python3` is on PATH and at least the minimum version.
validate_python_environment() {
  local min_version="${1:-3.8}"

  if ! command -v python3 >/dev/null 2>&1; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Python 3 is not installed or not in PATH"
    else
      echo "[ERROR] Python 3 is not installed or not in PATH" >&2
    fi
    return 1
  fi

  # Parse MAJOR.MINOR with a bash regex instead of GNU-only `grep -oP`.
  # `--version` printed to stderr on very old interpreters, hence 2>&1.
  local python_version="0.0"
  if [[ "$(python3 --version 2>&1)" =~ Python\ ([0-9]+\.[0-9]+) ]]; then
    python_version="${BASH_REMATCH[1]}"
  fi

  if declare -f log_debug >/dev/null 2>&1; then
    log_debug "Python version: ${python_version} (required: >=${min_version})"
  fi

  # Simple version comparison via `sort -V`.
  if [[ "$(printf '%s\n' "${min_version}" "${python_version}" | sort -V | head -n1)" != "${min_version}" ]]; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Python version ${python_version} is below minimum required version ${min_version}"
    else
      echo "[ERROR] Python version ${python_version} is below minimum required version ${min_version}" >&2
    fi
    return 1
  fi

  return 0
}
|
||||
|
||||
# validate_node_environment: Check Node.js installation and version.
#   $1 - minimum required version as MAJOR.MINOR (default: 18.0)
# Returns 0 when `node` is on PATH and at least the minimum version.
validate_node_environment() {
  local min_version="${1:-18.0}"

  if ! command -v node >/dev/null 2>&1; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Node.js is not installed or not in PATH"
    else
      echo "[ERROR] Node.js is not installed or not in PATH" >&2
    fi
    return 1
  fi

  # Parse MAJOR.MINOR from "vX.Y.Z" with a bash regex instead of the
  # GNU-only `grep -oP`.
  local node_version="0.0"
  if [[ "$(node --version)" =~ v([0-9]+\.[0-9]+) ]]; then
    node_version="${BASH_REMATCH[1]}"
  fi

  if declare -f log_debug >/dev/null 2>&1; then
    log_debug "Node.js version: ${node_version} (required: >=${min_version})"
  fi

  # Simple version comparison via `sort -V`.
  if [[ "$(printf '%s\n' "${min_version}" "${node_version}" | sort -V | head -n1)" != "${min_version}" ]]; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Node.js version ${node_version} is below minimum required version ${min_version}"
    else
      echo "[ERROR] Node.js version ${node_version} is below minimum required version ${min_version}" >&2
    fi
    return 1
  fi

  return 0
}
|
||||
|
||||
# validate_docker_environment: Check Docker installation and that the
# daemon is reachable. Returns 0 on success, 1 with an error otherwise.
validate_docker_environment() {
  if ! command -v docker >/dev/null 2>&1; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Docker is not installed or not in PATH"
    else
      echo "[ERROR] Docker is not installed or not in PATH" >&2
    fi
    return 1
  fi

  # Check if Docker daemon is running (`docker info` needs a live daemon).
  if ! docker info >/dev/null 2>&1; then
    if declare -f log_error >/dev/null 2>&1; then
      log_error "Docker daemon is not running"
    else
      echo "[ERROR] Docker daemon is not running" >&2
    fi
    return 1
  fi

  if declare -f log_debug >/dev/null 2>&1; then
    # Parse the version with a bash regex instead of GNU-only `grep -oP`.
    local docker_version="unknown"
    if [[ "$(docker --version)" =~ Docker\ version\ ([0-9]+\.[0-9]+\.[0-9]+) ]]; then
      docker_version="${BASH_REMATCH[1]}"
    fi
    log_debug "Docker version: ${docker_version}"
  fi

  return 0
}
|
||||
|
||||
# set_default_env: Export an environment variable, falling back to a
# default when it is unset or empty.
#   $1 - variable name
#   $2 - default value used when the variable is empty/unset
set_default_env() {
  local name="$1"
  local fallback="$2"

  # ${!name} is indirect expansion: the value of the variable whose
  # name is stored in $name.
  if [[ -n "${!name:-}" ]]; then
    if declare -f log_debug >/dev/null 2>&1; then
      log_debug "Using ${name}=${!name} (from environment)"
    fi
    return 0
  fi

  export "${name}=${fallback}"

  if declare -f log_debug >/dev/null 2>&1; then
    log_debug "Set ${name}=${fallback} (default)"
  fi
}
|
||||
|
||||
# validate_project_structure: Check we're in the correct project directory.
#   $@ - files/directories that must exist relative to the CWD
# Returns 0 when every path exists, 1 (with an error message) otherwise.
validate_project_structure() {
  # Iterate "$@" directly instead of copying into an array: an empty
  # array expansion errors under `set -u` on bash <= 4.3, and the loop
  # variable is now `local` instead of leaking into the caller's scope.
  local file
  for file in "$@"; do
    if [[ ! -e "${file}" ]]; then
      if declare -f log_error >/dev/null 2>&1; then
        log_error "Required file/directory not found: ${file}"
        log_error "Are you running this from the project root?"
      else
        echo "[ERROR] Required file/directory not found: ${file}" >&2
        echo "[ERROR] Are you running this from the project root?" >&2
      fi
      return 1
    fi
  done

  return 0
}
|
||||
|
||||
# get_project_root: Find the project root by walking up from the CWD
# looking for a marker file/directory.
#   $1 - marker name (default: .git)
# Prints the root directory on stdout and returns 0; returns 1 if no
# ancestor contains the marker. Unlike the previous version, "/" itself
# is now checked before giving up.
get_project_root() {
  local marker_file="${1:-.git}"
  local current_dir
  current_dir="$(pwd)"

  while :; do
    if [[ -e "${current_dir}/${marker_file}" ]]; then
      echo "${current_dir}"
      return 0
    fi
    if [[ "${current_dir}" == "/" ]]; then
      break
    fi
    current_dir="$(dirname "${current_dir}")"
  done

  if declare -f log_error >/dev/null 2>&1; then
    log_error "Could not find project root (looking for ${marker_file})"
  else
    echo "[ERROR] Could not find project root (looking for ${marker_file})" >&2
  fi
  return 1
}
|
||||
|
||||
# Export functions so they remain visible in child shells (e.g. skill
# scripts spawned after this file has been sourced).
export -f validate_go_environment
export -f validate_python_environment
export -f validate_node_environment
export -f validate_docker_environment
export -f set_default_env
export -f validate_project_structure
export -f get_project_root
|
||||
134
.github/skills/scripts/_error_handling_helpers.sh
vendored
134
.github/skills/scripts/_error_handling_helpers.sh
vendored
@@ -1,134 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Agent Skills - Error Handling Helpers
|
||||
#
|
||||
# Provides error handling utilities for robust skill execution.
|
||||
|
||||
# error_exit: Report a fatal error and terminate the script.
#   $1 - message to print
#   $2 - exit status (default: 1)
# Uses log_error when the logging helpers are loaded, otherwise falls
# back to a plain [ERROR] line on stderr.
error_exit() {
  local msg="$1"
  local code="${2:-1}"

  if declare -f log_error >/dev/null 2>&1; then
    log_error "${msg}"
  else
    echo "[ERROR] ${msg}" >&2
  fi

  exit "${code}"
}
|
||||
|
||||
# check_command_exists: Abort (exit 127) unless a command is on PATH.
#   $1 - command name
#   $2 - optional custom error message
check_command_exists() {
  local binary="$1"
  local msg="${2:-Command not found: ${binary}}"

  command -v "${binary}" >/dev/null 2>&1 || error_exit "${msg}" 127
}
|
||||
|
||||
# check_file_exists: Abort (exit 1) unless a regular file exists.
#   $1 - file path
#   $2 - optional custom error message
check_file_exists() {
  local path="$1"
  local msg="${2:-File not found: ${path}}"

  [[ -f "${path}" ]] || error_exit "${msg}" 1
}
|
||||
|
||||
# check_dir_exists: Abort (exit 1) unless a directory exists.
#   $1 - directory path
#   $2 - optional custom error message
check_dir_exists() {
  local path="$1"
  local msg="${2:-Directory not found: ${path}}"

  [[ -d "${path}" ]] || error_exit "${msg}" 1
}
|
||||
|
||||
# check_exit_code: Verify the previous command succeeded, aborting with
# its exit code otherwise.
#   $1 - optional custom error message
# NOTE: `local exit_code=$?` must remain the very first statement: it
# captures the status of the command executed just before this function
# was invoked, and any earlier statement would clobber $?.
check_exit_code() {
  local exit_code=$?
  local error_msg="${1:-Command failed with exit code ${exit_code}}"

  if [[ ${exit_code} -ne 0 ]]; then
    error_exit "${error_msg}" "${exit_code}"
  fi
}
|
||||
|
||||
# run_with_retry: Run a command, retrying on failure.
#   $1 - maximum number of attempts
#   $2 - delay in seconds between attempts
#   $@ - (after shift) the command and its arguments
# Returns 0 on the first success, 1 after all attempts fail.
run_with_retry() {
  local max_attempts="$1"
  local delay="$2"
  shift 2

  local try
  for (( try = 1; try <= max_attempts; try++ )); do
    if "$@"; then
      return 0
    fi

    if (( try < max_attempts )); then
      if declare -f log_warning >/dev/null 2>&1; then
        log_warning "Command failed (attempt ${try}/${max_attempts}). Retrying in ${delay}s..."
      else
        echo "[WARNING] Command failed (attempt ${try}/${max_attempts}). Retrying in ${delay}s..." >&2
      fi
      sleep "${delay}"
    fi
  done

  if declare -f log_error >/dev/null 2>&1; then
    log_error "Command failed after ${max_attempts} attempts: $*"
  else
    echo "[ERROR] Command failed after ${max_attempts} attempts: $*" >&2
  fi
  return 1
}
|
||||
|
||||
# trap_error: Install an ERR trap that reports the failing line/command.
#   $1 - script name used in diagnostics (default: the caller's file)
# ${LINENO}, ${BASH_LINENO} and ${BASH_COMMAND} are escaped so they are
# expanded when the trap FIRES; ${script_name} is expanded NOW, because
# it is a local that no longer exists by the time the trap runs (the
# previous single-quoted version always reported an empty script name).
trap_error() {
  local script_name="${1:-${BASH_SOURCE[1]}}"

  trap "error_handler \${LINENO} \${BASH_LINENO} \"\${BASH_COMMAND}\" \"${script_name}\"" ERR
}
|
||||
|
||||
# error_handler: Internal handler invoked by the ERR trap installed via
# trap_error. Reports where a script failed and which command failed.
#   $1 - line number of the failure
#   $2 - caller line number (unused; kept for trap signature compatibility)
#   $3 - the failing command text
#   $4 - script name for the diagnostic
error_handler() {
  local failed_line="$1"
  local caller_line="$2"
  local failed_cmd="$3"
  local src="$4"

  if ! declare -f log_error >/dev/null 2>&1; then
    echo "[ERROR] Script failed at line ${failed_line} in ${src}" >&2
    echo "[ERROR] Command: ${failed_cmd}" >&2
  else
    log_error "Script failed at line ${failed_line} in ${src}"
    log_error "Command: ${failed_cmd}"
  fi
}
|
||||
|
||||
# cleanup_on_exit: Register a function (or command string) to run when
# the shell exits.
#   $1 - cleanup function name or command
# NOTE: installs a fresh EXIT trap, replacing any previously set one.
cleanup_on_exit() {
  local handler="$1"

  trap "${handler}" EXIT
}
|
||||
|
||||
# Export functions so they remain visible in child shells (e.g. skill
# scripts spawned after this file has been sourced).
export -f error_exit
export -f check_command_exists
export -f check_file_exists
export -f check_dir_exists
export -f check_exit_code
export -f run_with_retry
export -f trap_error
export -f error_handler
export -f cleanup_on_exit
|
||||
109
.github/skills/scripts/_logging_helpers.sh
vendored
109
.github/skills/scripts/_logging_helpers.sh
vendored
@@ -1,109 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Agent Skills - Logging Helpers
|
||||
#
|
||||
# Provides colored logging functions for consistent output across all skills.
|
||||
|
||||
# Color codes (ANSI escape sequences).
# Guarded so this file can safely be sourced more than once: re-declaring
# a readonly variable is an error, which would abort any caller running
# under `set -e`.
if [[ -z "${COLOR_RESET:-}" ]]; then
  readonly COLOR_RESET="\033[0m"
  readonly COLOR_RED="\033[0;31m"
  readonly COLOR_GREEN="\033[0;32m"
  readonly COLOR_YELLOW="\033[0;33m"
  readonly COLOR_BLUE="\033[0;34m"
  readonly COLOR_MAGENTA="\033[0;35m"
  readonly COLOR_CYAN="\033[0;36m"
  readonly COLOR_GRAY="\033[0;90m"
fi

# Check if stdout is a terminal (for color support)
if [[ -t 1 ]]; then
  COLORS_ENABLED=true
else
  COLORS_ENABLED=false
fi

# Disable colors if the NO_COLOR environment variable is set
# (https://no-color.org convention).
if [[ -n "${NO_COLOR:-}" ]]; then
  COLORS_ENABLED=false
fi
|
||||
|
||||
# log_info: Print an informational message to stdout.
log_info() {
  local msg="$*"

  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[INFO] ${msg}"
  else
    echo -e "${COLOR_BLUE}[INFO]${COLOR_RESET} ${msg}"
  fi
}
|
||||
|
||||
# log_success: Print a success message to stdout.
log_success() {
  local msg="$*"

  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[SUCCESS] ${msg}"
  else
    echo -e "${COLOR_GREEN}[SUCCESS]${COLOR_RESET} ${msg}"
  fi
}
|
||||
|
||||
# log_warning: Print a warning message to stderr.
log_warning() {
  local msg="$*"

  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[WARNING] ${msg}" >&2
  else
    echo -e "${COLOR_YELLOW}[WARNING]${COLOR_RESET} ${msg}" >&2
  fi
}
|
||||
|
||||
# log_error: Print an error message to stderr.
log_error() {
  local msg="$*"

  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[ERROR] ${msg}" >&2
  else
    echo -e "${COLOR_RED}[ERROR]${COLOR_RESET} ${msg}" >&2
  fi
}
|
||||
|
||||
# log_debug: Print a debug message, but only when DEBUG=1 is set.
log_debug() {
  [[ "${DEBUG:-0}" == "1" ]] || return 0

  local msg="$*"
  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[DEBUG] ${msg}"
  else
    echo -e "${COLOR_GRAY}[DEBUG]${COLOR_RESET} ${msg}"
  fi
}
|
||||
|
||||
# log_step: Print a "[STEP-NAME] message" header line.
#   $1 - step name; remaining arguments form the message.
log_step() {
  local header="$1"
  shift
  local msg="$*"

  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[${header}] ${msg}"
  else
    echo -e "${COLOR_CYAN}[${header}]${COLOR_RESET} ${msg}"
  fi
}
|
||||
|
||||
# log_command: Echo a command line before executing it (transparency).
log_command() {
  local cmdline="$*"

  if [[ "${COLORS_ENABLED}" != "true" ]]; then
    echo "[\$] ${cmdline}"
  else
    echo -e "${COLOR_MAGENTA}[$]${COLOR_RESET} ${cmdline}"
  fi
}
|
||||
|
||||
# Export functions so they can be used by child shells of sourcing
# scripts (note: COLORS_ENABLED itself is not exported, so child shells
# fall back to the uncolored branch).
export -f log_info
export -f log_success
export -f log_warning
export -f log_error
export -f log_debug
export -f log_step
export -f log_command
|
||||
96
.github/skills/scripts/skill-runner.sh
vendored
96
.github/skills/scripts/skill-runner.sh
vendored
@@ -1,96 +0,0 @@
|
||||
#!/usr/bin/env bash
# Agent Skills Universal Skill Runner
#
# This script locates and executes Agent Skills by name, providing a unified
# interface for running skills from tasks.json, CI/CD workflows, and the CLI.
#
# Usage:
#   skill-runner.sh <skill-name> [args...]
#
# Exit Codes:
#   0   - Skill executed successfully
#   1   - Skill not found or invalid
#   2   - Skill execution failed
#   126 - Skill script not executable
#   127 - Skill script not found
#
# NOTE(review): exit code 127 is documented above but no path below ever
# produces it (a missing run.sh exits 1) — confirm the intended contract.

set -euo pipefail

# Source helper scripts relative to this file's real location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=_logging_helpers.sh
source "${SCRIPT_DIR}/_logging_helpers.sh"
# shellcheck source=_error_handling_helpers.sh
source "${SCRIPT_DIR}/_error_handling_helpers.sh"
# shellcheck source=_environment_helpers.sh
source "${SCRIPT_DIR}/_environment_helpers.sh"

# Configuration: skills live one level up; project root two levels above that.
SKILLS_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
PROJECT_ROOT="$(cd "${SKILLS_DIR}/../.." && pwd)"

# Validate arguments
if [[ $# -eq 0 ]]; then
  log_error "Usage: skill-runner.sh <skill-name> [args...]"
  log_error "Example: skill-runner.sh test-backend-coverage"
  exit 1
fi

SKILL_NAME="$1"
shift # Remove skill name from arguments

# Validate skill name format (kebab-case) before touching the filesystem.
if [[ ! "${SKILL_NAME}" =~ ^[a-z][a-z0-9-]*$ ]]; then
  log_error "Invalid skill name: ${SKILL_NAME}"
  log_error "Skill names must be kebab-case (lowercase, hyphens, start with letter)"
  exit 1
fi

# Verify the skill's SKILL.md manifest exists; if not, list what's available.
SKILL_FILE="${SKILLS_DIR}/${SKILL_NAME}.SKILL.md"
if [[ ! -f "${SKILL_FILE}" ]]; then
  log_error "Skill not found: ${SKILL_NAME}"
  log_error "Expected file: ${SKILL_FILE}"
  log_info "Available skills:"
  for skill_file in "${SKILLS_DIR}"/*.SKILL.md; do
    if [[ -f "${skill_file}" ]]; then
      basename "${skill_file}" .SKILL.md
    fi
  done | sort | sed 's/^/  - /'
  exit 1
fi

# Locate skill execution script (flat structure: skill-name-scripts/run.sh)
SKILL_SCRIPT="${SKILLS_DIR}/${SKILL_NAME}-scripts/run.sh"

if [[ ! -f "${SKILL_SCRIPT}" ]]; then
  log_error "Skill execution script not found: ${SKILL_SCRIPT}"
  log_error "Expected: ${SKILL_NAME}-scripts/run.sh"
  exit 1
fi

if [[ ! -x "${SKILL_SCRIPT}" ]]; then
  log_error "Skill execution script is not executable: ${SKILL_SCRIPT}"
  log_error "Fix with: chmod +x ${SKILL_SCRIPT}"
  exit 126
fi

# Log skill execution (debug lines only appear with DEBUG=1).
log_info "Executing skill: ${SKILL_NAME}"
log_debug "Skill file: ${SKILL_FILE}"
log_debug "Skill script: ${SKILL_SCRIPT}"
log_debug "Working directory: ${PROJECT_ROOT}"
log_debug "Arguments: $*"

# Change to project root for execution so skills see a consistent CWD.
cd "${PROJECT_ROOT}"

# Execute skill with all remaining arguments
# shellcheck disable=SC2294
if ! "${SKILL_SCRIPT}" "$@"; then
  log_error "Skill execution failed: ${SKILL_NAME}"
  exit 2
fi

log_success "Skill completed successfully: ${SKILL_NAME}"
exit 0
|
||||
422
.github/skills/scripts/validate-skills.py
vendored
422
.github/skills/scripts/validate-skills.py
vendored
@@ -1,422 +0,0 @@
|
||||
#!/usr/bin/env python3
"""
Agent Skills Frontmatter Validator

Validates YAML frontmatter in .SKILL.md files against the agentskills.io
specification. Ensures required fields are present, formats are correct,
and custom metadata follows project conventions.

Usage:
    python3 validate-skills.py [path/to/.github/skills/]
    python3 validate-skills.py --single path/to/skill.SKILL.md

Exit Codes:
    0 - All validations passed
    1 - Validation errors found
    2 - Script error (missing dependencies, invalid arguments)
"""

import os
import sys
import re
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Any, Optional

# PyYAML is the only third-party dependency; fail fast with exit code 2
# (script error) rather than a traceback when it is missing.
try:
    import yaml
except ImportError:
    print("Error: PyYAML is required. Install with: pip install pyyaml", file=sys.stderr)
    sys.exit(2)


# Validation rules
# NOTE(review): `os` and `Any` appear unused in this module — confirm
# before removing the imports.
REQUIRED_FIELDS = ["name", "version", "description", "author", "license", "tags"]
VALID_CATEGORIES = ["test", "integration-test", "security", "qa", "build", "utility", "docker"]
VALID_EXECUTION_TIMES = ["short", "medium", "long"]
VALID_RISK_LEVELS = ["low", "medium", "high"]
VALID_OS_VALUES = ["linux", "darwin", "windows"]
VALID_SHELL_VALUES = ["bash", "sh", "zsh", "powershell", "cmd"]

# Versions must be semantic (x.y.z); skill names must be kebab-case.
VERSION_REGEX = re.compile(r'^\d+\.\d+\.\d+$')
NAME_REGEX = re.compile(r'^[a-z][a-z0-9-]*$')
|
||||
|
||||
|
||||
class ValidationError:
    """A single validation finding (error or warning) tied to a skill file."""

    def __init__(self, skill_file: str, field: str, message: str, severity: str = "error"):
        # Public attributes read throughout the validator.
        self.skill_file = skill_file
        self.field = field
        self.message = message
        self.severity = severity

    def __str__(self) -> str:
        prefix = self.severity.upper()
        return "[{}] {} :: {}: {}".format(prefix, self.skill_file, self.field, self.message)
|
||||
|
||||
|
||||
class SkillValidator:
    """Validates Agent Skills frontmatter.

    Accumulates findings across files: severity "error" entries land in
    self.errors, severity "warning" entries in self.warnings.
    """

    def __init__(self, strict: bool = False):
        # NOTE(review): `strict` is stored but never read inside this
        # class; warnings-as-errors is applied by main(). Confirm intent.
        self.strict = strict
        self.errors: List[ValidationError] = []
        self.warnings: List[ValidationError] = []

    def validate_file(self, skill_path: Path) -> Tuple[bool, List[ValidationError]]:
        """Validate a single SKILL.md file.

        Returns (is_valid, findings): is_valid is True when no finding
        has severity "error" — warnings alone do not fail the file.
        """
        try:
            with open(skill_path, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            # Broad on purpose: any read failure becomes one finding.
            return False, [ValidationError(str(skill_path), "file", f"Cannot read file: {e}")]

        # Extract frontmatter
        frontmatter = self._extract_frontmatter(content)
        if not frontmatter:
            return False, [ValidationError(str(skill_path), "frontmatter", "No valid YAML frontmatter found")]

        # Parse YAML (safe_load: no arbitrary object construction)
        try:
            data = yaml.safe_load(frontmatter)
        except yaml.YAMLError as e:
            return False, [ValidationError(str(skill_path), "yaml", f"Invalid YAML: {e}")]

        if not isinstance(data, dict):
            return False, [ValidationError(str(skill_path), "yaml", "Frontmatter must be a YAML object")]

        # Run validation checks; each returns a (possibly empty) list.
        file_errors: List[ValidationError] = []
        file_errors.extend(self._validate_required_fields(skill_path, data))
        file_errors.extend(self._validate_name(skill_path, data))
        file_errors.extend(self._validate_version(skill_path, data))
        file_errors.extend(self._validate_description(skill_path, data))
        file_errors.extend(self._validate_tags(skill_path, data))
        file_errors.extend(self._validate_compatibility(skill_path, data))
        file_errors.extend(self._validate_metadata(skill_path, data))

        # Separate errors and warnings
        errors = [e for e in file_errors if e.severity == "error"]
        warnings = [e for e in file_errors if e.severity == "warning"]

        self.errors.extend(errors)
        self.warnings.extend(warnings)

        return len(errors) == 0, file_errors

    def _extract_frontmatter(self, content: str) -> Optional[str]:
        """Extract YAML frontmatter from markdown content.

        Returns the text between a leading '---\\n' and the next
        '\\n---\\n' line, or None if either delimiter is missing.
        """
        if not content.startswith('---\n'):
            return None

        end_marker = content.find('\n---\n', 4)
        if end_marker == -1:
            return None

        return content[4:end_marker]

    def _validate_required_fields(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Check that all required fields are present and truthy."""
        errors = []
        for field in REQUIRED_FIELDS:
            if field not in data:
                errors.append(ValidationError(
                    str(skill_path), field, f"Required field missing"
                ))
            elif not data[field]:
                # NOTE(review): truthiness check — a literal false/0 value
                # is reported as "empty"; confirm that is intended.
                errors.append(ValidationError(
                    str(skill_path), field, f"Required field is empty"
                ))
        return errors

    def _validate_name(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Validate name field format (kebab-case string)."""
        errors = []
        if "name" in data:
            name = data["name"]
            if not isinstance(name, str):
                errors.append(ValidationError(
                    str(skill_path), "name", "Must be a string"
                ))
            elif not NAME_REGEX.match(name):
                errors.append(ValidationError(
                    str(skill_path), "name",
                    "Must be kebab-case (lowercase, hyphens only, start with letter)"
                ))
        return errors

    def _validate_version(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Validate version field format (semantic x.y.z string)."""
        errors = []
        if "version" in data:
            version = data["version"]
            if not isinstance(version, str):
                errors.append(ValidationError(
                    str(skill_path), "version", "Must be a string"
                ))
            elif not VERSION_REGEX.match(version):
                errors.append(ValidationError(
                    str(skill_path), "version",
                    "Must follow semantic versioning (x.y.z)"
                ))
        return errors

    def _validate_description(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Validate description field (single line, <= 120 chars)."""
        errors = []
        if "description" in data:
            desc = data["description"]
            if not isinstance(desc, str):
                errors.append(ValidationError(
                    str(skill_path), "description", "Must be a string"
                ))
            elif len(desc) > 120:
                errors.append(ValidationError(
                    str(skill_path), "description",
                    f"Must be 120 characters or less (current: {len(desc)})"
                ))
            elif '\n' in desc:
                errors.append(ValidationError(
                    str(skill_path), "description", "Must be a single line"
                ))
        return errors

    def _validate_tags(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Validate tags field (list of 2-5 lowercase strings)."""
        errors = []
        if "tags" in data:
            tags = data["tags"]
            if not isinstance(tags, list):
                errors.append(ValidationError(
                    str(skill_path), "tags", "Must be a list"
                ))
            elif len(tags) < 2:
                errors.append(ValidationError(
                    str(skill_path), "tags", "Must have at least 2 tags"
                ))
            elif len(tags) > 5:
                # NOTE(review): when >5 tags, the per-tag checks below are
                # skipped entirely (elif chain) — confirm that is intended.
                errors.append(ValidationError(
                    str(skill_path), "tags",
                    f"Must have at most 5 tags (current: {len(tags)})",
                    severity="warning"
                ))
            else:
                for tag in tags:
                    if not isinstance(tag, str):
                        errors.append(ValidationError(
                            str(skill_path), "tags", "All tags must be strings"
                        ))
                    elif tag != tag.lower():
                        errors.append(ValidationError(
                            str(skill_path), "tags",
                            f"Tag '{tag}' should be lowercase",
                            severity="warning"
                        ))
        return errors

    def _validate_compatibility(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Validate optional compatibility section (os / shells lists)."""
        errors = []
        if "compatibility" in data:
            compat = data["compatibility"]
            if not isinstance(compat, dict):
                errors.append(ValidationError(
                    str(skill_path), "compatibility", "Must be an object"
                ))
            else:
                # Validate OS: unknown values are warnings, not errors.
                if "os" in compat:
                    os_list = compat["os"]
                    if not isinstance(os_list, list):
                        errors.append(ValidationError(
                            str(skill_path), "compatibility.os", "Must be a list"
                        ))
                    else:
                        for os_val in os_list:
                            if os_val not in VALID_OS_VALUES:
                                errors.append(ValidationError(
                                    str(skill_path), "compatibility.os",
                                    f"Invalid OS '{os_val}'. Valid: {VALID_OS_VALUES}",
                                    severity="warning"
                                ))

                # Validate shells: unknown values are warnings, not errors.
                if "shells" in compat:
                    shells = compat["shells"]
                    if not isinstance(shells, list):
                        errors.append(ValidationError(
                            str(skill_path), "compatibility.shells", "Must be a list"
                        ))
                    else:
                        for shell in shells:
                            if shell not in VALID_SHELL_VALUES:
                                errors.append(ValidationError(
                                    str(skill_path), "compatibility.shells",
                                    f"Invalid shell '{shell}'. Valid: {VALID_SHELL_VALUES}",
                                    severity="warning"
                                ))
        return errors

    def _validate_metadata(self, skill_path: Path, data: Dict) -> List[ValidationError]:
        """Validate optional custom metadata section.

        All value-level findings here are warnings; only a non-object
        metadata section is a hard error.
        """
        errors = []
        if "metadata" not in data:
            return errors  # Metadata is optional

        metadata = data["metadata"]
        if not isinstance(metadata, dict):
            errors.append(ValidationError(
                str(skill_path), "metadata", "Must be an object"
            ))
            return errors

        # Validate category
        if "category" in metadata:
            category = metadata["category"]
            if category not in VALID_CATEGORIES:
                errors.append(ValidationError(
                    str(skill_path), "metadata.category",
                    f"Invalid category '{category}'. Valid: {VALID_CATEGORIES}",
                    severity="warning"
                ))

        # Validate execution_time
        if "execution_time" in metadata:
            exec_time = metadata["execution_time"]
            if exec_time not in VALID_EXECUTION_TIMES:
                errors.append(ValidationError(
                    str(skill_path), "metadata.execution_time",
                    f"Invalid execution_time '{exec_time}'. Valid: {VALID_EXECUTION_TIMES}",
                    severity="warning"
                ))

        # Validate risk_level
        if "risk_level" in metadata:
            risk = metadata["risk_level"]
            if risk not in VALID_RISK_LEVELS:
                errors.append(ValidationError(
                    str(skill_path), "metadata.risk_level",
                    f"Invalid risk_level '{risk}'. Valid: {VALID_RISK_LEVELS}",
                    severity="warning"
                ))

        # Validate boolean fields
        for bool_field in ["ci_cd_safe", "requires_network", "idempotent"]:
            if bool_field in metadata:
                if not isinstance(metadata[bool_field], bool):
                    errors.append(ValidationError(
                        str(skill_path), f"metadata.{bool_field}",
                        "Must be a boolean (true/false)",
                        severity="warning"
                    ))

        return errors

    def validate_directory(self, skills_dir: Path) -> bool:
        """Validate all SKILL.md files in a directory.

        Prints a per-file pass/fail list plus a summary, then any
        accumulated errors and warnings. Returns True when no file
        produced an error (warnings do not fail the run here).
        """
        if not skills_dir.exists():
            print(f"Error: Directory not found: {skills_dir}", file=sys.stderr)
            return False

        skill_files = list(skills_dir.glob("*.SKILL.md"))
        if not skill_files:
            print(f"Warning: No .SKILL.md files found in {skills_dir}", file=sys.stderr)
            return True  # Not an error, just nothing to validate

        print(f"Validating {len(skill_files)} skill(s)...\n")

        success_count = 0
        for skill_file in sorted(skill_files):
            is_valid, _ = self.validate_file(skill_file)
            if is_valid:
                success_count += 1
                print(f"✓ {skill_file.name}")
            else:
                print(f"✗ {skill_file.name}")

        # Print summary
        print(f"\n{'='*70}")
        print(f"Validation Summary:")
        print(f"  Total skills:  {len(skill_files)}")
        print(f"  Passed:        {success_count}")
        print(f"  Failed:        {len(skill_files) - success_count}")
        print(f"  Errors:        {len(self.errors)}")
        print(f"  Warnings:      {len(self.warnings)}")
        print(f"{'='*70}\n")

        # Print errors
        if self.errors:
            print("ERRORS:")
            for error in self.errors:
                print(f"  {error}")
            print()

        # Print warnings
        if self.warnings:
            print("WARNINGS:")
            for warning in self.warnings:
                print(f"  {warning}")
            print()

        return len(self.errors) == 0
|
||||
|
||||
|
||||
def main():
    """CLI entry point.

    Returns a process exit code: 0 when validation passed, 1 when
    errors were found (or warnings under --strict), 2 on usage errors.
    """
    parser = argparse.ArgumentParser(
        description="Validate Agent Skills frontmatter",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__
    )
    parser.add_argument(
        "path",
        nargs="?",
        default=".github/skills",
        help="Path to .github/skills directory or single .SKILL.md file (default: .github/skills)"
    )
    parser.add_argument(
        "--strict",
        action="store_true",
        help="Treat warnings as errors"
    )
    parser.add_argument(
        "--single",
        action="store_true",
        help="Validate a single .SKILL.md file instead of a directory"
    )

    args = parser.parse_args()

    validator = SkillValidator(strict=args.strict)
    path = Path(args.path)

    if args.single:
        if not path.exists():
            print(f"Error: File not found: {path}", file=sys.stderr)
            return 2

        is_valid, errors = validator.validate_file(path)

        if is_valid:
            print(f"✓ {path.name} is valid")
            if errors:  # Warnings only
                print("\nWARNINGS:")
                for error in errors:
                    print(f"  {error}")
        else:
            print(f"✗ {path.name} has errors")
            for error in errors:
                print(f"  {error}")

        # Bug fix: --strict previously only applied in directory mode,
        # so a single file with warnings passed even under --strict.
        if args.strict and is_valid and validator.warnings:
            print("Strict mode: treating warnings as errors", file=sys.stderr)
            is_valid = False

        return 0 if is_valid else 1
    else:
        success = validator.validate_directory(path)

        if args.strict and validator.warnings:
            print("Strict mode: treating warnings as errors", file=sys.stderr)
            success = False

        return 0 if success else 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
||||
Reference in New Issue
Block a user