feat: add gitea-webhook-ambassador service and migration script
Signed-off-by: zhenyus <zhenyus@mathmast.com>
parent e5000dfb3e
commit 60817c1be4
.gitignore (vendored, 3 changes)
@@ -1,4 +1,5 @@
cluster/ansible/venv
cluster/ansible/manifests/inventory.ini
.idea/*

apps/gitea-webhook-ambassador/gitea-webhook-ambassador
apps/gitea-webhook-ambassador/config.yaml
apps/gitea-webhook-ambassador/.dockerignore (new file, 22 lines)
@@ -0,0 +1,22 @@
# Git
.git
.gitignore

# Build artifacts
gitea-webhook-ambassador
build/

# Development/test files
*_test.go
Makefile
README.md
LICENSE
docker-compose.yml

# Editor configs
.idea/
.vscode/

# Temporary files
*.log
*.tmp
apps/gitea-webhook-ambassador/Dockerfile (new file, 52 lines)
@@ -0,0 +1,52 @@
# Build stage
FROM golang:1.24-alpine AS builder

# Set working directory
WORKDIR /app

# Install build dependencies
RUN apk add --no-cache git make

# Copy go.mod and go.sum (if present)
COPY go.mod .
COPY go.sum* .

# Download dependencies
RUN go mod download

# Copy source code
COPY . .

# Build a statically linked binary (CGO disabled, symbols stripped)
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o gitea-webhook-ambassador .

# Runtime stage
FROM alpine:3.19

# Add non-root user
RUN addgroup -S appgroup && adduser -S appuser -G appgroup

# Install runtime dependencies
RUN apk add --no-cache ca-certificates tzdata

# Create necessary directories with appropriate permissions
RUN mkdir -p /app/config && \
    chown -R appuser:appgroup /app

WORKDIR /app

# Copy the binary from builder stage
COPY --from=builder /app/gitea-webhook-ambassador .

# Copy default config (will be overridden by volume mount in production)
COPY config.yaml /app/config/

# Switch to non-root user
USER appuser

# Expose the service port
EXPOSE 8080

# Default command (can be overridden at runtime)
ENTRYPOINT ["/app/gitea-webhook-ambassador"]
CMD ["-config=/app/config/config.yaml"]
apps/gitea-webhook-ambassador/Makefile (new file, 68 lines)
@@ -0,0 +1,68 @@
.PHONY: build clean test lint docker-build docker-push run help

# Variables
APP_NAME := gitea-webhook-ambassador
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
LDFLAGS := -ldflags "-X main.version=$(VERSION) -s -w"
GO_FILES := $(shell find . -name "*.go" -type f)
IMAGE_NAME := freeleaps/$(APP_NAME)
IMAGE_TAG := $(VERSION)
CONFIG_FILE := config.yaml

# Go commands
GO := go
GOFMT := gofmt
GOTEST := $(GO) test
GOBUILD := $(GO) build

# Default target
.DEFAULT_GOAL := help

# Build executable
build: $(GO_FILES)
	@echo "Building $(APP_NAME)..."
	$(GOBUILD) $(LDFLAGS) -o $(APP_NAME) .

# Clean build artifacts
clean:
	@echo "Cleaning up..."
	@rm -f $(APP_NAME)
	@rm -rf build/

# Run tests
test:
	@echo "Running tests..."
	$(GOTEST) -v ./...

# Run linter
lint:
	@which golangci-lint > /dev/null || (echo "Installing golangci-lint..." && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest)
	golangci-lint run

# Build Docker image
docker-build:
	@echo "Building Docker image $(IMAGE_NAME):$(IMAGE_TAG)..."
	docker build -t $(IMAGE_NAME):$(IMAGE_TAG) .
	docker tag $(IMAGE_NAME):$(IMAGE_TAG) $(IMAGE_NAME):latest

# Push Docker image to registry
docker-push: docker-build
	@echo "Pushing Docker image $(IMAGE_NAME):$(IMAGE_TAG)..."
	docker push $(IMAGE_NAME):$(IMAGE_TAG)
	docker push $(IMAGE_NAME):latest

# Run locally
run: build
	./$(APP_NAME) -config=$(CONFIG_FILE)

# Show help
help:
	@echo "Gitea Webhook Ambassador - Makefile commands:"
	@echo "  build        - Build the application"
	@echo "  clean        - Remove build artifacts"
	@echo "  test         - Run tests"
	@echo "  lint         - Run linter"
	@echo "  docker-build - Build Docker image"
	@echo "  docker-push  - Build and push Docker image to registry"
	@echo "  run          - Build and run locally"
	@echo "  help         - Show this help message"
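Note on the LDFLAGS above: -X main.version=$(VERSION) only takes effect if package main declares a matching variable, and main.go in this commit does not appear to declare one, so the flag is effectively ignored. A minimal sketch of the declaration the flag expects (illustrative only, not part of this diff):

package main

import "fmt"

// version is meant to be overridden at build time, e.g.
//   go build -ldflags "-X main.version=v1.2.3" .
// With a plain `go build` it stays "dev".
var version = "dev"

func main() {
	fmt.Println("gitea-webhook-ambassador", version)
}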
apps/gitea-webhook-ambassador/VERSION (new file, empty)
apps/gitea-webhook-ambassador/configs/configs.example.yaml (new file, 49 lines)
@@ -0,0 +1,49 @@
server:
  port: 8080
  webhookPath: "/webhook"
  secretHeader: "X-Gitea-Signature"

jenkins:
  url: "http://jenkins.example.com"
  username: "jenkins-user"
  token: "jenkins-api-token"
  timeout: 30

gitea:
  secretToken: "your-gitea-webhook-secret"
  projects:
    # Simple configuration with different jobs for different branches
    "owner/repo1":
      defaultJob: "repo1-default-job"    # Used when no specific branch match is found
      branchJobs:
        "main": "repo1-main-job"         # Specific job for the main branch
        "develop": "repo1-dev-job"       # Specific job for the develop branch
        "release": "repo1-release-job"   # Specific job for the release branch

    # Advanced configuration with regex pattern matching
    "owner/repo2":
      defaultJob: "repo2-default-job"
      branchJobs:
        "main": "repo2-main-job"
      branchPatterns:
        - pattern: "^feature/.*$"                  # All feature branches
          job: "repo2-feature-job"
        - pattern: "^release/v[0-9]+\\.[0-9]+$"    # Release branches like release/v1.0
          job: "repo2-release-job"
        - pattern: "^hotfix/.*$"                   # All hotfix branches
          job: "repo2-hotfix-job"

    # Simple configuration with just a default job
    "owner/repo3":
      defaultJob: "repo3-job"   # This job is triggered for all branches

logging:
  level: "info"
  format: "json"
  file: ""

worker:
  poolSize: 10
  queueSize: 100
  maxRetries: 3
  retryBackoff: 1
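The mapping illustrated above is resolved by determineJobName in main.go: an exact branchJobs match wins, then branchPatterns are tried in order, then defaultJob. A minimal table-driven test sketch, assuming a hypothetical main_test.go in the same package; the job names simply mirror the example configuration:

package main

import "testing"

// Sketch of a test for the branch-to-job resolution; names are illustrative.
func TestDetermineJobName(t *testing.T) {
	project := ProjectConfig{
		DefaultJob: "repo2-default-job",
		BranchJobs: map[string]string{"main": "repo2-main-job"},
		BranchPatterns: []BranchPattern{
			{Pattern: `^feature/.*$`, Job: "repo2-feature-job"},
			{Pattern: `^release/v[0-9]+\.[0-9]+$`, Job: "repo2-release-job"},
		},
	}

	cases := map[string]string{
		"main":            "repo2-main-job",    // exact branch match wins
		"feature/login":   "repo2-feature-job", // first matching pattern
		"release/v1.0":    "repo2-release-job",
		"bugfix/whatever": "repo2-default-job", // falls back to defaultJob
	}

	for branch, want := range cases {
		if got := determineJobName(project, branch); got != want {
			t.Errorf("determineJobName(%q) = %q, want %q", branch, got, want)
		}
	}
}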
apps/gitea-webhook-ambassador/go.mod (new file, 22 lines)
@@ -0,0 +1,22 @@
module freeleaps.com/gitea-webhook-ambassador

go 1.24.0

require (
	github.com/fsnotify/fsnotify v1.8.0
	github.com/go-playground/validator/v10 v10.26.0
	github.com/panjf2000/ants/v2 v2.11.2
	gopkg.in/yaml.v2 v2.4.0
)

require (
	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/leodido/go-urn v1.4.0 // indirect
	golang.org/x/crypto v0.33.0 // indirect
	golang.org/x/net v0.34.0 // indirect
	golang.org/x/sync v0.11.0 // indirect
	golang.org/x/sys v0.30.0 // indirect
	golang.org/x/text v0.22.0 // indirect
)
apps/gitea-webhook-ambassador/go.sum (new file, 38 lines)
@@ -0,0 +1,38 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/panjf2000/ants/v2 v2.11.2 h1:AVGpMSePxUNpcLaBO34xuIgM1ZdKOiGnpxLXixLi5Jo=
github.com/panjf2000/ants/v2 v2.11.2/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
apps/gitea-webhook-ambassador/main.go (new file, 758 lines)
@@ -0,0 +1,758 @@
package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify"
	"github.com/go-playground/validator/v10"
	"github.com/panjf2000/ants/v2"
	"gopkg.in/yaml.v2"
)

// Configuration holds application configuration
type Configuration struct {
	Server struct {
		Port         int    `yaml:"port" validate:"required,gt=0"`
		WebhookPath  string `yaml:"webhookPath" validate:"required"`
		SecretHeader string `yaml:"secretHeader" default:"X-Gitea-Signature"`
	} `yaml:"server"`

	Jenkins struct {
		URL      string `yaml:"url" validate:"required,url"`
		Username string `yaml:"username"`
		Token    string `yaml:"token"`
		Timeout  int    `yaml:"timeout" default:"30"`
	} `yaml:"jenkins"`

	Gitea struct {
		SecretToken string                   `yaml:"secretToken"`
		Projects    map[string]ProjectConfig `yaml:"projects" validate:"required"` // repo name -> project config
	} `yaml:"gitea"`

	Logging struct {
		Level  string `yaml:"level" default:"info" validate:"oneof=debug info warn error"`
		Format string `yaml:"format" default:"text" validate:"oneof=text json"`
		File   string `yaml:"file"`
	} `yaml:"logging"`

	Worker struct {
		PoolSize     int `yaml:"poolSize" default:"10" validate:"gt=0"`
		QueueSize    int `yaml:"queueSize" default:"100" validate:"gt=0"`
		MaxRetries   int `yaml:"maxRetries" default:"3" validate:"gte=0"`
		RetryBackoff int `yaml:"retryBackoff" default:"1" validate:"gt=0"` // seconds
	} `yaml:"worker"`
}

// ProjectConfig represents the configuration for a specific repository
type ProjectConfig struct {
	DefaultJob     string            `yaml:"defaultJob"`           // Default Jenkins job to trigger
	BranchJobs     map[string]string `yaml:"branchJobs,omitempty"` // Branch-specific jobs
	BranchPatterns []BranchPattern   `yaml:"branchPatterns,omitempty"`
}

// BranchPattern defines a pattern-based branch to job mapping
type BranchPattern struct {
	Pattern string `yaml:"pattern"` // Regex pattern for branch name
	Job     string `yaml:"job"`     // Jenkins job to trigger
}

// GiteaWebhook represents the webhook payload from Gitea
type GiteaWebhook struct {
	Secret     string `json:"secret"`
	Ref        string `json:"ref"`
	Before     string `json:"before"`
	After      string `json:"after"`
	CompareURL string `json:"compare_url"`
	Commits    []struct {
		ID      string `json:"id"`
		Message string `json:"message"`
		URL     string `json:"url"`
		Author  struct {
			Name     string `json:"name"`
			Email    string `json:"email"`
			Username string `json:"username"`
		} `json:"author"`
	} `json:"commits"`
	Repository struct {
		ID    int    `json:"id"`
		Name  string `json:"name"`
		Owner struct {
			ID       int    `json:"id"`
			Login    string `json:"login"`
			FullName string `json:"full_name"`
		} `json:"owner"`
		FullName      string `json:"full_name"`
		Private       bool   `json:"private"`
		CloneURL      string `json:"clone_url"`
		SSHURL        string `json:"ssh_url"`
		HTMLURL       string `json:"html_url"`
		DefaultBranch string `json:"default_branch"`
	} `json:"repository"`
	Pusher struct {
		ID       int    `json:"id"`
		Login    string `json:"login"`
		FullName string `json:"full_name"`
		Email    string `json:"email"`
	} `json:"pusher"`
}

type jobRequest struct {
	jobName    string
	parameters map[string]string
	eventID    string
	attempts   int
}

var (
	configFile  = flag.String("config", "config.yaml", "Path to configuration file")
	config      Configuration
	configMutex sync.RWMutex
	validate    = validator.New()
	jobQueue    chan jobRequest
	httpClient  *http.Client
	logger      *log.Logger
	workerPool  *ants.PoolWithFunc

	// For idempotency
	processedEvents sync.Map

	// For config reloading
	watcher *fsnotify.Watcher
)
func main() {
	flag.Parse()

	// Initialize basic logger temporarily
	logger = log.New(os.Stdout, "", log.LstdFlags)
	logger.Println("Starting Gitea Webhook Ambassador...")

	// Load initial configuration
	if err := loadConfig(*configFile); err != nil {
		logger.Fatalf("Failed to load configuration: %v", err)
	}

	// Configure proper logger based on configuration
	setupLogger()

	// Setup config file watcher for auto-reload
	setupConfigWatcher(*configFile)

	// Configure HTTP client with timeout
	configMutex.RLock()
	httpClient = &http.Client{
		Timeout: time.Duration(config.Jenkins.Timeout) * time.Second,
	}

	// Initialize job queue
	jobQueue = make(chan jobRequest, config.Worker.QueueSize)

	// Snapshot server settings while still holding the read lock
	webhookPath := config.Server.WebhookPath
	serverAddr := fmt.Sprintf(":%d", config.Server.Port)
	configMutex.RUnlock()

	// Initialize worker pool
	initWorkerPool()

	// Configure webhook handler
	http.HandleFunc(webhookPath, handleWebhook)
	http.HandleFunc("/health", handleHealthCheck)

	// Start HTTP server
	logger.Printf("Server listening on %s", serverAddr)
	if err := http.ListenAndServe(serverAddr, nil); err != nil {
		logger.Fatalf("HTTP server error: %v", err)
	}
}

// setupLogger configures the logger based on application settings
func setupLogger() {
	configMutex.RLock()
	defer configMutex.RUnlock()

	// Determine log output
	var logOutput io.Writer = os.Stdout
	if config.Logging.File != "" {
		file, err := os.OpenFile(config.Logging.File, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
		if err != nil {
			logger.Printf("Failed to open log file %s: %v, using stdout instead", config.Logging.File, err)
		} else {
			// Log to both the file and stdout
			logOutput = io.MultiWriter(file, os.Stdout)
		}
	}

	// Create new logger with proper format
	var prefix string
	var flags int

	// Set log format based on configuration
	if config.Logging.Format == "json" {
		// For JSON logging, formatting is handled by the custom writer
		prefix = ""
		flags = 0
		logOutput = &jsonLogWriter{out: logOutput}
	} else {
		// Text format with timestamp
		prefix = ""
		flags = log.LstdFlags | log.Lshortfile
	}

	// Create the new logger
	logger = log.New(logOutput, prefix, flags)

	// The log level is enforced by the logDebug/logInfo/logWarn helpers below
	logger.Printf("Logger configured with level=%s, format=%s, output=%s",
		config.Logging.Level,
		config.Logging.Format,
		func() string {
			if config.Logging.File == "" {
				return "stdout"
			}
			return config.Logging.File
		}())
}
func setupConfigWatcher(configPath string) {
	var err error
	watcher, err = fsnotify.NewWatcher()
	if err != nil {
		logger.Fatalf("Failed to create file watcher: %v", err)
	}

	// Extract directory containing the config file
	configDir := filepath.Dir(configPath)

	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				// Check if the config file was modified
				if event.Op&fsnotify.Write == fsnotify.Write &&
					filepath.Base(event.Name) == filepath.Base(configPath) {
					logger.Printf("Config file modified, reloading configuration")
					if err := reloadConfig(configPath); err != nil {
						logger.Printf("Error reloading config: %v", err)
					}
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				logger.Printf("Error watching config file: %v", err)
			}
		}
	}()

	// Start watching the directory containing the config file
	err = watcher.Add(configDir)
	if err != nil {
		logger.Fatalf("Failed to watch config directory: %v", err)
	}

	logger.Printf("Watching config file for changes: %s", configPath)
}

func loadConfig(file string) error {
	f, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("cannot open config file: %v", err)
	}
	defer f.Close()

	var newConfig Configuration
	decoder := yaml.NewDecoder(f)
	if err := decoder.Decode(&newConfig); err != nil {
		return fmt.Errorf("cannot decode config: %v", err)
	}

	// Set defaults
	if newConfig.Server.SecretHeader == "" {
		newConfig.Server.SecretHeader = "X-Gitea-Signature"
	}
	if newConfig.Jenkins.Timeout == 0 {
		newConfig.Jenkins.Timeout = 30
	}
	if newConfig.Worker.PoolSize == 0 {
		newConfig.Worker.PoolSize = 10
	}
	if newConfig.Worker.QueueSize == 0 {
		newConfig.Worker.QueueSize = 100
	}
	if newConfig.Worker.MaxRetries == 0 {
		newConfig.Worker.MaxRetries = 3
	}
	if newConfig.Worker.RetryBackoff == 0 {
		newConfig.Worker.RetryBackoff = 1
	}
	// Logging defaults, so omitted fields pass the oneof validation below
	if newConfig.Logging.Level == "" {
		newConfig.Logging.Level = "info"
	}
	if newConfig.Logging.Format == "" {
		newConfig.Logging.Format = "text"
	}

	// Handle legacy configuration format (where Projects is map[string]string)
	// This is to maintain backward compatibility with existing configs
	if len(newConfig.Gitea.Projects) == 0 {
		// Check if we're dealing with a legacy config
		var legacyConfig struct {
			Gitea struct {
				Projects map[string]string `yaml:"projects"`
			} `yaml:"gitea"`
		}

		// Reopen and reparse the file for legacy config
		f.Seek(0, 0)
		decoder = yaml.NewDecoder(f)
		if err := decoder.Decode(&legacyConfig); err == nil && len(legacyConfig.Gitea.Projects) > 0 {
			// Convert legacy config to new format
			newConfig.Gitea.Projects = make(map[string]ProjectConfig)
			for repo, jobName := range legacyConfig.Gitea.Projects {
				newConfig.Gitea.Projects[repo] = ProjectConfig{
					DefaultJob: jobName,
				}
			}
			logWarn("Using legacy configuration format. Consider updating to new format.")
		}
	}

	// Validate configuration
	if err := validate.Struct(newConfig); err != nil {
		return fmt.Errorf("invalid configuration: %v", err)
	}

	configMutex.Lock()
	config = newConfig
	configMutex.Unlock()

	return nil
}

func reloadConfig(file string) error {
	if err := loadConfig(file); err != nil {
		return err
	}

	// Update logger configuration
	setupLogger()

	configMutex.RLock()
	defer configMutex.RUnlock()

	// Update HTTP client timeout
	httpClient.Timeout = time.Duration(config.Jenkins.Timeout) * time.Second

	// If worker pool size has changed, reinitialize worker pool
	poolSize := workerPool.Cap()
	if poolSize != config.Worker.PoolSize {
		logger.Printf("Worker pool size changed from %d to %d, reinitializing",
			poolSize, config.Worker.PoolSize)

		// Must release the read lock before calling initWorkerPool, which acquires a write lock
		configMutex.RUnlock()
		initWorkerPool()
		configMutex.RLock()
	}

	// If queue size has changed, create a new channel and copy items
	if cap(jobQueue) != config.Worker.QueueSize {
		logger.Printf("Job queue size changed from %d to %d, recreating",
			cap(jobQueue), config.Worker.QueueSize)

		// Create new queue
		newQueue := make(chan jobRequest, config.Worker.QueueSize)

		// Close the current queue channel to stop accepting new items
		close(jobQueue)

		// Start a goroutine to drain the old queue and fill the new one
		go func(oldQueue, newQueue chan jobRequest) {
			for job := range oldQueue {
				newQueue <- job
			}

			configMutex.Lock()
			jobQueue = newQueue
			configMutex.Unlock()

			// Give the new queue a consumer: the previous processJobQueue
			// goroutine exits once the old (closed) queue is drained.
			go processJobQueue()
		}(jobQueue, newQueue)
	}

	logger.Printf("Configuration reloaded successfully")
	return nil
}
func initWorkerPool() {
	configMutex.Lock()
	defer configMutex.Unlock()

	// Release existing pool if any
	if workerPool != nil {
		workerPool.Release()
	}

	var err error
	workerPool, err = ants.NewPoolWithFunc(config.Worker.PoolSize, func(i interface{}) {
		job := i.(jobRequest)
		success := triggerJenkinsJob(job)

		configMutex.RLock()
		maxRetries := config.Worker.MaxRetries
		retryBackoff := config.Worker.RetryBackoff
		configMutex.RUnlock()

		// If job failed but we haven't reached max retries
		if !success && job.attempts < maxRetries {
			job.attempts++
			// Exponential backoff
			backoff := time.Duration(retryBackoff<<uint(job.attempts-1)) * time.Second
			time.Sleep(backoff)

			configMutex.RLock()
			select {
			case jobQueue <- job:
				logger.Printf("Retrying job %s (attempt %d/%d) after %v",
					job.jobName, job.attempts, maxRetries, backoff)
			default:
				logger.Printf("Failed to queue retry for job %s: queue full", job.jobName)
			}
			configMutex.RUnlock()
		}
	})

	if err != nil {
		logger.Fatalf("Failed to initialize worker pool: %v", err)
	}

	logger.Printf("Worker pool initialized with %d workers", config.Worker.PoolSize)

	// Start job queue processing
	go processJobQueue()
}

func processJobQueue() {
	for job := range jobQueue {
		err := workerPool.Invoke(job)
		if err != nil {
			logger.Printf("Failed to process job: %v", err)
		}
	}
}

func handleHealthCheck(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	configMutex.RLock()
	poolRunning := workerPool != nil
	runningWorkers := workerPool.Running()
	poolCap := workerPool.Cap()
	queueSize := len(jobQueue)
	queueCap := cap(jobQueue)
	configMutex.RUnlock()

	health := map[string]interface{}{
		"status": "UP",
		"time":   time.Now().Format(time.RFC3339),
		"workers": map[string]interface{}{
			"running":  poolRunning,
			"active":   runningWorkers,
			"capacity": poolCap,
		},
		"queue": map[string]interface{}{
			"size":     queueSize,
			"capacity": queueCap,
		},
	}

	json.NewEncoder(w).Encode(health)
}
func handleWebhook(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Verify signature if secret token is set
	configMutex.RLock()
	secretToken := config.Gitea.SecretToken
	secretHeader := config.Server.SecretHeader
	configMutex.RUnlock()

	if secretToken != "" {
		signature := r.Header.Get(secretHeader)
		if !verifySignature(r, signature, secretToken) {
			http.Error(w, "Invalid signature", http.StatusUnauthorized)
			logWarn("Invalid webhook signature received")
			return
		}
	}

	// Read and parse the webhook payload
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "Failed to read request body", http.StatusInternalServerError)
		logError("Failed to read webhook body: %v", err)
		return
	}
	r.Body.Close()

	var webhook GiteaWebhook
	if err := json.Unmarshal(body, &webhook); err != nil {
		http.Error(w, "Failed to parse webhook payload", http.StatusBadRequest)
		logError("Failed to parse webhook payload: %v", err)
		return
	}

	// Generate event ID for idempotency
	eventID := webhook.Repository.FullName + "-" + webhook.After

	// Check if we've already processed this event
	if _, exists := processedEvents.Load(eventID); exists {
		logInfo("Skipping already processed event: %s", eventID)
		w.WriteHeader(http.StatusOK)
		return
	}

	// Store in processed events with a TTL (a goroutine removes the entry after 1 hour)
	processedEvents.Store(eventID, true)
	go func(key string) {
		time.Sleep(1 * time.Hour)
		processedEvents.Delete(key)
	}(eventID)

	// Check if we have a Jenkins job mapping for this repository
	configMutex.RLock()
	projectConfig, exists := config.Gitea.Projects[webhook.Repository.FullName]
	configMutex.RUnlock()

	if !exists {
		logInfo("No Jenkins job mapping for repository: %s", webhook.Repository.FullName)
		w.WriteHeader(http.StatusOK) // Still return OK so Gitea does not flag the delivery
		return
	}

	// Extract branch name from ref
	branchName := strings.TrimPrefix(webhook.Ref, "refs/heads/")

	// Determine which job to trigger based on branch name
	jobName := determineJobName(projectConfig, branchName)
	if jobName == "" {
		logInfo("No job configured to trigger for repository %s, branch %s",
			webhook.Repository.FullName, branchName)
		w.WriteHeader(http.StatusOK)
		return
	}

	// Prepare parameters for Jenkins job
	params := map[string]string{
		"BRANCH_NAME":     branchName,
		"COMMIT_SHA":      webhook.After,
		"REPOSITORY_URL":  webhook.Repository.CloneURL,
		"REPOSITORY_NAME": webhook.Repository.FullName,
		"PUSHER_NAME":     webhook.Pusher.Login,
		"PUSHER_EMAIL":    webhook.Pusher.Email,
	}

	// Queue the job for processing
	configMutex.RLock()
	select {
	case jobQueue <- jobRequest{
		jobName:    jobName,
		parameters: params,
		eventID:    eventID,
		attempts:   0,
	}:
		logInfo("Webhook received and queued for repository %s, branch %s, commit %s, job %s",
			webhook.Repository.FullName, branchName, webhook.After, jobName)
	default:
		logWarn("Failed to queue webhook: queue full")
		http.Error(w, "Server busy, try again later", http.StatusServiceUnavailable)
		configMutex.RUnlock()
		return
	}
	configMutex.RUnlock()

	w.WriteHeader(http.StatusAccepted)
}

// determineJobName selects the appropriate Jenkins job to trigger based on branch name
func determineJobName(config ProjectConfig, branchName string) string {
	// First check for exact branch match
	if jobName, ok := config.BranchJobs[branchName]; ok {
		logDebug("Found exact branch match for %s: job %s", branchName, jobName)
		return jobName
	}

	// Then check for pattern-based matches
	for _, pattern := range config.BranchPatterns {
		matched, err := regexp.MatchString(pattern.Pattern, branchName)
		if err != nil {
			logError("Error matching branch pattern %s: %v", pattern.Pattern, err)
			continue
		}
		if matched {
			logDebug("Branch %s matched pattern %s: job %s", branchName, pattern.Pattern, pattern.Job)
			return pattern.Job
		}
	}

	// Fall back to default job if available
	if config.DefaultJob != "" {
		logDebug("Using default job for branch %s: job %s", branchName, config.DefaultJob)
		return config.DefaultJob
	}

	// No job found
	logDebug("No job configured for branch %s", branchName)
	return ""
}
func verifySignature(r *http.Request, signature string, secret string) bool {
	if signature == "" {
		return false
	}

	body, err := io.ReadAll(r.Body)
	if err != nil {
		return false
	}
	// Reset the body for subsequent reads
	r.Body = io.NopCloser(bytes.NewBuffer(body))

	// Gitea sends the hex-encoded HMAC-SHA256 of the request body; accept both
	// the bare hex digest and a "sha256=<hex>" prefixed form.
	sigHex := signature
	if parts := strings.SplitN(signature, "=", 2); len(parts) == 2 {
		sigHex = parts[1]
	}

	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(body)
	expectedMAC := mac.Sum(nil)
	receivedMAC, err := hex.DecodeString(sigHex)
	if err != nil {
		return false
	}

	return hmac.Equal(expectedMAC, receivedMAC)
}

func triggerJenkinsJob(job jobRequest) bool {
	configMutex.RLock()
	jenkinsURL := fmt.Sprintf("%s/job/%s/buildWithParameters",
		strings.TrimSuffix(config.Jenkins.URL, "/"),
		job.jobName)
	jenkinsUser := config.Jenkins.Username
	jenkinsToken := config.Jenkins.Token
	configMutex.RUnlock()

	req, err := http.NewRequest("POST", jenkinsURL, nil)
	if err != nil {
		logger.Printf("Error creating Jenkins request for job %s: %v", job.jobName, err)
		return false
	}

	// Add auth if credentials are provided
	if jenkinsUser != "" && jenkinsToken != "" {
		req.SetBasicAuth(jenkinsUser, jenkinsToken)
	}

	// Add parameters to URL query
	q := req.URL.Query()
	for key, value := range job.parameters {
		q.Add(key, value)
	}
	req.URL.RawQuery = q.Encode()

	// Execute request
	resp, err := httpClient.Do(req)
	if err != nil {
		logger.Printf("Error triggering Jenkins job %s: %v", job.jobName, err)
		return false
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		bodyBytes, _ := io.ReadAll(resp.Body)
		logger.Printf("Jenkins returned error for job %s: status=%d, body=%s",
			job.jobName, resp.StatusCode, string(bodyBytes))
		return false
	}

	logger.Printf("Successfully triggered Jenkins job %s for event %s",
		job.jobName, job.eventID)
	return true
}

// Custom JSON log writer
type jsonLogWriter struct {
	out io.Writer
}

func (w *jsonLogWriter) Write(p []byte) (n int, err error) {
	// Parse the log message
	message := string(p)

	// Create JSON structure
	entry := map[string]interface{}{
		"timestamp": time.Now().Format(time.RFC3339),
		"message":   strings.TrimSpace(message),
		"level":     "info", // Default level; the [LEVEL] prefix added by the helpers stays in the message
	}

	// Convert to JSON
	jsonData, err := json.Marshal(entry)
	if err != nil {
		return 0, err
	}

	// Write JSON with newline
	return w.out.Write(append(jsonData, '\n'))
}

// Level-based logging helpers
func logDebug(format string, v ...interface{}) {
	configMutex.RLock()
	level := config.Logging.Level
	configMutex.RUnlock()

	if level == "debug" {
		logger.Printf("[DEBUG] "+format, v...)
	}
}

func logInfo(format string, v ...interface{}) {
	configMutex.RLock()
	level := config.Logging.Level
	configMutex.RUnlock()

	if level == "debug" || level == "info" {
		logger.Printf("[INFO] "+format, v...)
	}
}

func logWarn(format string, v ...interface{}) {
	configMutex.RLock()
	level := config.Logging.Level
	configMutex.RUnlock()

	if level == "debug" || level == "info" || level == "warn" {
		logger.Printf("[WARN] "+format, v...)
	}
}

func logError(format string, v ...interface{}) {
	// Error-level logs are always emitted
	logger.Printf("[ERROR] "+format, v...)
}
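For reference, a sketch of a stand-alone client that signs a push payload the way verifySignature expects: X-Gitea-Signature carries the hex-encoded HMAC-SHA256 of the raw request body, keyed with gitea.secretToken. The endpoint, secret, and payload below are placeholders, not values from this commit:

package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
)

func main() {
	// Minimal push payload; only the fields handleWebhook reads are included.
	payload := []byte(`{"ref":"refs/heads/main","after":"abc123","repository":{"full_name":"owner/repo1"}}`)
	secret := "your-gitea-webhook-secret" // placeholder secret

	// Sign the raw body with HMAC-SHA256 and hex-encode the digest.
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write(payload)
	signature := hex.EncodeToString(mac.Sum(nil))

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/webhook", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Gitea-Signature", signature)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Expect 202 Accepted when the job was queued successfully.
	fmt.Println("status:", resp.Status)
}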
@@ -0,0 +1,39 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: gitea-webhook-ambassador-config
  namespace: freeleaps-devops-system
  labels:
    app: gitea-webhook-ambassador
data:
  config.yaml: |
    server:
      port: 8080
      webhookPath: "/webhook"
      secretHeader: "X-Gitea-Signature"

    jenkins:
      url: "http://jenkins.freeleaps-devops-system.svc.cluster.local:8080"
      username: "admin"
      token: "115127e693f1bc6b7194f58ff6d6283bd0"
      timeout: 30

    gitea:
      secretToken: "b510afe7b60acdb4261df0155117b7a2b5339cc9"
      projects:
        "freeleaps/freeleaps-service-hub":
          defaultJob: "freeleaps/alpha/freeleaps-service-hub"
          branchJobs:
            "master": "freeleaps/prod/freeleaps-service-hub"
            "dev": "freeleaps/alpha/freeleaps-service-hub"

    logging:
      level: "info"
      format: "json"
      file: ""

    worker:
      poolSize: 10
      queueSize: 100
      maxRetries: 3
      retryBackoff: 1
@@ -0,0 +1,100 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea-webhook-ambassador
  namespace: freeleaps-devops-system
  labels:
    app: gitea-webhook-ambassador
    component: ci-cd
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      app: gitea-webhook-ambassador
  template:
    metadata:
      labels:
        app: gitea-webhook-ambassador
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/path: "/metrics"
        prometheus.io/port: "8080"
    spec:
      containers:
        - name: gitea-webhook-ambassador
          image: freeleaps/gitea-webhook-ambassador:latest
          imagePullPolicy: Always
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          args:
            - "-config=/app/config/config.yaml"
          resources:
            requests:
              cpu: 50m
              memory: 64Mi
            limits:
              cpu: 200m
              memory: 256Mi
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 10
          volumeMounts:
            - name: config
              mountPath: /app/config
              readOnly: true
          securityContext:
            allowPrivilegeEscalation: false
            runAsNonRoot: true
            runAsUser: 1000
            runAsGroup: 1000
            capabilities:
              drop:
                - ALL
          env:
            - name: TZ
              value: "UTC"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: config
          configMap:
            name: gitea-webhook-ambassador-config
      securityContext:
        fsGroup: 1000
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - gitea-webhook-ambassador
                topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 30
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: gitea-webhook-ambassador
  namespace: freeleaps-devops-system
  labels:
    app: gitea-webhook-ambassador
spec:
  type: ClusterIP
  ports:
    - port: 8080
      targetPort: 8080
      protocol: TCP
      name: http
  selector:
    app: gitea-webhook-ambassador
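A sketch of an in-cluster check against the /health endpoint used by the probes above; the response shape mirrors handleHealthCheck in main.go, and the DNS name is assumed from the Service name and namespace above:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// healthStatus mirrors the JSON written by handleHealthCheck.
type healthStatus struct {
	Status  string `json:"status"`
	Workers struct {
		Active   int `json:"active"`
		Capacity int `json:"capacity"`
	} `json:"workers"`
	Queue struct {
		Size     int `json:"size"`
		Capacity int `json:"capacity"`
	} `json:"queue"`
}

func main() {
	// Assumed in-cluster address derived from the Service and namespace above.
	const url = "http://gitea-webhook-ambassador.freeleaps-devops-system.svc:8080/health"

	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var h healthStatus
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		panic(err)
	}
	fmt.Printf("status=%s workers=%d/%d queue=%d/%d\n",
		h.Status, h.Workers.Active, h.Workers.Capacity, h.Queue.Size, h.Queue.Capacity)
}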
@@ -545,6 +545,8 @@ gitea:
  additionalConfigFromEnvs:
    - name: GITEA__SERVICE__DISABLE_REGISTRATION
      value: "true"
    - name: GITEA__WEBHOOK__ALLOWED_HOST_LIST
      value: "gitea-webhook-ambassador.freeleaps-devops-system.svc.freeleaps.cluster"

  ## @param gitea.podAnnotations Annotations for the Gitea pod
  podAnnotations: {}