feat: rewrite scripting for handling runner processes and config

Marc 2024-11-05 13:37:26 -05:00
parent cbed0c2116
commit e9e38d856e
Signed by: marc
GPG key ID: 048E042F22B5DC79
7 changed files with 235 additions and 24 deletions

.gitignore vendored Normal file

@@ -0,0 +1 @@
runner


@@ -9,5 +9,5 @@ Using [spud](https://forge.karnov.club/spadinastan/spud), just `spud start ./ser
### Starting runners
```
-FORGEJO_RUNNER_ROOT=... ./start-action-runner.sh
+./runners.py start
```
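
The `runners.py` script added below expects to be run from the repository root (it looks for `metadata.json` there) and honours a `RUNNER_ROOT` environment variable for where the runner binary and per-runner state directories go, defaulting to the current directory. Runners have to be registered once before they can be started; the path used here is only an example:

```
RUNNER_ROOT=/srv/runners ./runners.py register
RUNNER_ROOT=/srv/runners ./runners.py start
```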


@@ -0,0 +1,86 @@
log:
  level: info

runner:
  file: .runner
  # How many tasks to execute concurrently.
  capacity: 1
  # Extra environment variables to run jobs with.
  envs:
    CI: 1
  # Extra environment variables to run jobs with, read from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Forgejo instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Forgejo instance if its timeout is shorter than this.
  timeout: 10m
  # Whether to skip verifying the TLS certificate of the Forgejo instance.
  insecure: false
  # The timeout for fetching the job from the Forgejo instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Forgejo instance.
  fetch_interval: 2s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
  # If it's empty when registering, you will be asked to input labels.
  # If it's empty when executing `daemon`, the labels in the `.runner` file will be used.
  labels: [
    "imagefactory-latest:docker://localhost:5000/forge-runners/debian-12.6-slim:latest"
  ]

cache:
  # Enable the cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not the address to listen on, but the address for job containers to connect to.
  # So 0.0.0.0 is a bad choice; leave it empty to detect it automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when `enabled` is true.
  # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
  # The URL should generally end with "/".
  external_server: ""

container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, create a network automatically.
  network: ""
  # Whether to create networks with IPv6 enabled. Requires the Docker daemon to be set up accordingly.
  # Only takes effect if "network" is set to "".
  enable_ipv6: false
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: true
  # And other options to be used when the container is started (eg, --add-host=my.forgejo.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # Overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""
  # Pull docker image(s) even if already present.
  force_pull: false

host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:
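
Because everything under `runner`, `cache` and `container` is nesting-sensitive, a quick sanity check before (re)starting a runner can catch mis-indented keys. A minimal sketch, assuming PyYAML is installed and the file is saved at the path referenced by `metadata.json` below:

```python
# Minimal sanity check for a runner config file (sketch, not part of the commit).
# Assumes PyYAML is installed; the path matches the entry in metadata.json.
import pathlib
import yaml

config = yaml.safe_load(
    pathlib.Path("config/imagefactory_runner.yml").read_text(encoding="utf8")
)

# capacity should be a positive integer.
assert config["runner"]["capacity"] >= 1

# Labels follow the "<name>:<scheme>://<image>" shape shown above; list them for review.
for label in config["runner"]["labels"]:
    name, _, target = label.partition(":")
    print(f"{name} -> {target}")
```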

metadata.json Normal file

@@ -0,0 +1,13 @@
{
  "runner_version": "4.0.1",
  "instances": [
    {
      "config": "./config/regular_runner.yml",
      "name": "regular"
    },
    {
      "config": "./config/imagefactory_runner.yml",
      "name": "imagefactory"
    }
  ]
}
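
Each entry in `instances` maps onto the `RunnerInstance` TypedDict declared in `runners.py` below, and its `name` becomes the `runner__<name>` state directory. A quick way to check a new entry before registering it, purely as an illustration:

```python
# Illustrative check of metadata.json, run from the repository root.
import json
import pathlib

metadata = json.loads(pathlib.Path("metadata.json").read_text(encoding="utf8"))

print("runner version:", metadata["runner_version"])
for instance in metadata["instances"]:
    config = pathlib.Path(instance["config"])
    print(f"{instance['name']}: {config} (exists: {config.exists()})")
```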

runners.py Executable file

@@ -0,0 +1,134 @@
#!/usr/bin/env python3
from urllib.request import urlretrieve
import pathlib
import stat
import logging
import math
import subprocess
import sys
import json
import typing
import multiprocessing
import os

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class RunnerInstance(typing.TypedDict):
    config: str
    name: str


class Metadata(typing.TypedDict):
    runner_version: str
    instances: list[RunnerInstance]
def get_runner_url(version: str) -> str:
    """Returns a formatted URL pointing to a specific runner release."""
    return f"https://code.forgejo.org/forgejo/runner/releases/download/v{version}/forgejo-runner-{version}-linux-amd64"


def get_runner_runtime(runner_root: pathlib.Path, version: str) -> pathlib.Path:
    """
    Downloads the runner binary of the selected version if it doesn't exist already, makes it
    executable and returns its path.
    """
    runner_bin_path = runner_root.joinpath(f"runner_{version}")

    def report(chunk, chunk_size, total_size):
        total_chunks = "unknown"

        if total_size != -1:
            total_chunks = math.ceil(total_size / chunk_size)

        if not chunk % 100 or chunk == total_chunks:
            logger.info(f"Chunk {chunk} / {total_chunks} downloaded.")

    source_url = get_runner_url(version)

    if not runner_bin_path.exists():
        urlretrieve(source_url, runner_bin_path, reporthook=report)

    if not runner_bin_path.exists():
        raise RuntimeError(f"Failed to download runner from {source_url}.")

    runner_bin_path.chmod(runner_bin_path.stat().st_mode | stat.S_IEXEC)

    return runner_bin_path


def get_runners_metadata(metadata_path: pathlib.Path) -> Metadata:
    if not metadata_path.exists():
        raise RuntimeError("Failed to open metadata file.")

    with open(metadata_path, "r", encoding="utf8") as metadata_file:
        metadata = json.load(metadata_file)

    return metadata
def start_runner(runner_path: pathlib.Path, meta: RunnerInstance):
    """Starts a single runner daemon in its own state directory."""
    if not pathlib.Path(meta["config"]).exists():
        raise RuntimeError(f"Runner config path does not exist: {meta['config']}")

    runner_root = runner_path.parent.joinpath(f"runner__{meta['name']}")
    runner_root.mkdir(parents=True, exist_ok=True)

    process = subprocess.Popen(
        [
            runner_path.resolve(),
            "--config",
            pathlib.Path(meta["config"]).resolve(),
            "daemon",
        ],
        cwd=runner_root,
    )

    # Popen does not wait for the daemon; returncode is only set if it already exited.
    if process.poll() is not None and process.returncode != 0:
        raise RuntimeError(f"Failed to start runner {meta['name']}.")


def start_runners(instances: list[RunnerInstance], runner_path: pathlib.Path):
    """Starts a runner daemon for each of the given instance configurations."""
    if not runner_path.exists():
        raise RuntimeError(f"Runner path does not exist: {runner_path}")

    with multiprocessing.Pool(2) as p:
        p.starmap(
            start_runner,
            [(runner_path, runner_metadata) for runner_metadata in instances],
        )


def register_runners(instances: list[RunnerInstance], runner_bin_path: pathlib.Path):
    """Runs the interactive registration step for each instance in its own state directory."""
    for runner_metadata in instances:
        runner_path = runner_bin_path.parent.joinpath(f"runner__{runner_metadata['name']}")
        runner_path.mkdir(exist_ok=True, parents=True)

        result = subprocess.run([runner_bin_path, "register"], cwd=runner_path)

        if result.returncode != 0:
            raise RuntimeError("Failed to register runner.")
if __name__ == "__main__":
    argcount = len(sys.argv)

    if argcount < 2:
        raise RuntimeError("Insufficient arguments.")

    runner_root = pathlib.Path(os.getenv("RUNNER_ROOT", pathlib.Path.cwd()))
    runner_root.mkdir(exist_ok=True, parents=True)

    metadata = get_runners_metadata(pathlib.Path.cwd().joinpath("metadata.json"))

    cmd = sys.argv[1]
    bin_path = get_runner_runtime(runner_root, metadata["runner_version"])

    if cmd == "register":
        register_runners(metadata["instances"], bin_path)
    elif cmd == "start":
        start_runners(metadata["instances"], bin_path)
    else:
        raise RuntimeError(f"Unknown command: {cmd}")
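
One behavioural note on `start_runner`: `subprocess.Popen` returns as soon as the daemon is spawned, so `./runners.py start` launches both runners and exits, leaving them running in the background. If a blocking, supervised mode were ever wanted instead, a hypothetical variant (reusing the imports, logger and `RunnerInstance` type from `runners.py` above) could wait on each daemon and surface its exit code:

```python
# Hypothetical supervised variant, not part of the commit: block until the
# daemon exits and report a non-zero exit code. Reuses names from runners.py.
def run_runner_blocking(runner_path: pathlib.Path, meta: RunnerInstance) -> int:
    runner_root = runner_path.parent.joinpath(f"runner__{meta['name']}")
    runner_root.mkdir(parents=True, exist_ok=True)

    process = subprocess.Popen(
        [
            runner_path.resolve(),
            "--config",
            pathlib.Path(meta["config"]).resolve(),
            "daemon",
        ],
        cwd=runner_root,
    )

    returncode = process.wait()  # blocks until the daemon stops
    if returncode != 0:
        logger.error(f"Runner {meta['name']} exited with code {returncode}.")

    return returncode
```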


@@ -1,23 +0,0 @@
#!/usr/bin/bash

RUNNER_VERSION=4.0.1

wget -O "$FORGEJO_RUNNER_ROOT"/forgejo-runner https://code.forgejo.org/forgejo/runner/releases/download/v$RUNNER_VERSION/forgejo-runner-$RUNNER_VERSION-linux-amd64
chmod +x "$FORGEJO_RUNNER_ROOT"/forgejo-runner

wget -O forgejo-runner.asc https://code.forgejo.org/forgejo/runner/releases/download/v$RUNNER_VERSION/forgejo-runner-$RUNNER_VERSION-linux-amd64.asc
gpg --keyserver keys.openpgp.org --recv EB114F5E6C0DC2BCDD183550A4B61A2DC5923710
gpg --verify forgejo-runner.asc "$FORGEJO_RUNNER_ROOT"/forgejo-runner
rm forgejo-runner.asc

if [ -f "$FORGEJO_RUNNER_ROOT/.runner" ]; then
    echo "Found runner state."
    (
        cp ./config.yml "$FORGEJO_RUNNER_ROOT/config.yml"
        cd "$FORGEJO_RUNNER_ROOT" || exit
        "$FORGEJO_RUNNER_ROOT"/forgejo-runner --config "${FORGEJO_RUNNER_CONFIG:=./config.yml}" daemon &
    )
else
    echo "Use $FORGEJO_RUNNER_ROOT/forgejo-runner register ... to register the runner"
    echo "Once registered, use $FORGEJO_RUNNER_ROOT/forgejo-runner --config <path-to-config> daemon & to start"
fi
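
The deleted script also verified the downloaded binary against the Forgejo release signing key, a step `get_runner_runtime` in `runners.py` does not carry over. If that check is worth keeping, here is a sketch of how it could be added, assuming the `gpg` CLI is available and reusing the key ID, keyserver and `.asc` URL convention from the script above:

```python
# Sketch only: fetch the detached signature for the runner binary and verify it
# with the gpg CLI. Key ID and keyserver are taken from the old shell script.
import pathlib
import subprocess
from urllib.request import urlretrieve

FORGEJO_RELEASE_KEY = "EB114F5E6C0DC2BCDD183550A4B61A2DC5923710"


def verify_runner(runner_bin_path: pathlib.Path, source_url: str) -> None:
    sig_path = runner_bin_path.with_name(runner_bin_path.name + ".asc")
    urlretrieve(f"{source_url}.asc", sig_path)

    subprocess.run(
        ["gpg", "--keyserver", "keys.openpgp.org", "--recv", FORGEJO_RELEASE_KEY],
        check=True,
    )
    # gpg --verify exits non-zero if the signature does not match the binary.
    subprocess.run(["gpg", "--verify", sig_path, runner_bin_path], check=True)
    sig_path.unlink()
```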