Compare commits
30 Commits
31 changed files with 1867 additions and 617 deletions
@ -0,0 +1,11 @@ |
|||
# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.231.6/containers/ubuntu/.devcontainer/base.Dockerfile |
|||
|
|||
# [Choice] Ubuntu version (use hirsute or bionic on local arm64/Apple Silicon): hirsute, focal, bionic |
|||
ARG VARIANT="hirsute" |
|||
FROM mcr.microsoft.com/vscode/devcontainers/base:0-${VARIANT} |
|||
|
|||
# [Optional] Uncomment this section to install additional OS packages. |
|||
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ |
|||
# && apt-get -y install --no-install-recommends <your-package-list-here> |
|||
|
|||
|
@ -0,0 +1,32 @@ |
|||
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: |
|||
// https://github.com/microsoft/vscode-dev-containers/tree/v0.231.6/containers/ubuntu |
|||
{ |
|||
"name": "Ubuntu", |
|||
"build": { |
|||
"dockerfile": "Dockerfile", |
|||
// Update 'VARIANT' to pick an Ubuntu version: hirsute, focal, bionic |
|||
// Use hirsute or bionic on local arm64/Apple Silicon. |
|||
"args": { "VARIANT": "focal" } |
|||
}, |
|||
|
|||
// Set *default* container specific settings.json values on container create. |
|||
"settings": {}, |
|||
|
|||
|
|||
// Add the IDs of extensions you want installed when the container is created. |
|||
"extensions": [], |
|||
|
|||
// Use 'forwardPorts' to make a list of ports inside the container available locally. |
|||
// "forwardPorts": [], |
|||
|
|||
// Use 'postCreateCommand' to run commands after the container is created. |
|||
// "postCreateCommand": "uname -a", |
|||
|
|||
// Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. |
|||
"remoteUser": "vscode", |
|||
"features": { |
|||
"docker-from-docker": "20.10", |
|||
"git": "latest", |
|||
"python": "3.10" |
|||
} |
|||
} |
@ -1,5 +1,9 @@ |
|||
# flake8: noqa F401
"""Public package surface for alice — re-exports the main entry points."""
from .cli import App
from .jobparser import Job, JobParser
from .configparser import ConfigParser
from .pythonrunner import PythonRunner
from .runnerfactory import Factory
# NonZeroRetcode was imported twice in the original; one import suffices.
from .exceptions import NonZeroRetcode
from .exceptions import RunnerError
from .exceptions import ConfigException

name = "alice"
@ -1,3 +1,4 @@ |
|||
"""Module entry point: `python -m alice` dispatches to the CLI."""
# The stale absolute `from cli import main` (pre-package layout) is dropped;
# only the package-relative import is correct when run as a module.
from .cli import main


if __name__ == '__main__':
    main()
|||
|
@ -0,0 +1,51 @@ |
|||
import logging |
|||
import os |
|||
|
|||
from .exceptions import ConfigException |
|||
|
|||
|
|||
class ConfigHolder:
    """Singleton key=value store persisted to .alice/vars in the workdir."""

    __instance = None
    # Location of the persisted variable store (fixed at class-creation time)
    file_name = os.path.join(os.getcwd(), ".alice", "vars")

    @staticmethod
    def getInstance():
        """Static access method: create the singleton on first use."""
        if ConfigHolder.__instance is None:
            ConfigHolder()
        return ConfigHolder.__instance

    def __init__(self):
        """Virtually private constructor; loads .alice/vars when present."""
        if ConfigHolder.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            ConfigHolder.__instance = self
            config = os.path.abspath(os.path.join(os.getcwd(), self.file_name))
            self.vars = {}
            if os.path.isfile(config):
                with open(config) as f:
                    for _line in f:
                        line = _line.strip()
                        # Split on the FIRST '=' only. The original used
                        # line.replace(f"{key}=", ""), which also stripped any
                        # repeated "key=" occurrences inside the value.
                        items = line.split("=", 1)
                        if len(items) > 1:
                            self.vars[items[0]] = items[1]
            logging.debug(f"Loaded from {self.file_name}: {self.vars}")

    def get(self, key):
        """Return the value for *key*; raises ConfigException when undefined."""
        try:
            return self.vars[key]
        except KeyError:
            raise ConfigException(f"{key} not defined in .conf!")

    def set(self, key, value):
        """Set a value and persist immediately."""
        self.vars[key] = value
        self.commit()

    def soft_set(self, key, value):
        """Set a value in memory only; call commit() to persist."""
        self.vars[key] = value

    def commit(self):
        """Rewrite the backing file from the in-memory dict."""
        with open(self.file_name, 'w') as f:
            for k, v in self.vars.items():
                f.write(f"{k}={v if v is not None else ''}\n")
@ -0,0 +1,101 @@ |
|||
import logging |
|||
from os import getcwd, path, environ |
|||
import subprocess |
|||
import yaml |
|||
|
|||
from .exceptions import ConfigException |
|||
from .runnerfactory import Factory |
|||
|
|||
|
|||
# Parses the project's YAML CI config and executes jobs / pipelines from it.
class ConfigParser:
    def __init__(self, file_path, cli_env_vars) -> None:
        """Load the YAML config and prepare the runner factory.

        file_path: path to the YAML config file.
        cli_env_vars: dict of env-var overrides passed on the command line.
        """
        with open(file_path) as f:
            self.config = yaml.safe_load(f)
        self.factory = Factory(self.__gen_globals(cli_env_vars), self.config.get("runners", {}))
        self.jobs = self.__get_jobs()
        # Pipelines map a name to an ordered list of task names
        self.pipelines = self.config.get("pipelines", {})

    # Initialize env and workdir if not present in global
    def __gen_globals(self, cli_vars):
        # Precedence (last wins): process env < CLI vars < runners.global.env
        env_vars = environ.copy()
        env_vars.update(cli_vars)
        globals = {
            "env": env_vars,
            "workdir": getcwd()
        }
        if "runners" in self.config:
            if "global" in self.config["runners"]:
                if "env" in self.config["runners"]["global"]:
                    # each entry is a {"name": ..., "value": ...} mapping
                    for var in self.config["runners"]["global"]["env"]:
                        globals["env"][var["name"]] = var["value"]
                if "workdir" in self.config["runners"]["global"]:
                    globals["workdir"] = self.config["runners"]["global"]["workdir"]

        logging.debug(f"[Alice] Configured globals: {globals}")
        return globals

    def __get_jobs(self):
        """Index the config's "jobs" list by name; duplicates are an error."""
        if "jobs" in self.config:
            jobs = {}
            for job_spec in self.config["jobs"]:
                name = job_spec["name"]
                if name in jobs:
                    raise ConfigException(f"Job with name {name} already exists!")

                jobs[name] = job_spec
            logging.info(f"[Alice] Parsed jobs: {', '.join(jobs.keys())}")
            return jobs
        else:
            raise ConfigException("No jobs defined in config")

    def __is_changed(self, changes):
        """Return True when `git diff --name-only <branch>` touches any of
        changes["paths"]; raises ConfigException on a malformed spec."""
        try:
            target = changes["branch"]
            paths = []
            for _path in changes["paths"]:
                paths.append(path.abspath(_path))
            # TODO: Error handling
            # NOTE(review): wait() before draining a PIPE can deadlock on very
            # large diffs — consider communicate(); confirm before relying on it.
            command = ["git", "diff", "--name-only", target]
            with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
                p.wait()
                for line in p.stdout:
                    change_path = path.abspath(line.decode("UTF-8").strip())
                    for _path in paths:
                        spec_path = path.abspath(_path)
                        # prefix match: any change under a watched path counts
                        if change_path.startswith(spec_path):
                            logging.info(f"[Alice] Modified file: {change_path}")
                            logging.info(f"[Alice] Path match: {_path}")
                            return True
        except KeyError:
            raise ConfigException(f"Invalid 'changes' config: {changes}")
        return False

    def execute(self, task_name):
        """Run a job or pipeline by name; unknown names raise ConfigException."""
        if task_name in self.jobs:
            self.execute_job(task_name)
        elif task_name in self.pipelines:
            self.execute_pipeline(task_name)
        else:
            raise ConfigException(f"No such job or pipeline: {task_name}")

    def execute_pipeline(self, pipeline_name):
        # Silently a no-op for unknown names; execute() performs the check
        if pipeline_name in self.pipelines:
            print(f"[Alice][Pipeline] {pipeline_name}: Start")
            for task in self.pipelines[pipeline_name]:
                self.execute(task)
            print(f"[Alice][Pipeline] {pipeline_name}: Success")

    def execute_job(self, job_name):
        if job_name in self.jobs:
            print(f"[Alice][Job] {job_name}: Start")
            job_spec = self.jobs[job_name]
            should_run = True
            # Jobs with a "changes" clause only run when the git diff matches
            if "changes" in job_spec:
                should_run = self.__is_changed(job_spec["changes"])
            if should_run:
                runner = self.factory.get_runner(job_spec["type"])
                runner.run(job_spec)
                status = "SUCCESS"
            else:
                status = "SKIP, no change detected"
            print(f"[Alice][Job] {job_name}: {status}")
@ -1,34 +1,57 @@ |
|||
from runners.pythonrunner import PythonRunner |
|||
from os import getcwd |
|||
import logging |
|||
from os.path import join, abspath |
|||
|
|||
from .runners.pythonrunner import PythonRunner |
|||
from .runners.pypirunner import PyPiRunner |
|||
from .runners.dockerrunner import DockerRunner |
|||
from .runners.pypirepo import PypiRepoRunner |
|||
from .exceptions import ConfigException |
|||
|
|||
|
|||
class Factory():
    """Creates and caches one runner instance per runner type, each seeded
    with the global config merged with its runner-specific overrides."""

    def __init__(self, globals, runner_configs) -> None:
        """globals: {"env": {...}, "workdir": ...}; runner_configs: the
        "runners" section of the YAML config (may contain a "global" key)."""
        self.globals = globals
        self.runner_configs = {}
        self.runnertypes = {}
        self.runners = {}
        self.__load_runners()
        self.__gen_runner_configs(runner_configs)

    def __load_runners(self):
        # TODO: Runners can be imported via cli too
        # https://git.gyulai.cloud/gyulaid/alice/issues/4
        # module = __import__("module_file")
        # my_class = getattr(module, "class_name")
        self.runnertypes = {"python": PythonRunner,
                            "pypi": PyPiRunner,
                            "docker": DockerRunner,
                            "pypirepo": PypiRepoRunner}
        logging.info(f"[Alice] Available runners: {'|'.join(self.runnertypes.keys())}")

    def __gen_runner_configs(self, runner_configs):
        """Merge each runner's section onto a copy of the globals."""
        for runnertype, runnerconfig in runner_configs.items():
            if runnertype != "global":
                logging.info(f"[Alice] Global config found for runner {runnertype}")
                # Copy "env" separately: a plain shallow copy shared the env
                # dict, so one runner's env vars leaked into the globals and
                # into every other runner's config.
                merged = self.globals.copy()
                merged["env"] = self.globals.get("env", {}).copy()
                for key, value in runnerconfig.items():
                    if key == "env":
                        for env_var in value:
                            merged["env"][env_var["name"]] = env_var["value"]
                    elif key == "workdir":
                        # Runner workdir is resolved relative to the global one
                        merged["workdir"] = abspath(join(merged["workdir"], value))
                    else:
                        merged[key] = value
                self.runner_configs[runnertype] = merged
                logging.debug(f"[Alice] Globals for {runnertype}: {runnerconfig}")

    def get_runner(self, runnertype):
        """Return the cached runner for *runnertype*, instantiating it lazily;
        raises ConfigException for unknown types."""
        if runnertype not in self.runners:
            if runnertype in self.runnertypes:
                logging.info(f"[Alice] Initializing runner: {runnertype}")
                # If there is a runner specific config, use that, else global
                config = self.runner_configs.get(runnertype, self.globals.copy())
                self.runners[runnertype] = self.runnertypes[runnertype](config)
            else:
                raise ConfigException(f"Invalid runner type: {runnertype}")
        return self.runners[runnertype]
|||
|
@ -0,0 +1,4 @@ |
|||
# flake8: noqa F401 |
|||
from .pythonrunner import PythonRunner |
|||
from .pypirunner import PyPiRunner |
|||
from .dockerrunner import DockerRunner |
@ -1 +1,241 @@ |
|||
# TODO Implement |
|||
from enum import Enum |
|||
import json |
|||
import logging |
|||
from os import path, getcwd |
|||
import docker |
|||
|
|||
from .pyutils import grab_from, gen_dict |
|||
from ..exceptions import ConfigException, NonZeroRetcode, RunnerError |
|||
|
|||
|
|||
# Where a job's container image comes from: no provider configured,
# built locally from a Dockerfile, or pulled from a registry.
class ImageSource(Enum):
    NONE = 1
    BUILD = 2
    PULL = 3
|||
|
|||
|
|||
def get_user(config, default):
    """Return the username from config["credentials"], or *default*.

    A plain string is used verbatim; any other value is resolved via grab_from.
    """
    credentials = config.get("credentials", {})
    if "username" not in credentials:
        return default
    data = credentials["username"]
    return data if isinstance(data, str) else grab_from(data)
|||
|
|||
|
|||
def get_pass(config, default):
    """Return the password from config["credentials"], or *default*.

    A plain string is used verbatim; any other value is resolved via grab_from.
    """
    credentials = config.get("credentials", {})
    if "password" not in credentials:
        return default
    data = credentials["password"]
    return data if isinstance(data, str) else grab_from(data)
|||
|
|||
|
|||
def get_provider(config, default, default_type):
    """Resolve the image provider (Builder or Puller) for a job spec.

    Merges the job's "image" section onto *default* (the inherited provider)
    when the kinds match, otherwise creates a fresh provider. Returns the
    (provider, ImageSource) pair unchanged when no "image" key is present.
    Raises ConfigException when both "build" and "pull" are configured.
    """
    if "image" not in config:
        return default, default_type

    image = config["image"]
    # Validate up front: the original's elif-chain made this condition
    # unreachable, so the conflict was never reported.
    if "build" in image and "pull" in image:
        raise ConfigException("[DockerRunner] Can't build and pull the same image!")

    if "build" in image:
        if default_type == ImageSource.BUILD:
            return default.copy(image["build"]), ImageSource.BUILD
        return Builder(image["build"]), ImageSource.BUILD
    if "pull" in image:
        if default_type == ImageSource.PULL:
            return default.copy(image["pull"]), ImageSource.PULL
        return Puller(image["pull"]), ImageSource.PULL

    # "image" present but with neither key: keep the inherited provider
    # (the original raised UnboundLocalError on `candidate` here).
    return default, default_type
|||
|
|||
|
|||
class Tagger:
    """Optionally tags a built image and pushes it to a registry."""

    def __init__(self, config={}) -> None:
        # Target tag, e.g. "repo/name:tag"; None disables tagging entirely
        self.name = config.get("name", None)
        self.username = get_user(config, None)
        self.password = get_pass(config, None)
        # When true, the tagged image is pushed after tagging
        self.publish = config.get("publish", False)

    def copy(self, job_config):
        """Return a new Tagger with job-level overrides applied on top of self."""
        t = Tagger()
        t.name = job_config.get("name", self.name)
        t.username = get_user(job_config, self.username)
        t.password = get_pass(job_config, self.password)
        t.publish = job_config.get("publish", self.publish)
        return t

    def __str__(self) -> str:
        # repr-style dump of the effective config (includes credentials!)
        data = {
            "name": self.name,
            "publish": self.publish,
            "credentials": {
                "username": self.username,
                "password": self.password
            }
        }
        return f"{data}"

    def handle(self, client, image):
        """Tag *image* as self.name (unless already tagged) and push when
        publish is set. No-op when no tag name is configured."""
        if self.name is not None:
            if self.name not in image.tags and f"{self.name}:latest" not in image.tags:
                print(f"[DockerRunner] Tagging {image.tags[0]} as {self.name}")
                image.tag(self.name)
            if self.publish:
                print(f"[DockerRunner] Pushing {self.name}")
                client.push(self.name)
|||
|
|||
|
|||
class Builder():
    """Builds a Docker image from a local build context."""

    def __init__(self, config) -> None:
        # Build context directory; defaults to the current workdir
        self.dir = path.abspath(config.get("dir", getcwd()))
        self.dockerfile = config.get("dockerfile", None)
        self.name = config.get("name", None)
        # Build args as a {name: value} dict
        self.args = gen_dict(config.get("args", []))

    def copy(self, job_config):
        """Return a new Builder with job-level overrides applied."""
        b = Builder({})
        b.dir = path.abspath(path.join(self.dir, job_config.get("dir", ".")))
        b.dockerfile = job_config.get("dockerfile", self.dockerfile)
        b.name = job_config.get("name", self.name)
        # dict.update() returns None, so the original one-liner
        # `self.args.copy().update(...)` set b.args to None; merge explicitly.
        merged_args = self.args.copy()
        merged_args.update(gen_dict(job_config.get("args", [])))
        b.args = merged_args
        return b

    def __str__(self) -> str:
        data = {
            "type": "builder",
            "dir": self.dir,
            "dockerfile": self.dockerfile,
            "name": self.name,
            "args": self.args
        }
        return json.dumps(data)

    def prepare(self, client):
        """Build the image and return it; raises RunnerError on failure."""
        print(f"[DockerRunner] Building image {self.name}")
        if self.dockerfile is None:
            self.dockerfile = "Dockerfile"
        try:
            image, log = client.images.build(path=self.dir,
                                             dockerfile=self.dockerfile,
                                             tag=self.name,
                                             buildargs=self.args,
                                             labels={"builder": "alice-ci"})
            for i in log:
                logging.debug(i)
            return image
        except docker.errors.BuildError as e:
            raise RunnerError(f"[DockerRunner] Build failed: {e}")
        except docker.errors.APIError as e:
            raise RunnerError(f"[DockerRunner] Error: {e}")
|||
|
|||
|
|||
class Puller():
    """Pulls an existing Docker image from a registry."""

    def __init__(self, config={}) -> None:
        self.name = config.get("name", None)
        self.username = get_user(config, None)
        self.password = get_pass(config, None)

    def copy(self, job_config={}):
        """Return a new Puller with job-level overrides applied."""
        p = Puller()
        p.name = job_config.get("name", self.name)
        p.username = get_user(job_config, self.username)
        p.password = get_pass(job_config, self.password)
        # The original omitted this return, so copy() always yielded None
        return p

    def __str__(self) -> str:
        data = {
            "name": self.name,
            "credentials": {
                "username": self.username,
                "password": self.password
            }
        }
        return f"{data}"

    def prepare(self, client):
        """Pull and return the configured image."""
        print(f"[DockerRunner] Pulling image {self.name}")
        return client.images.pull(self.name)
|||
|
|||
|
|||
class DockerConfig:
    """Effective configuration for one docker job: image provider, tagger,
    commands to execute, and the container environment."""

    def __init__(self, config={}) -> None:
        self.username = get_user(config, None)
        self.password = get_pass(config, None)
        self.image_provider, self.provider_type = get_provider(config, None, ImageSource.NONE)
        self.tagger = Tagger(config.get("tag", {}))
        self.commands = config.get("commands", [])
        self.env = config.get("env", {})

    def copy(self, job_config={}):
        """Return a new DockerConfig with job-level overrides merged in."""
        d = DockerConfig()
        d.username = get_user(job_config, self.username)
        d.password = get_pass(job_config, self.password)
        d.image_provider, d.provider_type = get_provider(job_config, self.image_provider, self.provider_type)
        d.tagger = self.tagger.copy(job_config.get("tag", {}))
        # Job commands run after the globally configured ones
        d.commands = self.commands.copy() + job_config.get("commands", [])
        d.env = self.env.copy()
        d.env.update(gen_dict(job_config.get("env", [])))
        return d

    def __str__(self) -> str:
        # The original wrote {self.username} / {self.password} — set literals —
        # so credentials rendered as {'user'}; use the plain values.
        data = {
            "credentials": {
                "username": self.username,
                "password": self.password
            },
            "image": self.image_provider.__str__(),
            "commands": self.commands,
            "tag": self.tagger.__str__()
        }
        return f"{data}"
|||
|
|||
|
|||
class DockerRunner():
    """Runs a job's shell commands inside a Docker container whose image is
    built or pulled by the configured image provider."""

    def __init__(self, config) -> None:
        logging.info("[DockerRunner] Initializing")
        # Global defaults; merged with each job's spec in run()
        self.config = DockerConfig(config)
        self.client = docker.from_env()

    def run(self, job_spec):
        """Execute one job: prepare the image, run its commands, then tag/push."""
        job_config = self.config.copy(job_spec)
        logging.debug(f"[DockerRunner] Job config: {job_config.__str__()}")
        if job_config.image_provider is None:
            raise RunnerError("[DockerRunner] No image provider configured!")
        image = job_config.image_provider.prepare(self.client)
        logging.info(f"[DockerRunner] Image: {image.tags} ({image.id})")

        if len(job_config.commands) > 0:
            # Don't leak the host's PATH into the container environment
            if "PATH" in job_config.env:
                del job_config.env["PATH"]
            # Keep a long-lived container so all commands share one filesystem
            container = self.client.containers.run(image=image.id,
                                                   entrypoint=["sleep", "infinity"],
                                                   detach=True,
                                                   auto_remove=True)
            try:
                for i in job_config.commands:
                    command = ["/bin/sh", "-c", i]
                    logging.debug(f"[DockerRunner] Command array: {command}")
                    code, output = container.exec_run(cmd=command,
                                                      environment=job_config.env)
                    for line in output.decode("UTF-8").splitlines():
                        print(f"[{job_spec['name']}] {line}")
                    # First failing command aborts the job
                    if code != 0:
                        raise NonZeroRetcode(f"Command {i} returned code {code}")
            finally:
                # auto_remove=True: stopping also deletes the container
                if container is not None:
                    container.stop()

        job_config.tagger.handle(self.client, image)
|||
|
@ -0,0 +1,168 @@ |
|||
import logging |
|||
import subprocess |
|||
import docker |
|||
from os.path import join, isdir |
|||
from os import getcwd, mkdir |
|||
import os |
|||
import requests |
|||
import platform |
|||
import time |
|||
|
|||
from ..exceptions import RunnerError |
|||
from ..config import ConfigHolder |
|||
|
|||
|
|||
pipconf = """[global] |
|||
index-url = URL |
|||
trusted-host = BASE |
|||
pypi.org |
|||
extra-index-url= http://pypi.org/simple""" |
|||
|
|||
|
|||
class RepoConfig:
    """Settings for the local PyPI repository container."""

    def __init__(self, config={}) -> None:
        # Host port the repo is published on, whether to start it at all,
        # and the name of the container to (re)use.
        self.port = config.get("port", 8888)
        self.enabled = config.get("enabled", True)
        self.container_name = config.get("container_name", "alice-pypiserver")

    def copy(self, job_config):
        """Return a new RepoConfig with job-level overrides applied."""
        merged = RepoConfig()
        merged.container_name = job_config.get("container_name", self.container_name)
        merged.enabled = job_config.get("enabled", self.enabled)
        merged.port = job_config.get("port", self.port)
        return merged
|||
|
|||
|
|||
class PypiRepoRunner:
    """Ensures a local pypiserver container is running, then publishes its
    credentials and URIs through ConfigHolder for later jobs."""

    def __init__(self, config) -> None:
        logging.info("[PyPiRepo] Initializing")
        self.config = RepoConfig(config)
        self.client = docker.from_env()
        # Fixed development credentials; htpasswd is the {SHA} entry for alice:alice
        self.user = "alice"
        self.passwd = "alice"
        self.htpasswd = 'alice:{SHA}UisnajVr3zkBPfq+os1D4UHsyeg='

    def get_image(self):
        """Return a usable pypiserver image tag, building one locally on ARM."""
        # TODO: remove when resolved:
        # Official Docker image support for ARM?
        # https://github.com/pypiserver/pypiserver/issues/364
        pypiserver = "https://github.com/pypiserver/pypiserver.git"
        if platform.machine() == "aarch64":
            tag = "alice.localhost/pypiserver:arm"
            try:
                # Reuse a previously built local image when available
                self.client.images.get(tag)
                return tag
            except docker.errors.ImageNotFound:
                print("[PyPiRepo] Building PyPiServer ARM image, this could take a while")
                workdir = join(getcwd(), ".alice", "pypirepo", "source")
                if not os.path.isdir(workdir):
                    os.mkdir(workdir)
                git_command = ["git", "clone", pypiserver, "--branch=v1.3.2"]
                output = []
                with subprocess.Popen(git_command, cwd=workdir, stdout=subprocess.PIPE) as p:
                    for line in p.stdout:
                        output.append(line.decode('utf8').strip())
                    p.wait()
                    if p.returncode != 0:
                        # Surface the captured git output only on failure
                        print("\n".join(output))
                        raise(RunnerError("[PyPiRepo] Could not fetch pypiserver source"))
                source_path = os.path.join(workdir, "pypiserver")
                self.client.images.build(path=source_path, tag=tag)
                return tag
        else:
            return "pypiserver/pypiserver:latest"

    def run(self, job_spec):
        """Start (or reuse) the repo container, wait for it to answer HTTP,
        then export its connection info and write the venv pip config."""
        job_config = self.config.copy(job_spec)
        # Determine the host's IP on the default bridge network so other
        # containers can reach the repo through it
        docker_host_ip = None
        for network in self.client.networks.list():
            if network.name == "bridge":
                try:
                    docker_host_ip = network.attrs["IPAM"]["Config"][0]["Gateway"]
                except KeyError:
                    # Fall back to guessing .1 on the bridge subnet
                    docker_host_ip = network.attrs["IPAM"]["Config"][0]["Subnet"].replace(".0/16", ".1")
        if docker_host_ip is None:
            raise RunnerError("Unable to determine Docker host IP")

        if job_config.enabled:
            try:
                c = self.client.containers.get(job_config.container_name)
                print(f"[PyPiRepo] {job_config.container_name} already running")
            except docker.errors.NotFound:
                # First start: create the persistent package dir and htpasswd
                persistency_dir = join(getcwd(), ".alice", "pypirepo")
                if not isdir(persistency_dir):
                    mkdir(persistency_dir)

                package_dir = join(persistency_dir, "packages")
                if not isdir(package_dir):
                    mkdir(package_dir)

                htpasswd_file = join(persistency_dir, ".htpasswd")
                with open(htpasswd_file, 'w') as f:
                    f.write(self.htpasswd)

                c = self.client.containers.run(
                    name=job_config.container_name,
                    image=self.get_image(),
                    detach=True,
                    labels={"app": "alice"},
                    command=["--overwrite", "-P", ".htpasswd", "packages"],
                    ports={"8080/tcp": job_config.port},
                    volumes={
                        package_dir: {
                            "bind": "/data/packages",
                            "mode": "rw"
                        },
                        htpasswd_file: {
                            "bind": "/data/.htpasswd",
                            "mode": "ro"
                        }
                    },
                    restart_policy={
                        "Name": "unless-stopped"
                    }
                )
                print(f"[PyPiRepo] Started {job_config.container_name}")

            c.reload()
            logging.info(f"[PyPiRepo] {job_config.container_name} : {c.status}")
            if c.status != "running":
                raise RunnerError(f"[PyPiRepo] Repo container unstable: {c.status}")

            # Poll until the repo answers HTTP (max 5 attempts, 2s apart)
            uri = f"http://localhost:{job_config.port}"
            unreachable = True
            attempts = 0
            while unreachable and attempts < 5:
                attempts += 1
                try:
                    requests.get(uri)
                    unreachable = False
                except Exception as e:
                    logging.info(f"[PyPiRepo] {attempts} - Repo at {uri} is unavailable: {e}")
                    time.sleep(2)
            if unreachable:
                raise RunnerError(f"[PyPiRepo] Repo unreachable")

            # Expose connection details to later jobs via the shared config;
            # the DOCKER_* variants use the bridge IP instead of localhost
            cfgh = ConfigHolder.getInstance()
            cfgh.soft_set("PYPI_USER", self.user)
            cfgh.soft_set("PYPI_PASS", self.passwd)
            cfgh.soft_set("PYPI_REPO", uri)
            cfgh.soft_set("DOCKER_PYPI_USER", self.user)
            cfgh.soft_set("DOCKER_PYPI_PASS", self.passwd)
            cfgh.soft_set("DOCKER_PYPI_REPO", f"http://{docker_host_ip}:{job_config.port}")
            cfgh.commit()

            # Point the local venv's pip at this repo via pip.conf / pip.ini
            venv = join(os.getcwd(), "venv")
            if os.path.isdir(venv):
                netloc = f"localhost:{job_config.port}"
                url = f"http://{self.user}:{self.passwd}@{netloc}"
                conf = pipconf.replace("URL", url).replace("BASE", netloc)

                if os.name == "nt":  # Windows
                    filename = join(venv, "pip.ini")
                else:  # Linux & Mac
                    filename = join(venv, "pip.conf")
                with open(filename, 'w') as f:
                    f.write(conf)
                # NOTE(review): "(unknown)" looks like a lost {filename}
                # placeholder in this f-string — confirm the intended message.
                print(f"[PyPiRepo] pip conf written to (unknown)")
@ -0,0 +1,345 @@ |
|||
from distutils.command.config import config |
|||
from distutils.log import debug |
|||
import json |
|||
import logging |
|||
from ntpath import join |
|||
import os |
|||
import re |
|||
import subprocess |
|||
import sys |
|||
from pkg_resources import parse_version |
|||
from requests import get |
|||
from requests.auth import HTTPBasicAuth |
|||
from os import environ, path |
|||
from html.parser import HTMLParser |
|||
from alice.runners.pyutils import PackageManager, glob, grab_from |
|||
from alice.exceptions import ConfigException, RunnerError |
|||
import hashlib |
|||
from pathlib import Path |
|||
|
|||
|
|||
def md5_update_from_file(filename, hash):
    """Stream the contents of *filename* into *hash* (4 KiB chunks) and
    return the updated hash object."""
    assert Path(filename).is_file()
    with open(str(filename), "rb") as handle:
        chunk = handle.read(4096)
        while chunk:
            hash.update(chunk)
            chunk = handle.read(4096)
    return hash
|||
|
|||
|
|||
def md5_file(filename):
    """Return the hex MD5 digest of a single file."""
    digest = md5_update_from_file(filename, hashlib.md5())
    return digest.hexdigest()
|||
|
|||
|
|||
def md5_update_from_dir(directory, hash, exclude_dirs, exclude_extensions, exclude_dirs_wildcard):
    """Recursively fold a directory tree's file names and contents into *hash*.

    Subdirectories whose basename contains any entry of exclude_dirs are
    skipped. NOTE(review): exclude_extensions and exclude_dirs_wildcard are
    accepted but never used here; entry order follows os.listdir, so the
    digest is only stable where listing order is stable — confirm.
    """
    assert Path(directory).is_dir()
    for _path in os.listdir(directory):
        path = os.path.join(directory, _path)
        if os.path.isfile(path) :
            # Mix in the file name too, so renames also change the digest
            hash.update(_path.encode())
            logging.debug(f"[PyPiRunner][Hash] File: {path}")
            hash = md5_update_from_file(path, hash)
        elif os.path.isdir(path):
            skip = False
            for name in exclude_dirs:
                # substring match against the entry's basename
                if name in os.path.basename(_path):
                    skip = True
            if not skip:
                hash = md5_update_from_dir(path, hash, exclude_dirs, exclude_extensions, exclude_dirs_wildcard)
    return hash
|||
|
|||
|
|||
def md5_dir(directory, exclude_dirs=[], exclude_extensions=[], exclude_dirs_wildcard=[]):
    """Return the hex MD5 digest of a directory tree (names + contents).

    The original seeded this with hashlib.sha1() despite the md5_ name and
    despite md5_file using MD5; use MD5 consistently. Hashes stored with the
    old seed will mismatch once and simply trigger a single rebuild.
    """
    return md5_update_from_dir(directory, hashlib.md5(), exclude_dirs, exclude_extensions, exclude_dirs_wildcard).hexdigest()
|||
|
|||
|
|||
def get_uri(config, default):
    """Resolve the repo URI from config["repo"]["uri"] (falling back to
    *default*), prefixing https:// when no scheme is present."""
    uri = config.get("repo", {}).get("uri", default)
    if uri is None:
        return None
    if not isinstance(uri, str):
        uri = grab_from(uri)
    if not re.match('(?:http|ftp|https)://', uri):
        uri = f"https://{uri}"
    return uri
|||
|
|||
|
|||
def get_user(config, default):
    """Return the username from config["repo"], or *default*.

    A plain string is used verbatim; any other value is resolved via grab_from.
    """
    repo = config.get("repo", {})
    if "username" not in repo:
        return default
    data = repo["username"]
    return data if isinstance(data, str) else grab_from(data)
|||
|
|||
|
|||
def get_pass(config, default):
    """Return the password from config["repo"], or *default*.

    A plain string is used verbatim; any other value is resolved via grab_from.
    """
    repo = config.get("repo", {})
    if "password" not in repo:
        return default
    data = repo["password"]
    return data if isinstance(data, str) else grab_from(data)
|||
|
|||
|
|||
class SimpleRepoParser(HTMLParser):
    """Extracts distinct X.Y.Z version strings from a PEP 503 'simple'
    repository index page."""

    def __init__(self):
        HTMLParser.__init__(self)
        # Versions found so far, in document order, without duplicates
        self.packages = []

    def handle_data(self, data):
        # Raw string literal: "\d" in a plain string is an invalid escape
        # sequence (DeprecationWarning, a SyntaxError in future Pythons).
        re_groups = re.findall(r"(\d*\.\d*\.\d*)", data)
        if len(re_groups) == 1:
            file_version = re_groups[0]
            if file_version not in self.packages:
                self.packages.append(file_version)
|||
|
|||
|
|||
# Parses and stores the config from yaml |
|||
class PypiConfig:
    """Parsed 'pypi' runner configuration: workdir, repo credentials,
    package list, and upload behavior."""

    def __init__(self, config={}) -> None:
        self.workdir = path.abspath(config.get("workdir", "."))
        self.repo_uri = get_uri(config, None)
        self.repo_user = get_user(config, None)
        self.repo_pass = get_pass(config, None)
        self.packages = set(config.get("packages", []))
        self.upload = config.get("upload", False)
        self.fail_if_exists = config.get("fail_if_exists", False)

    # returns a PyPiConfig with merged values
    def copy(self, job_config={}):
        p = PypiConfig()
        p.workdir = path.abspath(path.join(self.workdir, job_config.get("workdir", ".")))
        p.repo_uri = get_uri(job_config, self.repo_uri)
        p.repo_user = get_user(job_config, self.repo_user)
        p.repo_pass = get_pass(job_config, self.repo_pass)
        # .get with a default: the original indexed job_config["packages"],
        # raising KeyError whenever a job did not override the package list.
        job_pkg_set = set(job_config.get("packages", []))
        job_pkg_set.update(self.packages)
        p.packages = job_pkg_set
        p.upload = job_config.get("upload", self.upload)
        p.fail_if_exists = job_config.get("fail_if_exists", self.fail_if_exists)
        return p
|||
|
|||
|
|||
# TODO: Refactor to something sensible, more flexible
class PackageMeta:
    """Persists per-package metadata as JSON under .alice/pypirunner."""

    def __init__(self):
        self.conf_dir = path.join(os.getcwd(), ".alice", "pypirunner")
        self.metafile = path.join(self.conf_dir, "packagemeta.json")
        # NOTE(review): os.mkdir fails if .alice itself is missing — presumably
        # created earlier by another component; confirm, else use makedirs.
        if not path.isdir(self.conf_dir):
            os.mkdir(self.conf_dir)
        if path.isfile(self.metafile):
            with open(self.metafile) as f:
                self.metadata = json.load(f)
        else:
            self.metadata = {}
            self.__save()

    def __save(self):
        # Rewrite the whole JSON file on every mutation
        with open(self.metafile, 'w') as f:
            json.dump(self.metadata, f)

    def get(self, package, key):
        """Return the stored value, or "" when the package/key is unknown."""
        return self.metadata.get(package, {}).get(key, "")

    def set(self, package, key, value):
        """Store a value and persist immediately."""
        if package not in self.metadata:
            self.metadata[package] = {}
        self.metadata[package][key] = value
        self.__save()
|||
|
|||
|
|||
# TODO: consider "--skip-existing" flag for twine |
|||
class PyPiRunner(): |
|||
def __init__(self, config) -> None: |
|||
logging.info("[PyPiRunner] Initializing") |
|||
self.workdir = config["workdir"] |
|||
self.config = PypiConfig(config) |
|||
self.metadata = PackageMeta() |
|||
|
|||
def __versions(self, config, pkg_name): |
|||
repo = config.repo_uri |
|||
if repo is None: |
|||
repo = "https://pypi.python.org/pypi" |
|||
|
|||
if config.repo_pass is not None and config.repo_user is not None: |
|||
logging.info(f"[PyPiRunner][Versions] Set auth headers from config") |
|||
logging.debug(f"[PyPiRunner][Versions] Auth: {config.repo_user}:{config.repo_pass}") |
|||
auth = HTTPBasicAuth(config.repo_user, config.repo_pass) |
|||
else: |
|||
logging.info(f"[PyPiRunner][Versions] No auth headers in config, skip") |
|||
logging.debug(f"[PyPiRunner][Versions] Auth: {config.repo_user}:{config.repo_pass}") |
|||
auth = None |
|||
|
|||
try: |
|||
if repo.endswith("pypi"): |
|||
url = f'{repo}/{pkg_name}/json' |
|||
logging.info(f"[PyPiRunner][Versions] Trying JSON API at {url}") |
|||
response = get(url, auth=auth) |
|||
if response.status_code == 200: |
|||
releases = json.loads(response.text)["releases"] |
|||
return sorted(releases, key=parse_version, reverse=True) |
|||
else: |
|||
logging.info(f"[PyPiRunner][Versions] JSON failed: [{response.status_code}]") |
|||
logging.debug(response.text) |
|||
repo = f"{repo}/simple" |
|||
url = f"{repo}/{pkg_name}" |
|||
logging.info(f"[PyPiRunner][Versions] Trying Simple API at {url}") |
|||
response = get(url, auth=auth) |
|||
if response.status_code == 200: |
|||
parser = SimpleRepoParser() |
|||
parser.feed(response.text) |
|||
return sorted(parser.packages, key=parse_version, reverse=True) |
|||
if response.status_code == 404: |
|||
return [] |
|||
else: |
|||
logging.info(f"[PyPiRunner][Versions] Simple failed: [{response.status_code}]") |
|||
logging.debug(response.text) |
|||
raise Exception("Failed to fetch available versions") |
|||
|
|||
except Exception as e: |
|||
raise RunnerError(f"{url}: {e}") |
|||
|
|||
def build(self, config, package):
    """Build sdist/wheel artifacts for *package* with ``python -m build``.

    :raises ConfigException: when the package directory does not exist.
    :raises RunnerError: when the build subprocess exits non-zero.
    """
    print(f"[PyPiRunner] Building {package}")
    pkg_path = path.join(config.workdir, package)
    if not path.isdir(pkg_path):
        # FIX: grammar in the error message ("does not exists").
        raise ConfigException(f"Path does not exist: {pkg_path}")
    PackageManager.getInstance().ensure("build")
    command = [sys.executable, "-m", "build", package]
    # In debug mode let the build tool write straight to our stdout so the
    # full output is visible; otherwise capture (and discard) it to keep
    # the log clean. The two original branches were identical otherwise.
    stdout = None if logging.root.isEnabledFor(logging.DEBUG) else subprocess.PIPE
    with subprocess.Popen(command, cwd=config.workdir, stdout=stdout) as p:
        p.wait()
        if p.returncode != 0:
            raise RunnerError(f"[PyPiRunner] Failed to build {package}")
    print(f"[PyPiRunner] Package {package} built")
|||
|
|||
def find_unuploaded(self, config, file_list, pkg_name):
    """Return the subset of *file_list* whose version is not in the repo yet.

    Each file name must embed a MAJOR.MINOR.PATCH version string.

    :raises RunnerError: when no version can be extracted from a file name.
    """
    versions = self.__versions(config, pkg_name)
    unuploaded = []
    for file in file_list:
        # Raw string avoids the invalid-escape warning the noqa was hiding.
        re_groups = re.findall(r"(\d*\.\d*\.\d*)", file)
        if len(re_groups) < 1:
            raise RunnerError(f"Unable to determine version of file {file}")
        file_version = re_groups[0]
        if file_version not in versions:
            unuploaded.append(file)
        else:
            print(f"[PyPiRunner] File already uploaded: {os.path.basename(file)}")
    # BUG FIX: the original used `> 1`, printing 'None' when exactly one
    # file was pending even though it was still returned for upload.
    print(f"[PyPiRunner] Packages to publish: {', '.join(unuploaded) if unuploaded else 'None'}")
    return unuploaded
|||
|
|||
def upload_command(self, config, package, _command, to_upload):
    """Run the twine upload command; auto-register the package on a 405.

    Some private indexes answer "405 Method Not Allowed" for packages that
    were never registered; in that case register the first artifact and
    retry the whole upload once.

    :raises RunnerError: when upload or registration fails for any other reason.
    """
    unregistered = False
    command = _command + to_upload
    with subprocess.Popen(command, cwd=config.workdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
        p.wait()
        if p.returncode != 0:
            # BUG FIX: read stderr exactly once. The original iterated the
            # pipe and later called .read() on it again, so the error
            # output printed to the user was always empty.
            err_lines = [line.decode('utf8').strip() for line in p.stderr]
            unregistered = any("405 Method Not Allowed" in line for line in err_lines)
            if not unregistered:
                print("STDOUT:")
                sys.stdout.buffer.write(p.stdout.read())
                print("STDERR:")
                print("\n".join(err_lines))
                raise RunnerError(f"[PyPiRunner] Failed to upload {package} ({p.returncode})")
    if unregistered:
        print("[PyPiRunner] Registering package")
        register_command = [sys.executable, "-m", "twine", "register", "--verbose", "--non-interactive"]
        if config.repo_uri is not None:
            register_command += ["--repository-url", config.repo_uri]
        if config.repo_user is not None and config.repo_pass is not None:
            register_command += ["-u", config.repo_user, "-p", config.repo_pass]
        # Registering a single artifact is enough to create the project.
        register_command.append(to_upload[0])
        with subprocess.Popen(register_command, cwd=config.workdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
            p.wait()
            if p.returncode != 0:
                print("STDOUT:")
                sys.stdout.buffer.write(p.stdout.read())
                print("STDERR:")
                sys.stdout.buffer.write(p.stderr.read())
                raise RunnerError(f"[PyPiRunner] Failed to register {package} ({p.returncode})")
        # Retry the original upload now that the project exists.
        self.upload_command(config, package, _command, to_upload)
|||
|
|||
def upload(self, config, package, current_version):
    """Upload the dist artifacts of *package* matching *current_version*.

    :raises RunnerError: when credentials are missing.
    """
    print(f"[PyPiRunner] Uploading {package}")
    PackageManager.getInstance().ensure("twine")
    command = [sys.executable, "-m", "twine", "upload", "--verbose", "--non-interactive"]
    if config.repo_uri is not None:
        command += ["--repository-url", config.repo_uri]
    if config.repo_user is not None and config.repo_pass is not None:
        command += ["-u", config.repo_user, "-p", config.repo_pass]
    else:
        raise RunnerError("[PyPiRunner] Can't upload without credentials!")

    # Collect dist files belonging to the current version only.
    dist_path = os.path.abspath(os.path.join(config.workdir, package, "dist"))
    files = []
    for candidate in glob(os.path.join(dist_path, "*"), config.workdir):
        if current_version in os.path.basename(candidate):
            files.append(candidate)
            print(f"[PyPiRunner] Found: {candidate}")
        else:
            logging.info(f"[PyPiRunner] Dropped: {candidate} doesn't match current version: {current_version}")

    to_upload = self.find_unuploaded(config, files, package)
    if not to_upload:
        return
    self.upload_command(config, package, command, to_upload)
    print(f"[PyPiRunner] Uploaded {package}")
|||
|
|||
def package_version(self, config, package):
    """Extract the MAJOR.MINOR.PATCH version from the package's setup.cfg.

    Scans setup.cfg line by line for the first line starting with
    "version" and pulls the dotted version out of it.

    :raises RunnerError: when a version line is present but unparsable, or
        (FIX) when no version line exists at all — the original silently
        returned None, which made later comparisons fail obscurely.
    """
    cfg_path = os.path.join(config.workdir, package, "setup.cfg")
    with open(cfg_path) as f:
        for line in f:
            if line.startswith("version"):
                # Raw string: "\d" in a plain literal is an invalid escape.
                re_groups = re.findall(r"(\d*\.\d*\.\d*)", line)
                if len(re_groups) < 1:
                    raise RunnerError(f"Unable to determine version of package: |{line}|")
                return re_groups[0]
    raise RunnerError(f"No version field found in {cfg_path}")
|||
|
|||
def run(self, job_spec):
    """Build every configured package that changed, then optionally upload.

    A package is rebuilt when its version is missing from the repo or when
    its directory hash differs from the recorded one for that version.
    """
    job_config = self.config.copy(job_spec)

    for package in job_config.packages:
        pkg_dir = path.join(job_config.workdir, package)
        pkg_hash = md5_dir(pkg_dir, exclude_dirs=["pycache", "pytest_cache", "build", "dist", "egg-info"])
        logging.debug(f"[PyPiRunner] {package} hash: {pkg_hash}")
        pkg_version = self.package_version(job_config, package)
        logging.debug(f"[PyPiRunner] {package} local version: {pkg_version}")
        repo_versions = self.__versions(job_config, package)
        logging.debug(f"[PyPiRunner] {package} remote version: {repo_versions}")

        in_repo = pkg_version in repo_versions
        if not in_repo:
            print(f"[PyPiRunner] {package} not found in repo")
        # Short-circuit keeps metadata.get() from running when the package
        # is absent from the repo, exactly like the original nesting did.
        if not in_repo or pkg_hash != self.metadata.get(package, pkg_version):
            self.build(job_config, package)
            self.metadata.set(package, pkg_version, pkg_hash)
        else:
            print(f"[PyPiRunner] {package} Unchanged since last build")

        if job_config.upload:
            self.upload(job_config, package, pkg_version)
        else:
            print("[PyPiRunner] Upload disabled, skipping")
@ -1,96 +1,102 @@ |
|||
import logging |
|||
import subprocess |
|||
import os |
|||
import sys |
|||
import shlex |
|||
|
|||
from exceptions import NonZeroRetcode, RunnerError, ConfigException |
|||
from ..exceptions import NonZeroRetcode, RunnerError, ConfigException |
|||
from .pyutils import PackageManager, glob_command, grab_from |
|||
|
|||
|
|||
# same venv across all runs! |
|||
class PythonRunner(): |
|||
def __init__(self, workdir, defaults) -> None: |
|||
self.workdir = workdir |
|||
self.virtual_dir = os.path.abspath(os.path.join(workdir, "venv")) |
|||
self.config = defaults |
|||
self.env_vars = os.environ.copy() |
|||
for env_var in defaults["env"]: |
|||
self.env_vars[env_var["name"]] = env_var["value"] |
|||
|
|||
# TODO: Handle config like PyPiConfig |
|||
class PythonRunner: |
|||
def __init__(self, config) -> None: |
|||
logging.info("[PythonRunner] Initializing") |
|||
self.workdir = config["workdir"] |
|||
self.virtual_dir = os.path.abspath(os.path.join(self.workdir, "venv")) |
|||
self.config = config |
|||
PackageManager.getInstance().ensure("virtualenv") |
|||
self.__init_venv() |
|||
|
|||
# TODO: Detect if the prev venv is the same OS type |
|||
def __init_venv(self): |
|||
if os.name == "nt": # Windows |
|||
self.vpython = os.path.join(self.virtual_dir, "Scripts", "python.exe") |
|||
else: # Linux & Mac |
|||
self.vpython = os.path.join(self.virtual_dir, "bin", "python3") |
|||
self.vpython = os.path.join(self.virtual_dir, "bin", "python") |
|||
|
|||
if not os.path.exists(self.vpython): |
|||
logging.debug(f"[PythonRunner] Venv not found at {self.vpython}") |
|||
logging.info("[PythonRunner] Initializing venv") |
|||
output = [] |
|||
with subprocess.Popen([sys.executable, "-m", "virtualenv", self.virtual_dir], |
|||
stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: |
|||
stdout=subprocess.PIPE) as p: |
|||
p.wait() |
|||
for line in p.stdout: |
|||
output.append(line.decode('utf8').strip()) |
|||
if p.returncode != 0: |
|||
sys.stdout.buffer.write(p.stderr.read()) |
|||
raise RunnerError("PythonRunner: Could not create virtualenv") |
|||
print("\n".join(output)) |
|||
raise RunnerError("[PythonRunner] Could not create virtualenv") |
|||
else: |
|||
print(f"PythonRunner: Virtualenv initialized at {self.virtual_dir}") |
|||
logging.info(f"[PythonRunner] Virtualenv initialized at {self.virtual_dir}") |
|||
else: |
|||
print(f"PythonRunner: Found virtualenv at {self.virtual_dir}") |
|||
|
|||
# Stores common defaults for all jobs - all types! |
|||
# Also - dependency install by config is only allowed in this step |
|||
def update_config(self, config): |
|||
if "dependencies" in config: |
|||
for dependency in config["dependencies"]: |
|||
# TODO: Check what happens with fixed version |
|||
with subprocess.Popen([self.vpython, "-m", "pip", "install", dependency, "--upgrade"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p: |
|||
logging.info(f"[PythonRunner] Found virtualenv at {self.virtual_dir}") |
|||
dependencies = self.config.get("dependencies", []) |
|||
if len(dependencies) > 0: |
|||
logging.info(f"[PythonRunner] Ensuring dependencies: {', '.join(dependencies)}") |
|||
command = [self.vpython, "-m", "pip", "install"] + dependencies |
|||
if logging.root.isEnabledFor(logging.DEBUG): |
|||
with subprocess.Popen(command) as p: |
|||
p.wait() |
|||
if p.returncode != 0: |
|||
sys.stdout.buffer.write(p.stderr.read()) |
|||
raise(RunnerError(f"PythonRunner: Could not install dependency: {dependency} ({p.returncode})")) |
|||
for env_var in config["env"]: |
|||
self.env_vars[env_var["name"]] = env_var["value"] |
|||
if "workdir" in config and config["workdir"] is not None: |
|||
self.workdir = os.path.join(self.workdir, config["workdir"]) |
|||
|
|||
def __ghetto_glob(self, command): |
|||
new_command = [] |
|||
for item in command: |
|||
if "*" in item: |
|||
dir = os.path.abspath(os.path.dirname(item)) |
|||
base_name = os.path.basename(item) |
|||
if os.path.isdir(dir): |
|||
item_parts = base_name.split("*") |
|||
print(item_parts) |
|||
for file in os.listdir(dir): |
|||
# TODO: Fix ordering! A*B = B*A = AB* |
|||
if item_parts[0] in file and item_parts[1] in file: |
|||
new_command.append(os.path.join(dir, file)) |
|||
raise(RunnerError(f"[PythonRunner] Could not install dependencies: {dependencies} ({p.returncode})")) |
|||
else: |
|||
new_command.append(item) |
|||
return new_command |
|||
output = [] |
|||
with subprocess.Popen(command, stdout=subprocess.PIPE) as p: |
|||
for line in p.stdout: |
|||
output.append(line.decode('utf8').strip()) |
|||
p.wait() |
|||
if p.returncode != 0: |
|||
print("\n".join(output)) |
|||
raise(RunnerError(f"[PythonRunner] Could not install dependencies: {dependencies} ({p.returncode})")) |
|||
logging.info("[PythonRunner] Installation done") |
|||
|
|||
# Executes the given job in the one and only venv |
|||
# parameter shall be the raw jobscpec |
|||
# parameter is the raw jobspec
|||
def run(self, job_spec): |
|||
if "workdir" in job_spec: |
|||
pwd = os.path.abspath(os.path.join(self.workdir, job_spec["workdir"])) |
|||
else: |
|||
pwd = self.workdir |
|||
run_env = self.env_vars.copy() |
|||
run_env = {} |
|||
for k, v in self.config["env"].items(): |
|||
if isinstance(v, str): |
|||
run_env[k] = v |
|||
else: |
|||
run_env[k] = grab_from(v) |
|||
if "env" in job_spec: |
|||
for env_var in job_spec["env"]: |
|||
if isinstance(env_var["value"], str): |
|||
run_env[env_var["name"]] = env_var["value"] |
|||
else: |
|||
run_env[env_var["name"]] = grab_from(env_var["value"]) |
|||
if "commands" in job_spec: |
|||
commands = job_spec["commands"] |
|||
for command in commands: |
|||
logging.debug(f"[PythonRunner] Raw command: {command}") |
|||
# TODO: only split if command is not an array |
|||
run_command = self.__ghetto_glob(shlex.split(command)) |
|||
if "*" in command: |
|||
run_command = glob_command(shlex.split(command), pwd) |
|||
else: |
|||
run_command = shlex.split(command) |
|||
logging.info(f"[PythonRunner] Command to execute: {run_command}") |
|||
logging.debug(f"[PythonRunner] Workdir: {pwd}") |
|||
if os.path.isdir(pwd): |
|||
with subprocess.Popen([self.vpython] + run_command, cwd=pwd, env=run_env) as p: |
|||
p.wait() |
|||
if p.returncode != 0: |
|||
raise NonZeroRetcode(f"Command {command} returned code {p.returncode}") |
|||
else: |
|||
raise RunnerError(f"PythonRunner: Invalid path for shell command: {pwd}") |
|||
raise RunnerError(f"[PythonRunner] Invalid path for shell command: {pwd}") |
|||
else: |
|||
raise ConfigException(f"PythonRunner: No commands specified in step {job_spec['name']}") |
|||
raise ConfigException(f"[PythonRunner] No commands specified in step {job_spec['name']}") |
|||
|
@ -0,0 +1,154 @@ |
|||
import fnmatch
import logging
import os
import re
import subprocess
import sys

from pkg_resources import parse_version

from ..exceptions import RunnerError, ConfigException
from ..config import ConfigHolder
|||
|
|||
|
|||
class PackageManager:
    """Singleton that caches the installed pip packages and installs missing ones."""

    __instance = None

    @staticmethod
    def getInstance():
        """Static access method — creates the singleton on first use."""
        if PackageManager.__instance is None:
            PackageManager()
        return PackageManager.__instance

    def __init__(self):
        """Virtually private constructor; use getInstance() instead."""
        if PackageManager.__instance is not None:
            raise Exception("This class is a singleton!")
        PackageManager.__instance = self
        # name -> parsed version of every package visible to `pip freeze`
        self.package_list = self.__get_packages()

    def __get_packages(self):
        """Snapshot the current environment via ``pip freeze``."""
        packages = {}
        with subprocess.Popen([sys.executable, "-m", "pip", "freeze"],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
            p.wait()
            # Only "name==version" lines are interesting (skips editable installs).
            installed = list(map(lambda x: x.decode("UTF-8").split("=="),
                                 filter(lambda x: b'==' in x, p.stdout.read().splitlines())))
            for name, version in installed:
                packages[name] = parse_version(version)
        logging.debug(f"[PackageManager] Picked up packages: {packages}")
        return packages

    def ensure_more(self, package_list, executable=sys.executable):
        """Install every requirement from *package_list* that is not yet satisfied.

        :raises RunnerError: when pip exits non-zero.
        """
        to_install = list(filter(lambda x: not self.__has_package(x), package_list))
        if len(to_install) > 0:
            command = [executable, "-m", "pip", "install"] + to_install
            with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
                p.wait()
                if p.returncode != 0:
                    sys.stdout.buffer.write(p.stderr.read())
                    raise RunnerError(f"[PackageManager] Could not install dependencies ({p.returncode})")
            # Refresh the cache so subsequent checks see the new packages.
            self.package_list = self.__get_packages()

    # Assumption: there are more hits in the long run, than misses
    def ensure(self, package_string, executable=sys.executable):
        """Install a single requirement string if it is not already satisfied.

        :raises RunnerError: when pip exits non-zero.
        """
        if not self.__has_package(package_string):
            logging.info(f"[PackageManager] Installing {package_string}")
            command = [executable, "-m", "pip", "install", package_string]
            with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
                p.wait()
                if p.returncode != 0:
                    sys.stdout.buffer.write(p.stderr.read())
                    raise RunnerError(f"[PackageManager] Could not install dependencies ({p.returncode})")
            self.package_list = self.__get_packages()
        else:
            logging.info(f"[PackageManager] {package_string} already installed")

    def __has_package(self, package_string):
        """Return True when *package_string* ("pkg" or "pkg<op>version") is satisfied.

        :raises ConfigException: for an unrecognised comparison operator.
        """
        # BUG FIX: longest operators must come first in the alternation.
        # The original pattern ("==|>|>=|<|<=") tried '>' before '>=', so
        # "pkg>=1.0" split into ('pkg', '=1.0') and version parsing broke.
        package_data = re.split(">=|<=|==|>|<", package_string)
        if package_data[0] in self.package_list:
            # A second element means a version constraint must be checked too.
            if len(package_data) == 2:
                required_version = parse_version(package_data[1])
                installed_version = self.package_list[package_data[0]]
                # Whatever remains after removing name and version is the operator.
                comparator = package_string.replace(package_data[0], "").replace(package_data[1], "")
                if comparator == "==":
                    return required_version == installed_version
                elif comparator == ">":
                    return installed_version > required_version
                elif comparator == ">=":
                    return installed_version >= required_version
                elif comparator == "<":
                    return installed_version < required_version
                elif comparator == "<=":
                    return installed_version <= required_version
                else:
                    raise ConfigException(f"Illegal comparator found: {comparator}")
            return True
        return False
|||
|
|||
|
|||
def glob(item, workdir):
    """Expand a single ``*`` pattern relative to *workdir* into matching paths.

    Returns ``[item]`` unchanged when it contains no wildcard.

    :raises ConfigException: when the pattern's directory does not exist.
    """
    if "*" not in item:
        return [item]
    logging.debug(f"[Globbing] Found item: [{item}]")
    dir = os.path.abspath(os.path.join(workdir, os.path.dirname(item)))
    base_name = os.path.basename(item)
    if not os.path.isdir(dir):
        raise ConfigException(f"[Globbing] Dir not exists: {dir}")
    new_command = []
    for file in os.listdir(dir):
        # FIX (old TODO "A*B = B*A = AB*"): fnmatch honours the order and
        # count of pattern parts, unlike the previous substring check which
        # matched the fragments anywhere in the name.
        if fnmatch.fnmatch(file, base_name):
            new_item = os.path.join(dir, file)
            logging.debug(f"[Globbing] Substitute: {new_item}")
            new_command.append(new_item)
    return new_command
|||
|
|||
|
|||
def glob_command(command, workdir):
    """Apply glob() to every token of *command*, flattening the results."""
    logging.debug(f"[Globbing] Starting command: {' '.join(command)}")
    expanded = []
    for token in command:
        expanded.extend(glob(token, workdir))
    return expanded
|||
|
|||
|
|||
def grab_from(target):
    """Resolve an indirect value reference.

    Supports ``{"from_env": NAME}`` (environment variable) and
    ``{"from_cfg": KEY}`` (persistent ConfigHolder value).

    :raises ConfigException: for unset env vars or unknown grabber shapes.
    """
    if "from_env" in target:
        try:
            return os.environ[target["from_env"]]
        except KeyError:
            raise ConfigException(f"Env var unset: {target['from_env']}")
    elif "from_cfg" in target:
        value = ConfigHolder.getInstance().get(target["from_cfg"])
        # FIX: normalise any falsy value (empty string, or None from the
        # holder) to None. The original called len() on the value, which
        # raised TypeError whenever the holder returned None.
        if not value:
            value = None
        return value
    else:
        raise ConfigException(f"Unsupported grabber: {target}")
|||
|
|||
|
|||
def gen_dict(list_of_dicts):
    """
    Generates a dictionary from a list of dictionaries composed of
    'name' and 'value' keys.

    [{'name': 'a', 'value': 'b'}] => {'a': 'b'}

    Raises ConfigException when an item lacks either key.
    """
    result = {}
    for entry in list_of_dicts:
        try:
            value = entry["value"]
            # Plain strings are taken verbatim; anything else is an
            # indirect reference resolved via grab_from().
            result[entry["name"]] = value if isinstance(value, str) else grab_from(value)
        except KeyError:
            raise ConfigException(f"Invalid dict item: {entry}")
    return result
@ -1,78 +0,0 @@ |
|||
import yaml |
|||
from runners.pythonrunner import PythonRunner |
|||
from exceptions import NonZeroRetcode, ConfigException |
|||
|
|||
|
|||
class DummyRunner():
    """Fallback runner for unknown runner types; fails loudly when used."""

    def __init__(self, type) -> None:
        # Only stored so the error message can name the offending type.
        self.type = type

    def run(self, command, workdir=None, env=None):
        # Any attempt to execute is a configuration error.
        raise Exception(f"Invalid runner type in config: {self.type}")
|||
|
|||
|
|||
class Job():
    """A single configured job: a runner plus the commands to execute in it."""

    def __init__(self, type, repoDir, vpython, workspace, env={}) -> None:
        # NOTE(review): mutable default `env={}` is shared across calls —
        # appears to be read-only here, but worth confirming.
        self.runner = self.__get_runner(type, repoDir, vpython)
        self.commands = []
        self.workspace = workspace
        self.env = env

    def __get_runner(self, type, repoDir, vpython):
        # Only "python" maps to a real runner; anything else gets a
        # DummyRunner that raises on first use.
        if type == "python":
            return PythonRunner(repoDir, vpython)
        else:
            return DummyRunner(type)

    def run_commands(self, _env={}):
        """Run all queued commands; _env entries override the job's own env."""
        try:
            if self.env is None:
                env = _env.copy()
            else:
                env = self.env.copy()
                env.update(_env)
            for command in self.commands:
                self.runner.run(command, self.workspace, env)
        except NonZeroRetcode as n:
            # A failing command aborts the whole process with exit code 1.
            print(n)
            exit(1)
|||
|
|||
|
|||
class ConfigParser:
    """Parses a YAML CI config: global runner settings plus named jobs."""

    def __init__(self, file_path, factory) -> None:
        with open(file_path) as f:
            self.config = yaml.safe_load(f)
        self.factory = factory
        if "runners" in self.config:
            if "global" in self.config["runners"]:
                self.factory.set_globals(self.__gen_globals())
            self.factory.update_runners(self.config["runners"])
        self.jobs = self.__get_jobs()

    # Initialize env, workdir if not present
    def __gen_globals(self):
        # Fill in defaults so downstream code can rely on both keys existing.
        globals = self.config["runners"]["global"]
        if "env" not in globals:
            globals["env"] = []
        if "workdir" not in globals:
            globals["workdir"] = None
        return globals

    def __get_jobs(self):
        """Collect jobs keyed by name; duplicate names are a config error."""
        if "jobs" in self.config:
            jobs = {}
            for job_spec in self.config["jobs"]:
                name = job_spec["name"]
                if name in jobs:
                    raise ConfigException(f"Job with name {name} already exists!")

                jobs[name] = job_spec
            return jobs
        else:
            raise ConfigException("No jobs defined in config")

    def execute_job(self, job_name):
        """Look up *job_name* and hand its spec to the matching runner.

        Silently does nothing when the job name is unknown.
        """
        if job_name in self.jobs:
            # Pass the job_spec to a runner
            runner = self.factory.get_runner(self.jobs[job_name]["type"])
            runner.run(self.jobs[job_name])
@ -0,0 +1,9 @@ |
|||
FROM ubuntu:latest

# Combine update + install in one layer and clean the apt cache to keep the
# image small. `apt-get` is the stable scripting interface (`apt` warns that
# its CLI is not stable); --no-install-recommends avoids optional packages.
RUN apt-get update && \
    apt-get install -y --no-install-recommends python3 && \
    rm -rf /var/lib/apt/lists/*

# COPY is preferred over ADD for plain local files (no archive/URL magic).
COPY hello.py /opt/hello.py

#ENTRYPOINT [ "/bin/sh", "-c" ]

#CMD ["/usr/local/python/bin/python3", "/opt/hello.py"]
@ -0,0 +1,2 @@ |
|||
# Minimal smoke-test script baked into the example docker image.
if __name__ == "__main__":
    print("Hi Mom!")
@ -0,0 +1,18 @@ |
|||
runners: |
|||
python: |
|||
dependencies: |
|||
- flake8 |
|||
- build |
|||
- twine |
|||
jobs: |
|||
- name: selfcheck |
|||
type: python |
|||
workdir: ci |
|||
commands: |
|||
- "-m flake8 --ignore E501 --exclude venv" |
|||
|
|||
- name: lint |
|||
type: python |
|||
workdir: alice-ci/src |
|||
commands: |
|||
- "-m flake8 --ignore E501" |
@ -0,0 +1,26 @@ |
|||
# Schema |
|||
|
|||
``` |
|||
name: "" |
|||
type: docker |
|||
credentials: - global ...ish |
|||
username |
|||
password |
|||
image: - to use, pull, run |
|||
build: |
|||
dir: |
|||
dockerfile: |
|||
name: - defaults to step name |
|||
args: |
|||
- name: |
|||
- value: |
|||
pull: - pulls the named image and uses it as the current working image - mutually exclusive with build
|||
name: |
|||
credentials: - optional |
|||
command: - overwrite, not append |
|||
- ... |
|||
tag: |
|||
publish: true |
|||
name: - published name with repo and everything |
|||
credentials: |
|||
``` |
Loading…
Reference in new issue