feat: expose read-only cache to runner containers

Marc 2024-09-02 10:14:59 -04:00
parent 23eef5de24
commit b197449f70
Signed by: marc
GPG key ID: 048E042F22B5DC79
5 changed files with 34 additions and 10 deletions

View file

@@ -6,7 +6,7 @@ import (
type ContainerDriver interface {
Pull(string) error
-Start(string, string) error
+Start(string, string, string) error
Stop(string) error
Exec(containerId string, command string, options CommandOptions) CommandResult
}
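
For context: the new third argument to Start is the host directory that implementations expose read-only inside the container (the Podman driver mounts it at /cache). A minimal sketch of a caller, assuming it lives in the same package as ContainerDriver and PodmanDriver; the image, container name, and cache path below are illustrative, not from this commit:

// Sketch: start a container and hand it a host cache directory.
func startWithCache() error {
	var driver ContainerDriver = PodmanDriver{}
	image := "docker.io/library/busybox:stable" // illustrative image
	name := "runner-1"                          // illustrative container name
	cacheRoot := "/var/lib/runner/cache"        // illustrative host cache path

	if err := driver.Pull(image); err != nil {
		return err
	}
	// New third parameter: the host directory exposed read-only to the container.
	return driver.Start(image, name, cacheRoot)
}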

View file

@@ -32,7 +32,7 @@ func (d *MockDriver) Pull(uri string) error {
return nil
}
-func (d *MockDriver) Start(uri string, containerName string) error {
+func (d *MockDriver) Start(uri string, containerName string, cacheRoot string) error {
if _, init := d.Calls["Start"]; !init {
d.Calls["Start"] = []MockCall{}
}

View file

@@ -31,14 +31,14 @@ func (d PodmanDriver) Pull(uri string) error {
//
// The volume created is labeled with `owner=<containerName>` so it
// can be easily collected and cleaned up on stop.
-func (d PodmanDriver) Start(uri string, containerName string) error {
+func (d PodmanDriver) Start(uri string, containerName string, cachePath string) error {
volumeName := fmt.Sprintf("%s-workspace", containerName)
if err := podmanCommand("volume", "create", volumeName, "--label", fmt.Sprintf("owner=%s", containerName)).Run(); err != nil {
return err
}
-if err := podmanCommand("run", "-td", "--name", containerName, "-v", fmt.Sprintf("%s:/workspace", volumeName), uri).Run(); err != nil {
+if err := podmanCommand("run", "-td", "--name", containerName, "-v", fmt.Sprintf("%s:/workspace", volumeName), "-v", fmt.Sprintf("%s:/cache:ro", cachePath), uri).Run(); err != nil {
return err
}
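
For reference, the updated Start adds a second -v flag whose :ro suffix makes the cache bind mount read-only. A standalone sketch of the equivalent podman invocation via os/exec; the names and paths are illustrative, not taken from this change:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	containerName := "runner-1"          // illustrative
	volumeName := containerName + "-workspace"
	cachePath := "/var/lib/runner/cache" // illustrative host cache directory
	image := "docker.io/library/busybox:stable"

	// Mirrors the arguments assembled by Start: a writable workspace volume
	// plus the host cache directory bind-mounted read-only at /cache.
	cmd := exec.Command("podman", "run", "-td",
		"--name", containerName,
		"-v", fmt.Sprintf("%s:/workspace", volumeName),
		"-v", fmt.Sprintf("%s:/cache:ro", cachePath),
		image)
	if out, err := cmd.CombinedOutput(); err != nil {
		fmt.Printf("podman run failed: %v\n%s", err, out)
	}
}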

View file

@@ -13,6 +13,8 @@ const TEST_IMAGE_NAME = "busybox:stable"
type PodmanMountDetails struct {
Name string `json:"Name"`
Source string `json:"Source"`
+ReadWrite bool `json:"RW"`
Destination string `json:"Destination"`
}
@@ -91,7 +93,7 @@ func TestStartCreatesAContainerWithTheDesiredUriAndName(t *testing.T) {
defer CleanAll(containerName)
-driver.Start(TEST_IMAGE_NAME, containerName)
+driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
inspectOut := InspectContainer(containerName)
@@ -112,11 +114,11 @@ func TestStartCreatesAContainerWithAnAttachedVolume(t *testing.T) {
defer CleanAll(containerName)
-driver.Start(TEST_IMAGE_NAME, containerName)
+driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
inspectOut := InspectContainer(containerName)
-if len(inspectOut.Mounts) != 1 {
+if len(inspectOut.Mounts) == 0 {
t.Error("Expected a mount.")
}
@@ -135,6 +137,28 @@ func TestStartCreatesAContainerWithAnAttachedVolume(t *testing.T) {
})
}
+func TestStartCreatesAContainerWithAReadonlyCacheVolume(t *testing.T) {
+MustPodman(t, func() {
+driver := PodmanDriver{}
+containerName := "test-container"
+defer CleanAll(containerName)
+cacheRoot := t.TempDir()
+driver.Start(TEST_IMAGE_NAME, containerName, cacheRoot)
+inspectOut := InspectContainer(containerName)
+cacheVolume := inspectOut.Mounts[1]
+if cacheVolume.ReadWrite {
+t.Error("Expected cache volume to be read-only.")
+}
+if cacheVolume.Destination != "/cache" || cacheVolume.Source != cacheRoot {
+t.Error("Expected cache volume to map from provided cache root to /cache")
+}
+})
+}
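
A note on the new test: it reads inspectOut.Mounts[1], which assumes podman lists the cache mount second. As a point of comparison, the same fields that PodmanMountDetails decodes can be used to find the mount by destination instead of by position; a sketch against podman inspect output, with an illustrative container name:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Same shape as the test's PodmanMountDetails: podman inspect reports each
// mount's source, destination, and whether it is writable ("RW").
type mountDetails struct {
	Name        string `json:"Name"`
	Source      string `json:"Source"`
	ReadWrite   bool   `json:"RW"`
	Destination string `json:"Destination"`
}

func main() {
	out, err := exec.Command("podman", "inspect", "runner-1").Output() // illustrative name
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	var inspected []struct {
		Mounts []mountDetails `json:"Mounts"`
	}
	if err := json.Unmarshal(out, &inspected); err != nil || len(inspected) == 0 {
		fmt.Println("could not decode inspect output")
		return
	}
	// Select the cache mount by destination rather than by position.
	for _, m := range inspected[0].Mounts {
		if m.Destination == "/cache" && !m.ReadWrite {
			fmt.Println("cache mounted read-only from", m.Source)
		}
	}
}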
func TestStopStopsAContainerByName(t *testing.T) {
MustPodman(t, func() {
driver := PodmanDriver{}
@@ -142,7 +166,7 @@ func TestStopStopsAContainerByName(t *testing.T) {
defer CleanAll(containerName)
-driver.Start(TEST_IMAGE_NAME, containerName)
+driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
driver.Stop(containerName)
if ContainerExists(containerName) {
@@ -158,7 +182,7 @@ func TestStopCleansUpWorkspaceVolume(t *testing.T) {
defer CleanAll(containerName)
-driver.Start(TEST_IMAGE_NAME, containerName)
+driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
driver.Stop(containerName)
if VolumeExists(fmt.Sprintf("%s-workspace", containerName)) {

View file

@@ -137,7 +137,7 @@ func (r *Runner) RunCommandInContainer(containerId string, command string, optio
//
// The container is started before the job steps are run and cleaned up after.
func (r *Runner) RunJobInContainer(imageUri string, containerId string, jobContext context.Context) error {
-r.Driver.Start(imageUri, containerId)
+r.Driver.Start(imageUri, containerId, r.Cache.Root)
r.deferred.Queue(fmt.Sprintf("job-%s", containerId), func() {
logger.Info("Started cleaning up %s", containerId)
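
Downstream of this runner change, job steps see the cache under /cache but cannot modify it, since the bind mount carries the :ro flag. A small illustration of what a step executed inside the container would observe; this program is not part of the commit:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Reading the read-only cache works as usual.
	if entries, err := os.ReadDir("/cache"); err == nil {
		fmt.Printf("cache is readable: %d entries\n", len(entries))
	}
	// Writing fails because the mount is read-only (typically a
	// "read-only file system" error).
	if err := os.WriteFile("/cache/probe", []byte("x"), 0o644); err != nil {
		fmt.Println("write rejected as expected:", err)
	}
}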