feat: expose read-only cache to runner containers
parent 23eef5de24
commit b197449f70
5 changed files with 34 additions and 10 deletions
@@ -6,7 +6,7 @@ import (
 
 type ContainerDriver interface {
 	Pull(string) error
-	Start(string, string) error
+	Start(string, string, string) error
 	Stop(string) error
 	Exec(containerId string, command string, options CommandOptions) CommandResult
 }
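Every ContainerDriver implementation has to pick up the widened Start signature; the hunks below update MockDriver and PodmanDriver accordingly. As a sketch only (not part of this commit, and assuming all three types are visible from one package), a compile-time assertion is a cheap way to catch any implementation that was missed:

// Sketch, not in this commit: compile-time checks that both drivers still
// satisfy ContainerDriver after Start gained the cache-root parameter.
// MockDriver uses pointer receivers, PodmanDriver uses value receivers.
var (
	_ ContainerDriver = (*MockDriver)(nil)
	_ ContainerDriver = PodmanDriver{}
)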
@@ -32,7 +32,7 @@ func (d *MockDriver) Pull(uri string) error {
 	return nil
 }
 
-func (d *MockDriver) Start(uri string, containerName string) error {
+func (d *MockDriver) Start(uri string, containerName string, cacheRoot string) error {
 	if _, init := d.Calls["Start"]; !init {
 		d.Calls["Start"] = []MockCall{}
 	}
@@ -31,14 +31,14 @@ func (d PodmanDriver) Pull(uri string) error {
 //
 // The volume created is labeled with `owner=<containerName>` so it
 // can be easily collected and cleaned up on stop.
-func (d PodmanDriver) Start(uri string, containerName string) error {
+func (d PodmanDriver) Start(uri string, containerName string, cachePath string) error {
 	volumeName := fmt.Sprintf("%s-workspace", containerName)
 
 	if err := podmanCommand("volume", "create", volumeName, "--label", fmt.Sprintf("owner=%s", containerName)).Run(); err != nil {
 		return err
 	}
 
-	if err := podmanCommand("run", "-td", "--name", containerName, "-v", fmt.Sprintf("%s:/workspace", volumeName), uri).Run(); err != nil {
+	if err := podmanCommand("run", "-td", "--name", containerName, "-v", fmt.Sprintf("%s:/workspace", volumeName), "-v", fmt.Sprintf("%s:/cache:ro", cachePath), uri).Run(); err != nil {
 		return err
 	}
 
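The functional change in the Podman driver is the second `-v` argument: the `:ro` suffix makes podman bind-mount the supplied cache path read-only at /cache. As a purely hypothetical illustration (paths and file names are invented, not from this repository), a job process inside such a container would observe:

package main

import (
	"fmt"
	"os"
)

// Hypothetical sketch of what a job sees inside a runner container whose
// cache is mounted with ":ro": reads work, writes fail.
func main() {
	// Reading succeeds as long as the host cache actually holds the file.
	if data, err := os.ReadFile("/cache/deps/archive.tar.gz"); err == nil {
		fmt.Printf("read %d cached bytes\n", len(data))
	}

	// Writing is rejected by the kernel because the bind mount is read-only.
	if err := os.WriteFile("/cache/scratch.txt", []byte("x"), 0o644); err != nil {
		fmt.Println(err) // e.g. "open /cache/scratch.txt: read-only file system"
	}
}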
@@ -13,6 +13,8 @@ const TEST_IMAGE_NAME = "busybox:stable"
 
 type PodmanMountDetails struct {
 	Name        string `json:"Name"`
+	Source      string `json:"Source"`
+	ReadWrite   bool   `json:"RW"`
 	Destination string `json:"Destination"`
 }
 
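The two added fields let the tests see where a mount comes from and whether it is writable; `Source` and `RW` are keys that `podman inspect` emits for each entry of a container's Mounts array. A rough, self-contained sketch of the decoding, with invented values:

package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the test helper's struct: the field tags match the keys
// podman inspect uses for each mount entry.
type PodmanMountDetails struct {
	Name        string `json:"Name"`
	Source      string `json:"Source"`
	ReadWrite   bool   `json:"RW"`
	Destination string `json:"Destination"`
}

func main() {
	// Trimmed, illustrative inspect fragment; real output carries many more keys.
	raw := `[
		{"Name": "test-container-workspace", "Destination": "/workspace", "RW": true},
		{"Source": "/tmp/cache-root", "Destination": "/cache", "RW": false}
	]`

	var mounts []PodmanMountDetails
	if err := json.Unmarshal([]byte(raw), &mounts); err != nil {
		panic(err)
	}
	fmt.Println(mounts[1].Destination, mounts[1].ReadWrite) // /cache false
}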
@@ -91,7 +93,7 @@ func TestStartCreatesAContainerWithTheDesiredUriAndName(t *testing.T) {
 
 		defer CleanAll(containerName)
 
-		driver.Start(TEST_IMAGE_NAME, containerName)
+		driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
 
 		inspectOut := InspectContainer(containerName)
 
@@ -112,11 +114,11 @@ func TestStartCreatesAContainerWithAnAttachedVolume(t *testing.T) {
 
 		defer CleanAll(containerName)
 
-		driver.Start(TEST_IMAGE_NAME, containerName)
+		driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
 
 		inspectOut := InspectContainer(containerName)
 
-		if len(inspectOut.Mounts) != 1 {
+		if len(inspectOut.Mounts) == 0 {
 			t.Error("Expected a mount.")
 		}
 
@@ -135,6 +137,28 @@ func TestStartCreatesAContainerWithAnAttachedVolume(t *testing.T) {
 	})
 }
 
+func TestStartCreatesAContainerWithAReadonlyCacheVolume(t *testing.T) {
+	MustPodman(t, func() {
+		driver := PodmanDriver{}
+		containerName := "test-container"
+
+		defer CleanAll(containerName)
+		cacheRoot := t.TempDir()
+		driver.Start(TEST_IMAGE_NAME, containerName, cacheRoot)
+
+		inspectOut := InspectContainer(containerName)
+
+		cacheVolume := inspectOut.Mounts[1]
+
+		if cacheVolume.ReadWrite {
+			t.Error("Expected cache volume to be read-only.")
+		}
+
+		if cacheVolume.Destination != "/cache" || cacheVolume.Source != cacheRoot {
+			t.Error("Expected cache volume to map from provided cache root to /cache")
+		}
+	})
+}
 func TestStopStopsAContainerByName(t *testing.T) {
 	MustPodman(t, func() {
 		driver := PodmanDriver{}
@@ -142,7 +166,7 @@ func TestStopStopsAContainerByName(t *testing.T) {
 
 		defer CleanAll(containerName)
 
-		driver.Start(TEST_IMAGE_NAME, containerName)
+		driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
 		driver.Stop(containerName)
 
 		if ContainerExists(containerName) {
@@ -158,7 +182,7 @@ func TestStopCleansUpWorkspaceVolume(t *testing.T) {
 
 		defer CleanAll(containerName)
 
-		driver.Start(TEST_IMAGE_NAME, containerName)
+		driver.Start(TEST_IMAGE_NAME, containerName, t.TempDir())
 		driver.Stop(containerName)
 
 		if VolumeExists(fmt.Sprintf("%s-workspace", containerName)) {
@@ -137,7 +137,7 @@ func (r *Runner) RunCommandInContainer(containerId string, command string, optio
 //
 // The container is started before the job steps are run and cleaned up after.
 func (r *Runner) RunJobInContainer(imageUri string, containerId string, jobContext context.Context) error {
-	r.Driver.Start(imageUri, containerId)
+	r.Driver.Start(imageUri, containerId, r.Cache.Root)
 
 	r.deferred.Queue(fmt.Sprintf("job-%s", containerId), func() {
 		logger.Info("Started cleaning up %s", containerId)