feat: cli option to enable the new action cache (#1954)
* Enable the new action cache
* fix
* fix: CopyTarStream (Docker)
* suppress panic in test
* add a cli option for opt in
* fixups
* add package
* fix
* rc.Config nil in test???
* add feature flag
* patch
* Fix respect --action-cache-path

Co-authored-by: Björn Brauer <zaubernerd@zaubernerd.de>

* add remote reusable workflow to ActionCache
* fixup

---------

Co-authored-by: Björn Brauer <zaubernerd@zaubernerd.de>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
parent cd40f3fe9b
commit f7a846d2f5
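For orientation before the hunks: the patch programs against an ActionCache abstraction whose declaration is not part of this diff. The sketch below is a reconstruction inferred only from the call sites in this commit (Fetch resolving a ref to a commit SHA, GetTarArchive streaming cached repository content); the actual interface in the runner package may differ in detail.

package runner

import (
	"context"
	"io"
)

// Reconstructed sketch only: the method set is inferred from how the cache is
// used in this commit and is not the authoritative declaration.
type ActionCache interface {
	// Fetch makes url@ref available under cacheDir and returns the resolved commit SHA.
	Fetch(ctx context.Context, cacheDir, url, ref, token string) (string, error)
	// GetTarArchive streams the cached content at sha, restricted to includePrefix.
	GetTarArchive(ctx context.Context, cacheDir, sha, includePrefix string) (io.ReadCloser, error)
}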
@@ -57,6 +57,7 @@ type Input struct {
 	actionCachePath     string
 	logPrefixJobID      bool
 	networkName         string
+	useNewActionCache   bool
 }
 
 func (i *Input) resolve(path string) string {
@@ -98,6 +98,7 @@ func Execute(ctx context.Context, version string) {
 	rootCmd.PersistentFlags().Uint16VarP(&input.cacheServerPort, "cache-server-port", "", 0, "Defines the port where the artifact server listens. 0 means a randomly available port.")
 	rootCmd.PersistentFlags().StringVarP(&input.actionCachePath, "action-cache-path", "", filepath.Join(CacheHomeDir, "act"), "Defines the path where the actions get cached and host workspaces created.")
 	rootCmd.PersistentFlags().StringVarP(&input.networkName, "network", "", "host", "Sets a docker network name. Defaults to host.")
+	rootCmd.PersistentFlags().BoolVarP(&input.useNewActionCache, "use-new-action-cache", "", false, "Enable using the new Action Cache for storing Actions locally")
 	rootCmd.SetArgs(args())
 
 	if err := rootCmd.Execute(); err != nil {
@@ -617,6 +618,11 @@ func newRunCommand(ctx context.Context, input *Input) func(*cobra.Command, []str
 		Matrix:               matrixes,
 		ContainerNetworkMode: docker_container.NetworkMode(input.networkName),
 	}
+	if input.useNewActionCache {
+		config.ActionCache = &runner.GoGitActionCache{
+			Path: config.ActionCacheDir,
+		}
+	}
 	r, err := runner.New(config)
 	if err != nil {
 		return err
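Reading of the hunk above: the new cache stays opt-in. Passing --use-new-action-cache makes newRunCommand install a runner.GoGitActionCache rooted at the existing --action-cache-path location, and without the flag the previous behaviour is unchanged.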
@@ -671,10 +671,28 @@ func (cr *containerReference) waitForCommand(ctx context.Context, isTerminal boo
 }
 
 func (cr *containerReference) CopyTarStream(ctx context.Context, destPath string, tarStream io.Reader) error {
-	err := cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
+	// Mkdir
+	buf := &bytes.Buffer{}
+	tw := tar.NewWriter(buf)
+	_ = tw.WriteHeader(&tar.Header{
+		Name:     destPath,
+		Mode:     777,
+		Typeflag: tar.TypeDir,
+	})
+	tw.Close()
+	err := cr.cli.CopyToContainer(ctx, cr.id, "/", buf, types.CopyToContainerOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to mkdir to copy content to container: %w", err)
+	}
+	// Copy Content
+	err = cr.cli.CopyToContainer(ctx, cr.id, destPath, tarStream, types.CopyToContainerOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to copy content to container: %w", err)
 	}
+	// If this fails, then folders have wrong permissions on non root container
+	if cr.UID != 0 || cr.GID != 0 {
+		_ = cr.Exec([]string{"chown", "-R", fmt.Sprintf("%d:%d", cr.UID, cr.GID), destPath}, nil, "0", "")(ctx)
+	}
 	return nil
 }
 
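The extra round trip before the real copy appears to exist because Docker's CopyToContainer extracts an archive into an already existing directory: the patch first ships a one-entry tar that declares destPath as a directory (extracted at "/"), then streams the actual content, and finally chowns the tree so non-root containers can access it.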
@@ -148,12 +148,10 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
 			workflow.Name = wf.workflowDirEntry.Name()
 		}
 
-		jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
-		for k := range workflow.Jobs {
-			if ok := jobNameRegex.MatchString(k); !ok {
-				_ = f.Close()
-				return nil, fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
-			}
+		err = validateJobName(workflow)
+		if err != nil {
+			_ = f.Close()
+			return nil, err
 		}
 
 		wp.workflows = append(wp.workflows, workflow)
@@ -164,6 +162,42 @@ func NewWorkflowPlanner(path string, noWorkflowRecurse bool) (WorkflowPlanner, e
 	return wp, nil
 }
 
+func NewSingleWorkflowPlanner(name string, f io.Reader) (WorkflowPlanner, error) {
+	wp := new(workflowPlanner)
+
+	log.Debugf("Reading workflow %s", name)
+	workflow, err := ReadWorkflow(f)
+	if err != nil {
+		if err == io.EOF {
+			return nil, fmt.Errorf("unable to read workflow '%s': file is empty: %w", name, err)
+		}
+		return nil, fmt.Errorf("workflow is not valid. '%s': %w", name, err)
+	}
+	workflow.File = name
+	if workflow.Name == "" {
+		workflow.Name = name
+	}
+
+	err = validateJobName(workflow)
+	if err != nil {
+		return nil, err
+	}
+
+	wp.workflows = append(wp.workflows, workflow)
+
+	return wp, nil
+}
+
+func validateJobName(workflow *Workflow) error {
+	jobNameRegex := regexp.MustCompile(`^([[:alpha:]_][[:alnum:]_\-]*)$`)
+	for k := range workflow.Jobs {
+		if ok := jobNameRegex.MatchString(k); !ok {
+			return fmt.Errorf("workflow is not valid. '%s': Job name '%s' is invalid. Names must start with a letter or '_' and contain only alphanumeric characters, '-', or '_'", workflow.Name, k)
+		}
+	}
+	return nil
+}
+
 type workflowPlanner struct {
 	workflows []*Workflow
 }
@@ -44,7 +44,7 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
 	reader, closer, err := readFile("action.yml")
 	if os.IsNotExist(err) {
 		reader, closer, err = readFile("action.yaml")
-		if err != nil {
+		if os.IsNotExist(err) {
 			if _, closer, err2 := readFile("Dockerfile"); err2 == nil {
 				closer.Close()
 				action := &model.Action{
@@ -91,6 +91,8 @@ func readActionImpl(ctx context.Context, step *model.Step, actionDir string, act
 				}
 			}
 			return nil, err
+		} else if err != nil {
+			return nil, err
 		}
 	} else if err != nil {
 		return nil, err
@@ -110,6 +112,17 @@ func maybeCopyToActionDir(ctx context.Context, step actionStep, actionDir string
 	if stepModel.Type() != model.StepTypeUsesActionRemote {
 		return nil
 	}
+
+	if rc.Config != nil && rc.Config.ActionCache != nil {
+		raction := step.(*stepActionRemote)
+		ta, err := rc.Config.ActionCache.GetTarArchive(ctx, raction.cacheDir, raction.resolvedSha, "")
+		if err != nil {
+			return err
+		}
+		defer ta.Close()
+		return rc.JobContainer.CopyTarStream(ctx, containerActionDir, ta)
+	}
+
 	if err := removeGitIgnore(ctx, actionDir); err != nil {
 		return err
 	}
@@ -265,6 +278,13 @@ func execAsDocker(ctx context.Context, step actionStep, actionName string, based
 			return err
 		}
 		defer buildContext.Close()
+	} else if rc.Config.ActionCache != nil {
+		rstep := step.(*stepActionRemote)
+		buildContext, err = rc.Config.ActionCache.GetTarArchive(ctx, rstep.cacheDir, rstep.resolvedSha, contextDir)
+		if err != nil {
+			return err
+		}
+		defer buildContext.Close()
 	}
 	prepImage = container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
 		ContextDir: contextDir,
@@ -1,6 +1,7 @@
 package runner
 
 import (
+	"archive/tar"
 	"context"
 	"errors"
 	"fmt"
@@ -33,12 +34,51 @@ func newRemoteReusableWorkflowExecutor(rc *RunContext) common.Executor {
 	filename := fmt.Sprintf("%s/%s@%s", remoteReusableWorkflow.Org, remoteReusableWorkflow.Repo, remoteReusableWorkflow.Ref)
 	workflowDir := fmt.Sprintf("%s/%s", rc.ActionCacheDir(), safeFilename(filename))
 
+	if rc.Config.ActionCache != nil {
+		return newActionCacheReusableWorkflowExecutor(rc, filename, remoteReusableWorkflow)
+	}
+
 	return common.NewPipelineExecutor(
 		newMutexExecutor(cloneIfRequired(rc, *remoteReusableWorkflow, workflowDir)),
 		newReusableWorkflowExecutor(rc, workflowDir, fmt.Sprintf("./.github/workflows/%s", remoteReusableWorkflow.Filename)),
 	)
 }
 
+func newActionCacheReusableWorkflowExecutor(rc *RunContext, filename string, remoteReusableWorkflow *remoteReusableWorkflow) common.Executor {
+	return func(ctx context.Context) error {
+		ghctx := rc.getGithubContext(ctx)
+		remoteReusableWorkflow.URL = ghctx.ServerURL
+		sha, err := rc.Config.ActionCache.Fetch(ctx, filename, remoteReusableWorkflow.CloneURL(), remoteReusableWorkflow.Ref, ghctx.Token)
+		if err != nil {
+			return err
+		}
+		archive, err := rc.Config.ActionCache.GetTarArchive(ctx, filename, sha, fmt.Sprintf(".github/workflows/%s", remoteReusableWorkflow.Filename))
+		if err != nil {
+			return err
+		}
+		defer archive.Close()
+		treader := tar.NewReader(archive)
+		if _, err = treader.Next(); err != nil {
+			return err
+		}
+		planner, err := model.NewSingleWorkflowPlanner(remoteReusableWorkflow.Filename, treader)
+		if err != nil {
+			return err
+		}
+		plan, err := planner.PlanEvent("workflow_call")
+		if err != nil {
+			return err
+		}
+
+		runner, err := NewReusableWorkflowRunner(rc)
+		if err != nil {
+			return err
+		}
+
+		return runner.NewPlanExecutor(plan)(ctx)
+	}
+}
+
 var (
 	executorLock sync.Mutex
 )
@@ -59,6 +59,7 @@ type Config struct {
 	ReplaceGheActionTokenWithGithubCom string                        // Token of private action repo on GitHub.
 	Matrix                             map[string]map[string]bool    // Matrix config to run
 	ContainerNetworkMode               docker_container.NetworkMode  // the network mode of job containers (the value of --network)
+	ActionCache                        ActionCache                   // Use a custom ActionCache Implementation
 }
 
 type caller struct {
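Because the cache is just a field on runner.Config, embedders of the runner package can presumably opt in without the CLI flag. A minimal sketch, assuming the upstream module path github.com/nektos/act and a hypothetical cache directory:

package main

import (
	"github.com/nektos/act/pkg/runner" // assumed module path
)

func main() {
	// Sketch only: every field except the cache wiring is omitted, so this
	// config is not sufficient to actually execute a workflow.
	config := &runner.Config{
		ActionCacheDir: "/tmp/act-cache", // hypothetical location
	}
	config.ActionCache = &runner.GoGitActionCache{
		Path: config.ActionCacheDir,
	}
	_, _ = runner.New(config)
}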
@@ -34,6 +34,9 @@ const (
 	stepStagePost
 )
 
+// Controls how many symlinks are resolved for local and remote Actions
+const maxSymlinkDepth = 10
+
 func (s stepStage) String() string {
 	switch s {
 	case stepStagePre:
@@ -307,3 +310,13 @@ func mergeIntoMapCaseInsensitive(target map[string]string, maps ...map[string]st
 		}
 	}
 }
+
+func symlinkJoin(filename, sym, parent string) (string, error) {
+	dir := path.Dir(filename)
+	dest := path.Join(dir, sym)
+	prefix := path.Clean(parent) + "/"
+	if strings.HasPrefix(dest, prefix) || prefix == "./" {
+		return dest, nil
+	}
+	return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
+}
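To make the containment rule concrete, here is a small self-contained example (with hypothetical paths) showing how symlinkJoin resolves a link target that stays inside the parent directory and rejects one that escapes it; the helper is reproduced verbatim from the hunk above so the snippet runs on its own.

package main

import (
	"fmt"
	"path"
	"strings"
)

// symlinkJoin is copied from the patch above so this example is self-contained.
func symlinkJoin(filename, sym, parent string) (string, error) {
	dir := path.Dir(filename)
	dest := path.Join(dir, sym)
	prefix := path.Clean(parent) + "/"
	if strings.HasPrefix(dest, prefix) || prefix == "./" {
		return dest, nil
	}
	return "", fmt.Errorf("symlink tries to access file '%s' outside of '%s'", strings.ReplaceAll(dest, "'", "''"), strings.ReplaceAll(parent, "'", "''"))
}

func main() {
	// A link that stays inside the action directory resolves to the target file.
	fmt.Println(symlinkJoin("/actions/checkout/action.yml", "action.yaml", "/actions/checkout"))
	// -> /actions/checkout/action.yaml <nil>

	// A link that points outside the action directory is rejected.
	fmt.Println(symlinkJoin("/actions/checkout/action.yml", "../../etc/passwd", "/actions/checkout"))
	// -> error: symlink tries to access file '/etc/passwd' outside of '/actions/checkout'
}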
@@ -3,7 +3,10 @@ package runner
 import (
 	"archive/tar"
 	"context"
+	"errors"
+	"fmt"
 	"io"
+	"io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -42,15 +45,31 @@ func (sal *stepActionLocal) main() common.Executor {
 	localReader := func(ctx context.Context) actionYamlReader {
 		_, cpath := getContainerActionPaths(sal.Step, path.Join(actionDir, ""), sal.RunContext)
 		return func(filename string) (io.Reader, io.Closer, error) {
-			tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, path.Join(cpath, filename))
-			if err != nil {
-				return nil, nil, os.ErrNotExist
-			}
-			treader := tar.NewReader(tars)
-			if _, err := treader.Next(); err != nil {
-				return nil, nil, os.ErrNotExist
-			}
-			return treader, tars, nil
+			spath := path.Join(cpath, filename)
+			for i := 0; i < maxSymlinkDepth; i++ {
+				tars, err := sal.RunContext.JobContainer.GetContainerArchive(ctx, spath)
+				if errors.Is(err, fs.ErrNotExist) {
+					return nil, nil, err
+				} else if err != nil {
+					return nil, nil, fs.ErrNotExist
+				}
+				treader := tar.NewReader(tars)
+				header, err := treader.Next()
+				if errors.Is(err, io.EOF) {
+					return nil, nil, os.ErrNotExist
+				} else if err != nil {
+					return nil, nil, err
+				}
+				if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
+					spath, err = symlinkJoin(spath, header.Linkname, cpath)
+					if err != nil {
+						return nil, nil, err
+					}
+				} else {
+					return treader, tars, nil
+				}
+			}
+			return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
 		}
 	}
 
@@ -1,6 +1,7 @@
 package runner
 
 import (
+	"archive/tar"
 	"context"
 	"errors"
 	"fmt"
@@ -28,6 +29,8 @@ type stepActionRemote struct {
 	action       *model.Action
 	env          map[string]string
 	remoteAction *remoteAction
+	cacheDir     string
+	resolvedSha  string
 }
 
 var (
|
@ -60,6 +63,46 @@ func (sar *stepActionRemote) prepareActionExecutor() common.Executor {
|
||||||
github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom
|
github.Token = sar.RunContext.Config.ReplaceGheActionTokenWithGithubCom
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if sar.RunContext.Config.ActionCache != nil {
|
||||||
|
cache := sar.RunContext.Config.ActionCache
|
||||||
|
|
||||||
|
var err error
|
||||||
|
sar.cacheDir = fmt.Sprintf("%s/%s", sar.remoteAction.Org, sar.remoteAction.Repo)
|
||||||
|
sar.resolvedSha, err = cache.Fetch(ctx, sar.cacheDir, sar.remoteAction.URL+"/"+sar.cacheDir, sar.remoteAction.Ref, github.Token)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
remoteReader := func(ctx context.Context) actionYamlReader {
|
||||||
|
return func(filename string) (io.Reader, io.Closer, error) {
|
||||||
|
spath := filename
|
||||||
|
for i := 0; i < maxSymlinkDepth; i++ {
|
||||||
|
tars, err := cache.GetTarArchive(ctx, sar.cacheDir, sar.resolvedSha, spath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
treader := tar.NewReader(tars)
|
||||||
|
header, err := treader.Next()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
if header.FileInfo().Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||||
|
spath, err = symlinkJoin(spath, header.Linkname, ".")
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return treader, tars, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, nil, fmt.Errorf("max depth %d of symlinks exceeded while reading %s", maxSymlinkDepth, spath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actionModel, err := sar.readAction(ctx, sar.Step, sar.resolvedSha, sar.remoteAction.Path, remoteReader(ctx), os.WriteFile)
|
||||||
|
sar.action = actionModel
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
|
actionDir := fmt.Sprintf("%s/%s", sar.RunContext.ActionCacheDir(), safeFilename(sar.Step.Uses))
|
||||||
gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
|
gitClone := stepActionRemoteNewCloneExecutor(git.NewGitCloneExecutorInput{
|
||||||
|
|