NixActions is built on four core contracts: Step, Executor, Job, and Workflow.
Definition: A Step is an attribute set with a bash script that gets compiled to a Nix derivation.
Note: "Step" is the execution primitive. "Action" refers to reusable components from
lib/actions/ or SDK-defined actions.
Step :: {
name :: String,
bash :: String, # Required: bash script to execute
deps :: [Derivation] = [],
env :: AttrSet String = {},
workdir :: Path | Null = null,
condition :: Condition | Null = null,
retry :: RetryConfig | Null = null,
timeout :: Int | Null = null,
}
- Step is defined as an attrset with a `bash` field (required)
- Compiled to a derivation at build time via `writeShellApplication`
- Use `deps` to add packages available in PATH during execution
- For non-bash scripts (Python, JS, etc.), wrap with `lib.getExe`
- Build-time validation (if a step doesn't build -> the workflow doesn't build)
- Caching (steps are built once, reused across jobs)
# Simple step:
{
name = "test";
bash = "npm test";
deps = [ pkgs.nodejs ];
}
# With condition:
{
name = "deploy";
bash = "kubectl apply -f k8s/";
deps = [ pkgs.kubectl ];
condition = ''[ "$BRANCH" = "main" ]'';
}
# Running Python script:
{
name = "analyze";
bash = ''
${lib.getExe (pkgs.writers.writePython3 "analyze" {} ''
print("Analyzing...")
'')}
'';
}
# Running any executable:
{
name = "custom-tool";
bash = "${lib.getExe pkgs.ripgrep} -r 'TODO' .";
}
Definition: Abstraction of "where to execute" with workspace-level and job-level lifecycle hooks
Executor :: {
name :: String, # Unique identifier (can be customized)
copyRepo :: Bool, # Whether to copy repository to job directory (default: true)
# === WORKSPACE LEVEL (for entire workflow) ===
setupWorkspace :: {
actionDerivations :: [Derivation] # ALL steps from ALL jobs sharing this executor
} -> Bash,
cleanupWorkspace :: {
actionDerivations :: [Derivation]
} -> Bash,
# === JOB LEVEL (for each job) ===
setupJob :: {
jobName :: String,
actionDerivations :: [Derivation], # Steps for THIS job only
} -> Bash,
executeJob :: {
jobName :: String,
actionDerivations :: [Derivation],
env :: AttrSet,
} -> Bash,
cleanupJob :: {
jobName :: String,
} -> Bash,
# === ARTIFACTS ===
saveArtifact :: {
name :: String,
path :: String,
jobName :: String,
} -> Bash,
restoreArtifact :: {
name :: String,
path :: String, # Target path (relative to job dir)
jobName :: String,
} -> Bash,
}
- Workspace-level hooks (`setupWorkspace`, `cleanupWorkspace`)
  - Called ONCE per unique executor (by name)
  - Receive ALL actionDerivations from ALL jobs sharing this executor
- Job-level hooks (`setupJob`, `executeJob`, `cleanupJob`)
  - Called per job
  - Each job gets isolated resources (directory, container, pod)
- Executor uniqueness by name
  - Executors are deduplicated by the `name` field
  - Custom names allow multiple workspaces with the same configuration
main() {
# 1. Setup workspaces (ONCE per unique executor)
local.setupWorkspace({ actionDerivations = [all local actions] })
oci.setupWorkspace({ actionDerivations = [all oci actions] })
# 2. Run jobs
job_build() {
oci.setupJob({ jobName = "build", actionDerivations = [...] })
restore_artifacts
oci.executeJob({ jobName = "build", actionDerivations, env })
save_artifacts
oci.cleanupJob({ jobName = "build" })
}
}
# Workflow end (via trap)
cleanup_all() {
oci.cleanupWorkspace({ actionDerivations = [...] })
local.cleanupWorkspace({ actionDerivations = [...] })
}
Definition: Composition of steps + executor + metadata
Job :: {
# Required
executor :: Executor,
steps :: [Step],
# Dependencies (GitHub Actions style)
needs :: [String] = [],
# Conditional execution
condition :: Condition = "success()",
# Error handling
continueOnError :: Bool = false,
# Environment (runtime values)
env :: AttrSet String = {},
envFrom :: [Derivation] = [], # Environment provider derivations
# Artifacts
inputs :: [String | { name :: String, path :: String }] = [],
outputs :: AttrSet String = {},
# Retry/timeout
retry :: RetryConfig | Null = null,
timeout :: Int | Null = null,
}
0. Setup workflow environment (on HOST)
- WORKFLOW_ID, NIXACTIONS_ARTIFACTS_DIR
- Load environment variables (immutable for workflow)
For each job:
1. Setup workspace (lazy init)
- executor.setupWorkspace (if not already done)
2. Restore artifacts (if inputs specified)
- executor.restoreArtifact for each input
3. Execute job
- executor.executeJob { jobName, actionDerivations, env }
4. Save artifacts (if outputs specified)
- executor.saveArtifact for each output
At workflow end:
5. Cleanup workspace
- executor.cleanupWorkspace
Definition: DAG of jobs with parallel execution
WorkflowConfig :: {
name :: String,
jobs :: AttrSet Job,
env :: AttrSet String = {},
envFrom :: [Derivation] = [],
retry :: RetryConfig | Null = null,
timeout :: Int | Null = null,
}
mkWorkflow :: {
name :: String,
jobs :: AttrSet Job,
env :: AttrSet String = {},
} -> Derivation # Bash script with all actions pre-compiled
1. Convert all action attrsets to derivations
actions = map mkAction job.actions
2. Collect ALL derivations per executor
executorDerivations = groupBy executor [all actions]
3. Generate setup functions (one per executor)
setup_executor_local() {
setupWorkspace { derivations = [...]; }
}
4. Generate job functions
job_test() {
executeJob {
steps = [ /nix/store/xxx /nix/store/yyy ];
}
}
5. Generate main execution (DAG-based)
Level 0: run jobs in parallel
Level 1: run jobs in parallel
...
Condition ::
| "always()" # Always run
| "failure()" # Run if any previous job failed
| "success()" # Run if all previous jobs succeeded (default)
| "cancelled()" # Run if workflow was cancelled
| BashScript # Any bash that returns exit code 0 (run) or 1 (skip)
RetryConfig :: {
max_attempts :: Int = 1, # Total attempts (1 = no retry)
backoff :: "exponential" | "linear" | "constant" = "exponential",
min_time :: Int = 1, # Minimum delay (seconds)
max_time :: Int = 60, # Maximum delay (seconds)
}
- Actions - Deep dive into actions
- Executors - Executor implementations
- API Reference - Full API documentation