Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
9c42f62
feat(sandbox): pre-pause guest reclaim via envd
ValentaTomas May 4, 2026
c041b7d
fix(sandbox): forward access token on reclaim envd call
ValentaTomas May 4, 2026
2a02232
feat(sandbox): per-step timeouts for pre-pause reclaim chain
ValentaTomas May 4, 2026
8698667
fix(sandbox): correct reclaim chain — drop master flag, fix timeout s…
ValentaTomas May 4, 2026
13ca893
refactor(sandbox): extract StartEnvdProcess helper; reuse from resume…
ValentaTomas May 4, 2026
db30f55
fix(sandbox): add missing envd_process.go helper
ValentaTomas May 4, 2026
e4a31de
fix(sandbox): silence reclaim step output and surface failures
ValentaTomas May 4, 2026
40a1e24
refactor(sandbox): inline envd reclaim call; drop --foreground
ValentaTomas May 6, 2026
294c7e4
Merge branch 'main' into feat/sandbox-pause-reclaim
ValentaTomas May 6, 2026
a7e6b75
chore(featureflags): isolate reclaim flags in own var block
ValentaTomas May 6, 2026
ec3db39
refactor(sandbox): extract StartEnvdBash; reuse from reclaim + resume…
ValentaTomas May 6, 2026
801a37f
refactor(reclaim): use DurationFlag and run via /bin/sh
ValentaTomas May 7, 2026
d03b564
fix(reclaim): skip sub-ms step durations to avoid no-timeout
ValentaTomas May 7, 2026
2760098
fix(sandbox): parameterize StartEnvdShell binary; keep bash for resum…
ValentaTomas May 7, 2026
c3f843b
refactor(reclaim): collapse 4 flags into one targeted JSON flag
ValentaTomas May 7, 2026
eecb55b
chore(reclaim): drop NewOfflineClient, trim docstrings
ValentaTomas May 8, 2026
e23ee08
chore(reclaim): use int ms directly, drop string parsing
ValentaTomas May 8, 2026
76f3b04
chore(reclaim): rename LD flag to guest-pause-reclaim
ValentaTomas May 8, 2026
1a742bf
perf(sandbox): reorder reclaim chain to fstrim → sync → drop_caches →…
ValentaTomas May 8, 2026
1b451c2
fix(resume-build): restore 10-minute upper bound on runCommandInSandbox
ValentaTomas May 8, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions packages/orchestrator/cmd/resume-build/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,18 @@ func main() {
cmdSignalPause := flag.String("cmd-signal-pause", "", "execute command in sandbox, then wait for SIGUSR1 before pausing")
optimize := flag.Bool("optimize", false, "collect fresh prefetch mapping after pause (resumes snapshot to record page faults)")

// Enables the pre-pause reclaim chain with sensible per-step caps.
reclaim := flag.Bool("reclaim", false, "enable pre-pause reclaim chain (sync 500ms, drop_caches 200ms, compact 1s, fstrim 500ms)")

flag.Parse()

if *reclaim {
featureflags.NewIntFlag("reclaim-sync-timeout-ms", 500)
featureflags.NewIntFlag("reclaim-drop-caches-timeout-ms", 200)
featureflags.NewIntFlag("reclaim-compact-memory-timeout-ms", 1000)
featureflags.NewIntFlag("reclaim-fstrim-timeout-ms", 500)
Comment thread
ValentaTomas marked this conversation as resolved.
Outdated
}
Comment thread
ValentaTomas marked this conversation as resolved.

if *fromBuild == "" {
log.Fatal("-from-build required")
}
Expand Down
116 changes: 116 additions & 0 deletions packages/orchestrator/pkg/sandbox/reclaim.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
package sandbox

import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"time"

"connectrpc.com/connect"
"go.uber.org/zap"

"github.com/e2b-dev/infra/packages/shared/pkg/consts"
"github.com/e2b-dev/infra/packages/shared/pkg/featureflags"
"github.com/e2b-dev/infra/packages/shared/pkg/grpc"
"github.com/e2b-dev/infra/packages/shared/pkg/grpc/envd/process"
"github.com/e2b-dev/infra/packages/shared/pkg/grpc/envd/process/processconnect"
"github.com/e2b-dev/infra/packages/shared/pkg/logger"
)

// reclaimStep pairs one guest shell command with the LD feature flag that
// caps its runtime. A cap of 0 (the flag default) disables the step; see
// buildReclaimScript for how caps become `timeout -s KILL` ceilings.
type reclaimStep struct {
	flag featureflags.IntFlag
	cmd  string
}

// Order matters: sync makes drop_caches more effective; drop_caches gives
// compact_memory more headroom; fstrim wants a stable FS view.
// The commands run inside the guest via envd under the "root" user header
// (set in bestEffortReclaim), which the /proc/sys writes presumably require.
var reclaimSteps = []reclaimStep{
	{featureflags.ReclaimSyncTimeoutMs, "sync"},
	{featureflags.ReclaimDropCachesTimeoutMs, "echo 3 > /proc/sys/vm/drop_caches"},
	{featureflags.ReclaimCompactMemoryTimeoutMs, "echo 1 > /proc/sys/vm/compact_memory"},
	{featureflags.ReclaimFstrimTimeoutMs, "fstrim -av"},
}

// reclaimOuterSlack is added to the sum of per-step caps to absorb shell
// start / envd round-trip overhead, so the outer context deadline in
// bestEffortReclaim does not fire before the in-guest `timeout` ceilings do.
const reclaimOuterSlack = 500 * time.Millisecond

// buildReclaimScript composes a chain where each step has its own
// `timeout -s KILL` ceiling. Steps with cap=0 are skipped. Returns
// ("", 0) when every step is disabled.
func (s *Sandbox) buildReclaimScript(ctx context.Context) (string, time.Duration) {
var (
parts []string
sum time.Duration
)
for _, st := range reclaimSteps {
ms := s.featureFlags.IntFlag(ctx, st.flag)
Comment thread
ValentaTomas marked this conversation as resolved.
Outdated
if ms <= 0 {
continue
}
// `timeout` accepts fractional seconds (s/m/h/d), not `ms`. Output
// is dropped; non-zero status is captured into `rc` so the final
// exit code surfaces failures without short-circuiting later steps.
secs := float64(ms) / 1000.0
parts = append(parts, fmt.Sprintf("timeout -s KILL %.3f sh -c %q >/dev/null 2>&1 || rc=$?", secs, st.cmd))
sum += time.Duration(ms) * time.Millisecond
}
if len(parts) == 0 {
return "", 0
}

return "rc=0; " + strings.Join(parts, "; ") + "; exit $rc", sum + reclaimOuterSlack
}

// bestEffortReclaim asks envd to reclaim guest memory + disk before pause.
// Per-step output is silenced inside the guest; we only log when envd
// itself errors or the script reports a non-zero exit code.
func (s *Sandbox) bestEffortReclaim(ctx context.Context) {
script, timeout := s.buildReclaimScript(ctx)
if script == "" {
return
}

ctx, span := tracer.Start(ctx, "envd-reclaim")
defer span.End()

rcCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

addr := fmt.Sprintf("http://%s:%d", s.Slot.HostIPString(), consts.DefaultEnvdServerPort)
pc := processconnect.NewProcessClient(&http.Client{Transport: sandboxHttpClient.Transport}, addr)

req := connect.NewRequest(&process.StartRequest{
Process: &process.ProcessConfig{Cmd: "/bin/bash", Args: []string{"-c", script}},
})
Comment thread
cursor[bot] marked this conversation as resolved.
Outdated
req.Header().Set("Connect-Timeout-Ms", strconv.FormatInt(int64(timeout/time.Millisecond), 10))
if s.Config.Envd.AccessToken != nil {
req.Header().Set("X-Access-Token", *s.Config.Envd.AccessToken)
}
grpc.SetUserHeader(req.Header(), "root")
Comment thread
ValentaTomas marked this conversation as resolved.
Outdated

stream, err := pc.Start(rcCtx, req)
if err != nil {
logger.L().Warn(ctx, "envd reclaim failed", logger.WithSandboxID(s.Runtime.SandboxID), zap.Error(err))

return
}
defer stream.Close()

var exitCode int32
for stream.Receive() {
if end := stream.Msg().GetEvent().GetEnd(); end != nil {
exitCode = end.GetExitCode()
}
}
if err := stream.Err(); err != nil {
logger.L().Warn(ctx, "envd reclaim stream error", logger.WithSandboxID(s.Runtime.SandboxID), zap.Error(err))

return
}
if exitCode != 0 {
logger.L().Warn(ctx, "envd reclaim non-zero exit", logger.WithSandboxID(s.Runtime.SandboxID), zap.Int32("exit_code", exitCode))
}
}
13 changes: 11 additions & 2 deletions packages/orchestrator/pkg/sandbox/sandbox.go
Original file line number Diff line number Diff line change
Expand Up @@ -217,6 +217,8 @@ type Sandbox struct {
files *storage.SandboxFiles
cleanup *Cleanup

featureFlags *featureflags.Client

process *fc.Process
cgroupHandle *cgroup.CgroupHandle

Expand Down Expand Up @@ -457,7 +459,8 @@ func (f *Factory) CreateSandbox(
files: sandboxFiles,
process: fcHandle,

cleanup: cleanup,
cleanup: cleanup,
featureFlags: f.featureFlags,

APIStoredConfig: apiConfigToStore,

Expand Down Expand Up @@ -797,7 +800,8 @@ func (f *Factory) ResumeSandbox(
files: sandboxFiles,
process: fcHandle,

cleanup: cleanup,
cleanup: cleanup,
featureFlags: f.featureFlags,

APIStoredConfig: apiConfigToStore,
CABundle: f.egressProxy.CABundle(),
Expand Down Expand Up @@ -1051,6 +1055,11 @@ func (s *Sandbox) Pause(
// Stop the health check before pausing the VM
s.Checks.Stop()

// Best-effort pre-pause guest reclaim (sync, drop_caches, compact_memory,
// fstrim) on the live VM via envd. Per-step caps are LD-flag-driven; all
// default to 0 which disables the chain entirely. Non-fatal.
s.bestEffortReclaim(ctx)

if err := s.process.Pause(ctx); err != nil {
return nil, fmt.Errorf("failed to pause VM: %w", err)
}
Expand Down
11 changes: 10 additions & 1 deletion packages/shared/pkg/featureflags/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,16 @@ var (
BestOfKMaxOvercommit = NewIntFlag("best-of-k-max-overcommit", 400) // Default R=4 (stored as percentage, max over-commit ratio)
BestOfKAlpha = NewIntFlag("best-of-k-alpha", 50) // Default Alpha=0.5 (stored as percentage for int flag, current usage weight)
EnvdInitTimeoutMilliseconds = NewIntFlag("envd-init-request-timeout-milliseconds", 50) // Timeout for envd init request in milliseconds
HostStatsSamplingInterval = NewIntFlag("host-stats-sampling-interval", 5000) // Host stats sampling interval in milliseconds (default 5s)
// Per-step ceilings for the pre-pause guest reclaim chain (run via envd
// before snapshot). Each step is wrapped in `timeout -s KILL`; 0 skips it.
// All default to 0, so the feature is disabled until an operator opts in
// per step. A stuck step cannot starve the rest, so compact_memory always
// runs as long as its own cap is > 0.
ReclaimSyncTimeoutMs = NewIntFlag("reclaim-sync-timeout-ms", 0)
ReclaimDropCachesTimeoutMs = NewIntFlag("reclaim-drop-caches-timeout-ms", 0)
ReclaimCompactMemoryTimeoutMs = NewIntFlag("reclaim-compact-memory-timeout-ms", 0)
ReclaimFstrimTimeoutMs = NewIntFlag("reclaim-fstrim-timeout-ms", 0)
HostStatsSamplingInterval = NewIntFlag("host-stats-sampling-interval", 5000) // Host stats sampling interval in milliseconds (default 5s)
MaxCacheWriterConcurrencyFlag = NewIntFlag("max-cache-writer-concurrency", 10)

// BuildCacheMaxUsagePercentage the maximum percentage of the cache disk storage
Expand Down
Loading