Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .github/workflows/agentics-maintenance.yml
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,33 @@ jobs:
const { main } = require('${{ runner.temp }}/gh-aw/actions/close_expired_pull_requests.cjs');
await main();

cleanup-cache-memory:
if: ${{ !github.event.repository.fork && (github.event_name != 'workflow_dispatch' || github.event.inputs.operation == '') }}
runs-on: ubuntu-slim
permissions:
actions: write
steps:
- name: Checkout actions folder
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
sparse-checkout: |
actions
persist-credentials: false

- name: Setup Scripts
uses: ./actions/setup
with:
destination: ${{ runner.temp }}/gh-aw/actions

- name: Cleanup outdated cache-memory entries
uses: actions/github-script@373c709c69115d41ff229c7e5df9f8788daa9553 # v9
with:
script: |
const { setupGlobals } = require('${{ runner.temp }}/gh-aw/actions/setup_globals.cjs');
setupGlobals(core, github, context, exec, io, getOctokit);
const { main } = require('${{ runner.temp }}/gh-aw/actions/cleanup_cache_memory.cjs');
await main();

run_operation:
if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.operation != '' && github.event.inputs.operation != 'safe_outputs' && github.event.inputs.operation != 'create_labels' && github.event.inputs.operation != 'validate' && !github.event.repository.fork }}
runs-on: ubuntu-slim
Expand Down
335 changes: 335 additions & 0 deletions actions/setup/js/cleanup_cache_memory.cjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,335 @@
// @ts-check
/// <reference types="@actions/github-script" />

const { getErrorMessage } = require("./error_helpers.cjs");

/**
 * Pause for the given number of milliseconds.
 * Used to space out GitHub API requests and avoid throttling.
 * @param {number} ms - How long to wait, in milliseconds
 * @returns {Promise<void>} Resolves once the wait has elapsed
 */
function delay(ms) {
  return new Promise(function (resolve) {
    setTimeout(resolve, ms);
  });
}

/**
 * Query GitHub for the current core rate limit.
 * @param {any} github - GitHub REST client
 * @returns {Promise<number>} Remaining request budget, or -1 if the lookup failed
 */
async function getRateLimitRemaining(github) {
  try {
    const response = await github.rest.rateLimit.get();
    return response.data.rate.remaining;
  } catch {
    // Treat a failed rate-limit lookup as "unknown" rather than aborting;
    // callers interpret -1 as "could not check".
    return -1;
  }
}

/**
 * Minimum rate limit remaining before we skip further operations.
 * This reserves capacity for other workflow jobs and API consumers.
 */
const MIN_RATE_LIMIT_REMAINING = 100;

/**
 * Default delay in ms between delete operations to avoid throttling.
 */
const DELETE_DELAY_MS = 250;

/**
 * Default delay in ms between list pages to avoid throttling.
 */
const LIST_DELAY_MS = 100;

/**
 * Extract the run ID from a cache key.
 * Cache keys follow the pattern: memory-{parts}-{runID},
 * where the run ID is the last purely numeric dash-separated segment.
 *
 * @param {string} key - Cache key string
 * @returns {number | null} The extracted run ID, or null if not found
 */
function extractRunId(key) {
  const segments = key.split("-");
  // Scan from the end: the run ID is the last all-digit segment.
  let index = segments.length;
  while (index-- > 0) {
    if (/^\d+$/.test(segments[index])) {
      return Number.parseInt(segments[index], 10);
    }
  }
  return null;
}

/**
 * Derive the group key from a cache key by stripping the run ID suffix.
 * Caches that differ only by their run ID share the same group key.
 *
 * @param {string} key - Cache key string
 * @returns {string} The group key (everything before the run ID), or the
 *   whole key when no numeric segment exists
 */
function deriveGroupKey(key) {
  const segments = key.split("-");
  for (let idx = segments.length - 1; idx >= 0; idx--) {
    if (/^\d+$/.test(segments[idx])) {
      // Drop the numeric run-ID segment and everything after it.
      return segments.slice(0, idx).join("-");
    }
  }
  // No numeric segment: the key is its own group.
  return key;
}

/**
 * @typedef {Object} CacheEntry
 * @property {number} id - Cache ID for deletion
 * @property {string} key - Full cache key
 * @property {number | null} runId - Extracted run ID
 * @property {string} groupKey - Group key (key without run ID)
 */

/**
 * Hard upper bound on pagination so the listing loop always terminates,
 * even if the API keeps returning full pages (50 pages x 100 = 5000 caches).
 */
const MAX_LIST_PAGES = 50;

/**
 * List all caches starting with "memory-" prefix, handling pagination.
 * Pagination is capped at MAX_LIST_PAGES to guarantee termination.
 *
 * @param {any} github - GitHub REST client
 * @param {string} owner - Repository owner
 * @param {string} repo - Repository name
 * @param {number} [listDelayMs] - Delay between list pages in ms
 * @returns {Promise<CacheEntry[]>} List of cache entries
 */
async function listMemoryCaches(github, owner, repo, listDelayMs = LIST_DELAY_MS) {
  /** @type {CacheEntry[]} */
  const caches = [];
  let page = 1;
  const perPage = 100;

  // Bounded loop: previously `while (true)`, which could spin forever if the
  // API kept returning full pages. The cap bounds both runtime and API usage.
  while (page <= MAX_LIST_PAGES) {
    const response = await github.rest.actions.getActionsCacheList({
      owner,
      repo,
      key: "memory-",
      per_page: perPage,
      page,
      sort: "key",
      direction: "asc",
    });

    const actionsCaches = response.data.actions_caches;
    if (!actionsCaches || actionsCaches.length === 0) {
      break;
    }

    for (const cache of actionsCaches) {
      // Defensive filter: the API prefix match should already guarantee this.
      if (!cache.key || !cache.key.startsWith("memory-")) {
        continue;
      }
      caches.push({
        id: cache.id,
        key: cache.key,
        runId: extractRunId(cache.key),
        groupKey: deriveGroupKey(cache.key),
      });
    }

    // A short page means we reached the final page of results.
    if (actionsCaches.length < perPage) {
      break;
    }

    page++;
    // Throttle between list pages
    await delay(listDelayMs);
  }

  return caches;
}

/**
 * Group caches by their group key (everything except run ID), then for each
 * group keep only the entry with the highest run ID and return the rest for
 * deletion. Entries without a recognizable run ID are skipped entirely.
 *
 * @param {CacheEntry[]} caches - List of cache entries
 * @returns {{ toDelete: CacheEntry[], kept: CacheEntry[] }}
 */
function identifyCachesToDelete(caches) {
  /** @type {Map<string, CacheEntry[]>} */
  const byGroup = new Map();

  for (const entry of caches) {
    // Entries whose key has no numeric run-ID segment cannot be ranked.
    if (entry.runId === null) {
      continue;
    }
    const members = byGroup.get(entry.groupKey) ?? [];
    members.push(entry);
    byGroup.set(entry.groupKey, members);
  }

  /** @type {CacheEntry[]} */
  const toDelete = [];
  /** @type {CacheEntry[]} */
  const kept = [];

  for (const members of byGroup.values()) {
    if (members.length <= 1) {
      // A singleton group is already current; nothing to delete.
      if (members.length === 1) {
        kept.push(members[0]);
      }
      continue;
    }

    // Highest run ID first = most recent entry; it survives, the rest go.
    members.sort((a, b) => (b.runId ?? 0) - (a.runId ?? 0));
    kept.push(members[0]);
    toDelete.push(...members.slice(1));
  }

  return { toDelete, kept };
}

/**
 * Main entry point: cleanup outdated cache-memory caches.
 *
 * Lists all caches with "memory-" prefix, groups them by key prefix,
 * keeps the latest run ID per group, and deletes the rest.
 * Delays between requests avoid GitHub API throttling, and the job skips
 * (or stops early) if the remaining rate limit is too low.
 *
 * Relies on `core`, `github`, and `context` being installed as globals
 * by the workflow's setup step.
 *
 * @param {Object} [options] - Optional configuration for testing
 * @param {number} [options.deleteDelayMs] - Delay between deletions (default: DELETE_DELAY_MS)
 * @param {number} [options.listDelayMs] - Delay between list pages (default: LIST_DELAY_MS)
 */
async function main(options = {}) {
  const deleteDelayMs = options.deleteDelayMs ?? DELETE_DELAY_MS;
  const listDelayMs = options.listDelayMs ?? LIST_DELAY_MS;

  const owner = context.repo.owner;
  const repo = context.repo.repo;

  core.info("🧹 Starting cache-memory cleanup");

  // Check rate limit before starting; skip entirely if budget is too low.
  const initialRemaining = await getRateLimitRemaining(github);
  if (initialRemaining !== -1 && initialRemaining < MIN_RATE_LIMIT_REMAINING) {
    core.warning(`⚠️ Rate limit too low (${initialRemaining} remaining, minimum: ${MIN_RATE_LIMIT_REMAINING}). Skipping cache cleanup.`);
    core.summary.addRaw(`## Cache Memory Cleanup\n\n⚠️ Skipped: Rate limit too low (${initialRemaining} remaining, minimum required: ${MIN_RATE_LIMIT_REMAINING})\n`);
    await core.summary.write();
    return;
  }

  core.info(` Rate limit remaining: ${initialRemaining === -1 ? "unknown" : initialRemaining}`);

  // List all memory caches
  core.info("📋 Listing caches with 'memory-' prefix...");
  let caches;
  try {
    caches = await listMemoryCaches(github, owner, repo, listDelayMs);
  } catch (error) {
    core.error(`❌ Failed to list caches: ${getErrorMessage(error)}`);
    core.summary.addRaw(`## Cache Memory Cleanup\n\n❌ Failed to list caches: ${getErrorMessage(error)}\n`);
    await core.summary.write();
    return;
  }

  core.info(` Found ${caches.length} cache(s) with 'memory-' prefix`);

  if (caches.length === 0) {
    core.info("✅ No memory caches found. Nothing to clean up.");
    core.summary.addRaw("## Cache Memory Cleanup\n\n✅ No memory caches found. Nothing to clean up.\n");
    await core.summary.write();
    return;
  }

  // Identify which caches to delete
  const { toDelete, kept } = identifyCachesToDelete(caches);

  core.info(` Groups with latest entries kept: ${kept.length}`);
  core.info(` Outdated entries to delete: ${toDelete.length}`);

  if (toDelete.length === 0) {
    core.info("✅ No outdated caches to clean up. All entries are current.");
    core.summary.addRaw(`## Cache Memory Cleanup\n\n✅ No outdated caches to clean up.\n- Total memory caches: ${caches.length}\n- Groups: ${kept.length}\n`);
    await core.summary.write();
    return;
  }

  // Delete outdated caches with throttling
  core.info(`🗑️ Deleting ${toDelete.length} outdated cache(s)...`);
  let deletedCount = 0;
  let failedCount = 0;
  /** @type {string[]} */
  const errors = [];

  for (let index = 0; index < toDelete.length; index++) {
    const cache = toDelete[index];
    // Check rate limit periodically (every 10 processed caches). Keyed off
    // the loop index rather than deletedCount so that a run of failed
    // deletions still triggers the re-check.
    if (index > 0 && index % 10 === 0) {
      const remaining = await getRateLimitRemaining(github);
      if (remaining !== -1 && remaining < MIN_RATE_LIMIT_REMAINING) {
        core.warning(`⚠️ Rate limit getting low (${remaining} remaining). Stopping deletion early.`);
        core.warning(` Deleted ${deletedCount} of ${toDelete.length} caches before stopping.`);
        break;
      }
    }

    try {
      await github.rest.actions.deleteActionsCacheById({
        owner,
        repo,
        cache_id: cache.id,
      });
      deletedCount++;
      core.info(` ✓ Deleted cache: ${cache.key} (run ID: ${cache.runId})`);
    } catch (error) {
      // Best-effort: record the failure and keep going with the rest.
      failedCount++;
      const msg = `Failed to delete cache ${cache.key}: ${getErrorMessage(error)}`;
      errors.push(msg);
      core.warning(` ✗ ${msg}`);
    }

    // Throttle between deletions
    await delay(deleteDelayMs);
  }

  // Summary
  core.info(`\n📊 Cache cleanup complete:`);
  core.info(` Total memory caches found: ${caches.length}`);
  core.info(` Groups (latest kept): ${kept.length}`);
  core.info(` Outdated deleted: ${deletedCount}`);
  if (failedCount > 0) {
    core.info(` Failed to delete: ${failedCount}`);
  }

  // Write job summary
  let summary = `## Cache Memory Cleanup\n\n`;
  summary += `| Metric | Count |\n|--------|-------|\n`;
  summary += `| Total memory caches | ${caches.length} |\n`;
  summary += `| Groups (latest kept) | ${kept.length} |\n`;
  summary += `| Outdated deleted | ${deletedCount} |\n`;
  if (failedCount > 0) {
    summary += `| Failed to delete | ${failedCount} |\n`;
  }
  if (errors.length > 0) {
    summary += `\n### Errors\n\n`;
    for (const err of errors) {
      summary += `- ${err}\n`;
    }
  }
  core.summary.addRaw(summary);
  await core.summary.write();

  core.info("✅ Cache memory cleanup finished");
}

// Public API: `main` is the workflow entry point; the helpers and the
// rate-limit threshold are exported for unit testing.
module.exports = {
  main,
  extractRunId,
  deriveGroupKey,
  identifyCachesToDelete,
  listMemoryCaches,
  MIN_RATE_LIMIT_REMAINING,
};
Loading
Loading