Mirror of https://code.forgejo.org/actions/cache.git, synced 2024-11-22 18:41:16 +01:00
Add paths as an output for easier access
parent 0c45773b62, commit b6cff214f7
3 changed files with 38 additions and 28 deletions
dist/restore-only/index.js (vendored): 63 changed lines
@@ -174,7 +174,7 @@ function saveCache(paths, key, options, enableCrossOsArchive = false) {
checkPaths(paths);
checkKey(key);
const compressionMethod = yield utils.getCompressionMethod();
let cacheId = -1;
const cachePaths = yield utils.resolvePaths(paths);
core.debug('Cache Paths:');
core.debug(`${JSON.stringify(cachePaths)}`);
@@ -1260,7 +1260,7 @@ function retry(name, method, getStatusCode, maxAttempts = constants_1.DefaultRet
exports.retry = retry;
function retryTypedResponse(name, method, maxAttempts = constants_1.DefaultRetryAttempts, delay = constants_1.DefaultRetryDelay) {
return __awaiter(this, void 0, void 0, function* () {
return yield retry(name, method, (response) => response.statusCode, maxAttempts, delay,
// If the error object contains the statusCode property, extract it and return
// an TypedResponse<T> so it can be processed by the retry logic.
(error) => {
@@ -2230,8 +2230,8 @@ class OidcClient {
const res = yield httpclient
.getJson(id_token_url)
.catch(error => {
throw new Error(`Failed to get ID Token. \n
Error Code : ${error.statusCode}\n
Error Message: ${error.result.message}`);
});
const id_token = (_a = res.result) === null || _a === void 0 ? void 0 : _a.value;
@@ -7437,7 +7437,7 @@ class Serializer {
/**
* The provided model mapper.
*/
modelMappers = {},
/**
* Whether the contents are XML or not.
*/
@@ -9553,7 +9553,7 @@ class NodeFetchHttpClient {
body = uploadReportStream;
}
const platformSpecificRequestInit = await this.prepareRequest(httpRequest);
const requestInit = Object.assign({ body: body, headers: httpRequest.headers.rawHeaders(), method: httpRequest.method,
// the types for RequestInit are from the browser, which expects AbortSignal to
// have `reason` and `throwIfAborted`, but these don't exist on our polyfill
// for Node.
@@ -9752,7 +9752,7 @@ class BaseRequestPolicy {
/**
* The next policy in the pipeline. Each policy is responsible for executing the next one if the request is to continue through the pipeline.
*/
_nextPolicy,
/**
* The options that can be passed to a given request policy.
*/
@@ -10730,7 +10730,7 @@ function createTokenCycler(credential, scopes, tokenCyclerOptions) {
const tryGetAccessToken = () => credential.getToken(scopes, getTokenOptions);
// Take advantage of promise chaining to insert an assignment to `token`
// before the refresh can be considered done.
refreshWorker = beginRefresh(tryGetAccessToken, options.retryIntervalInMs,
// If we don't have a token, then we should timeout immediately
(_a = token === null || token === void 0 ? void 0 : token.expiresOnTimestamp) !== null && _a !== void 0 ? _a : Date.now())
.then((_token) => {
@@ -11537,7 +11537,7 @@ class ServiceClient {
* @param credentials - The credentials used for authentication with the service.
* @param options - The service client options that govern the behavior of the client.
*/
constructor(credentials,
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options */
options) {
if (!options) {
@@ -31082,7 +31082,7 @@ function createTokenCycler(credential, scopes, tokenCyclerOptions) {
const tryGetAccessToken = () => credential.getToken(scopes, getTokenOptions);
// Take advantage of promise chaining to insert an assignment to `token`
// before the refresh can be considered done.
refreshWorker = beginRefresh(tryGetAccessToken, options.retryIntervalInMs,
// If we don't have a token, then we should timeout immediately
(_a = token === null || token === void 0 ? void 0 : token.expiresOnTimestamp) !== null && _a !== void 0 ? _a : Date.now())
.then((_token) => {
@@ -35502,7 +35502,7 @@ const fsCreateReadStream = fs__namespace.createReadStream;
* append blob, or page blob.
*/
class BlobClient extends StorageClient {
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -36547,7 +36547,7 @@ class BlobClient extends StorageClient {
* AppendBlobClient defines a set of operations applicable to append blobs.
*/
class AppendBlobClient extends BlobClient {
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -36803,7 +36803,7 @@ class AppendBlobClient extends BlobClient {
* BlockBlobClient defines a set of operations applicable to block blobs.
*/
class BlockBlobClient extends BlobClient {
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -37429,7 +37429,7 @@ class BlockBlobClient extends BlobClient {
if (options.onProgress) {
options.onProgress({ loadedBytes: transferProgress });
}
},
// concurrency should set a smaller value than maxConcurrency, which is helpful to
// reduce the possibility when a outgoing handler waits for stream data, in
// this situation, outgoing handlers are blocked.
@@ -37454,7 +37454,7 @@ class BlockBlobClient extends BlobClient {
* PageBlobClient defines a set of operations applicable to page blobs.
*/
class PageBlobClient extends BlobClient {
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName, blobNameOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -38712,7 +38712,7 @@ class BatchHeaderFilterPolicyFactory {
* @see https://docs.microsoft.com/en-us/rest/api/storageservices/blob-batch
*/
class BlobBatchClient {
constructor(url, credentialOrPipeline,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -38744,7 +38744,7 @@ class BlobBatchClient {
createBatch() {
return new BlobBatch();
}
async deleteBlobs(urlsOrBlobClients, credentialOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -38759,7 +38759,7 @@ class BlobBatchClient {
}
return this.submitBatch(batch);
}
async setBlobsAccessTier(urlsOrBlobClients, credentialOrTier, tierOrOptions,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -38851,7 +38851,7 @@ class BlobBatchClient {
* A ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs.
*/
class ContainerClient extends StorageClient {
constructor(urlOrConnectionString, credentialOrPipelineOrContainerName,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -40565,7 +40565,7 @@ function generateAccountSASQueryParameters(accountSASSignatureValues, sharedKeyC
* to manipulate blob containers.
*/
class BlobServiceClient extends StorageClient {
constructor(url, credentialOrPipeline,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -40597,7 +40597,7 @@ class BlobServiceClient extends StorageClient {
* `BlobEndpoint=https://myaccount.blob.core.windows.net/;QueueEndpoint=https://myaccount.queue.core.windows.net/;FileEndpoint=https://myaccount.file.core.windows.net/;TableEndpoint=https://myaccount.table.core.windows.net/;SharedAccessSignature=sasString`
* @param options - Optional. Options to configure the HTTP pipeline.
*/
static fromConnectionString(connectionString,
// Legacy, no fix for eslint error without breaking. Disable it for this interface.
/* eslint-disable-next-line @azure/azure-sdk/ts-naming-options*/
options) {
@@ -59331,11 +59331,13 @@ var Outputs;
Outputs["CacheHit"] = "cache-hit";
Outputs["CachePrimaryKey"] = "cache-primary-key";
Outputs["CacheMatchedKey"] = "cache-matched-key"; // Output from restore action
+Outputs["CachePath"] = "cache-path";
})(Outputs = exports.Outputs || (exports.Outputs = {}));
var State;
(function (State) {
State["CachePrimaryKey"] = "CACHE_KEY";
State["CacheMatchedKey"] = "CACHE_RESULT";
+State["CachePath"] = "CACHE_PATH";
})(State = exports.State || (exports.State = {}));
var Events;
(function (Events) {
@@ -59407,6 +59409,10 @@ function restoreImpl(stateProvider, earlyExit) {
const primaryKey = core.getInput(constants_1.Inputs.Key, { required: true });
stateProvider.setState(constants_1.State.CachePrimaryKey, primaryKey);
const restoreKeys = utils.getInputAsArray(constants_1.Inputs.RestoreKeys);
+
+// Output the inputted path unchanged
+stateProvider.setState(constants_1.State.CachePath, getInput(constants_1.Inputs.Path));
+
const cachePaths = utils.getInputAsArray(constants_1.Inputs.Path, {
required: true
});
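The functional part of this hunk reads the `path` input and republishes it, unmodified, as the new `cache-path` state, which the state provider further below turns into an action output. From the workflow side this means `cache-path` carries back exactly the (possibly multi-line) string that was passed to `path`. A minimal usage sketch, assuming the restore flavour of this action is referenced as `actions/cache/restore@v4`; the ref, step id, and paths are illustrative:

```yaml
steps:
  - id: restore
    uses: actions/cache/restore@v4   # illustrative ref
    with:
      path: |
        ~/.npm
        dist
      key: npm-cache-${{ runner.os }}

  - name: Show the echoed paths
    run: |
      # cache-path is the `path` input passed through unchanged, not a resolved file list
      echo "${{ steps.restore.outputs.cache-path }}"
```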
@@ -59535,7 +59541,8 @@ class NullStateProvider extends StateProviderBase {
super(...arguments);
this.stateToOutputMap = new Map([
[constants_1.State.CacheMatchedKey, constants_1.Outputs.CacheMatchedKey],
-[constants_1.State.CachePrimaryKey, constants_1.Outputs.CachePrimaryKey]
+[constants_1.State.CachePrimaryKey, constants_1.Outputs.CachePrimaryKey],
+[constants_1.State.CachePath, constants_1.Outputs.CachePath]
]);
this.setState = (key, value) => {
core.setOutput(this.stateToOutputMap.get(key), value);
@@ -59823,7 +59830,7 @@ module.exports = JSON.parse('[[[0,44],"disallowed_STD3_valid"],[[45,46],"valid"]
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __nccwpck_require__(moduleId) {
/******/ // Check if module is in cache
@@ -59837,7 +59844,7 @@ module.exports = JSON.parse('[[[0,44],"disallowed_STD3_valid"],[[45,46],"valid"]
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ var threw = true;
/******/ try {
@@ -59846,16 +59853,16 @@ module.exports = JSON.parse('[[[0,44],"disallowed_STD3_valid"],[[45,46],"valid"]
/******/ } finally {
/******/ if(threw) delete __webpack_module_cache__[moduleId];
/******/ }
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/************************************************************************/
/******/ /* webpack/runtime/compat */
/******/
/******/ if (typeof __nccwpck_require__ !== 'undefined') __nccwpck_require__.ab = __dirname + "/";
/******/
/************************************************************************/
var __webpack_exports__ = {};
// This entry need to be wrapped in an IIFE because it need to be in strict mode.
@@ -17,6 +17,7 @@ The restore action restores a cache. It works similarly to the `cache` action ex
* `cache-hit` - A boolean value to indicate an exact match was found for the key.
* `cache-primary-key` - Cache primary key passed in the input to use in subsequent steps of the workflow.
* `cache-matched-key` - Key of the cache that was restored, it could either be the primary key on cache-hit or a partial/complete match of one of the restore keys.
+* `cache-path` - The list of files, directories, and wildcard patterns passed in the input.

> **Note**
`cache-hit` will be set to `true` only when cache hit occurs for the exact `key` match. For a partial key match via `restore-keys` or a cache miss, it will be set to `false`.
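With `cache-path` documented above next to `cache-primary-key` and `cache-matched-key`, the path list only needs to be written once: a later save step can consume the restore step's outputs instead of repeating it. A hedged sketch of that pattern, assuming the companion `save` flavour of the action accepts the same `path` and `key` inputs; all refs and step ids are illustrative:

```yaml
steps:
  - uses: actions/checkout@v4

  - id: restore
    uses: actions/cache/restore@v4
    with:
      path: ~/.npm
      key: npm-${{ hashFiles('package-lock.json') }}

  - run: npm ci

  - uses: actions/cache/save@v4
    if: steps.restore.outputs.cache-hit != 'true'
    with:
      # Reuse the restore step's outputs rather than repeating the path list and key.
      path: ${{ steps.restore.outputs.cache-path }}
      key: ${{ steps.restore.outputs.cache-primary-key }}
```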
@@ -30,6 +30,8 @@ outputs:
    description: 'A resolved cache key for which cache match was attempted'
  cache-matched-key:
    description: 'Key of the cache that was restored, it could either be the primary key on cache-hit or a partial/complete match of one of the restore keys'
+  cache-path:
+    description: 'The list of files, directories, and wildcard patterns passed in the input'
runs:
  using: 'node20'
  main: '../dist/restore-only/index.js'
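Because `cache-path` is declared here as a regular action output, it can also be promoted to a job output and read from a downstream job through the `needs` context. A minimal sketch; job names, refs, and paths are illustrative:

```yaml
jobs:
  restore:
    runs-on: ubuntu-latest
    outputs:
      cache-path: ${{ steps.restore.outputs.cache-path }}
    steps:
      - id: restore
        uses: actions/cache/restore@v4
        with:
          path: ~/.npm
          key: npm-cache-${{ runner.os }}

  report:
    runs-on: ubuntu-latest
    needs: restore
    steps:
      - run: |
          echo "Paths restored in the previous job:"
          echo "${{ needs.restore.outputs.cache-path }}"
```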