"use strict";
|
|
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
if (k2 === undefined) k2 = k;
|
|
Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });
|
|
}) : (function(o, m, k, k2) {
|
|
if (k2 === undefined) k2 = k;
|
|
o[k2] = m[k];
|
|
}));
|
|
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
}) : function(o, v) {
|
|
o["default"] = v;
|
|
});
|
|
var __importStar = (this && this.__importStar) || function (mod) {
|
|
if (mod && mod.__esModule) return mod;
|
|
var result = {};
|
|
if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
|
|
__setModuleDefault(result, mod);
|
|
return result;
|
|
};
|
|
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
|
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
|
return new (P || (P = Promise))(function (resolve, reject) {
|
|
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
|
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
|
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
|
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
});
|
|
};
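// The helpers above are standard TypeScript compiler output: CommonJS interop shims
// (__createBinding, __setModuleDefault, __importStar) plus the __awaiter wrapper that
// downlevels async/await onto generator functions and Promises.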
Object.defineProperty(exports, "__esModule", { value: true });
exports.UploadHttpClient = void 0;
const fs = __importStar(require("fs"));
const core = __importStar(require("@actions/core"));
const tmp = __importStar(require("tmp-promise"));
const stream = __importStar(require("stream"));
const utils_1 = require("./utils");
const config_variables_1 = require("./config-variables");
const util_1 = require("util");
const url_1 = require("url");
const perf_hooks_1 = require("perf_hooks");
const status_reporter_1 = require("./status-reporter");
const http_client_1 = require("@actions/http-client");
const http_manager_1 = require("./http-manager");
const upload_gzip_1 = require("./upload-gzip");
const requestUtils_1 = require("./requestUtils");
const stat = util_1.promisify(fs.stat);
class UploadHttpClient {
    constructor() {
        this.uploadHttpManager = new http_manager_1.HttpManager(config_variables_1.getUploadFileConcurrency(), '@actions/artifact-upload');
        this.statusReporter = new status_reporter_1.StatusReporter(10000);
    }
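    // One HTTP client is kept per concurrent file upload (the pool size comes from
    // getUploadFileConcurrency()), and the StatusReporter logs overall progress on a
    // fixed 10000 ms interval.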
    /**
     * Creates a file container for the new artifact in the remote blob storage/file service
     * @param {string} artifactName Name of the artifact being created
     * @returns The response from the Artifact Service if the file container was successfully created
     */
    createArtifactInFileContainer(artifactName, options) {
        return __awaiter(this, void 0, void 0, function* () {
            const parameters = {
                Type: 'actions_storage',
                Name: artifactName
            };
            // calculate retention period
            if (options && options.retentionDays) {
                const maxRetentionStr = config_variables_1.getRetentionDays();
                parameters.RetentionDays = utils_1.getProperRetention(options.retentionDays, maxRetentionStr);
            }
            const data = JSON.stringify(parameters, null, 2);
            const artifactUrl = utils_1.getArtifactUrl();
            // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
            const client = this.uploadHttpManager.getClient(0);
            const headers = utils_1.getUploadHeaders('application/json', false);
            // Extra information to display when a particular HTTP code is returned
            // If a 403 is returned when trying to create a file container, the customer has exceeded
            // their storage quota so no new artifact containers can be created
            const customErrorMessages = new Map([
                [
                    http_client_1.HttpCodes.Forbidden,
                    'Artifact storage quota has been hit. Unable to upload any new artifacts'
                ],
                [
                    http_client_1.HttpCodes.BadRequest,
                    `The artifact name ${artifactName} is not valid. Request URL ${artifactUrl}`
                ]
            ]);
            const response = yield requestUtils_1.retryHttpClientRequest('Create Artifact Container', () => __awaiter(this, void 0, void 0, function* () { return client.post(artifactUrl, data, headers); }), customErrorMessages);
            const body = yield response.readBody();
            return JSON.parse(body);
        });
    }
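    // The customErrorMessages map above lets retryHttpClientRequest surface a
    // human-readable hint alongside the raw status code when the request ultimately
    // fails: 403 means the storage quota is exhausted, 400 means the artifact name
    // or request URL is invalid.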
    /**
     * Concurrently upload all of the files in chunks
     * @param {string} uploadUrl Base Url for the artifact that was created
     * @param {SearchResult[]} filesToUpload A list of information about the files being uploaded
     * @returns The size of all the files uploaded in bytes
     */
    uploadArtifactToFileContainer(uploadUrl, filesToUpload, options) {
        return __awaiter(this, void 0, void 0, function* () {
            const FILE_CONCURRENCY = config_variables_1.getUploadFileConcurrency();
            const MAX_CHUNK_SIZE = config_variables_1.getUploadChunkSize();
            core.debug(`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`);
            const parameters = [];
            // by default, file uploads will continue if there is an error unless specified differently in the options
            let continueOnError = true;
            if (options) {
                if (options.continueOnError === false) {
                    continueOnError = false;
                }
            }
            // prepare the necessary parameters to upload all the files
            for (const file of filesToUpload) {
                const resourceUrl = new url_1.URL(uploadUrl);
                resourceUrl.searchParams.append('itemPath', file.uploadFilePath);
                parameters.push({
                    file: file.absoluteFilePath,
                    resourceUrl: resourceUrl.toString(),
                    maxChunkSize: MAX_CHUNK_SIZE,
                    continueOnError
                });
            }
            const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()];
            const failedItemsToReport = [];
            let currentFile = 0;
            let completedFiles = 0;
            let uploadFileSize = 0;
            let totalFileSize = 0;
            let abortPendingFileUploads = false;
            this.statusReporter.setTotalNumberOfFilesToProcess(filesToUpload.length);
            this.statusReporter.start();
            // only allow a certain number of files to be uploaded at once, this is done to reduce potential errors
            yield Promise.all(parallelUploads.map((index) => __awaiter(this, void 0, void 0, function* () {
                while (currentFile < filesToUpload.length) {
                    const currentFileParameters = parameters[currentFile];
                    currentFile += 1;
                    if (abortPendingFileUploads) {
                        failedItemsToReport.push(currentFileParameters.file);
                        continue;
                    }
                    const startTime = perf_hooks_1.performance.now();
                    const uploadFileResult = yield this.uploadFileAsync(index, currentFileParameters);
                    if (core.isDebug()) {
                        core.debug(`File: ${++completedFiles}/${filesToUpload.length}. ${currentFileParameters.file} took ${(perf_hooks_1.performance.now() - startTime).toFixed(3)} milliseconds to finish upload`);
                    }
                    uploadFileSize += uploadFileResult.successfulUploadSize;
                    totalFileSize += uploadFileResult.totalSize;
                    if (uploadFileResult.isSuccess === false) {
                        failedItemsToReport.push(currentFileParameters.file);
                        if (!continueOnError) {
                            // fail fast
                            core.error(`aborting artifact upload`);
                            abortPendingFileUploads = true;
                        }
                    }
                    this.statusReporter.incrementProcessedCount();
                }
            })));
            this.statusReporter.stop();
            // done uploading, safety dispose all connections
            this.uploadHttpManager.disposeAndReplaceAllClients();
            core.info(`Total size of all the files uploaded is ${uploadFileSize} bytes`);
            return {
                uploadSize: uploadFileSize,
                totalSize: totalFileSize,
                failedItems: failedItemsToReport
            };
        });
    }
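    // The loop above is a simple worker-pool: FILE_CONCURRENCY async workers share the
    // mutable `currentFile` index. This is safe without locks because the synchronous
    // read-and-increment completes before any await point can interleave another worker.
    // A minimal standalone sketch of the same pattern (`concurrency`, `jobs`, and `run`
    // are placeholders, not names from this module):
    //
    //   const workers = [...new Array(concurrency).keys()];
    //   let next = 0;
    //   await Promise.all(workers.map(async () => {
    //       while (next < jobs.length) {
    //           const job = jobs[next++]; // claimed synchronously, no race
    //           await run(job);
    //       }
    //   }));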
    /**
     * Asynchronously uploads a file. The file is compressed and uploaded using GZip if it is determined to save space.
     * If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
     * @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
     * @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
     * @returns The size of the file that was uploaded in bytes along with any failed uploads
     */
    uploadFileAsync(httpClientIndex, parameters) {
        return __awaiter(this, void 0, void 0, function* () {
            const fileStat = yield stat(parameters.file);
            const totalFileSize = fileStat.size;
            const isFIFO = fileStat.isFIFO();
            let offset = 0;
            let isUploadSuccessful = true;
            let failedChunkSizes = 0;
            let uploadFileSize = 0;
            let isGzip = true;
            // if the file being uploaded is less than 64k in size, an in-memory buffer is used for GZip
            // compression instead of creating a file on disk; this increases throughput and minimizes disk I/O
            // named pipes report a file size of zero, so in that case don't read the file into memory
            if (!isFIFO && totalFileSize < 65536) {
                core.debug(`${parameters.file} is less than 64k in size. Creating a gzip file in-memory to potentially reduce the upload size`);
                const buffer = yield upload_gzip_1.createGZipFileInBuffer(parameters.file);
                // An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
                // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
                let openUploadStream;
                if (totalFileSize < buffer.byteLength) {
                    // compression did not help with reducing the size, use a readable stream from the original file for upload
                    core.debug(`The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`);
                    openUploadStream = () => fs.createReadStream(parameters.file);
                    isGzip = false;
                    uploadFileSize = totalFileSize;
                }
                else {
                    // create a readable stream using a PassThrough stream that is both readable and writable
                    core.debug(`A gzip file created for ${parameters.file} helped with reducing the size of the original file. The file will be uploaded using gzip.`);
                    openUploadStream = () => {
                        const passThrough = new stream.PassThrough();
                        passThrough.end(buffer);
                        return passThrough;
                    };
                    uploadFileSize = buffer.byteLength;
                }
                const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, openUploadStream, 0, uploadFileSize - 1, uploadFileSize, isGzip, totalFileSize);
                if (!result) {
                    // chunk failed to upload
                    isUploadSuccessful = false;
                    failedChunkSizes += uploadFileSize;
                    core.warning(`Aborting upload for ${parameters.file} due to failure`);
                }
                return {
                    isSuccess: isUploadSuccessful,
                    successfulUploadSize: uploadFileSize - failedChunkSizes,
                    totalSize: totalFileSize
                };
            }
            else {
                // the file being uploaded is greater than 64k in size, so a temporary file is created on disk
                // using the npm tmp-promise package and used to create a GZipped file
                const tempFile = yield tmp.file();
                core.debug(`${parameters.file} is greater than 64k in size. Creating a gzip file on-disk ${tempFile.path} to potentially reduce the upload size`);
                // create a GZip file of the original file being uploaded, the original file should not be modified in any way
                uploadFileSize = yield upload_gzip_1.createGZipFileOnDisk(parameters.file, tempFile.path);
                let uploadFilePath = tempFile.path;
                // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
                // for named pipes totalFileSize is zero, so this assumes compression did help
                if (!isFIFO && totalFileSize < uploadFileSize) {
                    core.debug(`The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`);
                    uploadFileSize = totalFileSize;
                    uploadFilePath = parameters.file;
                    isGzip = false;
                }
                else {
                    core.debug(`The gzip file created for ${parameters.file} is smaller than the original file. The file will be uploaded using gzip.`);
                }
                let abortFileUpload = false;
                // upload only a single chunk at a time
                while (offset < uploadFileSize) {
                    const chunkSize = Math.min(uploadFileSize - offset, parameters.maxChunkSize);
                    const startChunkIndex = offset;
                    const endChunkIndex = offset + chunkSize - 1;
                    offset += parameters.maxChunkSize;
                    if (abortFileUpload) {
                        // if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
                        failedChunkSizes += chunkSize;
                        continue;
                    }
                    const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, () => fs.createReadStream(uploadFilePath, {
                        start: startChunkIndex,
                        end: endChunkIndex,
                        autoClose: false
                    }), startChunkIndex, endChunkIndex, uploadFileSize, isGzip, totalFileSize);
                    if (!result) {
                        // Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
                        // successfully uploaded so the server may report a different size for what was uploaded
                        isUploadSuccessful = false;
                        failedChunkSizes += chunkSize;
                        core.warning(`Aborting upload for ${parameters.file} due to failure`);
                        abortFileUpload = true;
                    }
                    else {
                        // if an individual file is greater than 8MB (1024*1024*8) in size, display extra information about the upload status
                        if (uploadFileSize > 8388608) {
                            this.statusReporter.updateLargeFileStatus(parameters.file, startChunkIndex, endChunkIndex, uploadFileSize);
                        }
                    }
                }
                // Delete the temporary file that was created as part of the upload. If the temp file does not get manually deleted by
                // calling cleanup, it gets removed when the node process exits. For more info see: https://www.npmjs.com/package/tmp-promise#about
                core.debug(`deleting temporary gzip file ${tempFile.path}`);
                yield tempFile.cleanup();
                return {
                    isSuccess: isUploadSuccessful,
                    successfulUploadSize: uploadFileSize - failedChunkSizes,
                    totalSize: totalFileSize
                };
            }
        });
    }
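    // Worked example of the chunking above: a 10 MiB gzip (uploadFileSize = 10485760)
    // with maxChunkSize = 8 MiB (8388608) produces two PUTs covering byte ranges
    // 0-8388607 and 8388608-10485759, since endChunkIndex = offset + chunkSize - 1.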
    /**
     * Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
     * indicates a retryable status, the chunk upload is retried
     * @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
     * @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
     * @param {NodeJS.ReadableStream} openStream Stream of the file that will be uploaded
     * @param {number} start Starting byte index of file that the chunk belongs to
     * @param {number} end Ending byte index of file that the chunk belongs to
     * @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
     * @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
     * @param {number} totalFileSize Original total size of the file that is being uploaded
     * @returns if the chunk was successfully uploaded
     */
    uploadChunk(httpClientIndex, resourceUrl, openStream, start, end, uploadFileSize, isGzip, totalFileSize) {
        return __awaiter(this, void 0, void 0, function* () {
            // open a new stream and read it to compute the digest
            const digest = yield utils_1.digestForStream(openStream());
            // prepare all the necessary headers before making any http call
            const headers = utils_1.getUploadHeaders('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, utils_1.getContentRange(start, end, uploadFileSize), digest);
            const uploadChunkRequest = () => __awaiter(this, void 0, void 0, function* () {
                const client = this.uploadHttpManager.getClient(httpClientIndex);
                return yield client.sendStream('PUT', resourceUrl, openStream(), headers);
            });
            let retryCount = 0;
            const retryLimit = config_variables_1.getRetryLimit();
            // Increments the current retry count and then checks if the retry limit has been reached
            // If there have been too many retries, fail so the upload stops
            const incrementAndCheckRetryLimit = (response) => {
                retryCount++;
                if (retryCount > retryLimit) {
                    if (response) {
                        utils_1.displayHttpDiagnostics(response);
                    }
                    core.info(`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`);
                    return true;
                }
                return false;
            };
            const backOff = (retryAfterValue) => __awaiter(this, void 0, void 0, function* () {
                this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex);
                if (retryAfterValue) {
                    core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`);
                    yield utils_1.sleep(retryAfterValue);
                }
                else {
                    const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
                    core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`);
                    yield utils_1.sleep(backoffTime);
                }
                core.info(`Finished backoff for retry #${retryCount}, continuing with upload`);
                return;
            });
            // allow for failed chunks to be retried multiple times
            while (retryCount <= retryLimit) {
                let response;
                try {
                    response = yield uploadChunkRequest();
                }
                catch (error) {
                    // if an error is caught, it is usually indicative of a timeout so retry the upload
                    core.info(`An error has been caught http-client index ${httpClientIndex}, retrying the upload`);
                    // eslint-disable-next-line no-console
                    console.log(error);
                    if (incrementAndCheckRetryLimit()) {
                        return false;
                    }
                    yield backOff();
                    continue;
                }
                // Always read the body of the response. There is potential for a resource leak if the body is not read which will
                // result in the connection remaining open along with unintended consequences when trying to dispose of the client
                yield response.readBody();
                if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
                    return true;
                }
                else if (utils_1.isRetryableStatusCode(response.message.statusCode)) {
                    core.info(`A ${response.message.statusCode} status code has been received, will attempt to retry the upload`);
                    if (incrementAndCheckRetryLimit(response)) {
                        return false;
                    }
                    utils_1.isThrottledStatusCode(response.message.statusCode)
                        ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
                        : yield backOff();
                }
                else {
                    core.error(`Unexpected response. Unable to upload chunk to ${resourceUrl}`);
                    utils_1.displayHttpDiagnostics(response);
                    return false;
                }
            }
            return false;
        });
    }
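    // Retry flow in uploadChunk, summarized: a thrown error (usually a timeout) or a
    // retryable status code each consume one retry; throttled responses honor the
    // server's Retry-After value while everything else waits the exponentially growing
    // delay from getExponentialRetryTimeInMilliseconds. Any other status code is fatal
    // for the chunk, which in turn makes uploadFileAsync abandon the file's remaining chunks.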
    /**
     * Updates the size of the artifact from -1, which was initially set when the container was first created for the artifact.
     * Updating the size indicates that we are done uploading all the contents of the artifact
     */
    patchArtifactSize(size, artifactName) {
        return __awaiter(this, void 0, void 0, function* () {
            const resourceUrl = new url_1.URL(utils_1.getArtifactUrl());
            resourceUrl.searchParams.append('artifactName', artifactName);
            const parameters = { Size: size };
            const data = JSON.stringify(parameters, null, 2);
            core.debug(`URL is ${resourceUrl.toString()}`);
            // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
            const client = this.uploadHttpManager.getClient(0);
            const headers = utils_1.getUploadHeaders('application/json', false);
            // Extra information to display when a particular HTTP code is returned
            const customErrorMessages = new Map([
                [
                    http_client_1.HttpCodes.NotFound,
                    `An Artifact with the name ${artifactName} was not found`
                ]
            ]);
            // TODO retry for all possible response codes, the artifact upload is almost complete at this point so we should try to finish it at all costs
            const response = yield requestUtils_1.retryHttpClientRequest('Finalize artifact upload', () => __awaiter(this, void 0, void 0, function* () { return client.patch(resourceUrl.toString(), data, headers); }), customErrorMessages);
            yield response.readBody();
            core.debug(`Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`);
        });
    }
}
exports.UploadHttpClient = UploadHttpClient;
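// Illustrative end-to-end usage sketch (not executed here; `name`, `files`, and
// `options` are placeholders, and `fileContainerResourceUrl` is the container URL
// the Artifact Service returns on creation):
//
//   const client = new UploadHttpClient();
//   const container = await client.createArtifactInFileContainer(name, options);
//   const result = await client.uploadArtifactToFileContainer(container.fileContainerResourceUrl, files, options);
//   await client.patchArtifactSize(result.totalSize, name); // replaces the initial -1 size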
//# sourceMappingURL=upload-http-client.js.map