6061 lines
242 KiB
JavaScript
6061 lines
242 KiB
JavaScript
|
|
(function (global, factory) {
|
||
|
|
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
|
||
|
|
typeof define === 'function' && define.amd ? define(['exports'], factory) :
|
||
|
|
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.zip = {}));
|
||
|
|
})(this, (function (exports) { 'use strict';
|
||
|
|
|
||
|
|
// Capture the host's built-in globals once (browser window, worker scope, or Node
// globalThis) so the rest of the bundle references stable local bindings.
const { Array, Object, String, Number, BigInt, Math, Date, Map, Set, Response, URL, Error, Uint8Array, Uint16Array, Uint32Array, DataView, Blob, Promise, TextEncoder, TextDecoder, document, crypto, btoa, TransformStream, ReadableStream, WritableStream, CompressionStream, DecompressionStream, navigator, Worker } = typeof globalThis !== 'undefined' ? globalThis : this || self;
|
||
|
|
|
||
|
|
// The <script> element that loaded this bundle (browser only); null elsewhere.
var _documentCurrentScript = typeof document !== 'undefined' ? document.currentScript : null;
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
// Maximum values representable in the fixed-width ZIP header fields.
const MAX_32_BITS = 0xffffffff;
const MAX_16_BITS = 0xffff;
const MAX_8_BITS = 0xff;

// Compression method identifiers stored in local/central headers.
const COMPRESSION_METHOD_DEFLATE = 0x08;
const COMPRESSION_METHOD_DEFLATE_64 = 0x09;
const COMPRESSION_METHOD_STORE = 0x00;
// AE-x encryption marker: the real method is stored in the AES extra field.
const COMPRESSION_METHOD_AES = 0x63;

// Record signatures ("PK.." magic numbers) identifying each ZIP structure.
const LOCAL_FILE_HEADER_SIGNATURE = 0x04034b50;
const SPLIT_ZIP_FILE_SIGNATURE = 0x08074b50;
// The optional data descriptor shares its signature with the split-archive marker.
const DATA_DESCRIPTOR_RECORD_SIGNATURE = SPLIT_ZIP_FILE_SIGNATURE;
const CENTRAL_FILE_HEADER_SIGNATURE = 0x02014b50;
const END_OF_CENTRAL_DIR_SIGNATURE = 0x06054b50;
const ZIP64_END_OF_CENTRAL_DIR_SIGNATURE = 0x06064b50;
const ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIGNATURE = 0x07064b50;

// Fixed sizes (in bytes) of the end-of-central-directory records.
const END_OF_CENTRAL_DIR_LENGTH = 22;
const ZIP64_END_OF_CENTRAL_DIR_LOCATOR_LENGTH = 20;
const ZIP64_END_OF_CENTRAL_DIR_LENGTH = 56;
const ZIP64_END_OF_CENTRAL_DIR_TOTAL_LENGTH = END_OF_CENTRAL_DIR_LENGTH + ZIP64_END_OF_CENTRAL_DIR_LOCATOR_LENGTH + ZIP64_END_OF_CENTRAL_DIR_LENGTH;

// Data descriptor sizes: 32-bit form, ZIP64 form, and the optional signature prefix.
const DATA_DESCRIPTOR_RECORD_LENGTH = 12;
const DATA_DESCRIPTOR_RECORD_ZIP_64_LENGTH = 20;
const DATA_DESCRIPTOR_RECORD_SIGNATURE_LENGTH = 4;

// Extra-field type tags found in local/central headers.
const EXTRAFIELD_TYPE_ZIP64 = 0x0001;
const EXTRAFIELD_TYPE_AES = 0x9901;
const EXTRAFIELD_TYPE_NTFS = 0x000a;
const EXTRAFIELD_TYPE_NTFS_TAG1 = 0x0001;
const EXTRAFIELD_TYPE_EXTENDED_TIMESTAMP = 0x5455;
const EXTRAFIELD_TYPE_UNICODE_PATH = 0x7075;
const EXTRAFIELD_TYPE_UNICODE_COMMENT = 0x6375;
const EXTRAFIELD_TYPE_USDZ = 0x1986;
const EXTRAFIELD_TYPE_INFOZIP = 0x7875;
const EXTRAFIELD_TYPE_UNIX = 0x7855;

// General-purpose bit flag masks (header "flags" field).
const BITFLAG_ENCRYPTED = 0b1;
// Bits 1-2 encode the deflate compression level hint.
const BITFLAG_LEVEL = 0b0110;
const BITFLAG_LEVEL_MAX_MASK = 0b010;
const BITFLAG_LEVEL_FAST_MASK = 0b100;
const BITFLAG_LEVEL_SUPER_FAST_MASK = 0b110;
const BITFLAG_DATA_DESCRIPTOR = 0b1000;
// Bit 11: filename/comment are encoded in UTF-8.
const BITFLAG_LANG_ENCODING_FLAG = 0b100000000000;

// MS-DOS external file attribute masks.
const FILE_ATTR_MSDOS_DIR_MASK = 0b10000;
const FILE_ATTR_MSDOS_READONLY_MASK = 0x01;
const FILE_ATTR_MSDOS_HIDDEN_MASK = 0x02;
const FILE_ATTR_MSDOS_SYSTEM_MASK = 0x04;
const FILE_ATTR_MSDOS_ARCHIVE_MASK = 0x20;

// Unix mode bits (octal), stored in the high 16 bits of the external attributes.
const FILE_ATTR_UNIX_TYPE_MASK = 0o170000;
const FILE_ATTR_UNIX_TYPE_DIR = 0o040000;
const FILE_ATTR_UNIX_EXECUTABLE_MASK = 0o111;
const FILE_ATTR_UNIX_DEFAULT_MASK = 0o644;
const FILE_ATTR_UNIX_SETUID_MASK = 0o4000;
const FILE_ATTR_UNIX_SETGID_MASK = 0o2000;
const FILE_ATTR_UNIX_STICKY_MASK = 0o1000;

// "Version needed to extract" values for each feature set.
const VERSION_DEFLATE = 0x14;
const VERSION_ZIP64 = 0x2D;
const VERSION_AES = 0x33;

// Trailing character marking directory entries in filenames.
const DIRECTORY_SIGNATURE = "/";

// Local file header: fixed size and byte offsets of selected fields.
const HEADER_SIZE = 30;
const HEADER_OFFSET_SIGNATURE = 10;
const HEADER_OFFSET_COMPRESSED_SIZE = 14;
const HEADER_OFFSET_UNCOMPRESSED_SIZE = 18;

// Range of dates representable in MS-DOS date/time fields.
const MAX_DATE = new Date(2107, 11, 31);
const MIN_DATE = new Date(1980, 0, 1);

// Shared primitive aliases used throughout the bundle.
const UNDEFINED_VALUE = undefined;
const INFINITY_VALUE = Infinity;
const UNDEFINED_TYPE = "undefined";
const FUNCTION_TYPE = "function";
const OBJECT_TYPE = "object";
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Smallest chunk size (in bytes) ever returned by getChunkSize().
const MINIMUM_CHUNK_SIZE = 64;

// Default worker-pool size; refined below from the detected CPU core count.
let maxWorkers = 2;
try {
	// navigator may be absent (e.g. some server runtimes) or restricted; fall back to 2.
	if (typeof navigator != UNDEFINED_TYPE && navigator.hardwareConcurrency) {
		maxWorkers = navigator.hardwareConcurrency;
	}
} catch {
	// ignored
}
|
||
|
|
// Built-in defaults for the library configuration; see configure() for overrides.
const DEFAULT_CONFIGURATION = {
	// URI of the script run inside web workers.
	workerURI: "./core/web-worker-wasm.js",
	// URI of the WebAssembly zlib streams implementation.
	wasmURI: "./core/streams/zlib-wasm/zlib-streams.wasm",
	// Size (in bytes) of the chunks processed by the streams.
	chunkSize: 64 * 1024,
	// Maximum number of concurrent workers (CPU core count detected above).
	maxWorkers,
	// Delay (in ms) before terminating an idle worker.
	terminateWorkerTimeout: 5000,
	useWebWorkers: true,
	useCompressionStream: true,
	// Native (de)compression stream constructors when the host provides them,
	// otherwise `false`.
	CompressionStream: typeof CompressionStream != UNDEFINED_TYPE && CompressionStream,
	DecompressionStream: typeof DecompressionStream != UNDEFINED_TYPE && DecompressionStream
};

// Live configuration: seeded from the defaults, mutated in place by configure().
const config = Object.assign({}, DEFAULT_CONFIGURATION);
|
||
|
|
|
||
|
|
/**
 * Returns the live (mutable) global configuration object.
 * @returns {Object} The shared configuration; changes to it take effect immediately.
 */
function getConfiguration() {
	return config;
}
|
||
|
|
|
||
|
|
/**
 * Returns the configured chunk size, clamped to the supported minimum.
 *
 * @param {Object} config Configuration object exposing a numeric `chunkSize` (bytes).
 * @returns {number} `config.chunkSize`, but never less than `MINIMUM_CHUNK_SIZE`.
 * When `chunkSize` is missing or not a finite number, `MINIMUM_CHUNK_SIZE` is
 * returned (previously `Math.max(undefined, …)` yielded `NaN`).
 */
function getChunkSize(config) {
	const { chunkSize } = config;
	// Guard against an invalid chunkSize: Math.max(undefined, n) would return NaN.
	return Math.max(Number.isFinite(chunkSize) ? chunkSize : MINIMUM_CHUNK_SIZE, MINIMUM_CHUNK_SIZE);
}
|
||
|
|
|
||
|
|
/**
 * Merges the given options into the global configuration.
 * Only properties explicitly present (i.e. not `undefined`) replace the
 * current values; everything else is left untouched.
 *
 * @param {Object} configuration Partial configuration overrides.
 */
function configure(configuration) {
	// Supported option names, applied in the historical order.
	const PROPERTY_NAMES = [
		"baseURI",
		"wasmURI",
		"workerURI",
		"chunkSize",
		"maxWorkers",
		"terminateWorkerTimeout",
		"useCompressionStream",
		"useWebWorkers",
		"CompressionStream",
		"DecompressionStream",
		"CompressionStreamZlib",
		"DecompressionStreamZlib"
	];
	for (const propertyName of PROPERTY_NAMES) {
		setIfDefined(propertyName, configuration[propertyName]);
	}
}
|
||
|
|
|
||
|
|
/**
 * Writes a property on the shared configuration object, ignoring `undefined`
 * values so unspecified options keep their current setting.
 *
 * @param {string} propertyName Name of the configuration property.
 * @param {*} propertyValue New value, or `undefined` to leave it unchanged.
 */
function setIfDefined(propertyName, propertyValue) {
	if (propertyValue !== undefined) {
		config[propertyName] = propertyValue;
	}
}
|
||
|
|
|
||
|
|
const A="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";function g(g){g({workerURI:g=>{const B="text/javascript",I=(g=>{g=(g=>{const B=(g=(g+"").replace(/[^A-Za-z0-9+/=]/g,"")).length,I=[];for(let C=0;B>C;C+=4){const B=A.indexOf(g[C])<<18|A.indexOf(g[C+1])<<12|(63&A.indexOf(g[C+2]))<<6|63&A.indexOf(g[C+3]);I.push(B>>16&255),"="!==g[C+2]&&I.push(B>>8&255),"="!==g[C+3]&&I.push(255&B);}return new Uint8Array(I)})(g);let B=new Uint8Array(1024),I=0;for(let A=0;A<g.length;){const E=g[A++];if(128&E){const Q=3+(127&E),Y=g[A++]<<8|g[A++],F=I-Y;C(I+Q);for(let A=0;Q>A;A++)B[I++]=B[F+A];}else {const Q=E;C(I+Q);for(let C=0;Q>C&&A<g.length;C++)B[I++]=g[A++];}}return (g=>{let B="";const I=g.length;let C=0;for(;I>C+2;C+=3){const I=g[C]<<16|g[C+1]<<8|g[C+2];B+=A[I>>18&63]+A[I>>12&63]+A[I>>6&63]+A[63&I];}const E=I-C;if(1===E){const I=g[C]<<16;B+=A[I>>18&63]+A[I>>12&63]+"==";}else if(2===E){const I=g[C]<<16|g[C+1]<<8;B+=A[I>>18&63]+A[I>>12&63]+A[I>>6&63]+"=";}return B})(new Uint8Array(B.buffer.slice(0,I)));function C(A){if(B.length<A){let g=2*B.length;for(;A>g;)g*=2;const C=new 
Uint8Array(g);C.set(B.subarray(0,I)),B=C;}}})("IChlPT57ImZ1bmN0aW9uIj09dHlwZW9mIGRlZmluZSYmgwAIBS5hbWQ/gwALCihlKTplKCl9KSiFADVEKCl7InVzZSBzdHJpY3QiO2NvbnN0e0FycmF5OmUsT2JqZWN0OnQsTnVtYmVyOnIsTWF0aDpuLEVycm9yOmksVWludDiDAC4Bb4IADQIxNoMADgFmggAOAjMygwAOA3MsSYcADQFsgABFInA6YSxEYXRhVmlldzpjLFByb21pc2U6dSxUZXh0RW5jb2SAAG4zdyxjcnlwdG86aCxwb3N0TWVzc2FnZTpkLFRyYW5zZm9ybVN0cmVhbTpwLFJlYWRhYmxlhAARBmssV3JpdIgAEQdiLENvbXBygABCgAD0hAAUBXksRGVjjgAWFm19PXNlbGYsdj12b2lkIDAsZz0idW6DAT8FZCIsUz2HAXAGLFQ9W107gACIFChsZXQgZT0wOzI1Nj5lO2UrKyl7gQATA3Q9ZYoAHwE4hAAdFzEmdD90PXQ+Pj4xXjM5ODgyOTIzODQ6gQAREj0xO1RbZV09dH1jbGFzcyB6e4IBkgRydWN0gABJCGUpe3RoaXMugABZC3x8LTF9YXBwZW5kgQAXgwBvAjB8gwAfhgB2BXI9MCxugAAXDmUubGVuZ3RoO24+cjtygACDgwB/EjheVFsyNTUmKHReZVtyXSldO4QAXwV0fWdldIACHAdyZXR1cm5+gwAWAX2EAJQCQyCAAaWAAHgDcyBwigCegwCGAWWDAkSAAI4ObmV3IHo7c3VwZXIoe3SFAbQEKGUscoAAxwEuhgC8CSxyLmVucXVldYECnwd9LGZsdXNogABMgwBGAXKCAEYFbyg0KTuBAAkPYyhyLmJ1ZmZlcikuc2V0gwJkBSgwLHQuggCwDSksZS52YWx1ZT1yfX2AAA0BPYEAuYAAt4IASwJfPYEAVANjYXSAAIIKdCl7aWYoMD09PYUBGgJ8fIEADgF0hAAOASmDAPkDIGUuhAAzAnQphADXBHI9ZVuFADQDLTFdgAFeB18uaShyKTuEADICMzKAAEcCbj+IADkMOl8ubyh0LG4sMHxygACgBXNsaWNlgACzhwBFASmAAPYBbIEBxYUBOYYBroQApAF0hQCPATCIAIUBdIAAfocAdQIqKIAAEAIpK4MAjAN9LHWGAOSAABuFAE8BPIYASIUBpAJyPYAEQ4cAkw5uLmNlaWwodC8zMikpKYUAhYQANA90Jj0zMSxyPjAmJnQmJiiAAjCAAIGAAP0BaIAA3IMADQ0mMjE0NzQ4MzY0OD4+gACPASyAANoGLGV9LGg6gQCOgAH2Aj0+ggEeB2U/dDoocj+AAq0cOnQ8PDMyLWUpKzEwOTk1MTE2Mjc3NzYqZSxpOoAE3ARuLnJvgAN8AyhlL4oAHwcpfHwzMixvgwBVBCxuKXuBAu6DA66BAXyAAJ0BboADmQopO3Q+PTMyO3QtgAAGBCluLnCBAlcBcoACboADiY0BWIAA+YQCGgEphgM7AWmBACgBPIcBi4ADNYUARQV8ZVtpXYADPwF0gQBOgQAMggDGAXSEAVUBaYYBwAE/igIrBDowLG+CAi0BaYYCLYQAUYIBRQMrbyaAAWCAAAeAASUDP3I6gAAbBG9wKCmCAUQPbn19LHg9e2J5dGVzOntwiQIrAl8ugQI6Ai84gACJgwMpgAKpgQC+AW6KAMgDdD5pggDBBTMmaXx8gAEegAC0Ay80XYAAvoAAvAI9boAAyggyNCxuPDw9OIUAmQRyfSxriQBngAToggRPgQRNggBagQFGAXKHAR6BBFMCbj2AAEMBOIABHAdyXSwzJn5ygABqAXSDAOABboAAwwI9MIcDIwImcoACRIcA+wM4KiiAABOAACSAAOuABGUEfSxBPYIEaY0E+4UAjYED1RA7dC5ibG9ja1NpemU9NTEygAQCGW09WzE3MzI1O
DQxOTMsNDAyMzIzMzQxNyyABZaABWAHMzEwMiwyN4AAIxAzODc4LDMyODUzNzc1MjBdgAA8AXaAADwKNTE4NTAwMjQ5LIAACQE5gAAbATOAAEcFMjQwMDmAAA4BMIAAMg0zOTU0Njk3ODJdLGU/gADcAVOAAekBU4UDPYAAuwIuVIAAEQFUiQARAUOAABEFQyk6dC6ABmSBBKABfYQACIQAy4MEoIUBF4AATYAALwFthwBAgABNgAF5gAAHAUOABcYHZX11cGRhdIEFJIsBCQEigQe7Am5nhwf+gAf5gAPWBHguXy6BAcOHBLiBAKABX4YElwMuVCyABXsBboAAFgFDgAKXgQCrAW6ABD6BAk2BAx4Lbz45MDA3MTk5MjWAA9OAA30IMSl0aHJvdyCBAmUnaSgiQ2Fubm90IGhhc2ggbW9yZSB0aGFuIDJeNTMgLSAxIGJpdHMihQB6AWaCApoBc4EFJIECRAFshAJCgwcViAHHAytuLYAAl4gAD4ACJIYADoAE3QM7bz6AB1ACZSuJADQLKXQuQShmLnN1YmGBCFwGKDE2KmwsgAAFAyhsK4ADOQQpLGwrgAdRhQLUAy5zcIQE6YEAJ4ECbAFSjQGLhAe2Ai5UhwVpAy5TO4EDZ4UBOAIsW4ECs4EE3IAEYokAyIMDCgYrMjsxNSaDB+eEAuOAAvmBACiFAwwELmZsb4EC0A0uQy80Mjk0OTY3Mjk2gQL6hAAugAesAUOABLyFA1oCKWWAAOIBdIkAu4AB04YCMoQCUAQscn1ChwUPhQAeBz4xOT9lPjOBAAUBNYEABQM3OT+DBSYROnRecl5uOnQmcnx0Jm58ciaAAAyGABIBfoAAEwJ9RIMGQIUF+wQ8PGV8gQgwgQWsAX2AAI+HB56BATYGLGk9ci5TgAJGA2UoOIQA7YUI8gExhQkQAW+CCNWAAAWCAWMEZj1pW4ADeAFzgAAHgAdDAWyAAAeAA1QBYYAABwQzXSxjgAAHA
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
// Precomputed CRC-32 lookup table (reflected polynomial 0xEDB88320),
// one entry per possible byte value. Entries are signed 32-bit integers.
const table = [];
for (let byteValue = 0; byteValue < 256; byteValue++) {
	let entry = byteValue;
	for (let bit = 0; bit < 8; bit++) {
		entry = entry & 1 ? (entry >>> 1) ^ 0xEDB88320 : entry >>> 1;
	}
	table[byteValue] = entry;
}

/**
 * Incremental CRC-32 checksum (the variant used by the ZIP format).
 */
class Crc32 {

	/**
	 * @param {number} [crc] Internal state to resume from; defaults to -1
	 * (the standard all-ones initial value).
	 */
	constructor(crc) {
		this.crc = crc || -1;
	}

	/**
	 * Folds a chunk of bytes into the running checksum.
	 * @param {Uint8Array|Array<number>} data Bytes to process.
	 */
	append(data) {
		let crc = this.crc | 0;
		for (const byte of data) {
			crc = (crc >>> 8) ^ table[(crc ^ byte) & 0xFF];
		}
		this.crc = crc;
	}

	/**
	 * @returns {number} The final CRC-32 value as a signed 32-bit integer
	 * (apply `>>> 0` for the unsigned form).
	 */
	get() {
		return ~this.crc;
	}
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
/**
 * Identity TransformStream that computes the CRC-32 of all data flowing
 * through it. Chunks pass through unchanged; after the stream is flushed,
 * the 4-byte big-endian checksum is exposed on the instance's `value` property.
 */
class Crc32Stream extends TransformStream {

	constructor() {
		// `stream` is assigned after super() so the flush callback can reach `this`.
		// deno-lint-ignore prefer-const
		let stream;
		const crc32 = new Crc32();
		super({
			transform: (chunk, controller) => {
				// Accumulate the checksum while forwarding the chunk untouched.
				crc32.append(chunk);
				controller.enqueue(chunk);
			},
			flush: () => {
				const checksum = new Uint8Array(4);
				new DataView(checksum.buffer).setUint32(0, crc32.get());
				stream.value = checksum;
			}
		});
		stream = this;
	}
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
/**
 * Encodes a string into its UTF-8 byte representation.
 *
 * @param {string} value The text to encode.
 * @returns {Uint8Array} The UTF-8 encoded bytes.
 */
function encodeText(value) {
	// deno-lint-ignore valid-typeof
	if (typeof TextEncoder != UNDEFINED_TYPE) {
		return new TextEncoder().encode(value);
	}
	// Fallback for hosts without TextEncoder: the unescape/encodeURIComponent
	// trick yields a string whose char codes are the UTF-8 bytes.
	const utf8String = unescape(encodeURIComponent(value));
	const bytes = new Uint8Array(utf8String.length);
	for (let index = 0; index < bytes.length; index++) {
		bytes[index] = utf8String.charCodeAt(index);
	}
	return bytes;
}
|
||
|
|
|
||
|
|
// Derived from https://github.com/xqdoo00o/jszip/blob/master/lib/sjcl.js and https://github.com/bitwiseshiftleft/sjcl
|
||
|
|
|
||
|
|
// deno-lint-ignore-file no-this-alias
|
||
|
|
|
||
|
|
/*
|
||
|
|
* SJCL is open. You can use, modify and redistribute it under a BSD
|
||
|
|
* license or under the GNU GPL, version 2.0.
|
||
|
|
*/
|
||
|
|
|
||
|
|
/** @fileOverview Javascript cryptography implementation.
|
||
|
|
*
|
||
|
|
* Crush to remove comments, shorten variable names and
|
||
|
|
* generally reduce transmission size.
|
||
|
|
*
|
||
|
|
* @author Emily Stark
|
||
|
|
* @author Mike Hamburg
|
||
|
|
* @author Dan Boneh
|
||
|
|
*/
|
||
|
|
|
||
|
|
/*jslint indent: 2, bitwise: false, nomen: false, plusplus: false, white: false, regexp: false */
|
||
|
|
|
||
|
|
/** @fileOverview Arrays of bits, encoded as arrays of Numbers.
|
||
|
|
*
|
||
|
|
* @author Emily Stark
|
||
|
|
* @author Mike Hamburg
|
||
|
|
* @author Dan Boneh
|
||
|
|
*/
|
||
|
|
|
||
|
|
/**
|
||
|
|
* Arrays of bits, encoded as arrays of Numbers.
|
||
|
|
* @namespace
|
||
|
|
* @description
|
||
|
|
* <p>
|
||
|
|
* These objects are the currency accepted by SJCL's crypto functions.
|
||
|
|
* </p>
|
||
|
|
*
|
||
|
|
* <p>
|
||
|
|
* Most of our crypto primitives operate on arrays of 4-byte words internally,
|
||
|
|
* but many of them can take arguments that are not a multiple of 4 bytes.
|
||
|
|
* This library encodes arrays of bits (whose size need not be a multiple of 8
|
||
|
|
* bits) as arrays of 32-bit words. The bits are packed, big-endian, into an
|
||
|
|
* array of words, 32 bits at a time. Since the words are double-precision
|
||
|
|
* floating point numbers, they fit some extra data. We use this (in a private,
|
||
|
|
* possibly-changing manner) to encode the number of bits actually present
|
||
|
|
* in the last word of the array.
|
||
|
|
* </p>
|
||
|
|
*
|
||
|
|
* <p>
|
||
|
|
* Because bitwise ops clear this out-of-band data, these arrays can be passed
|
||
|
|
* to ciphers like AES which want arrays of words.
|
||
|
|
* </p>
|
||
|
|
*/
|
||
|
|
const bitArray = {
	/**
	 * Concatenate two bit arrays.
	 * @param {bitArray} a1 The first array.
	 * @param {bitArray} a2 The second array.
	 * @return {bitArray} The concatenation of a1 and a2.
	 */
	concat(a1, a2) {
		if (a1.length === 0 || a2.length === 0) {
			return a1.concat(a2);
		}

		const last = a1[a1.length - 1], shift = bitArray.getPartial(last);
		if (shift === 32) {
			// The last word of a1 is full: the word arrays can be joined as-is.
			return a1.concat(a2);
		} else {
			// Otherwise shift a2 right so its bits fill the partial last word of a1.
			return bitArray._shiftRight(a2, shift, last | 0, a1.slice(0, a1.length - 1));
		}
	},

	/**
	 * Find the length of an array of bits.
	 * @param {bitArray} a The array.
	 * @return {Number} The length of a, in bits.
	 */
	bitLength(a) {
		const l = a.length;
		if (l === 0) {
			return 0;
		}
		// All words but the last hold 32 bits; the last may be partial.
		const x = a[l - 1];
		return (l - 1) * 32 + bitArray.getPartial(x);
	},

	/**
	 * Truncate an array.
	 * @param {bitArray} a The array.
	 * @param {Number} len The length to truncate to, in bits.
	 * @return {bitArray} A new array, truncated to len bits.
	 */
	clamp(a, len) {
		if (a.length * 32 < len) {
			// Already shorter than the requested length: nothing to do.
			return a;
		}
		a = a.slice(0, Math.ceil(len / 32));
		const l = a.length;
		len = len & 31;
		if (l > 0 && len) {
			// Mask the surviving high bits of the last word and tag it as partial.
			a[l - 1] = bitArray.partial(len, a[l - 1] & 0x80000000 >> (len - 1), 1);
		}
		return a;
	},

	/**
	 * Make a partial word for a bit array.
	 * @param {Number} len The number of bits in the word.
	 * @param {Number} x The bits.
	 * @param {Number} [_end=0] Pass 1 if x has already been shifted to the high side.
	 * @return {Number} The partial word.
	 */
	partial(len, x, _end) {
		if (len === 32) {
			return x;
		}
		// The bit count is stored out-of-band in the high (floating-point)
		// part of the number: value + len * 2^40.
		return (_end ? x | 0 : x << (32 - len)) + len * 0x10000000000;
	},

	/**
	 * Get the number of bits used by a partial word.
	 * @param {Number} x The partial word.
	 * @return {Number} The number of bits used by the partial word.
	 */
	getPartial(x) {
		// A word with no out-of-band length info is a full 32-bit word.
		return Math.round(x / 0x10000000000) || 32;
	},

	/** Shift an array right.
	 * @param {bitArray} a The array to shift.
	 * @param {Number} shift The number of bits to shift.
	 * @param {Number} [carry=0] A byte to carry in
	 * @param {bitArray} [out=[]] An array to prepend to the output.
	 * @private
	 */
	_shiftRight(a, shift, carry, out) {
		if (out === undefined) {
			out = [];
		}

		// Whole-word shifts: emit the carry, then shift by the 32-bit remainder.
		for (; shift >= 32; shift -= 32) {
			out.push(carry);
			carry = 0;
		}
		if (shift === 0) {
			return out.concat(a);
		}

		// Sub-word shift: each output word combines the previous carry with the
		// high bits of the current word; the low bits become the next carry.
		for (let i = 0; i < a.length; i++) {
			out.push(carry | a[i] >>> shift);
			carry = a[i] << (32 - shift);
		}
		const last2 = a.length ? a[a.length - 1] : 0;
		const shift2 = bitArray.getPartial(last2);
		// Emit the final (possibly partial) word, merging with the previous word
		// when the leftover bits fit inside it.
		out.push(bitArray.partial(shift + shift2 & 31, (shift + shift2 > 32) ? carry : out.pop(), 1));
		return out;
	}
};
|
||
|
|
|
||
|
|
/** @fileOverview Bit array codec implementations.
|
||
|
|
*
|
||
|
|
* @author Emily Stark
|
||
|
|
* @author Mike Hamburg
|
||
|
|
* @author Dan Boneh
|
||
|
|
*/
|
||
|
|
|
||
|
|
/**
|
||
|
|
* Arrays of bytes
|
||
|
|
* @namespace
|
||
|
|
*/
|
||
|
|
/**
 * Codecs converting between plain byte arrays and the library's bitArray
 * representation (big-endian packed 32-bit words).
 * @namespace
 */
const codec = {
	bytes: {
		/** Convert from a bitArray to an array of bytes. */
		fromBits(arr) {
			const bitCount = bitArray.bitLength(arr);
			const byteCount = bitCount / 8;
			const out = new Uint8Array(byteCount);
			let word;
			for (let index = 0; index < byteCount; index++) {
				// Refill the working word every 4 bytes, then emit its top byte.
				if ((index & 3) === 0) {
					word = arr[index / 4];
				}
				out[index] = word >>> 24;
				word <<= 8;
			}
			return out;
		},
		/** Convert from an array of bytes to a bitArray. */
		toBits(bytes) {
			const words = [];
			let accumulator = 0;
			let index = 0;
			while (index < bytes.length) {
				accumulator = accumulator << 8 | bytes[index];
				if ((index & 3) === 3) {
					// A full big-endian 32-bit word has been assembled.
					words.push(accumulator);
					accumulator = 0;
				}
				index++;
			}
			// Any trailing bytes form a partial word tagged with its bit length.
			if (index & 3) {
				words.push(bitArray.partial(8 * (index & 3), accumulator));
			}
			return words;
		}
	}
};
|
||
|
|
|
||
|
|
// Namespace for the hash implementations (SHA-1 is attached below).
const hash = {};

/**
 * Context for a SHA-1 operation in progress.
 * @constructor
 */
hash.sha1 = class {
	constructor(hash) {
		const sha1 = this;
		/**
		 * The hash's block size, in bits.
		 * @constant
		 */
		sha1.blockSize = 512;
		/**
		 * The SHA-1 initialization vector.
		 * @private
		 */
		sha1._init = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0];
		/**
		 * The SHA-1 hash key.
		 * @private
		 */
		sha1._key = [0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6];
		if (hash) {
			// Copy constructor: clone the running state of another sha1 instance.
			sha1._h = hash._h.slice(0);
			sha1._buffer = hash._buffer.slice(0);
			sha1._length = hash._length;
		} else {
			sha1.reset();
		}
	}

	/**
	 * Reset the hash state.
	 * @return this
	 */
	reset() {
		const sha1 = this;
		sha1._h = sha1._init.slice(0);
		sha1._buffer = [];
		sha1._length = 0;
		return sha1;
	}

	/**
	 * Input several words to the hash.
	 * @param {bitArray|String} data the data to hash.
	 * @return this
	 */
	update(data) {
		const sha1 = this;
		if (typeof data === "string") {
			// NOTE(review): codec.utf8String is not defined in this file (codec only
			// exposes `bytes`), so string input would throw here — confirm callers
			// only ever pass bitArrays.
			data = codec.utf8String.toBits(data);
		}
		const b = sha1._buffer = bitArray.concat(sha1._buffer, data);
		const ol = sha1._length;
		const nl = sha1._length = ol + bitArray.bitLength(data);
		// Lengths are tracked as ordinary Numbers; beyond 2^53 - 1 bits they
		// would lose precision, so refuse larger inputs.
		if (nl > 9007199254740991) {
			throw new Error("Cannot hash more than 2^53 - 1 bits");
		}
		const c = new Uint32Array(b);
		let j = 0;
		// Process every complete 512-bit block accumulated in the buffer.
		for (let i = sha1.blockSize + ol - ((sha1.blockSize + ol) & (sha1.blockSize - 1)); i <= nl;
			i += sha1.blockSize) {
			sha1._block(c.subarray(16 * j, 16 * (j + 1)));
			j += 1;
		}
		// Drop the consumed words; any remainder waits for the next update/finalize.
		b.splice(0, 16 * j);
		return sha1;
	}

	/**
	 * Complete hashing and output the hash value.
	 * @return {bitArray} The hash value, an array of 5 big-endian words. TODO
	 */
	finalize() {
		const sha1 = this;
		let b = sha1._buffer;
		const h = sha1._h;

		// Round out and push the buffer
		b = bitArray.concat(b, [bitArray.partial(1, 1)]);
		// Round out the buffer to a multiple of 16 words, less the 2 length words.
		for (let i = b.length + 2; i & 15; i++) {
			b.push(0);
		}

		// append the length
		b.push(Math.floor(sha1._length / 0x100000000));
		b.push(sha1._length | 0);

		while (b.length) {
			sha1._block(b.splice(0, 16));
		}

		// Reset so the instance can be reused for a new message.
		sha1.reset();
		return h;
	}

	/**
	 * The SHA-1 logical functions f(0), f(1), ..., f(79).
	 * @private
	 */
	_f(t, b, c, d) {
		if (t <= 19) {
			return (b & c) | (~b & d);
		} else if (t <= 39) {
			return b ^ c ^ d;
		} else if (t <= 59) {
			return (b & c) | (b & d) | (c & d);
		} else if (t <= 79) {
			return b ^ c ^ d;
		}
	}

	/**
	 * Circular left-shift operator.
	 * @private
	 */
	_S(n, x) {
		return (x << n) | (x >>> 32 - n);
	}

	/**
	 * Perform one cycle of SHA-1.
	 * @param {Uint32Array|bitArray} words one block of words.
	 * @private
	 */
	_block(words) {
		const sha1 = this;
		const h = sha1._h;
		// When words is passed to _block, it has 16 elements. SHA1 _block
		// function extends words with new elements (at the end there are 80 elements).
		// The problem is that if we use Uint32Array instead of Array,
		// the length of Uint32Array cannot be changed. Thus, we replace words with a
		// normal Array here.
		const w = Array(80); // do not use Uint32Array here as the instantiation is slower
		for (let j = 0; j < 16; j++) {
			w[j] = words[j];
		}

		let a = h[0];
		let b = h[1];
		let c = h[2];
		let d = h[3];
		let e = h[4];

		// 80 rounds: extend the message schedule on the fly from round 16 onward.
		for (let t = 0; t <= 79; t++) {
			if (t >= 16) {
				w[t] = sha1._S(1, w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16]);
			}
			const tmp = (sha1._S(5, a) + sha1._f(t, b, c, d) + e + w[t] +
				sha1._key[Math.floor(t / 20)]) | 0;
			e = d;
			d = c;
			c = sha1._S(30, b);
			b = a;
			a = tmp;
		}

		// Fold the round results back into the chaining state (mod 2^32).
		h[0] = (h[0] + a) | 0;
		h[1] = (h[1] + b) | 0;
		h[2] = (h[2] + c) | 0;
		h[3] = (h[3] + d) | 0;
		h[4] = (h[4] + e) | 0;
	}
};
|
||
|
|
|
||
|
|
/** @fileOverview Low-level AES implementation.
|
||
|
|
*
|
||
|
|
* This file contains a low-level implementation of AES, optimized for
|
||
|
|
* size and for efficiency on several browsers. It is based on
|
||
|
|
* OpenSSL's aes_core.c, a public-domain implementation by Vincent
|
||
|
|
* Rijmen, Antoon Bosselaers and Paulo Barreto.
|
||
|
|
*
|
||
|
|
* An older version of this implementation is available in the public
|
||
|
|
* domain, but this one is (c) Emily Stark, Mike Hamburg, Dan Boneh,
|
||
|
|
* Stanford University 2008-2010 and BSD-licensed for liability
|
||
|
|
* reasons.
|
||
|
|
*
|
||
|
|
* @author Emily Stark
|
||
|
|
* @author Mike Hamburg
|
||
|
|
* @author Dan Boneh
|
||
|
|
*/
|
||
|
|
|
||
|
|
const cipher = {};

/**
 * Schedule out an AES key for both encryption and decryption. This
 * is a low-level class. Use a cipher mode to do bulk encryption.
 *
 * @constructor
 * @param {Array} key The key as an array of 4, 6 or 8 words (AES-128/192/256).
 */
cipher.aes = class {
	constructor(key) {
		/**
		 * The expanded S-box and inverse S-box tables. These will be computed
		 * on the client so that we don't have to send them down the wire.
		 *
		 * There are two tables, _tables[0] is for encryption and
		 * _tables[1] is for decryption.
		 *
		 * The first 4 sub-tables are the expanded S-box with MixColumns. The
		 * last (_tables[01][4]) is the S-box itself.
		 *
		 * @private
		 */
		const aes = this;
		aes._tables = [[[], [], [], [], []], [[], [], [], [], []]];

		// NOTE(review): _tables was just re-created above, so this check is
		// always true and _precompute() runs for every instance.
		if (!aes._tables[0][0][0]) {
			aes._precompute();
		}

		const sbox = aes._tables[0][4];
		const decTable = aes._tables[1];
		const keyLen = key.length;

		let i, encKey, decKey, rcon = 1;

		if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {
			throw new Error("invalid aes key size");
		}

		// _key[0] holds the encryption schedule, _key[1] the decryption schedule.
		aes._key = [encKey = key.slice(0), decKey = []];

		// schedule encryption keys
		for (i = keyLen; i < 4 * keyLen + 28; i++) {
			let tmp = encKey[i - 1];

			// apply sbox
			if (i % keyLen === 0 || (keyLen === 8 && i % keyLen === 4)) {
				tmp = sbox[tmp >>> 24] << 24 ^ sbox[tmp >> 16 & 255] << 16 ^ sbox[tmp >> 8 & 255] << 8 ^ sbox[tmp & 255];

				// shift rows and add rcon
				if (i % keyLen === 0) {
					tmp = tmp << 8 ^ tmp >>> 24 ^ rcon << 24;
					rcon = rcon << 1 ^ (rcon >> 7) * 283;
				}
			}

			encKey[i] = encKey[i - keyLen] ^ tmp;
		}

		// schedule decryption keys (encryption schedule reversed, with
		// InvMixColumns applied to the inner round keys)
		for (let j = 0; i; j++, i--) {
			const tmp = encKey[j & 3 ? i : i - 4];
			if (i <= 4 || j < 4) {
				decKey[j] = tmp;
			} else {
				decKey[j] = decTable[0][sbox[tmp >>> 24]] ^
					decTable[1][sbox[tmp >> 16 & 255]] ^
					decTable[2][sbox[tmp >> 8 & 255]] ^
					decTable[3][sbox[tmp & 255]];
			}
		}
	}
	// public
	/* Something like this might appear here eventually
	name: "AES",
	blockSize: 4,
	keySizes: [4,6,8],
	*/

	/**
	 * Encrypt an array of 4 big-endian words.
	 * @param {Array} data The plaintext.
	 * @return {Array} The ciphertext.
	 */
	encrypt(data) {
		return this._crypt(data, 0);
	}

	/**
	 * Decrypt an array of 4 big-endian words.
	 * @param {Array} data The ciphertext.
	 * @return {Array} The plaintext.
	 */
	decrypt(data) {
		return this._crypt(data, 1);
	}

	/**
	 * Expand the S-box tables.
	 *
	 * @private
	 */
	_precompute() {
		const encTable = this._tables[0];
		const decTable = this._tables[1];
		const sbox = encTable[4];
		const sboxInv = decTable[4];
		const d = [];
		const th = [];
		let xInv, x2, x4, x8;

		// Compute double and third tables over GF(2^8)
		for (let i = 0; i < 256; i++) {
			th[(d[i] = i << 1 ^ (i >> 7) * 283) ^ i] = i;
		}

		// Walk the field; loop ends once sbox[x] becomes non-zero again.
		for (let x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {
			// Compute sbox (affine transform of the multiplicative inverse)
			let s = xInv ^ xInv << 1 ^ xInv << 2 ^ xInv << 3 ^ xInv << 4;
			s = s >> 8 ^ s & 255 ^ 99;
			sbox[x] = s;
			sboxInv[s] = x;

			// Compute MixColumns
			x8 = d[x4 = d[x2 = d[x]]];
			let tDec = x8 * 0x1010101 ^ x4 * 0x10001 ^ x2 * 0x101 ^ x * 0x1010100;
			let tEnc = d[s] * 0x101 ^ s * 0x1010100;

			// The four sub-tables are byte-rotations of each other.
			for (let i = 0; i < 4; i++) {
				encTable[i][x] = tEnc = tEnc << 24 ^ tEnc >>> 8;
				decTable[i][s] = tDec = tDec << 24 ^ tDec >>> 8;
			}
		}

		// Compactify. Considerable speedup on Firefox.
		for (let i = 0; i < 5; i++) {
			encTable[i] = encTable[i].slice(0);
			decTable[i] = decTable[i].slice(0);
		}
	}

	/**
	 * Encryption and decryption core.
	 * @param {Array} input Four words to be encrypted or decrypted.
	 * @param dir The direction, 0 for encrypt and 1 for decrypt.
	 * @return {Array} The four encrypted or decrypted words.
	 * @private
	 */
	_crypt(input, dir) {
		if (input.length !== 4) {
			throw new Error("invalid aes block size");
		}

		const key = this._key[dir];

		const nInnerRounds = key.length / 4 - 2;
		const out = [0, 0, 0, 0];
		const table = this._tables[dir];

		// load up the tables
		const t0 = table[0];
		const t1 = table[1];
		const t2 = table[2];
		const t3 = table[3];
		const sbox = table[4];

		// state variables a,b,c,d are loaded with pre-whitened data
		// (decryption swaps columns 1 and 3 to reuse the same core)
		let a = input[0] ^ key[0];
		let b = input[dir ? 3 : 1] ^ key[1];
		let c = input[2] ^ key[2];
		let d = input[dir ? 1 : 3] ^ key[3];
		let kIndex = 4;
		let a2, b2, c2;

		// Inner rounds. Cribbed from OpenSSL.
		for (let i = 0; i < nInnerRounds; i++) {
			a2 = t0[a >>> 24] ^ t1[b >> 16 & 255] ^ t2[c >> 8 & 255] ^ t3[d & 255] ^ key[kIndex];
			b2 = t0[b >>> 24] ^ t1[c >> 16 & 255] ^ t2[d >> 8 & 255] ^ t3[a & 255] ^ key[kIndex + 1];
			c2 = t0[c >>> 24] ^ t1[d >> 16 & 255] ^ t2[a >> 8 & 255] ^ t3[b & 255] ^ key[kIndex + 2];
			d = t0[d >>> 24] ^ t1[a >> 16 & 255] ^ t2[b >> 8 & 255] ^ t3[c & 255] ^ key[kIndex + 3];
			kIndex += 4;
			a = a2; b = b2; c = c2;
		}

		// Last round: plain S-box instead of the combined MixColumns tables.
		for (let i = 0; i < 4; i++) {
			out[dir ? 3 & -i : i] =
				sbox[a >>> 24] << 24 ^
				sbox[b >> 16 & 255] << 16 ^
				sbox[c >> 8 & 255] << 8 ^
				sbox[d & 255] ^
				key[kIndex++];
			a2 = a; a = b; b = c; c = d; d = a2;
		}

		return out;
	}
};
|
||
|
|
|
||
|
|
/**
|
||
|
|
* Random values
|
||
|
|
* @namespace
|
||
|
|
*/
|
||
|
|
/**
 * Random values
 * @namespace
 */
const random = {
	/**
	 * Generate random words with pure js, cryptographically not as strong & safe
	 * as the native implementation (multiply-with-carry mixed with Math.random).
	 * @param {TypedArray} typedArray The array to fill (filled in place).
	 * @return {TypedArray} The same array, filled with random values.
	 */
	getRandomValues(typedArray) {
		const words = new Uint32Array(typedArray.buffer);
		const createGenerator = (m_w) => {
			let m_z = 0x3ade68b1;
			const mask = 0xffffffff;
			return () => {
				m_z = (0x9069 * (m_z & 0xFFFF) + (m_z >> 0x10)) & mask;
				m_w = (0x4650 * (m_w & 0xFFFF) + (m_w >> 0x10)) & mask;
				const value = ((((m_z << 0x10) + m_w) & mask) / 0x100000000) + .5;
				return value * (Math.random() > .5 ? 1 : -1);
			};
		};
		// Chain part of each generator's output into the next one's seed.
		let seedCache;
		for (let byteIndex = 0; byteIndex < typedArray.length; byteIndex += 4) {
			const generate = createGenerator((seedCache || Math.random()) * 0x100000000);
			seedCache = generate() * 0x3ade67b7;
			words[byteIndex / 4] = (generate() * 0x100000000) | 0;
		}
		return typedArray;
	}
};
|
||
|
|
|
||
|
|
/** @fileOverview CTR mode implementation.
|
||
|
|
*
|
||
|
|
* Special thanks to Roy Nicholson for pointing out a bug in our
|
||
|
|
* implementation.
|
||
|
|
*
|
||
|
|
* @author Emily Stark
|
||
|
|
* @author Mike Hamburg
|
||
|
|
* @author Dan Boneh
|
||
|
|
*/
|
||
|
|
|
||
|
|
/** Brian Gladman's CTR Mode.
|
||
|
|
* @constructor
|
||
|
|
* @param {Object} _prf The aes instance to generate key.
|
||
|
|
* @param {bitArray} _iv The iv for ctr mode, it must be 128 bits.
|
||
|
|
*/
|
||
|
|
|
||
|
|
const mode = {};

/**
 * Brian Gladman's CTR Mode (the counter scheme used by WinZip AES).
 * @namespace
 */
mode.ctrGladman = class {

	/**
	 * @constructor
	 * @param {Object} prf The aes instance used to generate the keystream.
	 * @param {bitArray} iv The iv for ctr mode, it must be 128 bits; it is
	 * mutated in place as blocks are produced.
	 */
	constructor(prf, iv) {
		this._prf = prf;
		this._initIv = iv;
		this._iv = iv;
	}

	/** Restore the counter to the array given at construction time. */
	reset() {
		this._iv = this._initIv;
	}

	/**
	 * Input some data to calculate.
	 * @param {bitArray} data the data to process, it must be an integral
	 * multiple of 128 bits unless it's the last chunk.
	 */
	update(data) {
		return this.calculate(this._prf, data, this._iv);
	}

	/**
	 * Increment one 32-bit counter word following Gladman's fileenc.c:
	 * the usual case just bumps the top byte; when it would overflow,
	 * the carry ripples through the lower three bytes instead.
	 */
	incWord(word) {
		if (((word >> 24) & 0xff) === 0xff) { // top byte overflows
			let b1 = (word >> 16) & 0xff;
			let b2 = (word >> 8) & 0xff;
			let b3 = word & 0xff;
			if (b1 === 0xff) { // carry into b2
				b1 = 0;
				if (b2 === 0xff) { // carry into b3
					b2 = 0;
					b3 = b3 === 0xff ? 0 : b3 + 1;
				} else {
					b2++;
				}
			} else {
				b1++;
			}
			word = (b1 << 16) + (b2 << 8) + b3;
		} else {
			word += (0x01 << 24);
		}
		return word;
	}

	/** Increment the counter, carrying into the second word on wrap-around. */
	incCounter(counter) {
		counter[0] = this.incWord(counter[0]);
		if (counter[0] === 0) {
			// encr_data in fileenc.c from Dr Brian Gladman's counts only with DWORD j < 8
			counter[1] = this.incWord(counter[1]);
		}
	}

	/** XOR data in place with the keystream derived from iv; returns the clamped result. */
	calculate(prf, data, iv) {
		const length = data.length;
		if (!length) {
			return [];
		}
		const dataBitLength = bitArray.bitLength(data);
		for (let offset = 0; offset < length; offset += 4) {
			this.incCounter(iv);
			const keystream = prf.encrypt(iv);
			data[offset] ^= keystream[0];
			data[offset + 1] ^= keystream[1];
			data[offset + 2] ^= keystream[2];
			data[offset + 3] ^= keystream[3];
		}
		return bitArray.clamp(data, dataBitLength);
	}
};
|
||
|
|
|
||
|
|
const misc = {
	/**
	 * Build an HMAC-SHA1 PRF from raw password bytes; pure-js fallback for
	 * SubtleCrypto.importKey.
	 * @param {Uint8Array|Array} password The password bytes.
	 * @return {Object} An hmacSha1 instance usable as the PRF by misc.pbkdf2.
	 */
	importKey(password) {
		return new misc.hmacSha1(codec.bytes.toBits(password));
	},
	/**
	 * PBKDF2 key derivation; pure-js fallback for SubtleCrypto.deriveBits.
	 * @param {Object} prf PRF with an encrypt(bitArray) method (see hmacSha1.encrypt).
	 * @param {Uint8Array|Array} salt The salt bytes.
	 * @param {number} count Iteration count; falsy values default to 10000.
	 * @param {number} length Desired output length in bits.
	 * @return {ArrayBuffer} length / 8 bytes of derived key material.
	 * @throws {Error} When count or length is negative.
	 */
	pbkdf2(prf, salt, count, length) {
		count = count || 10000;
		if (length < 0 || count < 0) {
			throw new Error("invalid params to pbkdf2");
		}
		// Output size rounded up to whole 32-bit words.
		const byteLength = ((length >> 5) + 1) << 2;
		let u, ui, i, j, k;
		const arrayBuffer = new ArrayBuffer(byteLength);
		const out = new DataView(arrayBuffer);
		let outLength = 0;
		const b = bitArray;
		salt = codec.bytes.toBits(salt);
		// Each outer iteration produces one PRF-sized block T_k = U_1 xor ... xor U_count.
		for (k = 1; outLength < (byteLength || 1); k++) {
			// U_1 = PRF(salt || k); u accumulates the XOR of all U_i.
			u = ui = prf.encrypt(b.concat(salt, [k]));
			for (i = 1; i < count; i++) {
				ui = prf.encrypt(ui);
				for (j = 0; j < ui.length; j++) {
					u[j] ^= ui[j];
				}
			}
			// Copy the block into the output buffer, big-endian, word by word.
			for (i = 0; outLength < (byteLength || 1) && i < u.length; i++) {
				out.setInt32(outLength, u[i]);
				outLength += 4;
			}
		}
		// Truncate to the requested bit length.
		return arrayBuffer.slice(0, length / 8);
	}
};
|
||
|
|
|
||
|
|
/** @fileOverview HMAC implementation.
|
||
|
|
*
|
||
|
|
* @author Emily Stark
|
||
|
|
* @author Mike Hamburg
|
||
|
|
* @author Dan Boneh
|
||
|
|
*/
|
||
|
|
|
||
|
|
/** HMAC with the specified hash function.
|
||
|
|
* @constructor
|
||
|
|
* @param {bitArray} key the key for HMAC.
|
||
|
|
* @param {Object} [Hash=hash.sha1] The hash function to use.
|
||
|
|
*/
|
||
|
|
misc.hmacSha1 = class {

	/**
	 * HMAC with SHA-1 (RFC 2104 construction).
	 * @param {bitArray} key the key for HMAC.
	 */
	constructor(key) {
		const hmac = this;
		const Hash = hmac._hash = hash.sha1;
		// exKey[0] = key XOR ipad (0x36...), exKey[1] = key XOR opad (0x5c...).
		const exKey = [[], []];
		hmac._baseHash = [new Hash(), new Hash()];
		// Block size in 32-bit words.
		const bs = hmac._baseHash[0].blockSize / 32;

		// Keys longer than one block are first hashed down.
		if (key.length > bs) {
			key = new Hash().update(key).finalize();
		}

		for (let i = 0; i < bs; i++) {
			exKey[0][i] = key[i] ^ 0x36363636;
			exKey[1][i] = key[i] ^ 0x5C5C5C5C;
		}

		// Pre-hash the padded keys so each message only costs the remaining blocks.
		hmac._baseHash[0].update(exKey[0]);
		hmac._baseHash[1].update(exKey[1]);
		hmac._resultHash = new Hash(hmac._baseHash[0]);
		// NOTE(review): _updated is left undefined (falsy) here and only assigned
		// by update()/reset(); encrypt() relies on that falsy initial state.
	}
	/** Discard any buffered message data so the instance can be reused. */
	reset() {
		const hmac = this;
		hmac._resultHash = new hmac._hash(hmac._baseHash[0]);
		hmac._updated = false;
	}

	/** Feed message data into the inner hash. */
	update(data) {
		const hmac = this;
		hmac._updated = true;
		hmac._resultHash.update(data);
	}

	/** Finish the inner hash, run the outer hash, reset, and return the MAC. */
	digest() {
		const hmac = this;
		const w = hmac._resultHash.finalize();
		const result = new (hmac._hash)(hmac._baseHash[1]).update(w).finalize();

		hmac.reset();

		return result;
	}

	/**
	 * One-shot MAC, used as the PBKDF2 PRF. Only valid on a fresh or reset instance.
	 * @throws {Error} When data has already been fed via update().
	 */
	encrypt(data) {
		if (!this._updated) {
			this.update(data);
			return this.digest(data);
		} else {
			throw new Error("encrypt on already updated hmac called!");
		}
	}
};
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// True when the platform exposes a native CSPRNG.
const GET_RANDOM_VALUES_SUPPORTED = typeof crypto != UNDEFINED_TYPE && typeof crypto.getRandomValues == FUNCTION_TYPE;

const ERR_INVALID_PASSWORD = "Invalid password";
const ERR_INVALID_SIGNATURE = "Invalid signature";
const ERR_ABORT_CHECK_PASSWORD = "zipjs-abort-check-password";

/**
 * Fill a typed array with random values, preferring the native CSPRNG and
 * falling back to the weaker pure-js generator otherwise.
 * @param {TypedArray} array The array to fill in place.
 * @return {TypedArray} The filled array.
 */
function getRandomValues(array) {
	return GET_RANDOM_VALUES_SUPPORTED ? crypto.getRandomValues(array) : random.getRandomValues(array);
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// AES block size in bytes.
const BLOCK_LENGTH = 16;
const RAW_FORMAT = "raw";
const PBKDF2_ALGORITHM = { name: "PBKDF2" };
const HASH_ALGORITHM = { name: "HMAC" };
const HASH_FUNCTION = "SHA-1";
const BASE_KEY_ALGORITHM = Object.assign({ hash: HASH_ALGORITHM }, PBKDF2_ALGORITHM);
const DERIVED_BITS_ALGORITHM = Object.assign({ iterations: 1000, hash: { name: HASH_FUNCTION } }, PBKDF2_ALGORITHM);
const DERIVED_BITS_USAGE = ["deriveBits"];
// Salt and key sizes in bytes, indexed by strength (encryptionStrength - 1,
// i.e. AES-128/192/256).
const SALT_LENGTH = [8, 12, 16];
const KEY_LENGTH = [16, 24, 32];
// Length in bytes of the truncated HMAC-SHA1 authentication code.
const SIGNATURE_LENGTH = 10;
const COUNTER_DEFAULT_VALUE = [0, 0, 0, 0];
// deno-lint-ignore valid-typeof
const CRYPTO_API_SUPPORTED = typeof crypto != UNDEFINED_TYPE;
const subtle = CRYPTO_API_SUPPORTED && crypto.subtle;
const SUBTLE_API_SUPPORTED = CRYPTO_API_SUPPORTED && typeof subtle != UNDEFINED_TYPE;
// Local aliases for the pure-js fallback implementations defined above.
const codecBytes = codec.bytes;
const Aes = cipher.aes;
const CtrGladman = mode.ctrGladman;
const HmacSha1 = misc.hmacSha1;

// Mutable: cleared at runtime if the SubtleCrypto call is found to reject.
let IMPORT_KEY_SUPPORTED = CRYPTO_API_SUPPORTED && SUBTLE_API_SUPPORTED && typeof subtle.importKey == FUNCTION_TYPE;
let DERIVE_BITS_SUPPORTED = CRYPTO_API_SUPPORTED && SUBTLE_API_SUPPORTED && typeof subtle.deriveBits == FUNCTION_TYPE;
|
||
|
|
|
||
|
|
class AESDecryptionStream extends TransformStream {

	/**
	 * TransformStream decrypting WinZip-AES data: strips the salt/verification
	 * preamble, decrypts in CTR mode and, when signed, checks the trailing
	 * HMAC-SHA1 authentication code.
	 * @param {Object} options
	 * @param {string} options.password Text password (encoded unless rawPassword given).
	 * @param {Uint8Array} options.rawPassword Pre-encoded password, takes precedence.
	 * @param {boolean} options.signed Whether to verify the trailing signature.
	 * @param {number} options.encryptionStrength 1, 2 or 3 (AES-128/192/256).
	 * @param {boolean} options.checkPasswordOnly Abort after password verification.
	 */
	constructor({ password, rawPassword, signed, encryptionStrength, checkPasswordOnly }) {
		super({
			start() {
				Object.assign(this, {
					// Resolved once the keys are derived from the first chunk.
					ready: new Promise(resolve => this.resolveReady = resolve),
					password: encodePassword(password, rawPassword),
					signed,
					strength: encryptionStrength - 1,
					pending: new Uint8Array()
				});
			},
			async transform(chunk, controller) {
				const aesCrypto = this;
				const {
					password,
					strength,
					resolveReady,
					ready
				} = aesCrypto;
				if (password) {
					// First chunk: consume the salt + 2-byte password verification
					// preamble and derive the keys (throws on wrong password).
					await createDecryptionKeys(aesCrypto, strength, password, subarray(chunk, 0, SALT_LENGTH[strength] + 2));
					chunk = subarray(chunk, SALT_LENGTH[strength] + 2);
					if (checkPasswordOnly) {
						controller.error(new Error(ERR_ABORT_CHECK_PASSWORD));
					} else {
						resolveReady();
					}
				} else {
					await ready;
				}
				// Output whole blocks only; the trailing signature bytes and any
				// partial block are kept in `pending` by append().
				const output = new Uint8Array(chunk.length - SIGNATURE_LENGTH - ((chunk.length - SIGNATURE_LENGTH) % BLOCK_LENGTH));
				controller.enqueue(append(aesCrypto, chunk, output, 0, SIGNATURE_LENGTH, true));
			},
			async flush(controller) {
				const {
					signed,
					ctr,
					hmac,
					pending,
					ready
				} = this;
				if (hmac && ctr) {
					await ready;
					// The last SIGNATURE_LENGTH bytes of `pending` are the signature.
					const chunkToDecrypt = subarray(pending, 0, pending.length - SIGNATURE_LENGTH);
					const originalSignature = subarray(pending, pending.length - SIGNATURE_LENGTH);
					let decryptedChunkArray = new Uint8Array();
					if (chunkToDecrypt.length) {
						const encryptedChunk = toBits(codecBytes, chunkToDecrypt);
						// MAC is computed over the ciphertext.
						hmac.update(encryptedChunk);
						const decryptedChunk = ctr.update(encryptedChunk);
						decryptedChunkArray = fromBits(codecBytes, decryptedChunk);
					}
					if (signed) {
						// Compare the truncated HMAC-SHA1 byte by byte.
						const signature = subarray(fromBits(codecBytes, hmac.digest()), 0, SIGNATURE_LENGTH);
						for (let indexSignature = 0; indexSignature < SIGNATURE_LENGTH; indexSignature++) {
							if (signature[indexSignature] != originalSignature[indexSignature]) {
								throw new Error(ERR_INVALID_SIGNATURE);
							}
						}
					}
					controller.enqueue(decryptedChunkArray);
				}
			}
		});
	}
}
|
||
|
|
|
||
|
|
class AESEncryptionStream extends TransformStream {

	/**
	 * TransformStream encrypting data in WinZip-AES format: emits a random
	 * salt + password-verification preamble, CTR-encrypted data, and a trailing
	 * truncated HMAC-SHA1 signature (also exposed as this.signature).
	 * @param {Object} options
	 * @param {string} options.password Text password (encoded unless rawPassword given).
	 * @param {Uint8Array} options.rawPassword Pre-encoded password, takes precedence.
	 * @param {number} options.encryptionStrength 1, 2 or 3 (AES-128/192/256).
	 */
	constructor({ password, rawPassword, encryptionStrength }) {
		// deno-lint-ignore prefer-const
		let stream;
		super({
			start() {
				Object.assign(this, {
					// Resolved once the keys are derived on the first chunk.
					ready: new Promise(resolve => this.resolveReady = resolve),
					password: encodePassword(password, rawPassword),
					strength: encryptionStrength - 1,
					pending: new Uint8Array()
				});
			},
			async transform(chunk, controller) {
				const aesCrypto = this;
				const {
					password,
					strength,
					resolveReady,
					ready
				} = aesCrypto;
				let preamble = new Uint8Array();
				if (password) {
					// First chunk: derive keys and emit salt + verification bytes.
					preamble = await createEncryptionKeys(aesCrypto, strength, password);
					resolveReady();
				} else {
					await ready;
				}
				// Whole blocks only; the partial tail stays in `pending` via append().
				const output = new Uint8Array(preamble.length + chunk.length - (chunk.length % BLOCK_LENGTH));
				output.set(preamble, 0);
				controller.enqueue(append(aesCrypto, chunk, output, preamble.length, 0));
			},
			async flush(controller) {
				const {
					ctr,
					hmac,
					pending,
					ready
				} = this;
				if (hmac && ctr) {
					await ready;
					let encryptedChunkArray = new Uint8Array();
					if (pending.length) {
						// Encrypt the final partial block (CTR needs no padding).
						const encryptedChunk = ctr.update(toBits(codecBytes, pending));
						hmac.update(encryptedChunk);
						encryptedChunkArray = fromBits(codecBytes, encryptedChunk);
					}
					// Expose the authentication code on the stream instance, then
					// append it to the ciphertext.
					stream.signature = fromBits(codecBytes, hmac.digest()).slice(0, SIGNATURE_LENGTH);
					controller.enqueue(concat(encryptedChunkArray, stream.signature));
				}
			}
		});
		stream = this;
	}
}
|
||
|
|
|
||
|
|
/**
 * Process as many whole AES blocks of `input` as possible through CTR + HMAC,
 * writing the result into `output` and keeping the unprocessed tail in
 * aesCrypto.pending for the next call.
 * @param {Object} aesCrypto Stream state holding ctr, hmac and pending.
 * @param {Uint8Array} input Data to process (pending bytes are prepended).
 * @param {Uint8Array} output Destination buffer (grown if pending data is added).
 * @param {number} paddingStart Offset in output where processed data starts.
 * @param {number} paddingEnd Number of trailing input bytes to hold back
 * (the signature length when decrypting).
 * @param {boolean} verifySignature True when decrypting: MAC the input
 * (ciphertext) instead of the output.
 * @return {Uint8Array} The output buffer.
 */
function append(aesCrypto, input, output, paddingStart, paddingEnd, verifySignature) {
	const {
		ctr,
		hmac,
		pending
	} = aesCrypto;
	const inputLength = input.length - paddingEnd;
	if (pending.length) {
		// Prepend leftovers from the previous call and grow output to match.
		input = concat(pending, input);
		output = expand(output, inputLength - (inputLength % BLOCK_LENGTH));
	}
	let offset;
	for (offset = 0; offset <= inputLength - BLOCK_LENGTH; offset += BLOCK_LENGTH) {
		const inputChunk = toBits(codecBytes, subarray(input, offset, offset + BLOCK_LENGTH));
		if (verifySignature) {
			// Decrypting: the MAC covers the ciphertext (input).
			hmac.update(inputChunk);
		}
		const outputChunk = ctr.update(inputChunk);
		if (!verifySignature) {
			// Encrypting: the MAC covers the ciphertext (output).
			hmac.update(outputChunk);
		}
		output.set(fromBits(codecBytes, outputChunk), offset + paddingStart);
	}
	// Everything past the last whole block waits for the next call or flush.
	aesCrypto.pending = subarray(input, offset);
	return output;
}
|
||
|
|
|
||
|
|
/**
 * Derive the decryption keys from the password and the preamble
 * (salt followed by a 2-byte password-verification value).
 * @throws {Error} ERR_INVALID_PASSWORD when the verification bytes don't match.
 */
async function createDecryptionKeys(decrypt, strength, password, preamble) {
	const saltLength = SALT_LENGTH[strength];
	const salt = subarray(preamble, 0, saltLength);
	const expectedVerification = subarray(preamble, saltLength);
	const actualVerification = await createKeys$1(decrypt, strength, password, salt);
	const verified = actualVerification[0] == expectedVerification[0] && actualVerification[1] == expectedVerification[1];
	if (!verified) {
		throw new Error(ERR_INVALID_PASSWORD);
	}
}
|
||
|
|
|
||
|
|
/**
 * Derive the encryption keys from the password and a fresh random salt.
 * @return {Uint8Array} The preamble to emit: salt + password-verification bytes.
 */
async function createEncryptionKeys(encrypt, strength, password) {
	const salt = getRandomValues(new Uint8Array(SALT_LENGTH[strength]));
	const verification = await createKeys$1(encrypt, strength, password, salt);
	return concat(salt, verification);
}
|
||
|
|
|
||
|
|
/**
 * Run PBKDF2 over the password/salt and install the derived AES/HMAC keys
 * on the crypto state object.
 * @param {Object} aesCrypto Stream state; receives keys, ctr and hmac.
 * @param {number} strength 0, 1 or 2 (AES-128/192/256).
 * @param {Uint8Array} password Encoded password (cleared on the state object).
 * @param {Uint8Array} salt Salt bytes.
 * @return {Uint8Array} The 2 password-verification bytes.
 */
async function createKeys$1(aesCrypto, strength, password, salt) {
	aesCrypto.password = null;
	const baseKey = await importKey(RAW_FORMAT, password, BASE_KEY_ALGORITHM, false, DERIVED_BITS_USAGE);
	// Key material layout: AES key | HMAC key | 2 verification bytes.
	const derivedBits = await deriveBits(Object.assign({ salt }, DERIVED_BITS_ALGORITHM), baseKey, 8 * ((KEY_LENGTH[strength] * 2) + 2));
	const compositeKey = new Uint8Array(derivedBits);
	const key = toBits(codecBytes, subarray(compositeKey, 0, KEY_LENGTH[strength]));
	const authentication = toBits(codecBytes, subarray(compositeKey, KEY_LENGTH[strength], KEY_LENGTH[strength] * 2));
	const passwordVerification = subarray(compositeKey, KEY_LENGTH[strength] * 2);
	Object.assign(aesCrypto, {
		keys: {
			key,
			authentication,
			passwordVerification
		},
		// Counter starts at zero; incremented before each keystream block.
		ctr: new CtrGladman(new Aes(key), Array.from(COUNTER_DEFAULT_VALUE)),
		hmac: new HmacSha1(authentication)
	});
	return passwordVerification;
}
|
||
|
|
|
||
|
|
/**
 * Import the password as a PBKDF2 base key, using SubtleCrypto when available
 * and falling back (permanently, on first failure) to the pure-js HMAC key.
 */
async function importKey(format, password, algorithm, extractable, keyUsages) {
	if (!IMPORT_KEY_SUPPORTED) {
		return misc.importKey(password);
	}
	try {
		return await subtle.importKey(format, password, algorithm, extractable, keyUsages);
	} catch {
		// Remember the failure so later calls skip SubtleCrypto entirely.
		IMPORT_KEY_SUPPORTED = false;
		return misc.importKey(password);
	}
}
|
||
|
|
|
||
|
|
/**
 * Derive key bits via PBKDF2, using SubtleCrypto when available and falling
 * back (permanently, on first failure) to the pure-js implementation.
 */
async function deriveBits(algorithm, baseKey, length) {
	if (!DERIVE_BITS_SUPPORTED) {
		return misc.pbkdf2(baseKey, algorithm.salt, DERIVED_BITS_ALGORITHM.iterations, length);
	}
	try {
		return await subtle.deriveBits(algorithm, baseKey, length);
	} catch {
		// Remember the failure so later calls skip SubtleCrypto entirely.
		DERIVE_BITS_SUPPORTED = false;
		return misc.pbkdf2(baseKey, algorithm.salt, DERIVED_BITS_ALGORITHM.iterations, length);
	}
}
|
||
|
|
|
||
|
|
/**
 * Pick the password bytes: a caller-supplied raw (already encoded) password
 * wins over encoding the text password.
 */
function encodePassword(password, rawPassword) {
	return rawPassword === UNDEFINED_VALUE ? encodeText(password) : rawPassword;
}
|
||
|
|
|
||
|
|
/**
 * Concatenate two byte arrays into a new Uint8Array.
 * When both are empty, the left array itself is returned (no allocation).
 */
function concat(leftArray, rightArray) {
	const totalLength = leftArray.length + rightArray.length;
	if (!totalLength) {
		return leftArray;
	}
	const result = new Uint8Array(totalLength);
	result.set(leftArray, 0);
	result.set(rightArray, leftArray.length);
	return result;
}
|
||
|
|
|
||
|
|
/**
 * Return a zero-padded copy of inputArray grown to `length` bytes, or the
 * original array unchanged when no growth is needed.
 */
function expand(inputArray, length) {
	if (!length || length <= inputArray.length) {
		return inputArray;
	}
	const expanded = new Uint8Array(length);
	expanded.set(inputArray, 0);
	return expanded;
}
|
||
|
|
|
||
|
|
// Thin alias over TypedArray#subarray (returns a view, not a copy),
// kept so call sites stay terse.
function subarray(typedArray, start, stop) {
	return typedArray.subarray(start, stop);
}
|
||
|
|
|
||
|
|
// Convert a bitArray chunk to bytes via the codec passed in.
function fromBits(byteCodec, chunk) {
	return byteCodec.fromBits(chunk);
}
|
||
|
|
// Convert bytes to a bitArray chunk via the codec passed in.
function toBits(byteCodec, chunk) {
	return byteCodec.toBits(chunk);
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Size in bytes of the traditional ZipCrypto header prepended to the ciphertext.
const HEADER_LENGTH = 12;
|
||
|
|
|
||
|
|
class ZipCryptoDecryptionStream extends TransformStream {

	/**
	 * TransformStream decrypting traditional ZipCrypto data: decrypts and checks
	 * the 12-byte header on the first chunk, then decrypts the remaining data.
	 * @param {Object} options
	 * @param {Uint8Array|string} options.password The password.
	 * @param {number} options.passwordVerification Expected value of the last
	 * decrypted header byte.
	 * @param {boolean} options.checkPasswordOnly Abort after password verification.
	 */
	constructor({ password, passwordVerification, checkPasswordOnly }) {
		super({
			start() {
				Object.assign(this, {
					password,
					passwordVerification
				});
				// Initialize the three rolling key registers from the password.
				createKeys(this, password);
			},
			transform(chunk, controller) {
				const zipCrypto = this;
				if (zipCrypto.password) {
					// First chunk: decrypt the header; its last byte must match
					// the expected password-verification value.
					const decryptedHeader = decrypt(zipCrypto, chunk.subarray(0, HEADER_LENGTH));
					// Clear the password so later chunks skip the header path.
					zipCrypto.password = null;
					if (decryptedHeader.at(-1) != zipCrypto.passwordVerification) {
						throw new Error(ERR_INVALID_PASSWORD);
					}
					chunk = chunk.subarray(HEADER_LENGTH);
				}
				if (checkPasswordOnly) {
					controller.error(new Error(ERR_ABORT_CHECK_PASSWORD));
				} else {
					controller.enqueue(decrypt(zipCrypto, chunk));
				}
			}
		});
	}
}
|
||
|
|
|
||
|
|
/*
 * TransformStream encrypting data with the legacy ZipCrypto scheme.
 * The first emitted chunk is prefixed with a HEADER_LENGTH-byte random header
 * whose last byte is set to `passwordVerification` so readers can detect a
 * wrong password when decrypting.
 */
class ZipCryptoEncryptionStream extends TransformStream {

	/**
	 * @param {Object} options
	 * @param {string} options.password - password used to derive the key state.
	 * @param {number} options.passwordVerification - verification byte stored
	 * as the last byte of the (encrypted) header.
	 */
	constructor({ password, passwordVerification }) {
		super({
			start() {
				// `this` is the transformer object; it carries the cipher state.
				Object.assign(this, {
					password,
					passwordVerification
				});
				createKeys(this, password);
			},
			transform(chunk, controller) {
				const zipCrypto = this;
				let output;
				let offset;
				if (zipCrypto.password) {
					// First chunk: emit the encrypted random header first.
					// Clearing the password marks the header as written.
					zipCrypto.password = null;
					const header = getRandomValues(new Uint8Array(HEADER_LENGTH));
					header[HEADER_LENGTH - 1] = zipCrypto.passwordVerification;
					output = new Uint8Array(chunk.length + header.length);
					output.set(encrypt(zipCrypto, header), 0);
					offset = HEADER_LENGTH;
				} else {
					output = new Uint8Array(chunk.length);
					offset = 0;
				}
				output.set(encrypt(zipCrypto, chunk), offset);
				controller.enqueue(output);
			}
		});
	}
}
|
||
|
|
|
||
|
|
/*
 * ZipCrypto stream decryption: each ciphertext byte is XORed with the current
 * keystream byte, and the resulting *plaintext* byte feeds the key state.
 *
 * @param {Object} target - cipher state (keys/crcKey0/crcKey2).
 * @param {Uint8Array} input - ciphertext bytes.
 * @returns {Uint8Array} decrypted bytes.
 */
function decrypt(target, input) {
	const length = input.length;
	const output = new Uint8Array(length);
	for (let offset = 0; offset < length; offset++) {
		const plainByte = getByte(target) ^ input[offset];
		output[offset] = plainByte;
		updateKeys(target, plainByte);
	}
	return output;
}
|
||
|
|
|
||
|
|
/*
 * ZipCrypto stream encryption: XOR each plaintext byte with the current
 * keystream byte; the key state is updated with the *plaintext* byte
 * (mirror image of decrypt()).
 *
 * @param {Object} target - cipher state (keys/crcKey0/crcKey2).
 * @param {Uint8Array} input - plaintext bytes.
 * @returns {Uint8Array} encrypted bytes.
 */
function encrypt(target, input) {
	const length = input.length;
	const output = new Uint8Array(length);
	for (let offset = 0; offset < length; offset++) {
		const plainByte = input[offset];
		output[offset] = getByte(target) ^ plainByte;
		updateKeys(target, plainByte);
	}
	return output;
}
|
||
|
|
|
||
|
|
/*
 * Initializes the three 32-bit ZipCrypto keys on `target` from `password`
 * (APPNOTE 6.1 "init_keys"). key0 and key2 are maintained through incremental
 * CRC-32 instances (crcKey0/crcKey2) instead of being recomputed per byte.
 *
 * @param {Object} target - object receiving the cipher state.
 * @param {string} password - password whose UTF-16 code units seed the keys.
 */
function createKeys(target, password) {
	// Standard ZipCrypto initial key values.
	const keys = [0x12345678, 0x23456789, 0x34567890];
	Object.assign(target, {
		keys,
		crcKey0: new Crc32(keys[0]),
		crcKey2: new Crc32(keys[2])
	});
	// Mix each UTF-16 code unit of the password into the key state
	// (charCodeAt, not code points — matches the ZipCrypto convention).
	for (let index = 0; index < password.length; index++) {
		updateKeys(target, password.charCodeAt(index));
	}
}
|
||
|
|
|
||
|
|
/*
 * Advances the ZipCrypto key state with one plaintext byte
 * (APPNOTE 6.1 "update_keys").
 *
 * @param {Object} target - cipher state (keys/crcKey0/crcKey2).
 * @param {number} byte - plaintext byte (0-255).
 */
function updateKeys(target, byte) {
	let [key0, key1, key2] = target.keys;
	// key0 = crc32(key0, byte), tracked incrementally by crcKey0.
	target.crcKey0.append([byte]);
	key0 = ~target.crcKey0.get();
	// key1 = (key1 + (key0 & 0xFF)) * 134775813 + 1 in 32-bit arithmetic;
	// Math.imul keeps the multiplication exact modulo 2^32.
	key1 = getInt32(Math.imul(getInt32(key1 + getInt8(key0)), 134775813) + 1);
	// key2 = crc32(key2, key1 >>> 24), tracked incrementally by crcKey2.
	target.crcKey2.append([key1 >>> 24]);
	key2 = ~target.crcKey2.get();
	target.keys = [key0, key1, key2];
}
|
||
|
|
|
||
|
|
/*
 * Returns the next ZipCrypto keystream byte, derived solely from key2
 * (APPNOTE 6.1 "decrypt_byte"): temp = key2 | 2; ((temp * (temp ^ 1)) >> 8) & 0xFF.
 *
 * @param {Object} target - cipher state holding `keys`.
 * @returns {number} keystream byte (0-255).
 */
function getByte(target) {
	const temp = target.keys[2] | 2;
	const product = Math.imul(temp, temp ^ 1);
	return getInt8(product >>> 8);
}
|
||
|
|
|
||
|
|
/*
 * Truncates a number to its lowest 8 bits (an unsigned byte value).
 *
 * @param {number} value - any integer (negatives are converted via ToInt32).
 * @returns {number} value in the range 0-255.
 */
function getInt8(value) {
	return value & 0xFF;
}
|
||
|
|
|
||
|
|
/*
 * Truncates a number to a signed 32-bit integer. `value | 0` applies the same
 * ToInt32 conversion as the original `value & 0xFFFFFFFF` (both bitwise
 * operators coerce through ToInt32), so values >= 2^31 come back negative.
 *
 * @param {number} value - any number.
 * @returns {number} signed 32-bit integer.
 */
function getInt32(value) {
	return value | 0;
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Error message thrown when the decompressed output exceeds the declared size.
const ERR_INVALID_UNCOMPRESSED_SIZE = "Invalid uncompressed size";
// Format identifiers passed to (De)CompressionStream implementations.
const FORMAT_DEFLATE_RAW = "deflate-raw";
const FORMAT_DEFLATE64_RAW = "deflate64-raw";
|
||
|
|
|
||
|
|
/*
 * TransformStream building the write-side codec pipeline for a zip entry:
 * [CRC-32 (when signed and not AES)] -> [deflate] -> [ZipCrypto or AES
 * encryption]. After the pipeline flushes, `this.signature` holds either the
 * AES authentication data or the CRC-32 of the uncompressed input.
 */
class DeflateStream extends TransformStream {

	/**
	 * @param {Object} options - entry options (compressed, encrypted,
	 * useCompressionStream, zipCrypto, signed, level, ...).
	 * @param {Object} config - runtime config providing chunkSize and the
	 * CompressionStream implementations to use.
	 */
	constructor(options, { chunkSize, CompressionStreamZlib, CompressionStream }) {
		super({});
		const { compressed, encrypted, useCompressionStream, zipCrypto, signed, level } = options;
		const stream = this;
		let crc32Stream, encryptionStream;
		let readable = super.readable;
		// CRC-32 is computed over the raw input, before compression, and only
		// when not using AES (AES carries its own authentication signature).
		if ((!encrypted || zipCrypto) && signed) {
			crc32Stream = new Crc32Stream();
			readable = pipeThrough(readable, crc32Stream);
		}
		if (compressed) {
			// `CompressionStream` (from config) is passed both as the native
			// candidate and as the final fallback implementation.
			readable = pipeThroughCommpressionStream(readable, useCompressionStream, { level, chunkSize }, CompressionStream, CompressionStreamZlib, CompressionStream);
		}
		// Encryption is applied last, over the compressed bytes.
		if (encrypted) {
			if (zipCrypto) {
				readable = pipeThrough(readable, new ZipCryptoEncryptionStream(options));
			} else {
				encryptionStream = new AESEncryptionStream(options);
				readable = pipeThrough(readable, encryptionStream);
			}
		}
		// Publish the final readable and capture the signature on flush.
		setReadable(stream, readable, () => {
			let signature;
			if (encrypted && !zipCrypto) {
				signature = encryptionStream.signature;
			}
			if ((!encrypted || zipCrypto) && signed) {
				signature = new DataView(crc32Stream.value.buffer).getUint32(0);
			}
			stream.signature = signature;
		});
	}
}
|
||
|
|
|
||
|
|
/*
 * TransformStream building the read-side codec pipeline for a zip entry:
 * [ZipCrypto or AES decryption] -> [inflate] -> [CRC-32 check (when signed
 * and not AES)]. On flush, the computed CRC-32 is compared with the expected
 * `signature` and ERR_INVALID_SIGNATURE is thrown on mismatch.
 */
class InflateStream extends TransformStream {

	/**
	 * @param {Object} options - entry options (zipCrypto, encrypted, signed,
	 * signature, compressed, useCompressionStream, deflate64, ...).
	 * @param {Object} config - runtime config providing chunkSize and the
	 * DecompressionStream implementations to use.
	 */
	constructor(options, { chunkSize, DecompressionStreamZlib, DecompressionStream }) {
		super({});
		const { zipCrypto, encrypted, signed, signature, compressed, useCompressionStream, deflate64 } = options;
		let crc32Stream, decryptionStream;
		let readable = super.readable;
		// Decryption first: the stored bytes are encrypted compressed data.
		if (encrypted) {
			if (zipCrypto) {
				readable = pipeThrough(readable, new ZipCryptoDecryptionStream(options));
			} else {
				decryptionStream = new AESDecryptionStream(options);
				readable = pipeThrough(readable, decryptionStream);
			}
		}
		if (compressed) {
			// `DecompressionStream` (from config) is passed both as the native
			// candidate and as the final fallback implementation.
			readable = pipeThroughCommpressionStream(readable, useCompressionStream, { chunkSize, deflate64 }, DecompressionStream, DecompressionStreamZlib, DecompressionStream);
		}
		// CRC-32 is verified over the decompressed output, and only when not
		// using AES (AES verifies integrity in the decryption stream).
		if ((!encrypted || zipCrypto) && signed) {
			crc32Stream = new Crc32Stream();
			readable = pipeThrough(readable, crc32Stream);
		}
		setReadable(this, readable, () => {
			if ((!encrypted || zipCrypto) && signed) {
				// Big-endian read matches how the CRC bytes are accumulated.
				const dataViewSignature = new DataView(crc32Stream.value.buffer);
				if (signature != dataViewSignature.getUint32(0, false)) {
					throw new Error(ERR_INVALID_SIGNATURE);
				}
			}
		});
	}
}
|
||
|
|
|
||
|
|
/*
 * Appends a flush-only TransformStream to `readable` so `flush` runs once the
 * whole pipeline has drained, then exposes the resulting readable as a getter
 * on `stream` (shadowing the TransformStream's own `readable`).
 *
 * @param {TransformStream} stream - stream whose `readable` is overridden.
 * @param {ReadableStream} readable - tail of the codec pipeline.
 * @param {Function} flush - callback invoked when the pipeline completes.
 */
function setReadable(stream, readable, flush) {
	const flushStream = new TransformStream({ flush });
	const finalReadable = pipeThrough(readable, flushStream);
	Object.defineProperty(stream, "readable", {
		get: () => finalReadable
	});
}
|
||
|
|
|
||
|
|
/*
 * Pipes `readable` through a compression/decompression stream, preferring the
 * native implementation and falling back to the zlib-based then bundled
 * implementations when construction fails (e.g. unsupported format such as
 * "deflate64-raw" in native streams).
 *
 * NOTE: the function name's "Commpression" typo is preserved — it is the
 * public name used by DeflateStream/InflateStream.
 *
 * @param {ReadableStream} readable - upstream data.
 * @param {boolean} useCompressionStream - prefer the native implementation.
 * @param {Object} options - stream options ({ level, chunkSize, deflate64 }).
 * @param {Function} CompressionStreamNative - native (de)compression class.
 * @param {Function} CompressionStreamZlib - zlib-backed fallback class.
 * @param {Function} CompressionStream - bundled fallback class.
 * @returns {ReadableStream} the piped readable.
 */
function pipeThroughCommpressionStream(readable, useCompressionStream, options, CompressionStreamNative, CompressionStreamZlib, CompressionStream) {
	// Primary choice: native when requested and available, else zlib, else bundled.
	const Stream = useCompressionStream && CompressionStreamNative ? CompressionStreamNative : CompressionStreamZlib || CompressionStream;
	const format = options.deflate64 ? FORMAT_DEFLATE64_RAW : FORMAT_DEFLATE_RAW;
	try {
		readable = pipeThrough(readable, new Stream(format, options));
	} catch (error) {
		// Only the native path gets a second chance with the fallbacks;
		// failures of the fallbacks themselves are fatal.
		if (useCompressionStream) {
			if (CompressionStreamZlib) {
				readable = pipeThrough(readable, new CompressionStreamZlib(format, options));
			} else if (CompressionStream) {
				readable = pipeThrough(readable, new CompressionStream(format, options));
			} else {
				throw error;
			}
		} else {
			throw error;
		}
	}
	return readable;
}
|
||
|
|
|
||
|
|
/*
 * Thin indirection over ReadableStream#pipeThrough, kept as a helper so the
 * codec pipeline call sites stay uniform.
 *
 * @param {ReadableStream} readable - upstream data.
 * @param {TransformStream} transformStream - transform to apply.
 * @returns {ReadableStream} the transform's readable side.
 */
function pipeThrough(readable, transformStream) {
	const result = readable.pipeThrough(transformStream);
	return result;
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Message protocol between the main thread and codec web workers.
const MESSAGE_EVENT_TYPE = "message";
const MESSAGE_START = "start";
const MESSAGE_PULL = "pull";
const MESSAGE_DATA = "data";
const MESSAGE_ACK_DATA = "ack";
const MESSAGE_CLOSE = "close";
// Codec type prefixes used to select the Deflate vs Inflate stream.
const CODEC_DEFLATE = "deflate";
const CODEC_INFLATE = "inflate";
|
||
|
|
|
||
|
|
/*
 * TransformStream wrapping a Deflate/Inflate pipeline with input/output byte
 * accounting. After the stream completes, the instance exposes `inputSize`,
 * `outputSize` and (when applicable) `signature`. If `options.outputSize` is
 * set, the stream fails as soon as the output exceeds it.
 */
class CodecStream extends TransformStream {

	/**
	 * @param {Object} options - codec options; `codecType` selects
	 * deflate* or inflate*.
	 * @param {Object} config - runtime configuration forwarded to the
	 * underlying Deflate/Inflate stream.
	 */
	constructor(options, config) {
		super({});
		const codec = this;
		const { codecType } = options;
		let Stream;
		if (codecType.startsWith(CODEC_DEFLATE)) {
			Stream = DeflateStream;
		} else if (codecType.startsWith(CODEC_INFLATE)) {
			Stream = InflateStream;
		}
		codec.outputSize = 0;
		let inputSize = 0;
		const stream = new Stream(options, config);
		const readable = super.readable;
		// Counts bytes entering the codec and publishes the total on flush.
		const inputSizeStream = new TransformStream({
			transform(chunk, controller) {
				if (chunk && chunk.length) {
					inputSize += chunk.length;
					controller.enqueue(chunk);
				}
			},
			flush() {
				Object.assign(codec, {
					inputSize
				});
			}
		});
		// Counts bytes leaving the codec, enforces the declared output size,
		// and publishes the signature on flush.
		const outputSizeStream = new TransformStream({
			transform(chunk, controller) {
				if (chunk && chunk.length) {
					controller.enqueue(chunk);
					codec.outputSize += chunk.length;
					if (options.outputSize !== UNDEFINED_VALUE && codec.outputSize > options.outputSize) {
						throw new Error(ERR_INVALID_UNCOMPRESSED_SIZE);
					}
				}
			},
			flush() {
				const { signature } = stream;
				// inputSize is assigned here as well (the input-side flush may
				// not have run yet when output flush fires).
				Object.assign(codec, {
					signature,
					inputSize
				});
			}
		});
		// Lazily assemble the pipeline: input counter -> codec -> output counter.
		Object.defineProperty(codec, "readable", {
			get() {
				return readable.pipeThrough(inputSizeStream).pipeThrough(stream).pipeThrough(outputSizeStream);
			}
		});
	}
}
|
||
|
|
|
||
|
|
/*
 * TransformStream that re-slices an arbitrary byte stream into chunks of
 * exactly `chunkSize` bytes. Data is buffered until strictly more than
 * `chunkSize` bytes are available; the trailing remainder (possibly a full
 * `chunkSize` bytes) is emitted on flush.
 */
class ChunkStream extends TransformStream {

	/**
	 * @param {number} chunkSize - size in bytes of the emitted chunks.
	 */
	constructor(chunkSize) {
		let buffered;
		super({
			transform(chunk, controller) {
				// Prepend any bytes left over from the previous call.
				if (buffered) {
					const combined = new Uint8Array(buffered.length + chunk.length);
					combined.set(buffered);
					combined.set(chunk, buffered.length);
					buffered = null;
					chunk = combined;
				}
				// Emit full-size slices while strictly more than chunkSize
				// bytes remain; keep the remainder buffered.
				while (chunk.length > chunkSize) {
					controller.enqueue(chunk.slice(0, chunkSize));
					chunk = chunk.slice(chunkSize);
				}
				buffered = chunk;
			},
			flush(controller) {
				if (buffered && buffered.length) {
					controller.enqueue(buffered);
				}
			}
		});
	}
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Options used when instantiating module-type web workers.
const MODULE_WORKER_OPTIONS = { type: "module" };

// Lazily-detected web worker support, and the worker URI/options cached after
// the first successful worker creation (see getWebWorker).
let webWorkerSupported, webWorkerURI, webWorkerOptions;
// Whether ReadableStream/WritableStream objects can be transferred to workers.
let transferStreamsSupported = true;
try {
	// Feature-detect via structuredClone of a DOMException — presumably a
	// proxy for modern structured-clone/transferable-stream support
	// (TODO confirm against the environments this targets).
	transferStreamsSupported = typeof structuredClone == FUNCTION_TYPE && structuredClone(new DOMException("", "AbortError")).code !== UNDEFINED_VALUE;
} catch {
	// ignored
}
// Hook allowing an embedder to initialize a (WASM) codec module before use;
// replaced elsewhere when a non-native codec implementation is configured.
let initModule = () => { };
|
||
|
|
|
||
|
|
/*
 * Binds a pooled worker slot (`workerData`) to one codec task. Despite the
 * `class` syntax, the constructor returns a plain interface object
 * ({ run }) produced by createWorkerInterface/createWebWorkerInterface.
 * It also installs lifecycle helpers (terminate/onTaskFinished) on the slot.
 */
class CodecWorker {

	/**
	 * @param {Object} workerData - mutable pooled worker slot.
	 * @param {Object} stream - { readable, writable } of the task data.
	 * @param {Object} workerOptions - { options, config, streamOptions,
	 * useWebWorkers, transferStreams, workerURI }.
	 * @param {Function} onTaskFinished - pool callback invoked when the slot
	 * becomes free.
	 * @returns {Object} interface with a `run()` method.
	 */
	constructor(workerData, { readable, writable }, { options, config, streamOptions, useWebWorkers, transferStreams, workerURI }, onTaskFinished) {
		const { signal } = streamOptions;
		Object.assign(workerData, {
			busy: true,
			// Normalize chunk sizes and report progress before the codec.
			readable: readable
				.pipeThrough(new ChunkStream(config.chunkSize))
				.pipeThrough(new ProgressWatcherStream(streamOptions), { signal }),
			writable,
			// Copy so later mutations (e.g. useCompressionStream) stay task-local.
			options: Object.assign({}, options),
			workerURI,
			transferStreams,
			terminate() {
				return new Promise(resolve => {
					const { worker, busy } = workerData;
					if (worker) {
						if (busy) {
							// Defer actual termination until the current task ends.
							workerData.resolveTerminated = resolve;
						} else {
							worker.terminate();
							resolve();
						}
						workerData.interface = null;
					} else {
						resolve();
					}
				});
			},
			onTaskFinished() {
				const { resolveTerminated } = workerData;
				if (resolveTerminated) {
					// A terminate() call was pending: finish it now.
					workerData.resolveTerminated = null;
					workerData.terminated = true;
					workerData.worker.terminate();
					resolveTerminated();
				}
				workerData.busy = false;
				// Notify the pool (outer onTaskFinished, shadowed by this method).
				onTaskFinished(workerData);
			}
		});
		if (webWorkerSupported === UNDEFINED_VALUE) {
			// deno-lint-ignore valid-typeof
			webWorkerSupported = typeof Worker != UNDEFINED_TYPE;
		}
		return (useWebWorkers && webWorkerSupported ? createWebWorkerInterface : createWorkerInterface)(workerData, config);
	}
}
|
||
|
|
|
||
|
|
/*
 * Identity TransformStream that reports progress to optional user callbacks:
 * `onstart(size)` before the first chunk, `onprogress(bytesSeen, size)` after
 * each chunk, and `onend(totalBytes)` when the stream ends. Callback errors
 * are swallowed by callHandler so they never break the pipeline.
 */
class ProgressWatcherStream extends TransformStream {

	/**
	 * @param {Object} streamOptions - { onstart, onprogress, size, onend },
	 * all callbacks optional.
	 */
	constructor({ onstart, onprogress, size, onend }) {
		let bytesSeen = 0;
		super({
			start: async () => {
				if (onstart) {
					await callHandler(onstart, size);
				}
			},
			transform: async (chunk, controller) => {
				bytesSeen += chunk.length;
				if (onprogress) {
					await callHandler(onprogress, bytesSeen, size);
				}
				controller.enqueue(chunk);
			},
			flush: async () => {
				if (onend) {
					await callHandler(onend, bytesSeen);
				}
			}
		});
	}
}
|
||
|
|
|
||
|
|
/*
 * Invokes a user-provided callback, awaiting any returned promise and
 * swallowing anything it throws or rejects with: progress/lifecycle handlers
 * must never break the codec pipeline.
 *
 * @param {Function} handler - callback to invoke.
 * @param {...*} parameters - arguments forwarded to the callback.
 */
async function callHandler(handler, ...parameters) {
	try {
		const result = handler(...parameters);
		await result;
	} catch {
		// deliberately ignored
	}
}
|
||
|
|
|
||
|
|
/*
 * Builds the inline (non-web-worker) task interface: calling `run()` executes
 * the codec pipeline on the current thread via runWorker$1.
 *
 * @param {Object} workerData - pooled worker slot.
 * @param {Object} config - runtime configuration.
 * @returns {{run: Function}} task interface.
 */
function createWorkerInterface(workerData, config) {
	const run = () => runWorker$1(workerData, config);
	return { run };
}
|
||
|
|
|
||
|
|
/*
 * Builds (or reuses) the web-worker-backed task interface for a pooled slot.
 * Falls back to the inline interface when worker creation fails, and caches
 * the interface on `workerData` so a reused slot keeps its worker.
 *
 * @param {Object} workerData - pooled worker slot.
 * @param {Object} config - runtime configuration ({ baseURI, chunkSize, wasmURI, ... }).
 * @returns {{run: Function}} task interface.
 */
function createWebWorkerInterface(workerData, config) {
	const { baseURI, chunkSize } = config;
	let { wasmURI } = config;

	if (!workerData.interface) {
		// deno-lint-ignore valid-typeof
		if (typeof wasmURI == FUNCTION_TYPE) {
			// wasmURI may be a factory returning the actual URI.
			wasmURI = wasmURI();
		}
		let worker;
		try {
			worker = getWebWorker(workerData.workerURI, baseURI, workerData);
		} catch {
			// Worker creation failed: disable web workers globally and run inline.
			webWorkerSupported = false;
			return createWorkerInterface(workerData, config);
		}
		Object.assign(workerData, {
			worker,
			interface: {
				run: () => runWebWorker(workerData, { chunkSize, wasmURI, baseURI })
			}
		});
	}
	return workerData.interface;
}
|
||
|
|
|
||
|
|
/*
 * Runs a codec task inline on the current thread: pipes the task's readable
 * through a CodecStream into its writable and returns the resulting sizes
 * and signature.
 *
 * @param {Object} workerData - { options, readable, writable, onTaskFinished }.
 * @param {Object} config - runtime configuration passed to the codec.
 * @returns {Promise<{signature, inputSize, outputSize}>} codec results.
 * @throws the pipeline error, annotated with the bytes written so far
 * (`error.outputSize`) when available.
 */
async function runWorker$1({ options, readable, writable, onTaskFinished }, config) {
	let codecStream;
	try {
		if (!options.useCompressionStream) {
			try {
				// Initialize the non-native (e.g. WASM) codec module if configured.
				await initModule(config);
			} catch {
				// Module unavailable: fall back to native CompressionStream.
				options.useCompressionStream = true;
			}
		}
		codecStream = new CodecStream(options, config);
		// The destination stays open/owned by the caller (preventClose/Abort).
		await readable.pipeThrough(codecStream).pipeTo(writable, { preventClose: true, preventAbort: true });
		const {
			signature,
			inputSize,
			outputSize
		} = codecStream;
		return {
			signature,
			inputSize,
			outputSize
		};
	} catch (error) {
		if (codecStream) {
			// Expose partial progress to the caller for error reporting.
			error.outputSize = codecStream.outputSize;
		}
		throw error;
	} finally {
		// Always release the pooled worker slot.
		onTaskFinished();
	}
}
|
||
|
|
|
||
|
|
/*
 * Runs a codec task inside the slot's web worker. Tries to transfer the
 * readable/writable streams to the worker; when transfer is unsupported the
 * main thread falls back to manually pumping data over postMessage (reader/
 * writer set on workerData, driven by onMessage).
 *
 * @param {Object} workerData - pooled worker slot (holds worker, readable, writable).
 * @param {Object} config - { chunkSize, wasmURI, baseURI } forwarded to the worker.
 * @returns {Promise<*>} result reported by the worker's MESSAGE_CLOSE.
 */
async function runWebWorker(workerData, config) {
	let resolveResult, rejectResult;
	// Settled by onMessage when the worker reports completion or an error.
	const result = new Promise((resolve, reject) => {
		resolveResult = resolve;
		rejectResult = reject;
	});
	Object.assign(workerData, {
		reader: null,
		writer: null,
		resolveResult,
		rejectResult,
		result
	});
	const { readable, options } = workerData;
	// Interpose an identity stream so we can observe when all output has
	// been flushed into the real destination.
	const { writable, closed } = watchClosedStream(workerData.writable);
	const streamsTransferred = sendMessage({
		type: MESSAGE_START,
		options,
		config,
		readable,
		writable
	}, workerData);
	if (!streamsTransferred) {
		// Transfer unsupported: pump data manually via MESSAGE_PULL/DATA.
		Object.assign(workerData, {
			reader: readable.getReader(),
			writer: writable.getWriter()
		});
	}
	const resultValue = await result;
	if (!streamsTransferred) {
		// NOTE(review): acquires a fresh writer — relies on onMessage's close()
		// having released the previous writer's lock by this point; confirm.
		await writable.getWriter().close();
	}
	await closed;
	return resultValue;
}
|
||
|
|
|
||
|
|
/*
 * Interposes an identity TransformStream in front of `writableSource` so the
 * caller can both write to it and observe, via the returned `closed` promise,
 * when all data has been flushed into the underlying sink (which is left
 * open thanks to preventClose).
 *
 * @param {WritableStream} writableSource - destination sink.
 * @returns {{writable: WritableStream, closed: Promise<void>}}
 */
function watchClosedStream(writableSource) {
	const identity = new TransformStream();
	const closed = identity.readable.pipeTo(writableSource, { preventClose: true });
	return { writable: identity.writable, closed };
}
|
||
|
|
|
||
|
|
/*
 * Creates the codec web worker, resolving the worker URI (possibly produced
 * by a factory function) and probing module vs classic worker types. The
 * successful URI/options pair is cached module-wide (webWorkerURI /
 * webWorkerOptions) so subsequent workers are created directly.
 *
 * Fallback strategy on construction failure:
 * - blob URI from a factory -> retry asking the factory for a non-blob URI;
 * - data/blob URI as classic -> retry as module worker;
 * - regular URI as module -> retry as classic worker;
 * - otherwise rethrow.
 *
 * @param {string|Function} url - worker URI or factory returning one.
 * @param {string} baseURI - base used to resolve relative worker URIs.
 * @param {Object} workerData - pooled slot; receives the message listener.
 * @param {boolean} [isModuleType] - force module (true) / classic (false) worker.
 * @param {boolean} [useBlobURI=true] - hint passed to the URI factory.
 * @returns {Worker} the created worker with onMessage attached.
 */
function getWebWorker(url, baseURI, workerData, isModuleType, useBlobURI = true) {
	let worker, resolvedURI, resolvedOptions;
	if (webWorkerURI === UNDEFINED_VALUE) {
		// deno-lint-ignore valid-typeof
		const isFunctionURI = typeof url == FUNCTION_TYPE;
		if (isFunctionURI) {
			resolvedURI = url(useBlobURI);
		} else {
			resolvedURI = url;
		}
		const isDataURI = resolvedURI.startsWith("data:");
		const isBlobURI = resolvedURI.startsWith("blob:");
		if (isDataURI || isBlobURI) {
			// data:/blob: URIs default to classic workers.
			if (isModuleType === UNDEFINED_VALUE) {
				isModuleType = false;
			}
			if (isModuleType) {
				resolvedOptions = MODULE_WORKER_OPTIONS;
			}
			try {
				worker = new Worker(resolvedURI, resolvedOptions);
			} catch (error) {
				if (isBlobURI) {
					// Free the unusable blob URL before retrying.
					try {
						URL.revokeObjectURL(resolvedURI);
					} catch {
						// ignored
					}
				}
				if (isFunctionURI && isBlobURI) {
					// Ask the factory for a non-blob URI instead.
					return getWebWorker(url, baseURI, workerData, isModuleType, false);
				} else if (!isModuleType) {
					// Retry as a module worker.
					return getWebWorker(url, baseURI, workerData, true, false);
				} else {
					throw error;
				}
			}
		} else {
			// Regular URIs default to module workers.
			if (isModuleType === UNDEFINED_VALUE) {
				isModuleType = true;
			}
			if (isModuleType) {
				resolvedOptions = MODULE_WORKER_OPTIONS;
			}
			try {
				// Resolve relative worker URIs against the configured base.
				resolvedURI = new URL(resolvedURI, baseURI);
			} catch {
				// ignored — keep the unresolved URI.
			}
			try {
				worker = new Worker(resolvedURI, resolvedOptions);
			} catch (error) {
				if (!isModuleType) {
					return getWebWorker(url, baseURI, workerData, false, useBlobURI);
				} else {
					throw error;
				}
			}
		}
		// Cache the working URI/options for subsequent worker creations.
		webWorkerURI = resolvedURI;
		webWorkerOptions = resolvedOptions;
	} else {
		worker = new Worker(webWorkerURI, webWorkerOptions);
	}
	worker.addEventListener(MESSAGE_EVENT_TYPE, event => onMessage(event, workerData));
	return worker;
}
|
||
|
|
|
||
|
|
/*
 * Posts a protocol message to the codec worker, transferring the chunk
 * buffer and (when supported) the readable/writable streams. If a transfer
 * attempt fails, stream transfer is disabled globally and the message is
 * resent without streams.
 *
 * @param {Object} message - protocol message ({ type, value?, readable?, writable?, ... }).
 * @param {Object} workerData - pooled slot ({ worker, writer, onTaskFinished, transferStreams }).
 * @returns {boolean|undefined} true when the message was posted with
 * transferables (i.e. the streams were transferred); falsy otherwise.
 * @throws rethrows postMessage errors after releasing the writer lock and
 * freeing the worker slot.
 */
function sendMessage(message, { worker, writer, onTaskFinished, transferStreams }) {
	try {
		const { value, readable, writable } = message;
		const transferables = [];
		if (value) {
			// Transfer the chunk's underlying buffer instead of copying it.
			message.value = value;
			transferables.push(message.value.buffer);
		}
		if (transferStreams && transferStreamsSupported) {
			if (readable) {
				transferables.push(readable);
			}
			if (writable) {
				transferables.push(writable);
			}
		} else {
			// Streams cannot be transferred: strip them from the message.
			message.readable = message.writable = null;
		}
		if (transferables.length) {
			try {
				worker.postMessage(message, transferables);
				return true;
			} catch {
				// Environment rejected the transfer: remember that and retry
				// without streams.
				transferStreamsSupported = false;
				message.readable = message.writable = null;
				worker.postMessage(message);
			}
		} else {
			worker.postMessage(message);
		}
	} catch (error) {
		if (writer) {
			writer.releaseLock();
		}
		onTaskFinished();
		throw error;
	}
}
|
||
|
|
|
||
|
|
/*
 * Handles protocol messages coming back from the codec worker:
 * - error: rebuild the worker-side error and reject the task;
 * - MESSAGE_PULL: read the next input chunk and send it as MESSAGE_DATA;
 * - MESSAGE_DATA: write a worker-produced output chunk and acknowledge it;
 * - MESSAGE_CLOSE: resolve the task with the worker's result.
 * Any failure notifies the worker (MESSAGE_CLOSE) and rejects the task.
 *
 * @param {MessageEvent} event - destructured for its `data` payload.
 * @param {Object} workerData - pooled slot holding reader/writer and the
 * task's resolve/reject callbacks.
 */
async function onMessage({ data }, workerData) {
	const { type, value, messageId, result, error } = data;
	const { reader, writer, resolveResult, rejectResult, onTaskFinished } = workerData;
	try {
		if (error) {
			// Errors do not structured-clone with all their properties:
			// rebuild one and copy the serialized fields across.
			const { message, stack, code, name, outputSize } = error;
			const responseError = new Error(message);
			Object.assign(responseError, { stack, code, name, outputSize });
			close(responseError);
		} else {
			if (type == MESSAGE_PULL) {
				const { value, done } = await reader.read();
				sendMessage({ type: MESSAGE_DATA, value, done, messageId }, workerData);
			}
			if (type == MESSAGE_DATA) {
				await writer.ready;
				await writer.write(new Uint8Array(value));
				sendMessage({ type: MESSAGE_ACK_DATA, messageId }, workerData);
			}
			if (type == MESSAGE_CLOSE) {
				close(null, result);
			}
		}
	} catch (error) {
		// Tell the worker to stop, then fail the task locally.
		sendMessage({ type: MESSAGE_CLOSE, messageId }, workerData);
		close(error);
	}

	// Settles the task promise, releases the writer lock and frees the slot.
	function close(error, result) {
		if (error) {
			rejectResult(error);
		} else {
			resolveResult(result);
		}
		if (writer) {
			writer.releaseLock();
		}
		onTaskFinished();
	}
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Pool of worker slots (inline or web-worker backed), bounded by maxWorkers.
let pool = [];
// Tasks waiting for a free slot when the pool is saturated.
const pendingRequests = [];

// Monotonic counter giving each worker slot a unique index.
let indexWorker = 0;
|
||
|
|
|
||
|
|
/*
 * Entry point of the worker pool: normalizes the task options (stream
 * transfer, web worker usage, worker URI, native CompressionStream usage),
 * then runs the task on a free worker slot — creating one when the pool is
 * not full, or queueing the request until a slot frees up.
 *
 * @param {Object} stream - { readable, writable } of the task data.
 * @param {Object} workerOptions - { options, config, streamOptions, ... }.
 * @returns {Promise<*>} result of the codec task.
 */
async function runWorker(stream, workerOptions) {
	const { options, config } = workerOptions;
	const { transferStreams, useWebWorkers, useCompressionStream, compressed, signed, encrypted } = options;
	const { workerURI, maxWorkers } = config;
	// Unset means "transfer streams if possible".
	workerOptions.transferStreams = transferStreams || transferStreams === UNDEFINED_VALUE;
	// Pure copy (no codec work, no transfer) never benefits from a web worker.
	const streamCopy = !compressed && !signed && !encrypted && !workerOptions.transferStreams;
	workerOptions.useWebWorkers = !streamCopy && (useWebWorkers || (useWebWorkers === UNDEFINED_VALUE && config.useWebWorkers));
	workerOptions.workerURI = workerOptions.useWebWorkers && workerURI ? workerURI : UNDEFINED_VALUE;
	options.useCompressionStream = useCompressionStream || (useCompressionStream === UNDEFINED_VALUE && config.useCompressionStream);
	return (await getWorker()).run();

	// Picks a free slot, creates a new one, or queues the request.
	// deno-lint-ignore require-await
	async function getWorker() {
		const workerData = pool.find(workerData => !workerData.busy);
		if (workerData) {
			// Reusing a slot cancels any scheduled delayed termination.
			clearTerminateTimeout(workerData);
			return new CodecWorker(workerData, stream, workerOptions, onTaskFinished);
		} else if (pool.length < maxWorkers) {
			const workerData = { indexWorker };
			indexWorker++;
			pool.push(workerData);
			return new CodecWorker(workerData, stream, workerOptions, onTaskFinished);
		} else {
			// Pool saturated: wait until onTaskFinished hands us a slot.
			return new Promise(resolve => pendingRequests.push({ resolve, stream, workerOptions }));
		}
	}

	// Called when a slot frees up: serve the oldest queued request, or
	// schedule termination / drop the slot.
	function onTaskFinished(workerData) {
		if (pendingRequests.length) {
			const [{ resolve, stream, workerOptions }] = pendingRequests.splice(0, 1);
			resolve(new CodecWorker(workerData, stream, workerOptions, onTaskFinished));
		} else if (workerData.worker) {
			clearTerminateTimeout(workerData);
			terminateWorker(workerData, workerOptions);
		} else {
			// Inline slot (no web worker): just remove it from the pool.
			pool = pool.filter(data => data != workerData);
		}
	}
}
|
||
|
|
|
||
|
|
/*
 * Schedules delayed termination of an idle worker so it can be reused by a
 * task arriving within `terminateWorkerTimeout` ms. A slot whose worker was
 * already terminated (flagged by CodecWorker's onTaskFinished) just has the
 * flag reset. A non-finite or negative timeout keeps the worker alive.
 *
 * @param {Object} workerData - pooled worker slot.
 * @param {Object} workerOptions - provides config.terminateWorkerTimeout.
 */
function terminateWorker(workerData, workerOptions) {
	const { config } = workerOptions;
	const { terminateWorkerTimeout } = config;
	if (Number.isFinite(terminateWorkerTimeout) && terminateWorkerTimeout >= 0) {
		if (workerData.terminated) {
			// Already terminated while busy: consume the flag, nothing to do.
			workerData.terminated = false;
		} else {
			workerData.terminateTimeout = setTimeout(async () => {
				// Remove the slot before terminating so it cannot be reused.
				pool = pool.filter(data => data != workerData);
				try {
					await workerData.terminate();
				} catch {
					// ignored
				}
			}, terminateWorkerTimeout);
		}
	}
}
|
||
|
|
|
||
|
|
/*
 * Cancels a pending delayed termination (scheduled by terminateWorker),
 * typically because the worker slot is being reused or terminated right away.
 *
 * @param {Object} workerData - pooled worker slot.
 */
function clearTerminateTimeout(workerData) {
	if (workerData.terminateTimeout) {
		clearTimeout(workerData.terminateTimeout);
		workerData.terminateTimeout = null;
	}
}
|
||
|
|
|
||
|
|
async function terminateWorkers() {
	// Cancel pending delayed terminations and shut down every pooled
	// worker, waiting until all terminations have settled (failures
	// included, hence allSettled rather than all).
	const terminations = [];
	for (const workerData of pool) {
		clearTerminateTimeout(workerData);
		terminations.push(workerData.terminate());
	}
	await Promise.allSettled(terminations);
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Error messages used by the reader/writer implementations below.
const ERR_HTTP_STATUS = "HTTP error ";
const ERR_HTTP_RANGE = "HTTP Range not supported";
const ERR_ITERATOR_COMPLETED_TOO_SOON = "Writer iterator completed too soon";
const ERR_WRITER_NOT_INITIALIZED = "Writer not initialized";

// HTTP header/method names used by the HTTP readers.
const CONTENT_TYPE_TEXT_PLAIN = "text/plain";
const HTTP_HEADER_CONTENT_LENGTH = "Content-Length";
const HTTP_HEADER_CONTENT_RANGE = "Content-Range";
const HTTP_HEADER_ACCEPT_RANGES = "Accept-Ranges";
const HTTP_HEADER_RANGE = "Range";
const HTTP_HEADER_CONTENT_TYPE = "Content-Type";
const HTTP_METHOD_HEAD = "HEAD";
const HTTP_METHOD_GET = "GET";
const HTTP_RANGE_UNIT = "bytes";
// Default chunk size used by Reader#readable (64 KiB) and initial buffer
// size of Uint8ArrayWriter (256 KiB).
const DEFAULT_CHUNK_SIZE = 64 * 1024;
const DEFAULT_BUFFER_SIZE = 256 * 1024;

// Name of the lazily-defined "writable" accessor on writer objects.
const PROPERTY_NAME_WRITABLE = "writable";
|
||
|
|
|
||
|
|
// Base class of all readers and writers: tracks a byte size and an
// initialization flag checked before any data is transferred.
class Stream {

	constructor() {
		// Total size in bytes (set by subclasses once known).
		this.size = 0;
	}

	// Mark the stream as ready; subclasses call this at the end of their
	// own (possibly async) init().
	init() {
		this.initialized = true;
	}
}
|
||
|
|
|
||
|
|
// Base class of data sources: subclasses implement readUint8Array(offset,
// length[, diskNumberStart]) and this class derives a pull-based
// ReadableStream from it.
class Reader extends Stream {

	get readable() {
		const reader = this;
		const { chunkSize = DEFAULT_CHUNK_SIZE } = reader;
		const readable = new ReadableStream({
			start() {
				// Offset of the next chunk, relative to `readable.offset`.
				this.chunkOffset = 0;
			},
			async pull(controller) {
				// `offset`, `size` and `diskNumberStart` may be assigned on
				// the returned stream by the consumer to read a sub-range.
				const { offset = 0, size, diskNumberStart } = readable;
				const { chunkOffset } = this;
				// Unknown total size: keep pulling full chunks until a read
				// comes back empty.
				const dataSize = size === UNDEFINED_VALUE ? chunkSize : Math.min(chunkSize, size - chunkOffset);
				const data = await readUint8Array(reader, offset + chunkOffset, dataSize, diskNumberStart);
				controller.enqueue(data);
				// Close when the next chunk would start past the known end,
				// or (unknown size) when a non-empty request returned no data.
				if ((chunkOffset + chunkSize > size) || (size === UNDEFINED_VALUE && !data.length && dataSize)) {
					controller.close();
				} else {
					this.chunkOffset += chunkSize;
				}
			}
		});
		return readable;
	}
}
|
||
|
|
|
||
|
|
// Base class of data sinks: subclasses implement writeUint8Array(chunk)
// and this class derives a WritableStream that forwards each chunk to it.
class Writer extends Stream {

	constructor() {
		super();
		const writer = this;
		const writable = new WritableStream({
			write(chunk) {
				// Writes issued before init() are an error.
				if (writer.initialized) {
					return writer.writeUint8Array(chunk);
				}
				throw new Error(ERR_WRITER_NOT_INITIALIZED);
			}
		});
		Object.defineProperty(writer, PROPERTY_NAME_WRITABLE, {
			get: () => writable
		});
	}

	writeUint8Array() {
		// abstract: subclasses receive each written chunk here
	}
}
|
||
|
|
|
||
|
|
// Reader over a base64 data URI ("data:[<mediatype>][;base64],<data>").
class Data64URIReader extends Reader {

	constructor(dataURI) {
		super();
		// Ignore trailing "=" padding so the decoded size can be derived
		// from the payload length alone.
		let dataEnd = dataURI.length;
		while (dataURI.charAt(dataEnd - 1) == "=") {
			dataEnd--;
		}
		// The payload starts right after the first ",".
		const dataStart = dataURI.indexOf(",") + 1;
		Object.assign(this, {
			dataURI,
			dataStart,
			// 4 base64 characters encode 3 bytes (ratio 0.75).
			size: Math.floor((dataEnd - dataStart) * 0.75)
		});
	}

	readUint8Array(offset, length) {
		const {
			dataStart,
			dataURI
		} = this;
		const dataArray = new Uint8Array(length);
		// Decode the whole 4-character base64 groups covering
		// [offset, offset + length).
		const start = Math.floor(offset / 3) * 4;
		const bytes = atob(dataURI.substring(start + dataStart, Math.ceil((offset + length) / 3) * 4 + dataStart));
		// Number of decoded bytes preceding the requested offset.
		const delta = offset - Math.floor(start / 4) * 3;
		let effectiveLength = 0;
		for (let indexByte = delta; indexByte < delta + length && indexByte < bytes.length; indexByte++) {
			dataArray[indexByte - delta] = bytes.charCodeAt(indexByte);
			effectiveLength++;
		}
		// Truncate when fewer bytes were available than requested
		// (read past end of data).
		if (effectiveLength < dataArray.length) {
			return dataArray.subarray(0, effectiveLength);
		} else {
			return dataArray;
		}
	}
}
|
||
|
|
|
||
|
|
// Writer producing a base64 data URI string.
class Data64URIWriter extends Writer {

	constructor(contentType) {
		super();
		Object.assign(this, {
			// Accumulated data URI, extended with base64 as chunks arrive.
			data: "data:" + (contentType || "") + ";base64,",
			// Bytes (held as characters) not yet encoded because base64
			// works on groups of 3 bytes; starts as [] and becomes a string
			// after the first write (string concatenation coerces it).
			pending: []
		});
	}

	writeUint8Array(array) {
		const writer = this;
		let indexArray = 0;
		let dataString = writer.pending;
		const delta = writer.pending.length;
		writer.pending = "";
		// Encode as many whole 3-byte groups as possible (including the
		// bytes left pending by the previous call).
		for (indexArray = 0; indexArray < (Math.floor((delta + array.length) / 3) * 3) - delta; indexArray++) {
			dataString += String.fromCharCode(array[indexArray]);
		}
		// Keep the remaining 0-2 bytes pending for the next call.
		for (; indexArray < array.length; indexArray++) {
			writer.pending += String.fromCharCode(array[indexArray]);
		}
		if (dataString.length) {
			if (dataString.length > 2) {
				writer.data += btoa(dataString);
			} else {
				// NOTE(review): dataString.length appears to always be a
				// multiple of 3 by construction, making this branch look
				// unreachable — confirm before relying on it.
				writer.pending += dataString;
			}
		}
	}

	getData() {
		// Flush the pending bytes (btoa adds padding) and return the URI.
		return this.data + btoa(this.pending);
	}
}
|
||
|
|
|
||
|
|
// Reader over a Blob.
class BlobReader extends Reader {

	constructor(blob) {
		super();
		this.blob = blob;
		this.size = blob.size;
	}

	async readUint8Array(offset, length) {
		const offsetEnd = offset + length;
		// Slice only when a strict sub-range is requested; otherwise read
		// the whole blob directly.
		const needsSlice = offset || offsetEnd < this.size;
		const source = needsSlice ? this.blob.slice(offset, offsetEnd) : this.blob;
		let arrayBuffer = await source.arrayBuffer();
		if (arrayBuffer.byteLength > length) {
			arrayBuffer = arrayBuffer.slice(offset, offsetEnd);
		}
		return new Uint8Array(arrayBuffer);
	}
}
|
||
|
|
|
||
|
|
// Writer collecting all written data into a Blob (via a Response body).
class BlobWriter extends Stream {

	constructor(contentType) {
		super();
		const { readable, writable } = new TransformStream();
		const headers = contentType ? [[HTTP_HEADER_CONTENT_TYPE, contentType]] : [];
		Object.defineProperty(this, PROPERTY_NAME_WRITABLE, {
			get: () => writable
		});
		// Start assembling the Blob immediately; the promise resolves once
		// the writable side is closed.
		this.blob = new Response(readable, { headers }).blob();
	}

	getData() {
		// Promise resolving to the final Blob.
		return this.blob;
	}
}
|
||
|
|
|
||
|
|
// Reader over a string, implemented by wrapping it in a text/plain Blob.
class TextReader extends BlobReader {

	constructor(text) {
		super(new Blob([text], { type: CONTENT_TYPE_TEXT_PLAIN }));
	}
}
|
||
|
|
|
||
|
|
// Writer producing a string, optionally decoded with a specific encoding.
class TextWriter extends BlobWriter {

	constructor(encoding) {
		super(encoding);
		Object.assign(this, {
			encoding,
			utf8: !encoding || encoding.toLowerCase() == "utf-8"
		});
	}

	async getData() {
		const blob = await super.getData();
		// Fast path: Blob#text() decodes UTF-8 directly.
		if (blob.text && this.utf8) {
			return blob.text();
		}
		// Fallback for other encodings (or Blob implementations without
		// text()): decode with FileReader and the requested encoding.
		const fileReader = new FileReader();
		return new Promise((resolve, reject) => {
			fileReader.onload = ({ target }) => resolve(target.result);
			fileReader.onerror = () => reject(fileReader.error);
			fileReader.readAsText(blob, this.encoding);
		});
	}
}
|
||
|
|
|
||
|
|
// HTTP reader backed by fetch(); delegates the shared logic to the
// createHttpReader/initHttpReader/readUint8ArrayHttpReader helpers.
class FetchReader extends Reader {

	constructor(url, options) {
		super();
		createHttpReader(this, url, options);
	}

	async init() {
		await initHttpReader(this, sendFetchRequest, getFetchRequestData);
		super.init();
	}

	readUint8Array(index, length) {
		return readUint8ArrayHttpReader(this, index, length, sendFetchRequest, getFetchRequestData);
	}
}
|
||
|
|
|
||
|
|
// HTTP reader backed by XMLHttpRequest; same structure as FetchReader but
// using the XHR transport helpers.
class XHRReader extends Reader {

	constructor(url, options) {
		super();
		createHttpReader(this, url, options);
	}

	async init() {
		await initHttpReader(this, sendXMLHttpRequest, getXMLHttpRequestData);
		super.init();
	}

	readUint8Array(index, length) {
		return readUint8ArrayHttpReader(this, index, length, sendXMLHttpRequest, getXMLHttpRequestData);
	}
}
|
||
|
|
|
||
|
|
// Initialize the shared state of an HTTP reader: the reader-level flags
// are lifted onto the reader itself, and a copy of the remaining options
// (with the flags and useXHR stripped) is kept to be forwarded to
// fetch()/XMLHttpRequest.
function createHttpReader(httpReader, url, options) {
	const {
		preventHeadRequest,
		useRangeHeader,
		forceRangeRequests,
		combineSizeEocd
	} = options;
	const requestOptions = Object.assign({}, options);
	for (const name of ["preventHeadRequest", "useRangeHeader", "forceRangeRequests", "combineSizeEocd", "useXHR"]) {
		delete requestOptions[name];
	}
	Object.assign(httpReader, {
		url,
		options: requestOptions,
		preventHeadRequest,
		useRangeHeader,
		forceRangeRequests,
		combineSizeEocd
	});
}
|
||
|
|
|
||
|
|
// Initialize an HTTP reader: probe HTTP Range support when requested and
// determine the resource size (from Content-Range, a HEAD request, or a
// full download as last resort).
async function initHttpReader(httpReader, sendRequest, getRequestData) {
	const {
		url,
		preventHeadRequest,
		useRangeHeader,
		forceRangeRequests,
		combineSizeEocd
	} = httpReader;
	if (isHttpFamily(url) && (useRangeHeader || forceRangeRequests) && (typeof preventHeadRequest == "undefined" || preventHeadRequest)) {
		// Probe range support with a GET carrying a Range header; when
		// combineSizeEocd is set, request the end-of-central-directory
		// record as a suffix range so it can be cached for later reads.
		const response = await sendRequest(HTTP_METHOD_GET, httpReader, getRangeHeaders(httpReader, combineSizeEocd ? -END_OF_CENTRAL_DIR_LENGTH : undefined));
		const acceptRanges = response.headers.get(HTTP_HEADER_ACCEPT_RANGES);
		if (!forceRangeRequests && (!acceptRanges || acceptRanges.toLowerCase() != HTTP_RANGE_UNIT)) {
			throw new Error(ERR_HTTP_RANGE);
		} else {
			if (combineSizeEocd) {
				httpReader.eocdCache = new Uint8Array(await response.arrayBuffer());
			}
			let contentSize;
			// Prefer the total size reported by Content-Range
			// ("bytes start-end/total"); "*" means unknown.
			const contentRangeHeader = response.headers.get(HTTP_HEADER_CONTENT_RANGE);
			if (contentRangeHeader) {
				const splitHeader = contentRangeHeader.trim().split(/\s*\/\s*/);
				if (splitHeader.length) {
					const headerValue = splitHeader[1];
					if (headerValue && headerValue != "*") {
						contentSize = Number(headerValue);
					}
				}
			}
			if (contentSize === UNDEFINED_VALUE) {
				// No usable Content-Range: fall back to HEAD/full download.
				await getContentLength(httpReader, sendRequest, getRequestData);
			} else {
				httpReader.size = contentSize;
			}
		}
	} else {
		await getContentLength(httpReader, sendRequest, getRequestData);
	}
}
|
||
|
|
|
||
|
|
// Read a byte range from an HTTP resource, either with an HTTP Range
// request or by slicing a fully-downloaded (and cached) body.
async function readUint8ArrayHttpReader(httpReader, index, length, sendRequest, getRequestData) {
	const {
		useRangeHeader,
		forceRangeRequests,
		eocdCache,
		size,
		options
	} = httpReader;
	if (useRangeHeader || forceRangeRequests) {
		// Serve the cached end-of-central-directory record when the request
		// matches it exactly (populated by initHttpReader/combineSizeEocd).
		if (eocdCache && index == size - END_OF_CENTRAL_DIR_LENGTH && length == END_OF_CENTRAL_DIR_LENGTH) {
			return eocdCache;
		}
		if (index >= size) {
			return new Uint8Array();
		} else {
			// Clamp the requested range to the known resource size.
			if (index + length > size) {
				length = size - index;
			}
			const response = await sendRequest(HTTP_METHOD_GET, httpReader, getRangeHeaders(httpReader, index, length));
			// Anything but 206 means the server ignored the Range header.
			if (response.status != 206) {
				throw new Error(ERR_HTTP_RANGE);
			}
			return new Uint8Array(await response.arrayBuffer());
		}
	} else {
		// No range support: lazily download the whole resource once, then
		// serve all reads from the cached bytes.
		const { data } = httpReader;
		if (!data) {
			await getRequestData(httpReader, options);
		}
		return new Uint8Array(httpReader.data.subarray(index, index + length));
	}
}
|
||
|
|
|
||
|
|
// Build the request headers including an HTTP Range header; a negative
// index produces a suffix range ("bytes=-N"), otherwise an inclusive
// byte range ("bytes=start-end").
function getRangeHeaders(httpReader, index = 0, length = 1) {
	const rangeValue = index < 0 ? `${index}` : `${index}-${index + length - 1}`;
	return Object.assign({}, getHeaders(httpReader), {
		[HTTP_HEADER_RANGE]: `${HTTP_RANGE_UNIT}=${rangeValue}`
	});
}
|
||
|
|
|
||
|
|
// Normalize the configured headers into a plain object; iterable forms
// (Headers, Map, arrays of pairs) are converted via Object.fromEntries.
// Returns undefined when no headers are configured.
function getHeaders({ options }) {
	const { headers } = options;
	if (!headers) {
		return;
	}
	return Symbol.iterator in headers ? Object.fromEntries(headers) : headers;
}
|
||
|
|
|
||
|
|
// Download the whole resource with the fetch() transport and cache it on
// the reader (see getRequestData).
async function getFetchRequestData(httpReader) {
	return getRequestData(httpReader, sendFetchRequest);
}
|
||
|
|
|
||
|
|
// Download the whole resource with the XMLHttpRequest transport and cache
// it on the reader (see getRequestData).
async function getXMLHttpRequestData(httpReader) {
	return getRequestData(httpReader, sendXMLHttpRequest);
}
|
||
|
|
|
||
|
|
// Fetch the entire resource body with the given transport, cache the
// bytes on the reader, and initialize the size from the payload length
// when it is not already known.
async function getRequestData(httpReader, sendRequest) {
	const response = await sendRequest(HTTP_METHOD_GET, httpReader, getHeaders(httpReader));
	const data = new Uint8Array(await response.arrayBuffer());
	httpReader.data = data;
	if (!httpReader.size) {
		httpReader.size = data.length;
	}
}
|
||
|
|
|
||
|
|
// Determine the resource size, preferably via a HEAD request reading
// Content-Length; fall back to downloading the full body when HEAD is
// disabled or the server does not report a length.
async function getContentLength(httpReader, sendRequest, getRequestData) {
	if (!httpReader.preventHeadRequest) {
		const response = await sendRequest(HTTP_METHOD_HEAD, httpReader, getHeaders(httpReader));
		const contentLength = response.headers.get(HTTP_HEADER_CONTENT_LENGTH);
		if (contentLength) {
			httpReader.size = Number(contentLength);
			return;
		}
	}
	await getRequestData(httpReader, httpReader.options);
}
|
||
|
|
|
||
|
|
// Perform an HTTP request with fetch() and translate failures into
// errors; status 416 maps to the dedicated "Range not supported" error.
async function sendFetchRequest(method, { options, url }, headers) {
	const response = await fetch(url, Object.assign({}, options, { method, headers }));
	if (response.status >= 400) {
		if (response.status == 416) {
			throw new Error(ERR_HTTP_RANGE);
		}
		throw new Error(ERR_HTTP_STATUS + (response.statusText || response.status));
	}
	return response;
}
|
||
|
|
|
||
|
|
// Adapt XMLHttpRequest to a fetch-like response object exposing `status`,
// `headers` (as a Map) and an `arrayBuffer()` accessor, so both
// transports can share the same downstream code.
function sendXMLHttpRequest(method, { url }, headers) {
	return new Promise((resolve, reject) => {
		const request = new XMLHttpRequest();
		request.addEventListener("load", () => {
			if (request.status >= 400) {
				reject(request.status == 416 ? new Error(ERR_HTTP_RANGE) : new Error(ERR_HTTP_STATUS + (request.statusText || request.status)));
				return;
			}
			const headerEntries = request.getAllResponseHeaders().trim().split(/[\r\n]+/).map(header => {
				const entry = header.trim().split(/\s*:\s*/);
				// Normalize names to Title-Case (e.g. "content-length" ->
				// "Content-Length") to match the Headers-style lookups.
				entry[0] = entry[0].trim().replace(/^[a-z]|-[a-z]/g, value => value.toUpperCase());
				return entry;
			});
			resolve({
				status: request.status,
				arrayBuffer: () => request.response,
				headers: new Map(headerEntries)
			});
		}, false);
		request.addEventListener("error", event => reject(event.detail ? event.detail.error : new Error("Network error")), false);
		request.open(method, url);
		if (headers) {
			for (const [name, value] of Object.entries(headers)) {
				request.setRequestHeader(name, value);
			}
		}
		request.responseType = "arraybuffer";
		request.send();
	});
}
|
||
|
|
|
||
|
|
// Facade delegating to FetchReader or XHRReader depending on
// options.useXHR.
class HttpReader extends Reader {

	constructor(url, options = {}) {
		super();
		Object.assign(this, {
			url,
			reader: options.useXHR ? new XHRReader(url, options) : new FetchReader(url, options)
		});
	}

	// The size is owned by the delegate; ignore the write done by the
	// Stream constructor (`this.size = 0`).
	set size(value) {
		// ignored
	}

	get size() {
		return this.reader.size;
	}

	async init() {
		await this.reader.init();
		super.init();
	}

	readUint8Array(index, length) {
		return this.reader.readUint8Array(index, length);
	}
}
|
||
|
|
|
||
|
|
// HttpReader preconfigured to always use HTTP Range requests.
class HttpRangeReader extends HttpReader {

	constructor(url, options = {}) {
		// Note: mutates the caller-provided options object.
		options.useRangeHeader = true;
		super(url, options);
	}
}
|
||
|
|
|
||
|
|
|
||
|
|
// Reader over an in-memory Uint8Array (or any typed-array view).
class Uint8ArrayReader extends Reader {

	constructor(array) {
		super();
		// Re-wrap to guarantee a plain Uint8Array view over the same bytes.
		const view = new Uint8Array(array.buffer, array.byteOffset, array.byteLength);
		this.array = view;
		this.size = view.length;
	}

	readUint8Array(index, length) {
		// slice() returns a copy, so callers may mutate the result freely.
		return this.array.slice(index, index + length);
	}
}
|
||
|
|
|
||
|
|
// Writer collecting all data into a growable in-memory Uint8Array.
class Uint8ArrayWriter extends Writer {

	constructor(defaultBufferSize) {
		super();
		this.defaultBufferSize = defaultBufferSize || DEFAULT_BUFFER_SIZE;
	}

	init(initSize = 0) {
		this.offset = 0;
		this.array = new Uint8Array(initSize > 0 ? initSize : this.defaultBufferSize);
		super.init();
	}

	writeUint8Array(chunk) {
		const requiredLength = this.offset + chunk.length;
		if (requiredLength > this.array.length) {
			// Grow the backing buffer geometrically until the chunk fits.
			let newLength = this.array.length || this.defaultBufferSize;
			while (newLength < requiredLength) {
				newLength *= 2;
			}
			const grownArray = new Uint8Array(newLength);
			grownArray.set(this.array);
			this.array = grownArray;
		}
		this.array.set(chunk, this.offset);
		this.offset += chunk.length;
	}

	getData() {
		// Avoid a copy when the buffer is exactly full.
		return this.offset == this.array.length ? this.array : this.array.slice(0, this.offset);
	}
}
|
||
|
|
|
||
|
|
// Reader over a logical stream spread across several readers (split/
// multi-volume archives).
class SplitDataReader extends Reader {

	constructor(readers) {
		super();
		this.readers = readers;
	}

	async init() {
		const reader = this;
		const { readers } = reader;
		reader.lastDiskNumber = 0;
		// Offset of the start of the last disk within the whole stream.
		reader.lastDiskOffset = 0;
		await Promise.all(readers.map(async (diskReader, indexDiskReader) => {
			await diskReader.init();
			if (indexDiskReader != readers.length - 1) {
				reader.lastDiskOffset += diskReader.size;
			}
			reader.size += diskReader.size;
		}));
		super.init();
	}

	// Read `length` bytes at `offset`, where `offset` is relative to the
	// start of disk `diskNumber` (-1 means the last disk). Reads spanning
	// a disk boundary recurse to gather the remainder.
	async readUint8Array(offset, length, diskNumber = 0) {
		const reader = this;
		const { readers } = this;
		let result;
		let currentDiskNumber = diskNumber;
		if (currentDiskNumber == -1) {
			currentDiskNumber = readers.length - 1;
		}
		let currentReaderOffset = offset;
		// Skip whole disks until the offset falls inside one of them.
		while (readers[currentDiskNumber] && currentReaderOffset >= readers[currentDiskNumber].size) {
			currentReaderOffset -= readers[currentDiskNumber].size;
			currentDiskNumber++;
		}
		const currentReader = readers[currentDiskNumber];
		if (currentReader) {
			const currentReaderSize = currentReader.size;
			if (currentReaderOffset + length <= currentReaderSize) {
				// Fully contained in one disk.
				result = await readUint8Array(currentReader, currentReaderOffset, length);
			} else {
				// The range straddles a disk boundary: read the tail of this
				// disk, then recurse for the rest.
				const chunkLength = currentReaderSize - currentReaderOffset;
				result = new Uint8Array(length);
				const firstPart = await readUint8Array(currentReader, currentReaderOffset, chunkLength);
				result.set(firstPart, 0);
				const secondPart = await reader.readUint8Array(offset + chunkLength, length - chunkLength, diskNumber);
				result.set(secondPart, chunkLength);
				// Truncate if the disks ran out of data.
				if (firstPart.length + secondPart.length < length) {
					result = result.subarray(0, firstPart.length + secondPart.length);
				}
			}
		} else {
			// Offset past the last disk.
			result = new Uint8Array();
		}
		reader.lastDiskNumber = Math.max(currentDiskNumber, reader.lastDiskNumber);
		return result;
	}
}
|
||
|
|
|
||
|
|
// Writer splitting the output across multiple volumes of at most
// `maxSize` bytes each, pulling a fresh writer from `writerGenerator`
// (an async iterator) whenever the current disk fills up.
class SplitDataWriter extends Stream {

	constructor(writerGenerator, maxSize = 4294967295) {
		super();
		const writer = this;
		Object.assign(writer, {
			diskNumber: 0,
			diskOffset: 0,
			size: 0,
			maxSize,
			availableSize: maxSize
		});
		// Closure state: the writer object for the current disk, its
		// writable stream and the acquired stream writer.
		let diskSourceWriter, diskWritable, diskWriter;
		const writable = new WritableStream({
			async write(chunk) {
				const { availableSize } = writer;
				if (!diskWriter) {
					// No current disk: obtain the next writer from the generator.
					const { value, done } = await writerGenerator.next();
					if (done && !value) {
						throw new Error(ERR_ITERATOR_COMPLETED_TOO_SOON);
					} else {
						diskSourceWriter = value;
						diskSourceWriter.size = 0;
						// A generated writer may override the volume size.
						if (diskSourceWriter.maxSize) {
							writer.maxSize = diskSourceWriter.maxSize;
						}
						writer.availableSize = writer.maxSize;
						await initStream(diskSourceWriter);
						diskWritable = value.writable;
						diskWriter = diskWritable.getWriter();
					}
					// Retry the write now that a disk is available.
					await this.write(chunk);
				} else if (chunk.length >= availableSize) {
					// Chunk does not fit: fill the current disk, close it,
					// and continue with the remainder on the next disk.
					await writeChunk(chunk.subarray(0, availableSize));
					await closeDisk();
					writer.diskOffset += diskSourceWriter.size;
					writer.diskNumber++;
					diskWriter = null;
					await this.write(chunk.subarray(availableSize));
				} else {
					await writeChunk(chunk);
				}
			},
			async close() {
				await diskWriter.ready;
				await closeDisk();
			}
		});
		Object.defineProperty(writer, PROPERTY_NAME_WRITABLE, {
			get() {
				return writable;
			}
		});

		// Write `chunk` to the current disk and update all size counters.
		async function writeChunk(chunk) {
			const chunkLength = chunk.length;
			if (chunkLength) {
				await diskWriter.ready;
				await diskWriter.write(chunk);
				diskSourceWriter.size += chunkLength;
				writer.size += chunkLength;
				writer.availableSize -= chunkLength;
			}
		}

		async function closeDisk() {
			await diskWriter.close();
		}
	}
}
|
||
|
|
|
||
|
|
// Normalizes the supported reader inputs; the constructor deliberately
// returns the normalized object instead of a GenericReader instance:
// arrays become a SplitDataReader, raw ReadableStreams are wrapped in a
// { readable } object, anything else is passed through.
class GenericReader {

	constructor(reader) {
		let result = reader;
		if (Array.isArray(result)) {
			result = new SplitDataReader(result);
		}
		if (result instanceof ReadableStream) {
			result = { readable: result };
		}
		return result;
	}
}
|
||
|
|
|
||
|
|
// Normalizes the supported writer inputs; like GenericReader, the
// constructor returns the normalized object: async iterators of writers
// become a SplitDataWriter, raw WritableStreams are wrapped in a
// { writable } object, and plain writers get the disk-related defaults.
class GenericWriter {

	constructor(writer) {
		let result = writer;
		// An object with next() but no writable is treated as an async
		// iterator producing one writer per output volume.
		if (result.writable === UNDEFINED_VALUE && typeof result.next == FUNCTION_TYPE) {
			result = new SplitDataWriter(result);
		}
		if (result instanceof WritableStream) {
			result = { writable: result };
		}
		if (result.size === UNDEFINED_VALUE) {
			result.size = 0;
		}
		if (!(result instanceof SplitDataWriter)) {
			Object.assign(result, {
				diskNumber: 0,
				diskOffset: 0,
				availableSize: INFINITY_VALUE,
				maxSize: INFINITY_VALUE
			});
		}
		return result;
	}
}
|
||
|
|
|
||
|
|
// True when the URL (resolved against the configured base URI) uses the
// http: or https: protocol.
function isHttpFamily(url) {
	const { protocol } = new URL(url, getConfiguration().baseURI);
	return protocol == "http:" || protocol == "https:";
}
|
||
|
|
|
||
|
|
// Initialize a stream exactly once; repeated calls, or streams without
// an init() method, are no-ops.
async function initStream(stream, initSize) {
	if (stream.init && !stream.initialized) {
		await stream.init(initSize);
	}
}
|
||
|
|
|
||
|
|
// Thin indirection so callers can invoke any reader uniformly
// (diskNumber is only meaningful for SplitDataReader).
function readUint8Array(reader, offset, size, diskNumber) {
	return reader.readUint8Array(offset, size, diskNumber);
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
/* global TextDecoder */
|
||
|
|
|
||
|
|
// CP437 decoding table: index = byte value, value = the corresponding
// character (IBM code page 437).
const CP437 = "\0☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼ !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~⌂ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ ".split("");
// Sanity check guarding against source-encoding corruption of the table.
const VALID_CP437 = CP437.length == 256;
|
||
|
|
|
||
|
|
// Decode a byte array using the CP437 table; fall back to UTF-8 when the
// table did not survive source encoding intact (VALID_CP437 is false).
function decodeCP437(stringValue) {
	if (!VALID_CP437) {
		return new TextDecoder().decode(stringValue);
	}
	let result = "";
	for (const byte of stringValue) {
		result += CP437[byte];
	}
	return result;
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Decode bytes to a string: use the CP437 table when that encoding is
// requested, TextDecoder for every other encoding label (or UTF-8 when
// no encoding is given).
function decodeText(value, encoding) {
	const useCP437 = encoding && encoding.trim().toLowerCase() == "cp437";
	return useCP437 ? decodeCP437(value) : new TextDecoder(encoding).decode(value);
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
// Canonical property-name strings shared by entry metadata objects.
// Centralizing them avoids typo-prone string duplication and lets
// PROPERTY_NAMES (below) enumerate which fields are copied onto Entry.

// Names, comments, sizes and location of the entry inside the archive.
const PROPERTY_NAME_FILENAME = "filename";
const PROPERTY_NAME_RAW_FILENAME = "rawFilename";
const PROPERTY_NAME_COMMENT = "comment";
const PROPERTY_NAME_RAW_COMMENT = "rawComment";
const PROPERTY_NAME_UNCOMPRESSED_SIZE = "uncompressedSize";
const PROPERTY_NAME_COMPRESSED_SIZE = "compressedSize";
const PROPERTY_NAME_OFFSET = "offset";
const PROPERTY_NAME_DISK_NUMBER_START = "diskNumberStart";
// Timestamps: both the parsed Date form and the raw on-disk value.
const PROPERTY_NAME_LAST_MODIFICATION_DATE = "lastModDate";
const PROPERTY_NAME_RAW_LAST_MODIFICATION_DATE = "rawLastModDate";
const PROPERTY_NAME_LAST_ACCESS_DATE = "lastAccessDate";
const PROPERTY_NAME_RAW_LAST_ACCESS_DATE = "rawLastAccessDate";
const PROPERTY_NAME_CREATION_DATE = "creationDate";
const PROPERTY_NAME_RAW_CREATION_DATE = "rawCreationDate";
// File attributes (raw words plus decoded MS-DOS / Unix views).
const PROPERTY_NAME_INTERNAL_FILE_ATTRIBUTES = "internalFileAttributes";
const PROPERTY_NAME_EXTERNAL_FILE_ATTRIBUTES = "externalFileAttributes";
const PROPERTY_NAME_MSDOS_ATTRIBUTES_RAW = "msdosAttributesRaw";
const PROPERTY_NAME_MSDOS_ATTRIBUTES = "msdosAttributes";
const PROPERTY_NAME_MS_DOS_COMPATIBLE = "msDosCompatible";
// Format/feature flags of the entry.
const PROPERTY_NAME_ZIP64 = "zip64";
const PROPERTY_NAME_ENCRYPTED = "encrypted";
const PROPERTY_NAME_VERSION = "version";
const PROPERTY_NAME_VERSION_MADE_BY = "versionMadeBy";
const PROPERTY_NAME_ZIPCRYPTO = "zipCrypto";
const PROPERTY_NAME_DIRECTORY = "directory";
const PROPERTY_NAME_EXECUTABLE = "executable";
const PROPERTY_NAME_COMPRESSION_METHOD = "compressionMethod";
const PROPERTY_NAME_SIGNATURE = "signature";
// Parsed extra fields and Unix ownership/permission bits.
const PROPERTY_NAME_EXTRA_FIELD = "extraField";
const PROPERTY_NAME_EXTRA_FIELD_INFOZIP = "extraFieldInfoZip";
const PROPERTY_NAME_EXTRA_FIELD_UNIX = "extraFieldUnix";
const PROPERTY_NAME_UID = "uid";
const PROPERTY_NAME_GID = "gid";
const PROPERTY_NAME_UNIX_MODE = "unixMode";
const PROPERTY_NAME_SETUID = "setuid";
const PROPERTY_NAME_SETGID = "setgid";
const PROPERTY_NAME_STICKY = "sticky";
const PROPERTY_NAME_BITFLAG = "bitFlag";
const PROPERTY_NAME_FILENAME_UTF8 = "filenameUTF8";
const PROPERTY_NAME_COMMENT_UTF8 = "commentUTF8";
const PROPERTY_NAME_RAW_EXTRA_FIELD = "rawExtraField";
const PROPERTY_NAME_EXTRA_FIELD_ZIP64 = "extraFieldZip64";
const PROPERTY_NAME_EXTRA_FIELD_UNICODE_PATH = "extraFieldUnicodePath";
const PROPERTY_NAME_EXTRA_FIELD_UNICODE_COMMENT = "extraFieldUnicodeComment";
const PROPERTY_NAME_EXTRA_FIELD_AES = "extraFieldAES";
const PROPERTY_NAME_EXTRA_FIELD_NTFS = "extraFieldNTFS";
const PROPERTY_NAME_EXTRA_FIELD_EXTENDED_TIMESTAMP = "extraFieldExtendedTimestamp";
|
||
|
|
|
||
|
|
// Complete list of metadata properties copied from an internal ZipEntry
// onto the public Entry object (see the Entry constructor below).
const PROPERTY_NAMES = [
    PROPERTY_NAME_FILENAME,
    PROPERTY_NAME_RAW_FILENAME,
    PROPERTY_NAME_UNCOMPRESSED_SIZE,
    PROPERTY_NAME_COMPRESSED_SIZE,
    PROPERTY_NAME_LAST_MODIFICATION_DATE,
    PROPERTY_NAME_RAW_LAST_MODIFICATION_DATE,
    PROPERTY_NAME_COMMENT,
    PROPERTY_NAME_RAW_COMMENT,
    PROPERTY_NAME_LAST_ACCESS_DATE,
    PROPERTY_NAME_CREATION_DATE,
    PROPERTY_NAME_RAW_CREATION_DATE,
    PROPERTY_NAME_OFFSET,
    PROPERTY_NAME_DISK_NUMBER_START,
    PROPERTY_NAME_INTERNAL_FILE_ATTRIBUTES,
    PROPERTY_NAME_EXTERNAL_FILE_ATTRIBUTES,
    PROPERTY_NAME_MSDOS_ATTRIBUTES_RAW,
    PROPERTY_NAME_MSDOS_ATTRIBUTES,
    PROPERTY_NAME_MS_DOS_COMPATIBLE,
    PROPERTY_NAME_ZIP64,
    PROPERTY_NAME_ENCRYPTED,
    PROPERTY_NAME_VERSION,
    PROPERTY_NAME_VERSION_MADE_BY,
    PROPERTY_NAME_ZIPCRYPTO,
    PROPERTY_NAME_DIRECTORY,
    PROPERTY_NAME_EXECUTABLE,
    PROPERTY_NAME_COMPRESSION_METHOD,
    PROPERTY_NAME_SIGNATURE,
    PROPERTY_NAME_EXTRA_FIELD,
    PROPERTY_NAME_EXTRA_FIELD_UNIX,
    PROPERTY_NAME_EXTRA_FIELD_INFOZIP,
    PROPERTY_NAME_UID,
    PROPERTY_NAME_GID,
    PROPERTY_NAME_UNIX_MODE,
    PROPERTY_NAME_SETUID,
    PROPERTY_NAME_SETGID,
    PROPERTY_NAME_STICKY,
    PROPERTY_NAME_BITFLAG,
    PROPERTY_NAME_FILENAME_UTF8,
    PROPERTY_NAME_COMMENT_UTF8,
    PROPERTY_NAME_RAW_EXTRA_FIELD,
    PROPERTY_NAME_EXTRA_FIELD_ZIP64,
    PROPERTY_NAME_EXTRA_FIELD_UNICODE_PATH,
    PROPERTY_NAME_EXTRA_FIELD_UNICODE_COMMENT,
    PROPERTY_NAME_EXTRA_FIELD_AES,
    PROPERTY_NAME_EXTRA_FIELD_NTFS,
    PROPERTY_NAME_EXTRA_FIELD_EXTENDED_TIMESTAMP
];
|
||
|
|
|
||
|
|
// Public, plain-data snapshot of an entry's metadata. Copies every
// property listed in PROPERTY_NAMES from the internal entry object;
// getData/arrayBuffer are attached separately by the reader.
class Entry {

    constructor(data) {
        for (const propertyName of PROPERTY_NAMES) {
            this[propertyName] = data[propertyName];
        }
    }

}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
// Option-name strings looked up via getOptionValue$1 on either the
// per-call options object or the reader/writer-level options.

// Text decoding/encoding hooks and charsets for names and comments.
const OPTION_FILENAME_ENCODING = "filenameEncoding";
const OPTION_COMMENT_ENCODING = "commentEncoding";
const OPTION_DECODE_TEXT = "decodeText";
// Extraction of bytes found before/after the archive proper.
const OPTION_EXTRACT_PREPENDED_DATA = "extractPrependedData";
const OPTION_EXTRACT_APPENDED_DATA = "extractAppendedData";
// Decryption inputs and raw (already compressed/encrypted) pass-through.
const OPTION_PASSWORD = "password";
const OPTION_RAW_PASSWORD = "rawPassword";
const OPTION_PASS_THROUGH = "passThrough";
const OPTION_SIGNAL = "signal";
// Validation-only modes and integrity checks used by ZipEntry#getData.
const OPTION_CHECK_PASSWORD_ONLY = "checkPasswordOnly";
const OPTION_CHECK_OVERLAPPING_ENTRY_ONLY = "checkOverlappingEntryOnly";
const OPTION_CHECK_OVERLAPPING_ENTRY = "checkOverlappingEntry";
const OPTION_CHECK_SIGNATURE = "checkSignature";
// Codec execution knobs (workers, native CompressionStream, transfers).
const OPTION_USE_WEB_WORKERS = "useWebWorkers";
const OPTION_USE_COMPRESSION_STREAM = "useCompressionStream";
const OPTION_TRANSFER_STREAMS = "transferStreams";
const OPTION_PREVENT_CLOSE = "preventClose";
// Writer-side options (kept here with the shared option names).
const OPTION_ENCRYPTION_STRENGTH = "encryptionStrength";
const OPTION_EXTENDED_TIMESTAMP = "extendedTimestamp";
const OPTION_KEEP_ORDER = "keepOrder";
const OPTION_LEVEL = "level";
const OPTION_BUFFERED_WRITE = "bufferedWrite";
const OPTION_CREATE_TEMP_STREAM = "createTempStream";
const OPTION_DATA_DESCRIPTOR_SIGNATURE = "dataDescriptorSignature";
const OPTION_USE_UNICODE_FILE_NAMES = "useUnicodeFileNames";
const OPTION_DATA_DESCRIPTOR = "dataDescriptor";
const OPTION_SUPPORT_ZIP64_SPLIT_FILE = "supportZip64SplitFile";
const OPTION_ENCODE_TEXT = "encodeText";
const OPTION_OFFSET = "offset";
const OPTION_USDZ = "usdz";
const OPTION_UNIX_EXTRA_FIELD_TYPE = "unixExtraFieldType";
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Error messages thrown by the reader. Callers compare Error#message
// against these exported strings, so the exact text is part of the API.
const ERR_BAD_FORMAT = "File format is not recognized";
const ERR_EOCDR_NOT_FOUND = "End of central directory not found";
const ERR_EOCDR_LOCATOR_ZIP64_NOT_FOUND = "End of Zip64 central directory locator not found";
const ERR_CENTRAL_DIRECTORY_NOT_FOUND = "Central directory header not found";
const ERR_LOCAL_FILE_HEADER_NOT_FOUND = "Local file header not found";
const ERR_EXTRAFIELD_ZIP64_NOT_FOUND = "Zip64 extra field not found";
const ERR_ENCRYPTED = "File contains encrypted entry";
const ERR_UNSUPPORTED_ENCRYPTION = "Encryption method not supported";
const ERR_UNSUPPORTED_COMPRESSION = "Compression method not supported";
const ERR_SPLIT_ZIP_FILE = "Split zip file";
const ERR_OVERLAPPING_ENTRY = "Overlapping entry found";
// Charset labels: UTF-8 when the language-encoding bit flag is set,
// CP437 (the historical ZIP default) otherwise.
const CHARSET_UTF8 = "utf-8";
const PROPERTY_NAME_UTF8_SUFFIX = "UTF8";
const CHARSET_CP437 = "cp437";
|
||
|
|
// Entry properties that can overflow their classic header field and the
// sentinel value (all bits set) that signals "real value is in the
// Zip64 extra field": 32-bit sizes/offset, 16-bit disk number.
const ZIP64_PROPERTIES = [
    [PROPERTY_NAME_UNCOMPRESSED_SIZE, MAX_32_BITS],
    [PROPERTY_NAME_COMPRESSED_SIZE, MAX_32_BITS],
    [PROPERTY_NAME_OFFSET, MAX_32_BITS],
    [PROPERTY_NAME_DISK_NUMBER_START, MAX_16_BITS]
];
|
||
|
|
// How to read the Zip64 replacement for an overflowed field, keyed by
// the sentinel above: a 16-bit field is widened to 4 bytes (uint32),
// a 32-bit field to 8 bytes (uint64).
const ZIP64_EXTRACTION = {
    [MAX_16_BITS]: {
        getValue: getUint32,
        bytes: 4
    },
    [MAX_32_BITS]: {
        getValue: getBigUint64,
        bytes: 8
    }
};
|
||
|
|
|
||
|
|
// Reads the central directory of a zip file and exposes its entries.
// The parsing in getEntriesGenerator is deliberately tolerant: it
// compensates for data prepended to the archive (e.g. self-extracting
// stubs) by comparing stored offsets against expected ones.
class ZipReader {

    // reader: any supported input (wrapped in GenericReader);
    // options: reader-level defaults, overridable per call.
    // readRanges collects the byte ranges consumed by getData so that
    // overlapping entries can be detected later.
    constructor(reader, options = {}) {
        Object.assign(this, {
            reader: new GenericReader(reader),
            options,
            config: getConfiguration(),
            readRanges: []
        });
    }

    // Yields one Entry per central-directory record, in directory order.
    // Also populates this.comment and, on request, this.prependedData /
    // this.appendedData once all entries have been yielded.
    async* getEntriesGenerator(options = {}) {
        const zipReader = this;
        let { reader } = zipReader;
        const { config } = zipReader;
        await initStream(reader);
        // Random access is required; if the reader has no known size or
        // no readUint8Array, buffer the whole stream into a Blob first.
        if (reader.size === UNDEFINED_VALUE || !reader.readUint8Array) {
            reader = new BlobReader(await new Response(reader.readable).blob());
            await initStream(reader);
        }
        if (reader.size < END_OF_CENTRAL_DIR_LENGTH) {
            throw new Error(ERR_BAD_FORMAT);
        }
        reader.chunkSize = getChunkSize(config);
        // Scan backwards from the end of the file for the EOCDR signature
        // (the record may be followed by up to 64KB of comment).
        const endOfDirectoryInfo = await seekSignature(reader, END_OF_CENTRAL_DIR_SIGNATURE, reader.size, END_OF_CENTRAL_DIR_LENGTH, MAX_16_BITS * 16);
        if (!endOfDirectoryInfo) {
            // No EOCDR: distinguish a split archive (first 4 bytes carry
            // the split-file signature) from a plainly unrecognized file.
            const signatureArray = await readUint8Array(reader, 0, 4);
            const signatureView = getDataView$1(signatureArray);
            if (getUint32(signatureView) == SPLIT_ZIP_FILE_SIGNATURE) {
                throw new Error(ERR_SPLIT_ZIP_FILE);
            } else {
                throw new Error(ERR_EOCDR_NOT_FOUND);
            }
        }
        // EOCDR fields: 12 = central directory size, 16 = its offset,
        // 20 = archive comment length, 4/6 = disk numbers, 8 = entry count.
        const endOfDirectoryView = getDataView$1(endOfDirectoryInfo);
        let directoryDataLength = getUint32(endOfDirectoryView, 12);
        let directoryDataOffset = getUint32(endOfDirectoryView, 16);
        const commentOffset = endOfDirectoryInfo.offset;
        const commentLength = getUint16(endOfDirectoryView, 20);
        const appendedDataOffset = commentOffset + END_OF_CENTRAL_DIR_LENGTH + commentLength;
        let lastDiskNumber = getUint16(endOfDirectoryView, 4);
        const expectedLastDiskNumber = reader.lastDiskNumber || 0;
        let diskNumber = getUint16(endOfDirectoryView, 6);
        let filesLength = getUint16(endOfDirectoryView, 8);
        let prependedDataLength = 0;
        let startOffset = 0;
        // Any field at its all-ones sentinel means the real value lives in
        // the Zip64 end-of-central-directory record.
        if (directoryDataOffset == MAX_32_BITS || directoryDataLength == MAX_32_BITS || filesLength == MAX_16_BITS || diskNumber == MAX_16_BITS) {
            const endOfDirectoryLocatorArray = await readUint8Array(reader, endOfDirectoryInfo.offset - ZIP64_END_OF_CENTRAL_DIR_LOCATOR_LENGTH, ZIP64_END_OF_CENTRAL_DIR_LOCATOR_LENGTH);
            const endOfDirectoryLocatorView = getDataView$1(endOfDirectoryLocatorArray);
            if (getUint32(endOfDirectoryLocatorView, 0) == ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIGNATURE) {
                directoryDataOffset = getBigUint64(endOfDirectoryLocatorView, 8);
                let endOfDirectoryArray = await readUint8Array(reader, directoryDataOffset, ZIP64_END_OF_CENTRAL_DIR_LENGTH, -1);
                let endOfDirectoryView = getDataView$1(endOfDirectoryArray);
                // If the locator's offset does not land on a valid Zip64 EOCDR,
                // retry at the position implied by the record layout; a higher
                // corrected offset reveals prepended data.
                const expectedDirectoryDataOffset = endOfDirectoryInfo.offset - ZIP64_END_OF_CENTRAL_DIR_LOCATOR_LENGTH - ZIP64_END_OF_CENTRAL_DIR_LENGTH;
                if (getUint32(endOfDirectoryView, 0) != ZIP64_END_OF_CENTRAL_DIR_SIGNATURE && directoryDataOffset != expectedDirectoryDataOffset) {
                    const originalDirectoryDataOffset = directoryDataOffset;
                    directoryDataOffset = expectedDirectoryDataOffset;
                    if (directoryDataOffset > originalDirectoryDataOffset) {
                        prependedDataLength = directoryDataOffset - originalDirectoryDataOffset;
                    }
                    endOfDirectoryArray = await readUint8Array(reader, directoryDataOffset, ZIP64_END_OF_CENTRAL_DIR_LENGTH, -1);
                    endOfDirectoryView = getDataView$1(endOfDirectoryArray);
                }
                if (getUint32(endOfDirectoryView, 0) != ZIP64_END_OF_CENTRAL_DIR_SIGNATURE) {
                    throw new Error(ERR_EOCDR_LOCATOR_ZIP64_NOT_FOUND);
                }
                // Widen only the fields that actually overflowed.
                if (lastDiskNumber == MAX_16_BITS) {
                    lastDiskNumber = getUint32(endOfDirectoryView, 16);
                }
                if (diskNumber == MAX_16_BITS) {
                    diskNumber = getUint32(endOfDirectoryView, 20);
                }
                if (filesLength == MAX_16_BITS) {
                    filesLength = getBigUint64(endOfDirectoryView, 32);
                }
                if (directoryDataLength == MAX_32_BITS) {
                    directoryDataLength = getBigUint64(endOfDirectoryView, 40);
                }
                // The Zip64 EOCDR sits right after the central directory, so
                // the directory itself starts directoryDataLength earlier.
                directoryDataOffset -= directoryDataLength;
            }
        }
        // Stored offset points past the end of file: the stored value was
        // relative to a truncated/moved archive; recompute from the tail
        // and record the (negative) discrepancy as prepended data.
        if (directoryDataOffset >= reader.size) {
            prependedDataLength = reader.size - directoryDataOffset - directoryDataLength - END_OF_CENTRAL_DIR_LENGTH;
            directoryDataOffset = reader.size - directoryDataLength - END_OF_CENTRAL_DIR_LENGTH;
        }
        if (expectedLastDiskNumber != lastDiskNumber) {
            throw new Error(ERR_SPLIT_ZIP_FILE);
        }
        if (directoryDataOffset < 0) {
            throw new Error(ERR_BAD_FORMAT);
        }
        let offset = 0;
        let directoryArray = await readUint8Array(reader, directoryDataOffset, directoryDataLength, diskNumber);
        let directoryView = getDataView$1(directoryArray);
        if (directoryDataLength) {
            // Directory does not start with a central-header signature:
            // assume prepended data shifted everything and re-read from the
            // offset implied by the EOCDR position.
            const expectedDirectoryDataOffset = endOfDirectoryInfo.offset - directoryDataLength;
            if (getUint32(directoryView, offset) != CENTRAL_FILE_HEADER_SIGNATURE && directoryDataOffset != expectedDirectoryDataOffset) {
                const originalDirectoryDataOffset = directoryDataOffset;
                directoryDataOffset = expectedDirectoryDataOffset;
                if (directoryDataOffset > originalDirectoryDataOffset) {
                    prependedDataLength += directoryDataOffset - originalDirectoryDataOffset;
                }
                directoryArray = await readUint8Array(reader, directoryDataOffset, directoryDataLength, diskNumber);
                directoryView = getDataView$1(directoryArray);
            }
        }
        // Trust the geometry (EOCDR position minus directory offset) over
        // the stored length when they disagree.
        const expectedDirectoryDataLength = endOfDirectoryInfo.offset - directoryDataOffset - (reader.lastDiskOffset || 0);
        if (directoryDataLength != expectedDirectoryDataLength && expectedDirectoryDataLength >= 0) {
            directoryDataLength = expectedDirectoryDataLength;
            directoryArray = await readUint8Array(reader, directoryDataOffset, directoryDataLength, diskNumber);
            directoryView = getDataView$1(directoryArray);
        }
        if (directoryDataOffset < 0 || directoryDataOffset >= reader.size) {
            throw new Error(ERR_BAD_FORMAT);
        }
        const filenameEncoding = getOptionValue$1(zipReader, options, OPTION_FILENAME_ENCODING);
        const commentEncoding = getOptionValue$1(zipReader, options, OPTION_COMMENT_ENCODING);
        // Walk the central directory, one 46-byte header (plus variable
        // filename/extra-field/comment) per entry.
        for (let indexFile = 0; indexFile < filesLength; indexFile++) {
            const fileEntry = new ZipEntry(reader, config, zipReader.options);
            if (getUint32(directoryView, offset) != CENTRAL_FILE_HEADER_SIGNATURE) {
                throw new Error(ERR_CENTRAL_DIRECTORY_NOT_FOUND);
            }
            readCommonHeader(fileEntry, directoryView, offset + 6);
            const languageEncodingFlag = Boolean(fileEntry.bitFlag.languageEncodingFlag);
            const filenameOffset = offset + 46;
            const extraFieldOffset = filenameOffset + fileEntry.filenameLength;
            const commentOffset = extraFieldOffset + fileEntry.extraFieldLength;
            // "version made by" high byte encodes the host OS: 0 = MS-DOS,
            // 3 = Unix.
            const versionMadeBy = getUint16(directoryView, offset + 4);
            const msDosCompatible = versionMadeBy >> 8 == 0;
            const unixCompatible = versionMadeBy >> 8 == 3;
            const rawFilename = directoryArray.subarray(filenameOffset, extraFieldOffset);
            const commentLength = getUint16(directoryView, offset + 32);
            const endOffset = commentOffset + commentLength;
            const rawComment = directoryArray.subarray(commentOffset, endOffset);
            // The language-encoding bit applies to both filename and comment.
            const filenameUTF8 = languageEncodingFlag;
            const commentUTF8 = languageEncodingFlag;
            const externalFileAttributes = getUint32(directoryView, offset + 38);
            // Low byte of the external attributes carries MS-DOS flags.
            const msdosAttributesRaw = externalFileAttributes & MAX_8_BITS;
            const msdosAttributes = {
                readOnly: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_READONLY_MASK),
                hidden: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_HIDDEN_MASK),
                system: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_SYSTEM_MASK),
                directory: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_DIR_MASK),
                archive: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_ARCHIVE_MASK)
            };
            // Local header offset, corrected for any detected prepended data.
            const offsetFileEntry = getUint32(directoryView, offset + 42) + prependedDataLength;
            // A custom decodeText option may decline (return undefined), in
            // which case the built-in decoder is used as a fallback.
            const decode = getOptionValue$1(zipReader, options, OPTION_DECODE_TEXT) || decodeText;
            const rawFilenameEncoding = filenameUTF8 ? CHARSET_UTF8 : filenameEncoding || CHARSET_CP437;
            const rawCommentEncoding = commentUTF8 ? CHARSET_UTF8 : commentEncoding || CHARSET_CP437;
            let filename = decode(rawFilename, rawFilenameEncoding);
            if (filename === UNDEFINED_VALUE) {
                filename = decodeText(rawFilename, rawFilenameEncoding);
            }
            let comment = decode(rawComment, rawCommentEncoding);
            if (comment === UNDEFINED_VALUE) {
                comment = decodeText(rawComment, rawCommentEncoding);
            }
            Object.assign(fileEntry, {
                versionMadeBy,
                msDosCompatible,
                // Sizes start at 0 and are filled in by readCommonFooter.
                compressedSize: 0,
                uncompressedSize: 0,
                commentLength,
                offset: offsetFileEntry,
                diskNumberStart: getUint16(directoryView, offset + 34),
                internalFileAttributes: getUint16(directoryView, offset + 36),
                externalFileAttributes,
                msdosAttributesRaw,
                msdosAttributes,
                rawFilename,
                filenameUTF8,
                commentUTF8,
                rawExtraField: directoryArray.subarray(extraFieldOffset, commentOffset),
                rawComment,
                filename,
                comment
            });
            // Track the highest local-header offset seen: everything before
            // the first entry counts as prepended data (extracted below).
            startOffset = Math.max(offsetFileEntry, startOffset);
            readCommonFooter(fileEntry, fileEntry, directoryView, offset + 6);
            // Upper 16 bits of the external attributes hold the Unix mode,
            // used as a fallback when no Unix extra field provided one.
            const unixExternalUpper = (fileEntry.externalFileAttributes >> 16) & MAX_16_BITS;
            if (fileEntry.unixMode === UNDEFINED_VALUE && (unixExternalUpper & (FILE_ATTR_UNIX_DEFAULT_MASK | FILE_ATTR_UNIX_EXECUTABLE_MASK | FILE_ATTR_UNIX_TYPE_DIR)) != 0) {
                fileEntry.unixMode = unixExternalUpper;
            }
            const setuid = Boolean(fileEntry.unixMode & FILE_ATTR_UNIX_SETUID_MASK);
            const setgid = Boolean(fileEntry.unixMode & FILE_ATTR_UNIX_SETGID_MASK);
            const sticky = Boolean(fileEntry.unixMode & FILE_ATTR_UNIX_STICKY_MASK);
            // Prefer the explicit mode; otherwise only trust the raw attr
            // bits when the entry was made on Unix.
            const executable = (fileEntry.unixMode !== UNDEFINED_VALUE)
                ? ((fileEntry.unixMode & FILE_ATTR_UNIX_EXECUTABLE_MASK) != 0)
                : (unixCompatible && ((unixExternalUpper & FILE_ATTR_UNIX_EXECUTABLE_MASK) != 0));
            const modeIsDir = fileEntry.unixMode !== UNDEFINED_VALUE && ((fileEntry.unixMode & FILE_ATTR_UNIX_TYPE_MASK) == FILE_ATTR_UNIX_TYPE_DIR);
            const upperIsDir = ((unixExternalUpper & FILE_ATTR_UNIX_TYPE_MASK) == FILE_ATTR_UNIX_TYPE_DIR);
            Object.assign(fileEntry, {
                setuid,
                setgid,
                sticky,
                unixExternalUpper,
                // Legacy singular aliases kept for backward compatibility.
                internalFileAttribute: fileEntry.internalFileAttributes,
                externalFileAttribute: fileEntry.externalFileAttributes,
                executable,
                // Directory if any of: Unix mode type bits, raw attr type
                // bits, MS-DOS directory flag, or a "/"-terminated name with
                // no content.
                directory: modeIsDir || upperIsDir || (msDosCompatible && msdosAttributes.directory) || (filename.endsWith(DIRECTORY_SIGNATURE) && !fileEntry.uncompressedSize),
                // Encrypted without an AES extra field implies legacy ZipCrypto.
                zipCrypto: fileEntry.encrypted && !fileEntry.extraFieldAES
            });
            const entry = new Entry(fileEntry);
            entry.getData = (writer, options) => fileEntry.getData(writer, entry, zipReader.readRanges, options);
            // Convenience: pipe getData through a TransformStream and collect
            // the output as an ArrayBuffer.
            entry.arrayBuffer = async options => {
                const writer = new TransformStream();
                const [arrayBuffer] = await Promise.all([
                    new Response(writer.readable).arrayBuffer(),
                    fileEntry.getData(writer, entry, zipReader.readRanges, options)]);
                return arrayBuffer;
            };
            offset = endOffset;
            const { onprogress } = options;
            if (onprogress) {
                try {
                    await onprogress(indexFile + 1, filesLength, new Entry(fileEntry));
                } catch {
                    // Progress callbacks are best-effort; their errors must not
                    // abort entry enumeration.
                }
            }
            yield entry;
        }
        const extractPrependedData = getOptionValue$1(zipReader, options, OPTION_EXTRACT_PREPENDED_DATA);
        const extractAppendedData = getOptionValue$1(zipReader, options, OPTION_EXTRACT_APPENDED_DATA);
        if (extractPrependedData) {
            zipReader.prependedData = startOffset > 0 ? await readUint8Array(reader, 0, startOffset) : new Uint8Array();
        }
        // Note: commentOffset/commentLength here are the EOCDR-level values
        // captured before the loop (the loop shadows both names).
        zipReader.comment = commentLength ? await readUint8Array(reader, commentOffset + END_OF_CENTRAL_DIR_LENGTH, commentLength) : new Uint8Array();
        if (extractAppendedData) {
            zipReader.appendedData = appendedDataOffset < reader.size ? await readUint8Array(reader, appendedDataOffset, reader.size - appendedDataOffset) : new Uint8Array();
        }
        return true;
    }

    // Eagerly collects all entries from getEntriesGenerator into an array.
    async getEntries(options = {}) {
        const entries = [];
        for await (const entry of this.getEntriesGenerator(options)) {
            entries.push(entry);
        }
        return entries;
    }

    // No resources to release on the read path; kept for API symmetry
    // with ZipWriter#close.
    async close() {
    }
}
|
||
|
|
|
||
|
|
// TransformStream-like adapter: zip bytes written to `this.writable`
// come out of `this.readable` as one chunk per entry, each chunk being
// the entry's metadata plus a `readable` stream of its decompressed
// content (undefined when the entry exposes no getData).
class ZipReaderStream {

    constructor(options = {}) {
        const inputStream = new TransformStream();
        const entryGenerator = new ZipReader(inputStream.readable, options).getEntriesGenerator();
        this.readable = new ReadableStream({
            async pull(controller) {
                const { done, value } = await entryGenerator.next();
                if (done) {
                    controller.close();
                    return;
                }
                // Start decompressing the entry into a fresh stream; the
                // consumer reads it from chunk.readable.
                let contentReadable;
                if (value.getData) {
                    const contentStream = new TransformStream();
                    value.getData(contentStream.writable);
                    contentReadable = contentStream.readable;
                }
                const chunk = { ...value, readable: contentReadable };
                // The getData closure is replaced by the readable stream.
                delete chunk.getData;
                controller.enqueue(chunk);
            }
        });
        this.writable = inputStream.writable;
    }

}
|
||
|
|
|
||
|
|
// Internal per-entry state built while parsing the central directory;
// getData decodes the entry's content by reading its local file header
// and streaming the payload through the codec worker.
class ZipEntry {

    constructor(reader, config, options) {
        Object.assign(this, {
            reader,
            config,
            options
        });
    }

    // Streams the entry's (decrypted, decompressed) content into `writer`.
    // fileEntry is the public Entry to enrich with local-header fields;
    // readRanges accumulates consumed byte ranges for overlap detection.
    // Returns writer.getData()'s result (or the writable) — or undefined
    // in the check-only modes.
    async getData(writer, fileEntry, readRanges, options = {}) {
        const zipEntry = this;
        const {
            reader,
            offset,
            diskNumberStart,
            extraFieldAES,
            extraFieldZip64,
            compressionMethod,
            config,
            bitFlag,
            signature,
            rawLastModDate,
            uncompressedSize,
            compressedSize
        } = zipEntry;
        const {
            dataDescriptor
        } = bitFlag;
        const localDirectory = fileEntry.localDirectory = {};
        const dataArray = await readUint8Array(reader, offset, HEADER_SIZE, diskNumberStart);
        const dataView = getDataView$1(dataArray);
        let password = getOptionValue$1(zipEntry, options, OPTION_PASSWORD);
        let rawPassword = getOptionValue$1(zipEntry, options, OPTION_RAW_PASSWORD);
        const passThrough = getOptionValue$1(zipEntry, options, OPTION_PASS_THROUGH);
        // Normalize empty passwords to a falsy value.
        password = password && password.length && password;
        rawPassword = rawPassword && rawPassword.length && rawPassword;
        if (extraFieldAES) {
            if (extraFieldAES.originalCompressionMethod != COMPRESSION_METHOD_AES) {
                throw new Error(ERR_UNSUPPORTED_COMPRESSION);
            }
        }
        // Only store/deflate/deflate64 can be decoded here — unless the
        // caller asked for the raw (pass-through) payload.
        if ((compressionMethod != COMPRESSION_METHOD_STORE && compressionMethod != COMPRESSION_METHOD_DEFLATE && compressionMethod != COMPRESSION_METHOD_DEFLATE_64) && !passThrough) {
            throw new Error(ERR_UNSUPPORTED_COMPRESSION);
        }
        if (getUint32(dataView, 0) != LOCAL_FILE_HEADER_SIGNATURE) {
            throw new Error(ERR_LOCAL_FILE_HEADER_NOT_FOUND);
        }
        readCommonHeader(localDirectory, dataView, 4);
        const {
            extraFieldLength,
            filenameLength,
            lastAccessDate,
            creationDate
        } = localDirectory;
        localDirectory.rawExtraField = extraFieldLength ?
            await readUint8Array(reader, offset + HEADER_SIZE + filenameLength, extraFieldLength, diskNumberStart) :
            new Uint8Array();
        // `true` = reading the local (not central) header variant.
        readCommonFooter(zipEntry, localDirectory, dataView, 4, true);
        Object.assign(fileEntry, { lastAccessDate, creationDate });
        // Treat as encrypted only when both directory records agree and the
        // caller is not asking for the raw payload.
        const encrypted = zipEntry.encrypted && localDirectory.encrypted && !passThrough;
        const zipCrypto = encrypted && !extraFieldAES;
        if (!passThrough) {
            fileEntry.zipCrypto = zipCrypto;
        }
        if (encrypted) {
            if (!zipCrypto && extraFieldAES.strength === UNDEFINED_VALUE) {
                throw new Error(ERR_UNSUPPORTED_ENCRYPTION);
            } else if (!password && !rawPassword) {
                throw new Error(ERR_ENCRYPTED);
            }
        }
        // Payload starts right after the local header + name + extra field.
        const dataOffset = offset + HEADER_SIZE + filenameLength + extraFieldLength;
        const size = compressedSize;
        const readable = reader.readable;
        Object.assign(readable, {
            diskNumberStart,
            offset: dataOffset,
            size
        });
        const signal = getOptionValue$1(zipEntry, options, OPTION_SIGNAL);
        const checkPasswordOnly = getOptionValue$1(zipEntry, options, OPTION_CHECK_PASSWORD_ONLY);
        let checkOverlappingEntry = getOptionValue$1(zipEntry, options, OPTION_CHECK_OVERLAPPING_ENTRY);
        const checkOverlappingEntryOnly = getOptionValue$1(zipEntry, options, OPTION_CHECK_OVERLAPPING_ENTRY_ONLY);
        if (checkOverlappingEntryOnly) {
            checkOverlappingEntry = true;
        }
        const { onstart, onprogress, onend } = options;
        // Deflate64 is only handled by the bundled codec, never by the
        // native DecompressionStream.
        const deflate64 = compressionMethod == COMPRESSION_METHOD_DEFLATE_64;
        let useCompressionStream = getOptionValue$1(zipEntry, options, OPTION_USE_COMPRESSION_STREAM);
        if (deflate64) {
            useCompressionStream = false;
        }
        const workerOptions = {
            options: {
                codecType: CODEC_INFLATE,
                password,
                rawPassword,
                zipCrypto,
                encryptionStrength: extraFieldAES && extraFieldAES.strength,
                signed: getOptionValue$1(zipEntry, options, OPTION_CHECK_SIGNATURE) && !passThrough,
                // ZipCrypto verification byte: high byte of the timestamp
                // when a data descriptor is used, else of the CRC signature.
                passwordVerification: zipCrypto && (dataDescriptor ? ((rawLastModDate >>> 8) & MAX_8_BITS) : ((signature >>> 24) & MAX_8_BITS)),
                outputSize: passThrough ? compressedSize : uncompressedSize,
                signature,
                compressed: compressionMethod != 0 && !passThrough,
                encrypted: zipEntry.encrypted && !passThrough,
                useWebWorkers: getOptionValue$1(zipEntry, options, OPTION_USE_WEB_WORKERS),
                useCompressionStream,
                transferStreams: getOptionValue$1(zipEntry, options, OPTION_TRANSFER_STREAMS),
                deflate64,
                checkPasswordOnly
            },
            config,
            streamOptions: { signal, size, onstart, onprogress, onend }
        };
        if (checkOverlappingEntry) {
            await detectOverlappingEntry({
                reader,
                fileEntry,
                offset,
                diskNumberStart,
                signature,
                compressedSize,
                uncompressedSize,
                dataOffset,
                // Either directory record may set the data-descriptor flag.
                dataDescriptor: dataDescriptor || localDirectory.bitFlag.dataDescriptor,
                extraFieldZip64: extraFieldZip64 || localDirectory.extraFieldZip64,
                readRanges
            });
        }
        let writable;
        try {
            if (!checkOverlappingEntryOnly) {
                // Password check only: discard the output.
                if (checkPasswordOnly) {
                    writer = new WritableStream();
                }
                writer = new GenericWriter(writer);
                await initStream(writer, passThrough ? compressedSize : uncompressedSize);
                ({ writable } = writer);
                const { outputSize } = await runWorker({ readable, writable }, workerOptions);
                writer.size += outputSize;
                if (outputSize != (passThrough ? compressedSize : uncompressedSize)) {
                    throw new Error(ERR_INVALID_UNCOMPRESSED_SIZE);
                }
            }
        } catch (error) {
            // Keep the writer's size accurate even on failure, then rethrow
            // unless this is the expected abort of a password-only check.
            if (error.outputSize !== UNDEFINED_VALUE) {
                writer.size += error.outputSize;
            }
            if (!checkPasswordOnly || error.message != ERR_ABORT_CHECK_PASSWORD) {
                throw error;
            }
        } finally {
            const preventClose = getOptionValue$1(zipEntry, options, OPTION_PREVENT_CLOSE);
            if (!preventClose && writable && !writable.locked) {
                await writable.getWriter().close();
            }
        }
        return checkPasswordOnly || checkOverlappingEntryOnly ? UNDEFINED_VALUE : writer.getData ? writer.getData() : writable;
    }
}
|
||
|
|
|
||
|
|
// Decodes the header fields shared by local-file and central-directory
// records (bit flag, version, DOS timestamp, name/extra-field lengths)
// and stores them on `directory`.
function readCommonHeader(directory, dataView, offset) {
	const flags = getUint16(dataView, offset + 2);
	directory.rawBitFlag = flags;
	const modDateRaw = getUint32(dataView, offset + 6);
	directory.encrypted = (flags & BITFLAG_ENCRYPTED) == BITFLAG_ENCRYPTED;
	directory.version = getUint16(dataView, offset);
	directory.bitFlag = {
		level: (flags & BITFLAG_LEVEL) >> 1,
		dataDescriptor: (flags & BITFLAG_DATA_DESCRIPTOR) == BITFLAG_DATA_DESCRIPTOR,
		languageEncodingFlag: (flags & BITFLAG_LANG_ENCODING_FLAG) == BITFLAG_LANG_ENCODING_FLAG
	};
	directory.rawLastModDate = modDateRaw;
	directory.lastModDate = getDate(modDateRaw);
	directory.filenameLength = getUint16(dataView, offset + 22);
	directory.extraFieldLength = getUint16(dataView, offset + 24);
}
|
||
|
|
|
||
|
|
// Parses the variable-length tail shared by local and central directory
// records: splits the raw extra field into (type, data) sub-fields, reads
// signature/size fields, then decodes every recognized extra field type
// onto `directory`.
function readCommonFooter(fileEntry, directory, dataView, offset, localDirectory) {
	const { rawExtraField } = directory;
	const extraField = directory.extraField = new Map();
	const rawExtraFieldView = getDataView$1(new Uint8Array(rawExtraField));
	let offsetExtraField = 0;
	try {
		// Walk the TLV-encoded extra field: 2-byte type, 2-byte size, payload.
		while (offsetExtraField < rawExtraField.length) {
			const type = getUint16(rawExtraFieldView, offsetExtraField);
			const size = getUint16(rawExtraFieldView, offsetExtraField + 2);
			extraField.set(type, {
				type,
				data: rawExtraField.slice(offsetExtraField + 4, offsetExtraField + 4 + size)
			});
			offsetExtraField += 4 + size;
		}
	} catch {
		// ignored: a truncated/malformed extra field keeps whatever sub-fields parsed so far
	}
	const compressionMethod = getUint16(dataView, offset + 4);
	Object.assign(directory, {
		signature: getUint32(dataView, offset + HEADER_OFFSET_SIGNATURE),
		compressedSize: getUint32(dataView, offset + HEADER_OFFSET_COMPRESSED_SIZE),
		uncompressedSize: getUint32(dataView, offset + HEADER_OFFSET_UNCOMPRESSED_SIZE)
	});
	// Zip64 field replaces 32-bit sizes/offsets stored as their max sentinel values.
	const extraFieldZip64 = extraField.get(EXTRAFIELD_TYPE_ZIP64);
	if (extraFieldZip64) {
		readExtraFieldZip64(extraFieldZip64, directory);
		directory.extraFieldZip64 = extraFieldZip64;
	}
	// Unicode path/comment fields override the header strings when their CRC matches.
	const extraFieldUnicodePath = extraField.get(EXTRAFIELD_TYPE_UNICODE_PATH);
	if (extraFieldUnicodePath) {
		readExtraFieldUnicode(extraFieldUnicodePath, PROPERTY_NAME_FILENAME, PROPERTY_NAME_RAW_FILENAME, directory, fileEntry);
		directory.extraFieldUnicodePath = extraFieldUnicodePath;
	}
	const extraFieldUnicodeComment = extraField.get(EXTRAFIELD_TYPE_UNICODE_COMMENT);
	if (extraFieldUnicodeComment) {
		readExtraFieldUnicode(extraFieldUnicodeComment, PROPERTY_NAME_COMMENT, PROPERTY_NAME_RAW_COMMENT, directory, fileEntry);
		directory.extraFieldUnicodeComment = extraFieldUnicodeComment;
	}
	// AES field carries the real compression method; otherwise use the header's.
	const extraFieldAES = extraField.get(EXTRAFIELD_TYPE_AES);
	if (extraFieldAES) {
		readExtraFieldAES(extraFieldAES, directory, compressionMethod);
		directory.extraFieldAES = extraFieldAES;
	} else {
		directory.compressionMethod = compressionMethod;
	}
	const extraFieldNTFS = extraField.get(EXTRAFIELD_TYPE_NTFS);
	if (extraFieldNTFS) {
		readExtraFieldNTFS(extraFieldNTFS, directory);
		directory.extraFieldNTFS = extraFieldNTFS;
	}
	// Unix uid/gid: prefer the "unix" field, fall back to the Info-ZIP variant.
	const extraFieldUnix = extraField.get(EXTRAFIELD_TYPE_UNIX);
	if (extraFieldUnix) {
		readExtraFieldUnix(extraFieldUnix, directory, false);
		directory.extraFieldUnix = extraFieldUnix;
	} else {
		const extraFieldInfoZip = extraField.get(EXTRAFIELD_TYPE_INFOZIP);
		if (extraFieldInfoZip) {
			readExtraFieldUnix(extraFieldInfoZip, directory, true);
			directory.extraFieldInfoZip = extraFieldInfoZip;
		}
	}
	const extraFieldExtendedTimestamp = extraField.get(EXTRAFIELD_TYPE_EXTENDED_TIMESTAMP);
	if (extraFieldExtendedTimestamp) {
		readExtraFieldExtendedTimestamp(extraFieldExtendedTimestamp, directory, localDirectory);
		directory.extraFieldExtendedTimestamp = extraFieldExtendedTimestamp;
	}
	// USDZ field is only recorded, not decoded here.
	const extraFieldUSDZ = extraField.get(EXTRAFIELD_TYPE_USDZ);
	if (extraFieldUSDZ) {
		directory.extraFieldUSDZ = extraFieldUSDZ;
	}
}
|
||
|
|
|
||
|
|
// Reads the Zip64 extra field: each 32-bit directory property stored as its
// max sentinel value is replaced by the corresponding 64-bit value from the
// field, consumed in ZIP64_PROPERTIES order.
function readExtraFieldZip64(extraFieldZip64, directory) {
	directory.zip64 = true;
	const fieldView = getDataView$1(extraFieldZip64.data);
	const missingProperties = ZIP64_PROPERTIES.filter(([propertyName, max]) => directory[propertyName] == max);
	let offset = 0;
	for (const [propertyName, max] of missingProperties) {
		if (directory[propertyName] == max) {
			const extraction = ZIP64_EXTRACTION[max];
			directory[propertyName] = extraFieldZip64[propertyName] = extraction.getValue(fieldView, offset);
			offset += extraction.bytes;
		} else if (extraFieldZip64[propertyName]) {
			throw new Error(ERR_EXTRAFIELD_ZIP64_NOT_FOUND);
		}
	}
}
|
||
|
|
|
||
|
|
// Reads a Unicode path/comment extra field. The field is only trusted
// ("valid") when the entry does not already declare UTF-8 via the language
// encoding flag AND the field's stored CRC matches the CRC of the raw header
// string; in that case the decoded value replaces the directory property.
function readExtraFieldUnicode(extraFieldUnicode, propertyName, rawPropertyName, directory, fileEntry) {
	const fieldView = getDataView$1(extraFieldUnicode.data);
	// CRC-32 of the raw (header) string, serialized little-endian for comparison.
	const checksum = new Crc32();
	checksum.append(fileEntry[rawPropertyName]);
	const expectedView = getDataView$1(new Uint8Array(4));
	expectedView.setUint32(0, checksum.get(), true);
	const storedCrc = getUint32(fieldView, 1);
	extraFieldUnicode.version = getUint8(fieldView, 0);
	// Payload starts after the 1-byte version and 4-byte CRC.
	extraFieldUnicode[propertyName] = decodeText(extraFieldUnicode.data.subarray(5));
	extraFieldUnicode.valid = !fileEntry.bitFlag.languageEncodingFlag && storedCrc == getUint32(expectedView, 0);
	if (extraFieldUnicode.valid) {
		directory[propertyName] = extraFieldUnicode[propertyName];
		directory[propertyName + PROPERTY_NAME_UTF8_SUFFIX] = true;
	}
}
|
||
|
|
|
||
|
|
// Reads the AES extra field and promotes its embedded compression method to
// the directory (for AES entries the header's method is a placeholder).
function readExtraFieldAES(extraFieldAES, directory, compressionMethod) {
	const fieldView = getDataView$1(extraFieldAES.data);
	extraFieldAES.vendorVersion = getUint8(fieldView, 0);
	extraFieldAES.vendorId = getUint8(fieldView, 2);
	extraFieldAES.strength = getUint8(fieldView, 4);
	// Keep the method from the record header around for reference.
	extraFieldAES.originalCompressionMethod = compressionMethod;
	extraFieldAES.compressionMethod = getUint16(fieldView, 5);
	directory.compressionMethod = extraFieldAES.compressionMethod;
}
|
||
|
|
|
||
|
|
// Parses the NTFS extra field: scans its attribute list for Tag 1 and, when
// that tag carries the expected 24-byte payload, decodes three 64-bit
// little-endian timestamps onto the field object and `directory`.
function readExtraFieldNTFS(extraFieldNTFS, directory) {
	const extraFieldView = getDataView$1(extraFieldNTFS.data);
	// Skip the leading 4 bytes before the attribute list.
	let offsetExtraField = 4;
	let tag1Data;
	try {
		// Attribute list entries: 2-byte tag, 2-byte size, payload; stop at Tag 1.
		while (offsetExtraField < extraFieldNTFS.data.length && !tag1Data) {
			const tagValue = getUint16(extraFieldView, offsetExtraField);
			const attributeSize = getUint16(extraFieldView, offsetExtraField + 2);
			if (tagValue == EXTRAFIELD_TYPE_NTFS_TAG1) {
				tag1Data = extraFieldNTFS.data.slice(offsetExtraField + 4, offsetExtraField + 4 + attributeSize);
			}
			offsetExtraField += 4 + attributeSize;
		}
	} catch {
		// ignored: malformed attribute list just leaves tag1Data undefined
	}
	try {
		// Tag 1 payload must be exactly 3 x 8 bytes (mod/access/creation times).
		if (tag1Data && tag1Data.length == 24) {
			const tag1View = getDataView$1(tag1Data);
			const rawLastModDate = tag1View.getBigUint64(0, true);
			const rawLastAccessDate = tag1View.getBigUint64(8, true);
			const rawCreationDate = tag1View.getBigUint64(16, true);
			Object.assign(extraFieldNTFS, {
				rawLastModDate,
				rawLastAccessDate,
				rawCreationDate
			});
			// Convert the raw 64-bit values to Date objects (see getDateNTFS).
			const lastModDate = getDateNTFS(rawLastModDate);
			const lastAccessDate = getDateNTFS(rawLastAccessDate);
			const creationDate = getDateNTFS(rawCreationDate);
			const extraFieldData = { lastModDate, lastAccessDate, creationDate };
			Object.assign(extraFieldNTFS, extraFieldData);
			Object.assign(directory, extraFieldData);
		}
	} catch {
		// ignored: out-of-range timestamps are simply not exposed
	}
}
|
||
|
|
|
||
|
|
// Parses a unix uid/gid extra field onto `directory`. Layout read here:
// version byte, uid size + bytes, gid size + bytes, then — only when
// isInfoZip is false — an optional trailing 16-bit mode.
// NOTE(review): exact field variant semantics inferred from this parser;
// confirm against the matching writer code.
function readExtraFieldUnix(extraField, directory, isInfoZip) {
	try {
		// Copy into a fresh Uint8Array so getDataView$1 sees a whole buffer.
		const view = getDataView$1(new Uint8Array(extraField.data));
		let offset = 0;
		const version = getUint8(view, offset++);
		const uidSize = getUint8(view, offset++);
		const uidBytes = extraField.data.subarray(offset, offset + uidSize);
		offset += uidSize;
		const uid = unpackUnixId(uidBytes);
		const gidSize = getUint8(view, offset++);
		const gidBytes = extraField.data.subarray(offset, offset + gidSize);
		offset += gidSize;
		const gid = unpackUnixId(gidBytes);
		let unixMode = UNDEFINED_VALUE;
		if (!isInfoZip && offset + 2 <= extraField.data.length) {
			const base = extraField.data;
			// base.byteOffset keeps the view correct even when `data` is a
			// subarray of a larger buffer.
			const modeView = new DataView(base.buffer, base.byteOffset + offset, 2);
			unixMode = modeView.getUint16(0, true);
		}
		Object.assign(extraField, { version, uid, gid, unixMode });
		// Only propagate values that were actually present/decoded.
		if (uid !== UNDEFINED_VALUE) {
			directory.uid = uid;
		}
		if (gid !== UNDEFINED_VALUE) {
			directory.gid = gid;
		}
		if (unixMode !== UNDEFINED_VALUE) {
			directory.unixMode = unixMode;
		}
	} catch {
		// ignored: malformed fields leave directory metadata untouched
	}
}
|
||
|
|
|
||
|
|
// Decodes a variable-width (0..4 byte) little-endian uid/gid value by
// zero-padding it to 4 bytes. Inputs longer than 4 bytes make
// Uint8Array.set throw, matching the original behavior (callers catch).
function unpackUnixId(bytes) {
	const padded = new Uint8Array(4);
	padded.set(bytes, 0);
	return new DataView(padded.buffer, padded.byteOffset, 4).getUint32(0, true);
}
|
||
|
|
|
||
|
|
// Parses the extended timestamp extra field. A flags byte declares which of
// modification/access/creation times follow as 32-bit Unix timestamps. When
// no localDirectory is given (central directory record), at most the
// modification time is read.
function readExtraFieldExtendedTimestamp(extraFieldExtendedTimestamp, directory, localDirectory) {
	const extraFieldView = getDataView$1(extraFieldExtendedTimestamp.data);
	const flags = getUint8(extraFieldView, 0);
	const timeProperties = [];
	const timeRawProperties = [];
	if (localDirectory) {
		// Local header: every flagged timestamp is present, in this fixed order.
		if ((flags & 0x1) == 0x1) {
			timeProperties.push(PROPERTY_NAME_LAST_MODIFICATION_DATE);
			timeRawProperties.push(PROPERTY_NAME_RAW_LAST_MODIFICATION_DATE);
		}
		if ((flags & 0x2) == 0x2) {
			timeProperties.push(PROPERTY_NAME_LAST_ACCESS_DATE);
			timeRawProperties.push(PROPERTY_NAME_RAW_LAST_ACCESS_DATE);
		}
		if ((flags & 0x4) == 0x4) {
			timeProperties.push(PROPERTY_NAME_CREATION_DATE);
			timeRawProperties.push(PROPERTY_NAME_RAW_CREATION_DATE);
		}
	} else if (extraFieldExtendedTimestamp.data.length >= 5) {
		// Central directory record: only the modification time (if any).
		timeProperties.push(PROPERTY_NAME_LAST_MODIFICATION_DATE);
		timeRawProperties.push(PROPERTY_NAME_RAW_LAST_MODIFICATION_DATE);
	}
	// Timestamps start right after the flags byte, 4 bytes each.
	let offset = 1;
	timeProperties.forEach((propertyName, indexProperty) => {
		if (extraFieldExtendedTimestamp.data.length >= offset + 4) {
			const time = getUint32(extraFieldView, offset);
			// Unix timestamps are seconds; Date expects milliseconds.
			directory[propertyName] = extraFieldExtendedTimestamp[propertyName] = new Date(time * 1000);
			const rawPropertyName = timeRawProperties[indexProperty];
			extraFieldExtendedTimestamp[rawPropertyName] = time;
		}
		offset += 4;
	});
}
|
||
|
|
|
||
|
|
// Guards against overlapping-entry archives: computes the absolute byte
// range this entry occupies (local header start through end of compressed
// data, plus an optional data descriptor) and throws ERR_OVERLAPPING_ENTRY
// if that range starts inside any previously recorded entry's range.
async function detectOverlappingEntry({
	reader,
	fileEntry,
	offset,
	diskNumberStart,
	signature,
	compressedSize,
	uncompressedSize,
	dataOffset,
	dataDescriptor,
	extraFieldZip64,
	readRanges
}) {
	// For split (multi-disk) archives, convert the per-disk offset into a
	// global offset by summing the sizes of all preceding disk readers.
	let diskOffset = 0;
	if (diskNumberStart) {
		for (let indexReader = 0; indexReader < diskNumberStart; indexReader++) {
			const diskReader = reader.readers[indexReader];
			diskOffset += diskReader.size;
		}
	}
	// Zip64 entries use the wider data descriptor record.
	let dataDescriptorLength = 0;
	if (dataDescriptor) {
		if (extraFieldZip64) {
			dataDescriptorLength = DATA_DESCRIPTOR_RECORD_ZIP_64_LENGTH;
		} else {
			dataDescriptorLength = DATA_DESCRIPTOR_RECORD_LENGTH;
		}
	}
	if (dataDescriptorLength) {
		// The data descriptor's leading signature is optional: read the bytes
		// after the compressed data and only count the signature's extra
		// length if a signature with matching CRC and sizes is found.
		const dataDescriptorArray = await readUint8Array(reader, dataOffset + compressedSize, dataDescriptorLength + DATA_DESCRIPTOR_RECORD_SIGNATURE_LENGTH, diskNumberStart);
		const dataDescriptorSignature = getUint32(getDataView$1(dataDescriptorArray), 0) == DATA_DESCRIPTOR_RECORD_SIGNATURE;
		if (dataDescriptorSignature) {
			const readSignature = getUint32(getDataView$1(dataDescriptorArray), 4);
			let readCompressedSize;
			let readUncompressedSize;
			if (extraFieldZip64) {
				readCompressedSize = getBigUint64(getDataView$1(dataDescriptorArray), 8);
				readUncompressedSize = getBigUint64(getDataView$1(dataDescriptorArray), 16);
			} else {
				readCompressedSize = getUint32(getDataView$1(dataDescriptorArray), 8);
				readUncompressedSize = getUint32(getDataView$1(dataDescriptorArray), 12);
			}
			// Non-zipCrypto encrypted entries skip the CRC comparison —
			// presumably because they don't store a usable CRC; confirm.
			const matchSignature = (fileEntry.encrypted && !fileEntry.zipCrypto) || readSignature == signature;
			if (matchSignature &&
				readCompressedSize == compressedSize &&
				readUncompressedSize == uncompressedSize) {
				dataDescriptorLength += DATA_DESCRIPTOR_RECORD_SIGNATURE_LENGTH;
			}
		}
	}
	const range = {
		start: diskOffset + offset,
		end: diskOffset + dataOffset + compressedSize + dataDescriptorLength,
		fileEntry
	};
	// An entry whose header starts inside another entry's range overlaps it.
	for (const otherRange of readRanges) {
		if (otherRange.fileEntry != fileEntry && range.start >= otherRange.start && range.start < otherRange.end) {
			const error = new Error(ERR_OVERLAPPING_ENTRY);
			error.overlappingEntry = otherRange.fileEntry;
			throw error;
		}
	}
	readRanges.push(range);
}
|
||
|
|
|
||
|
|
// Scans backwards from `startOffset` for a 4-byte record signature. First
// tries the minimal window of `minimumBytes`, then widens the search to
// `minimumBytes + maximumLength` (capped at `startOffset`). Returns
// { offset, buffer } on a match, otherwise undefined.
async function seekSignature(reader, signature, startOffset, minimumBytes, maximumLength) {
	// Serialize the signature little-endian so it can be byte-compared.
	const signatureArray = new Uint8Array(4);
	const signatureView = getDataView$1(signatureArray);
	setUint32$1(signatureView, 0, signature);
	const maximumBytes = minimumBytes + maximumLength;
	return (await seek(minimumBytes)) || await seek(Math.min(maximumBytes, startOffset));

	// Reads the last `length` bytes before `startOffset` and scans them from
	// the end, so the match closest to the end of the data wins.
	async function seek(length) {
		const offset = startOffset - length;
		const bytes = await readUint8Array(reader, offset, length);
		for (let indexByte = bytes.length - minimumBytes; indexByte >= 0; indexByte--) {
			if (bytes[indexByte] == signatureArray[0] && bytes[indexByte + 1] == signatureArray[1] &&
				bytes[indexByte + 2] == signatureArray[2] && bytes[indexByte + 3] == signatureArray[3]) {
				return {
					offset: offset + indexByte,
					// Return the record bytes starting at the signature.
					buffer: bytes.slice(indexByte, indexByte + minimumBytes).buffer
				};
			}
		}
	}
}
|
||
|
|
|
||
|
|
// Resolves an option: per-call `options` wins; otherwise fall back to the
// reader-level default stored on `zipReader.options`.
function getOptionValue$1(zipReader, options, name) {
	const value = options[name];
	return value === UNDEFINED_VALUE ? zipReader.options[name] : value;
}
|
||
|
|
|
||
|
|
// Converts a raw MS-DOS date/time value (date in the high 16 bits, time in
// the low 16) into a local-time Date. Returns undefined if construction throws.
function getDate(timeRaw) {
	const datePart = (timeRaw & 0xffff0000) >> 16;
	const timePart = timeRaw & MAX_16_BITS;
	try {
		const year = 1980 + ((datePart & 0xFE00) >> 9); // years since 1980
		const month = ((datePart & 0x01E0) >> 5) - 1; // Date months are 0-based
		const day = datePart & 0x001F;
		const hours = (timePart & 0xF800) >> 11;
		const minutes = (timePart & 0x07E0) >> 5;
		const seconds = (timePart & 0x001F) * 2; // stored with 2-second resolution
		return new Date(year, month, day, hours, minutes, seconds, 0);
	} catch {
		// ignored: invalid raw values yield undefined
	}
}
|
||
|
|
|
||
|
|
// Converts an NTFS timestamp (BigInt count of 100-ns ticks since
// 1601-01-01 UTC) to a Date: scale ticks to milliseconds, then shift by
// the 1601→1970 epoch offset.
function getDateNTFS(timeRaw) {
	const milliseconds1601 = timeRaw / BigInt(10000);
	return new Date(Number(milliseconds1601 - BigInt(11644473600000)));
}
|
||
|
|
|
||
|
|
// Reads one unsigned byte from a DataView (endianness-independent).
function getUint8(view, offset) {
	return view.getUint8(offset);
}
|
||
|
|
|
||
|
|
// Reads an unsigned 16-bit little-endian value (ZIP fields are little-endian).
function getUint16(view, offset) {
	return view.getUint16(offset, true);
}
|
||
|
|
|
||
|
|
// Reads an unsigned 32-bit little-endian value (ZIP fields are little-endian).
function getUint32(view, offset) {
	return view.getUint32(offset, true);
}
|
||
|
|
|
||
|
|
// Reads an unsigned 64-bit little-endian value and narrows it to a Number.
// Values above Number.MAX_SAFE_INTEGER would lose precision here.
function getBigUint64(view, offset) {
	return Number(view.getBigUint64(offset, true));
}
|
||
|
|
|
||
|
|
// Writes an unsigned 32-bit value little-endian (ZIP fields are little-endian).
function setUint32$1(view, offset, value) {
	view.setUint32(offset, value, true);
}
|
||
|
|
|
||
|
|
// Creates a DataView over a typed array's own window of its backing buffer.
// The original ignored `byteOffset`/`byteLength` and viewed the whole buffer,
// which would read the wrong bytes if a subarray were ever passed; current
// callers pass whole-buffer arrays, so this fix is backward-compatible.
function getDataView$1(array) {
	return new DataView(array.buffer, array.byteOffset, array.byteLength);
}
|
||
|
|
|
||
|
|
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Error messages thrown by ZipWriter and its helpers.
const ERR_DUPLICATED_NAME = "File already exists";
const ERR_INVALID_COMMENT = "Zip file comment exceeds 64KB";
const ERR_INVALID_ENTRY_COMMENT = "File entry comment exceeds 64KB";
const ERR_INVALID_ENTRY_NAME = "File entry name exceeds 64KB";
const ERR_INVALID_VERSION = "Version exceeds 65535";
const ERR_INVALID_ENCRYPTION_STRENGTH = "The strength must equal 1, 2, or 3";
const ERR_INVALID_EXTRAFIELD_TYPE = "Extra field type exceeds 65535";
const ERR_INVALID_EXTRAFIELD_DATA = "Extra field data exceeds 64KB";
const ERR_UNSUPPORTED_FORMAT = "Zip64 is not supported (set the 'zip64' option to 'true')";
const ERR_UNDEFINED_UNCOMPRESSED_SIZE = "Undefined uncompressed size";
const ERR_ZIP_NOT_EMPTY = "Zip file not empty";
const ERR_INVALID_UID = "Invalid uid (must be integer 0..2^32-1)";
const ERR_INVALID_GID = "Invalid gid (must be integer 0..2^32-1)";
const ERR_INVALID_UNIX_MODE = "Invalid UNIX mode (must be integer 0..65535)";
const ERR_INVALID_UNIX_EXTRA_FIELD_TYPE = "Invalid unixExtraFieldType (must be 'infozip' or 'unix')";
const ERR_INVALID_MSDOS_ATTRIBUTES = "Invalid msdosAttributesRaw (must be integer 0..255)";
const ERR_INVALID_MSDOS_DATA = "Invalid msdosAttributes (must be an object with boolean flags)";

// Raw payload written for the AES extra field of encrypted entries.
// NOTE(review): byte meanings (vendor version/id "AE", strength, method)
// assumed from the layout parsed in readExtraFieldAES — confirm.
const EXTRAFIELD_DATA_AES = new Uint8Array([0x07, 0x00, 0x02, 0x00, 0x41, 0x45, 0x03, 0x00, 0x00]);
// Accepted values for the unixExtraFieldType option (validated in addFile).
const INFOZIP_EXTRA_FIELD_TYPE = "infozip";
const UNIX_EXTRA_FIELD_TYPE = "unix";

// Module-level worker accounting shared by every ZipWriter instance:
// `workers` counts in-flight add() calls; `pendingEntries` holds resolver
// callbacks of add() calls waiting for a free worker slot (see ZipWriter#add).
let workers = 0;
const pendingEntries = [];
|
||
|
|
|
||
|
|
// Incrementally writes a ZIP archive to a writer. Entries are added with
// add(), optionally removed (before close) with remove(), and the central
// directory is written by close().
class ZipWriter {

	// Wraps `writer` in a GenericWriter and initializes per-archive state.
	// `addSplitZipSignature` is true only when both availableSize and maxSize
	// are finite positive values (i.e. a split/multi-volume target).
	constructor(writer, options = {}) {
		writer = new GenericWriter(writer);
		const addSplitZipSignature =
			writer.availableSize !== UNDEFINED_VALUE && writer.availableSize > 0 && writer.availableSize !== INFINITY_VALUE &&
			writer.maxSize !== UNDEFINED_VALUE && writer.maxSize > 0 && writer.maxSize !== INFINITY_VALUE;
		Object.assign(this, {
			writer,
			addSplitZipSignature,
			options,
			config: getConfiguration(),
			// filename -> entry metadata used to build the central directory
			files: new Map(),
			// fast duplicate-name detection, reserved before addFile completes
			filenames: new Set(),
			// current write offset; can be preset via the "offset" option
			offset: options[OPTION_OFFSET] === UNDEFINED_VALUE ? writer.size || writer.writable.size || 0 : options[OPTION_OFFSET],
			pendingEntriesSize: 0,
			pendingAddFileCalls: new Set(),
			bufferedWrites: 0
		});
	}

	// Copies an existing zip (read via `reader`) verbatim to the output and
	// rebuilds this writer's entry bookkeeping from its central directory, so
	// new entries can be appended. Only valid on an empty writer.
	async prependZip(reader) {
		if (this.filenames.size) {
			throw new Error(ERR_ZIP_NOT_EMPTY);
		}
		reader = new GenericReader(reader);
		const zipReader = new ZipReader(reader.readable);
		const entries = await zipReader.getEntries();
		await zipReader.close();
		// Stream the original archive bytes through without closing our output.
		await reader.readable.pipeTo(this.writer.writable, { preventClose: true, preventAbort: true });
		this.writer.size = this.offset = reader.size;
		this.filenames = new Set(entries.map(entry => entry.filename));
		// Recreate, for each copied entry, the state addFile would have produced.
		this.files = new Map(entries.map(entry => {
			const {
				version,
				compressionMethod,
				lastModDate,
				lastAccessDate,
				creationDate,
				rawFilename,
				bitFlag,
				encrypted,
				uncompressedSize,
				compressedSize,
				diskOffset,
				diskNumber,
				zip64
			} = entry;
			let {
				rawExtraFieldZip64,
				rawExtraFieldAES,
				rawExtraFieldExtendedTimestamp,
				rawExtraFieldNTFS,
				rawExtraFieldUnix,
				rawExtraField,
			} = entry;
			const { level, languageEncodingFlag, dataDescriptor } = bitFlag;
			// Missing raw extra fields default to empty arrays.
			rawExtraFieldZip64 = rawExtraFieldZip64 || new Uint8Array();
			rawExtraFieldAES = rawExtraFieldAES || new Uint8Array();
			rawExtraFieldExtendedTimestamp = rawExtraFieldExtendedTimestamp || new Uint8Array();
			rawExtraFieldNTFS = rawExtraFieldNTFS || new Uint8Array();
			rawExtraFieldUnix = entry.rawExtraFieldUnix || new Uint8Array();
			rawExtraField = rawExtraField || new Uint8Array();
			const extraFieldLength = getLength(rawExtraFieldZip64, rawExtraFieldAES, rawExtraFieldExtendedTimestamp, rawExtraFieldNTFS, rawExtraFieldUnix, rawExtraField);
			// Which fields overflow 32 bits and therefore need Zip64 records.
			const zip64UncompressedSize = zip64 && uncompressedSize > MAX_32_BITS;
			const zip64CompressedSize = zip64 && compressedSize > MAX_32_BITS;
			const {
				headerArray,
				headerView
			} = getHeaderArrayData({
				version,
				bitFlag: getBitFlag(level, languageEncodingFlag, dataDescriptor, encrypted, compressionMethod),
				compressionMethod,
				uncompressedSize,
				compressedSize,
				lastModDate,
				rawFilename,
				zip64CompressedSize,
				zip64UncompressedSize,
				extraFieldLength
			});
			Object.assign(entry, {
				zip64UncompressedSize,
				zip64CompressedSize,
				zip64Offset: zip64 && this.offset - diskOffset > MAX_32_BITS,
				zip64DiskNumberStart: zip64 && diskNumber > MAX_16_BITS,
				rawExtraFieldZip64,
				rawExtraFieldAES,
				rawExtraFieldExtendedTimestamp,
				rawExtraFieldNTFS,
				rawExtraFieldUnix,
				rawExtraField,
				extendedTimestamp: rawExtraFieldExtendedTimestamp.length > 0 || rawExtraFieldNTFS.length > 0,
				// flags byte for the extended timestamp field: mtime always, plus atime/ctime when known
				extraFieldExtendedTimestampFlag: 0x1 + (lastAccessDate ? 0x2 : 0) + (creationDate ? 0x4 : 0),
				headerArray,
				headerView
			});
			return [entry.filename, entry];
		}));
	}

	// Adds one entry. Concurrency is throttled by the module-level `workers`
	// counter / `pendingEntries` queue shared across all ZipWriter instances.
	// The name is reserved in `filenames` up front so concurrent add() calls
	// with the same name fail fast with ERR_DUPLICATED_NAME.
	async add(name = "", reader, options = {}) {
		const zipWriter = this;
		const {
			pendingAddFileCalls,
			config
		} = zipWriter;
		if (workers < config.maxWorkers) {
			workers++;
		} else {
			// Wait until a running add() finishes and resolves us from `pendingEntries`.
			await new Promise(resolve => pendingEntries.push(resolve));
		}
		let promiseAddFile;
		try {
			name = name.trim();
			if (zipWriter.filenames.has(name)) {
				throw new Error(ERR_DUPLICATED_NAME);
			}
			zipWriter.filenames.add(name);
			promiseAddFile = addFile(zipWriter, name, reader, options);
			pendingAddFileCalls.add(promiseAddFile);
			return await promiseAddFile;
		} catch (error) {
			// Release the reserved name so the caller may retry.
			zipWriter.filenames.delete(name);
			throw error;
		} finally {
			pendingAddFileCalls.delete(promiseAddFile);
			// Hand the worker slot to a queued add(), or free it.
			const pendingEntry = pendingEntries.shift();
			if (pendingEntry) {
				pendingEntry();
			} else {
				workers--;
			}
		}
	}

	// Removes an entry (by name or entry object) from the central directory
	// bookkeeping; already-written bytes stay in the output. Returns whether
	// an entry was removed.
	remove(entry) {
		const { filenames, files } = this;
		if (typeof entry == "string") {
			entry = files.get(entry);
		}
		if (entry && entry.filename !== UNDEFINED_VALUE) {
			const { filename } = entry;
			if (filenames.has(filename) && files.has(filename)) {
				filenames.delete(filename);
				files.delete(filename);
				return true;
			}
		}
		return false;
	}

	// Waits for all in-flight add() calls, writes the central directory (and
	// optional archive comment), closes the underlying writable unless
	// `preventClose` is set, and returns the writer's data (or the writable).
	async close(comment = new Uint8Array(), options = {}) {
		const zipWriter = this;
		const { pendingAddFileCalls, writer } = this;
		const { writable } = writer;
		// Loop: settled calls may themselves have scheduled more add() calls.
		while (pendingAddFileCalls.size) {
			await Promise.allSettled(Array.from(pendingAddFileCalls));
		}
		await closeFile(zipWriter, comment, options);
		const preventClose = getOptionValue(zipWriter, options, OPTION_PREVENT_CLOSE);
		if (!preventClose) {
			await writable.getWriter().close();
		}
		return writer.getData ? writer.getData() : writable;
	}
}
|
||
|
|
|
||
|
|
// Streaming facade over ZipWriter: exposes the zipped output as `readable`
// and lets callers pipe per-entry data in via transform()/writable().
class ZipWriterStream {

	// Creates the identity TransformStream whose writable side feeds the
	// ZipWriter and whose readable side carries the zipped bytes.
	constructor(options = {}) {
		const { readable, writable } = new TransformStream();
		this.readable = readable;
		this.zipWriter = new ZipWriter(writable, options);
	}

	// Returns a { readable, writable } pair for piping one entry named `path`;
	// the zip is finalized when the returned writable side is closed.
	transform(path) {
		const { readable, writable } = new TransformStream({
			// Fix: return the close() promise so the stream's flush only
			// settles once the zip is fully finalized. The original dropped
			// it ("this.zipWriter.close();"), letting the pipeline complete
			// before finalization and swallowing any finalization error.
			flush: () => this.zipWriter.close()
		});
		// Intentionally not awaited: the entry is consumed as data flows in.
		void this.zipWriter.add(path, readable);
		return { readable: this.readable, writable };
	}

	// Returns a WritableStream for one entry named `path` (no auto-close of the zip).
	writable(path) {
		const { readable, writable } = new TransformStream();
		// Intentionally not awaited: the entry is consumed as data flows in.
		void this.zipWriter.add(path, readable);
		return writable;
	}

	// Finalizes the zip; delegates to ZipWriter#close.
	close(comment = UNDEFINED_VALUE, options = {}) {
		return this.zipWriter.close(comment, options);
	}
}
|
||
|
|
|
||
|
|
async function addFile(zipWriter, name, reader, options) {
|
||
|
|
name = name.trim();
|
||
|
|
let msDosCompatible = getOptionValue(zipWriter, options, PROPERTY_NAME_MS_DOS_COMPATIBLE);
|
||
|
|
let versionMadeBy = getOptionValue(zipWriter, options, PROPERTY_NAME_VERSION_MADE_BY, msDosCompatible ? 20 : 768);
|
||
|
|
const executable = getOptionValue(zipWriter, options, PROPERTY_NAME_EXECUTABLE);
|
||
|
|
const uid = getOptionValue(zipWriter, options, PROPERTY_NAME_UID);
|
||
|
|
const gid = getOptionValue(zipWriter, options, PROPERTY_NAME_GID);
|
||
|
|
let unixMode = getOptionValue(zipWriter, options, PROPERTY_NAME_UNIX_MODE);
|
||
|
|
const unixExtraFieldType = getOptionValue(zipWriter, options, OPTION_UNIX_EXTRA_FIELD_TYPE);
|
||
|
|
let setuid = getOptionValue(zipWriter, options, PROPERTY_NAME_SETUID);
|
||
|
|
let setgid = getOptionValue(zipWriter, options, PROPERTY_NAME_SETGID);
|
||
|
|
let sticky = getOptionValue(zipWriter, options, PROPERTY_NAME_STICKY);
|
||
|
|
if (uid !== UNDEFINED_VALUE && (uid < 0 || uid > MAX_32_BITS)) {
|
||
|
|
throw new Error(ERR_INVALID_UID);
|
||
|
|
}
|
||
|
|
if (gid !== UNDEFINED_VALUE && (gid < 0 || gid > MAX_32_BITS)) {
|
||
|
|
throw new Error(ERR_INVALID_GID);
|
||
|
|
}
|
||
|
|
if (unixMode !== UNDEFINED_VALUE && (unixMode < 0 || unixMode > MAX_16_BITS)) {
|
||
|
|
throw new Error(ERR_INVALID_UNIX_MODE);
|
||
|
|
}
|
||
|
|
if (unixExtraFieldType !== UNDEFINED_VALUE && unixExtraFieldType !== INFOZIP_EXTRA_FIELD_TYPE && unixExtraFieldType !== UNIX_EXTRA_FIELD_TYPE) {
|
||
|
|
throw new Error(ERR_INVALID_UNIX_EXTRA_FIELD_TYPE);
|
||
|
|
}
|
||
|
|
let msdosAttributesRaw = getOptionValue(zipWriter, options, PROPERTY_NAME_MSDOS_ATTRIBUTES_RAW);
|
||
|
|
let msdosAttributes = getOptionValue(zipWriter, options, PROPERTY_NAME_MSDOS_ATTRIBUTES);
|
||
|
|
const hasUnixMetadata = uid !== UNDEFINED_VALUE || gid !== UNDEFINED_VALUE || unixMode !== UNDEFINED_VALUE || unixExtraFieldType;
|
||
|
|
const hasMsDosProvided = msdosAttributesRaw !== UNDEFINED_VALUE || msdosAttributes !== UNDEFINED_VALUE;
|
||
|
|
if (hasUnixMetadata) {
|
||
|
|
msDosCompatible = false;
|
||
|
|
versionMadeBy = (versionMadeBy & MAX_16_BITS) | (3 << 8);
|
||
|
|
} else if (hasMsDosProvided) {
|
||
|
|
msDosCompatible = true;
|
||
|
|
versionMadeBy = (versionMadeBy & MAX_8_BITS);
|
||
|
|
}
|
||
|
|
if (msdosAttributesRaw !== UNDEFINED_VALUE && (msdosAttributesRaw < 0 || msdosAttributesRaw > MAX_8_BITS)) {
|
||
|
|
throw new Error(ERR_INVALID_MSDOS_ATTRIBUTES);
|
||
|
|
}
|
||
|
|
if (msdosAttributes && typeof msdosAttributes !== OBJECT_TYPE) {
|
||
|
|
throw new Error(ERR_INVALID_MSDOS_DATA);
|
||
|
|
}
|
||
|
|
if (versionMadeBy > MAX_16_BITS) {
|
||
|
|
throw new Error(ERR_INVALID_VERSION);
|
||
|
|
}
|
||
|
|
let externalFileAttributes = getOptionValue(zipWriter, options, PROPERTY_NAME_EXTERNAL_FILE_ATTRIBUTES, 0);
|
||
|
|
if (!options[PROPERTY_NAME_DIRECTORY] && name.endsWith(DIRECTORY_SIGNATURE)) {
|
||
|
|
options[PROPERTY_NAME_DIRECTORY] = true;
|
||
|
|
}
|
||
|
|
const directory = getOptionValue(zipWriter, options, PROPERTY_NAME_DIRECTORY);
|
||
|
|
if (directory) {
|
||
|
|
if (!name.endsWith(DIRECTORY_SIGNATURE)) {
|
||
|
|
name += DIRECTORY_SIGNATURE;
|
||
|
|
}
|
||
|
|
if (externalFileAttributes === 0) {
|
||
|
|
externalFileAttributes = FILE_ATTR_MSDOS_DIR_MASK;
|
||
|
|
if (!msDosCompatible) {
|
||
|
|
externalFileAttributes |= (FILE_ATTR_UNIX_TYPE_DIR | FILE_ATTR_UNIX_EXECUTABLE_MASK | FILE_ATTR_UNIX_DEFAULT_MASK) << 16;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
} else if (!msDosCompatible && externalFileAttributes === 0) {
|
||
|
|
if (executable) {
|
||
|
|
externalFileAttributes = (FILE_ATTR_UNIX_EXECUTABLE_MASK | FILE_ATTR_UNIX_DEFAULT_MASK) << 16;
|
||
|
|
} else {
|
||
|
|
externalFileAttributes = FILE_ATTR_UNIX_DEFAULT_MASK << 16;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
let unixExternalUpper;
|
||
|
|
if (!msDosCompatible) {
|
||
|
|
unixExternalUpper = (externalFileAttributes >> 16) & MAX_16_BITS;
|
||
|
|
unixMode = unixMode === UNDEFINED_VALUE ? unixExternalUpper : (unixMode & MAX_16_BITS);
|
||
|
|
if (setuid) {
|
||
|
|
unixMode |= FILE_ATTR_UNIX_SETUID_MASK;
|
||
|
|
} else {
|
||
|
|
setuid = Boolean(unixMode & FILE_ATTR_UNIX_SETUID_MASK);
|
||
|
|
}
|
||
|
|
if (setgid) {
|
||
|
|
unixMode |= FILE_ATTR_UNIX_SETGID_MASK;
|
||
|
|
} else {
|
||
|
|
setgid = Boolean(unixMode & FILE_ATTR_UNIX_SETGID_MASK);
|
||
|
|
}
|
||
|
|
if (sticky) {
|
||
|
|
unixMode |= FILE_ATTR_UNIX_STICKY_MASK;
|
||
|
|
} else {
|
||
|
|
sticky = Boolean(unixMode & FILE_ATTR_UNIX_STICKY_MASK);
|
||
|
|
}
|
||
|
|
if (directory) {
|
||
|
|
unixMode |= FILE_ATTR_UNIX_TYPE_DIR;
|
||
|
|
}
|
||
|
|
externalFileAttributes = ((unixMode & MAX_16_BITS) << 16) | (externalFileAttributes & MAX_8_BITS);
|
||
|
|
}
|
||
|
|
({ msdosAttributesRaw, msdosAttributes } = normalizeMsdosAttributes(msdosAttributesRaw, msdosAttributes));
|
||
|
|
if (hasMsDosProvided) {
|
||
|
|
externalFileAttributes = (externalFileAttributes & MAX_32_BITS) | (msdosAttributesRaw & MAX_8_BITS);
|
||
|
|
}
|
||
|
|
const encode = getOptionValue(zipWriter, options, OPTION_ENCODE_TEXT, encodeText);
|
||
|
|
let rawFilename = encode(name);
|
||
|
|
if (rawFilename === UNDEFINED_VALUE) {
|
||
|
|
rawFilename = encodeText(name);
|
||
|
|
}
|
||
|
|
if (getLength(rawFilename) > MAX_16_BITS) {
|
||
|
|
throw new Error(ERR_INVALID_ENTRY_NAME);
|
||
|
|
}
|
||
|
|
const comment = options[PROPERTY_NAME_COMMENT] || "";
|
||
|
|
let rawComment = encode(comment);
|
||
|
|
if (rawComment === UNDEFINED_VALUE) {
|
||
|
|
rawComment = encodeText(comment);
|
||
|
|
}
|
||
|
|
if (getLength(rawComment) > MAX_16_BITS) {
|
||
|
|
throw new Error(ERR_INVALID_ENTRY_COMMENT);
|
||
|
|
}
|
||
|
|
const version = getOptionValue(zipWriter, options, PROPERTY_NAME_VERSION, VERSION_DEFLATE);
|
||
|
|
if (version > MAX_16_BITS) {
|
||
|
|
throw new Error(ERR_INVALID_VERSION);
|
||
|
|
}
|
||
|
|
const lastModDate = getOptionValue(zipWriter, options, PROPERTY_NAME_LAST_MODIFICATION_DATE, new Date());
|
||
|
|
const lastAccessDate = getOptionValue(zipWriter, options, PROPERTY_NAME_LAST_ACCESS_DATE);
|
||
|
|
const creationDate = getOptionValue(zipWriter, options, PROPERTY_NAME_CREATION_DATE);
|
||
|
|
const internalFileAttributes = getOptionValue(zipWriter, options, PROPERTY_NAME_INTERNAL_FILE_ATTRIBUTES, 0);
|
||
|
|
const passThrough = getOptionValue(zipWriter, options, OPTION_PASS_THROUGH);
|
||
|
|
let password, rawPassword;
|
||
|
|
if (!passThrough) {
|
||
|
|
password = getOptionValue(zipWriter, options, OPTION_PASSWORD);
|
||
|
|
rawPassword = getOptionValue(zipWriter, options, OPTION_RAW_PASSWORD);
|
||
|
|
}
|
||
|
|
const encryptionStrength = getOptionValue(zipWriter, options, OPTION_ENCRYPTION_STRENGTH, 3);
|
||
|
|
const zipCrypto = getOptionValue(zipWriter, options, PROPERTY_NAME_ZIPCRYPTO);
|
||
|
|
const extendedTimestamp = getOptionValue(zipWriter, options, OPTION_EXTENDED_TIMESTAMP, true);
|
||
|
|
const keepOrder = getOptionValue(zipWriter, options, OPTION_KEEP_ORDER, true);
|
||
|
|
const useWebWorkers = getOptionValue(zipWriter, options, OPTION_USE_WEB_WORKERS);
|
||
|
|
const transferStreams = getOptionValue(zipWriter, options, OPTION_TRANSFER_STREAMS, true);
|
||
|
|
const bufferedWrite = getOptionValue(zipWriter, options, OPTION_BUFFERED_WRITE);
|
||
|
|
const createTempStream = getOptionValue(zipWriter, options, OPTION_CREATE_TEMP_STREAM);
|
||
|
|
const dataDescriptorSignature = getOptionValue(zipWriter, options, OPTION_DATA_DESCRIPTOR_SIGNATURE, false);
|
||
|
|
const signal = getOptionValue(zipWriter, options, OPTION_SIGNAL);
|
||
|
|
const useUnicodeFileNames = getOptionValue(zipWriter, options, OPTION_USE_UNICODE_FILE_NAMES, true);
|
||
|
|
const compressionMethod = getOptionValue(zipWriter, options, PROPERTY_NAME_COMPRESSION_METHOD);
|
||
|
|
let level = getOptionValue(zipWriter, options, OPTION_LEVEL);
|
||
|
|
let useCompressionStream = getOptionValue(zipWriter, options, OPTION_USE_COMPRESSION_STREAM);
|
||
|
|
let dataDescriptor = getOptionValue(zipWriter, options, OPTION_DATA_DESCRIPTOR);
|
||
|
|
if (bufferedWrite && dataDescriptor === UNDEFINED_VALUE) {
|
||
|
|
dataDescriptor = false;
|
||
|
|
}
|
||
|
|
if (dataDescriptor === UNDEFINED_VALUE || zipCrypto) {
|
||
|
|
dataDescriptor = true;
|
||
|
|
}
|
||
|
|
if (level !== UNDEFINED_VALUE && level != 6) {
|
||
|
|
useCompressionStream = false;
|
||
|
|
}
|
||
|
|
if (!useCompressionStream && (zipWriter.config.CompressionStream === UNDEFINED_VALUE && zipWriter.config.CompressionStreamZlib === UNDEFINED_VALUE)) {
|
||
|
|
level = 0;
|
||
|
|
}
|
||
|
|
let zip64 = getOptionValue(zipWriter, options, PROPERTY_NAME_ZIP64);
|
||
|
|
if (!zipCrypto && (password !== UNDEFINED_VALUE || rawPassword !== UNDEFINED_VALUE) && !(encryptionStrength >= 1 && encryptionStrength <= 3)) {
|
||
|
|
throw new Error(ERR_INVALID_ENCRYPTION_STRENGTH);
|
||
|
|
}
|
||
|
|
let rawExtraField = new Uint8Array();
|
||
|
|
const extraField = options[PROPERTY_NAME_EXTRA_FIELD];
|
||
|
|
if (extraField) {
|
||
|
|
let extraFieldSize = 0;
|
||
|
|
let offset = 0;
|
||
|
|
extraField.forEach(data => extraFieldSize += 4 + getLength(data));
|
||
|
|
rawExtraField = new Uint8Array(extraFieldSize);
|
||
|
|
extraField.forEach((data, type) => {
|
||
|
|
if (type > MAX_16_BITS) {
|
||
|
|
throw new Error(ERR_INVALID_EXTRAFIELD_TYPE);
|
||
|
|
}
|
||
|
|
if (getLength(data) > MAX_16_BITS) {
|
||
|
|
throw new Error(ERR_INVALID_EXTRAFIELD_DATA);
|
||
|
|
}
|
||
|
|
arraySet(rawExtraField, new Uint16Array([type]), offset);
|
||
|
|
arraySet(rawExtraField, new Uint16Array([getLength(data)]), offset + 2);
|
||
|
|
arraySet(rawExtraField, data, offset + 4);
|
||
|
|
offset += 4 + getLength(data);
|
||
|
|
});
|
||
|
|
}
|
||
|
|
let maximumCompressedSize = 0;
|
||
|
|
let maximumEntrySize = 0;
|
||
|
|
let uncompressedSize = 0;
|
||
|
|
if (passThrough) {
|
||
|
|
uncompressedSize = options[PROPERTY_NAME_UNCOMPRESSED_SIZE];
|
||
|
|
if (uncompressedSize === UNDEFINED_VALUE) {
|
||
|
|
throw new Error(ERR_UNDEFINED_UNCOMPRESSED_SIZE);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
const zip64Enabled = zip64 === true;
|
||
|
|
if (reader) {
|
||
|
|
reader = new GenericReader(reader);
|
||
|
|
await initStream(reader);
|
||
|
|
if (!passThrough) {
|
||
|
|
if (reader.size === UNDEFINED_VALUE) {
|
||
|
|
dataDescriptor = true;
|
||
|
|
if (zip64 || zip64 === UNDEFINED_VALUE) {
|
||
|
|
zip64 = true;
|
||
|
|
uncompressedSize = maximumCompressedSize = MAX_32_BITS + 1;
|
||
|
|
}
|
||
|
|
} else {
|
||
|
|
options.uncompressedSize = uncompressedSize = reader.size;
|
||
|
|
maximumCompressedSize = getMaximumCompressedSize(uncompressedSize);
|
||
|
|
}
|
||
|
|
} else {
|
||
|
|
options.uncompressedSize = uncompressedSize;
|
||
|
|
maximumCompressedSize = getMaximumCompressedSize(uncompressedSize);
|
||
|
|
}
|
||
|
|
}
|
||
|
|
const { diskOffset, diskNumber } = zipWriter.writer;
|
||
|
|
const zip64UncompressedSize = zip64Enabled || uncompressedSize > MAX_32_BITS;
|
||
|
|
const zip64CompressedSize = zip64Enabled || maximumCompressedSize > MAX_32_BITS;
|
||
|
|
if (zip64UncompressedSize || zip64CompressedSize) {
|
||
|
|
if (zip64 === false) {
|
||
|
|
throw new Error(ERR_UNSUPPORTED_FORMAT);
|
||
|
|
} else {
|
||
|
|
zip64 = true;
|
||
|
|
}
|
||
|
|
}
|
||
|
|
zip64 = zip64 || false;
|
||
|
|
const encrypted = getOptionValue(zipWriter, options, PROPERTY_NAME_ENCRYPTED);
|
||
|
|
options = Object.assign({}, options, {
|
||
|
|
rawFilename,
|
||
|
|
rawComment,
|
||
|
|
version,
|
||
|
|
versionMadeBy,
|
||
|
|
lastModDate,
|
||
|
|
lastAccessDate,
|
||
|
|
creationDate,
|
||
|
|
rawExtraField,
|
||
|
|
zip64,
|
||
|
|
zip64UncompressedSize,
|
||
|
|
zip64CompressedSize,
|
||
|
|
password,
|
||
|
|
rawPassword,
|
||
|
|
level,
|
||
|
|
useWebWorkers,
|
||
|
|
transferStreams,
|
||
|
|
encryptionStrength,
|
||
|
|
extendedTimestamp,
|
||
|
|
zipCrypto,
|
||
|
|
bufferedWrite,
|
||
|
|
createTempStream,
|
||
|
|
keepOrder,
|
||
|
|
useUnicodeFileNames,
|
||
|
|
dataDescriptor,
|
||
|
|
dataDescriptorSignature,
|
||
|
|
signal,
|
||
|
|
msDosCompatible,
|
||
|
|
internalFileAttribute: internalFileAttributes,
|
||
|
|
internalFileAttributes,
|
||
|
|
externalFileAttribute: externalFileAttributes,
|
||
|
|
externalFileAttributes,
|
||
|
|
useCompressionStream,
|
||
|
|
passThrough,
|
||
|
|
encrypted: Boolean((password && getLength(password)) || (rawPassword && getLength(rawPassword))) || (passThrough && encrypted),
|
||
|
|
signature: options[PROPERTY_NAME_SIGNATURE],
|
||
|
|
compressionMethod,
|
||
|
|
uncompressedSize,
|
||
|
|
offset: zipWriter.offset - diskOffset,
|
||
|
|
diskNumberStart: diskNumber,
|
||
|
|
uid,
|
||
|
|
gid,
|
||
|
|
setuid,
|
||
|
|
setgid,
|
||
|
|
sticky,
|
||
|
|
unixMode,
|
||
|
|
msdosAttributesRaw,
|
||
|
|
msdosAttributes,
|
||
|
|
unixExternalUpper
|
||
|
|
});
|
||
|
|
const headerInfo = getHeaderInfo(options);
|
||
|
|
const dataDescriptorInfo = getDataDescriptorInfo(options);
|
||
|
|
const metadataSize = getLength(headerInfo.localHeaderArray, dataDescriptorInfo.dataDescriptorArray);
|
||
|
|
maximumEntrySize = metadataSize + maximumCompressedSize;
|
||
|
|
if (zipWriter.options[OPTION_USDZ]) {
|
||
|
|
maximumEntrySize += maximumEntrySize + 64;
|
||
|
|
}
|
||
|
|
zipWriter.pendingEntriesSize += maximumEntrySize;
|
||
|
|
let fileEntry;
|
||
|
|
try {
|
||
|
|
fileEntry = await getFileEntry(zipWriter, name, reader, { headerInfo, dataDescriptorInfo, metadataSize }, options);
|
||
|
|
} finally {
|
||
|
|
zipWriter.pendingEntriesSize -= maximumEntrySize;
|
||
|
|
}
|
||
|
|
Object.assign(fileEntry, { name, comment, extraField });
|
||
|
|
return new Entry(fileEntry);
|
||
|
|
}
|
||
|
|
|
||
|
|
// Writes one entry (local header + data [+ data descriptor]) into the archive,
// either directly to the shared writer or through a buffered temporary stream,
// and returns the resulting file entry object.
//
// Two write paths:
//  - direct: the shared writer lock is taken and the entry is streamed in place;
//  - buffered: data is first compressed into a TransformStream (or a temp stream
//    from options.createTempStream), then copied to the shared writer once the
//    previous entry has finished, so entries can be produced concurrently.
//
// On failure while entry bytes were being written, the archive is flagged as
// corrupted and the zipWriter offset is adjusted to account for the partial data.
async function getFileEntry(zipWriter, name, reader, entryInfo, options) {
	const {
		files,
		writer
	} = zipWriter;
	const {
		keepOrder,
		dataDescriptor,
		signal
	} = options;
	const {
		headerInfo
	} = entryInfo;
	const usdz = zipWriter.options[OPTION_USDZ];
	// Last entry added so far; used to serialize entries when keepOrder is set.
	const previousFileEntry = Array.from(files.values()).pop();
	let fileEntry = {};
	let bufferedWrite;
	let releaseLockWriter;
	let releaseLockCurrentFileEntry;
	// Flags tracking whether entry bytes were in flight when an error occurred.
	let writingBufferedEntryData;
	let writingEntryData;
	let fileWriter;
	// Register the (placeholder) entry immediately so ordering locks can chain on it.
	files.set(name, fileEntry);
	try {
		let lockPreviousFileEntry;
		if (keepOrder) {
			lockPreviousFileEntry = previousFileEntry && previousFileEntry.lock;
			requestLockCurrentFileEntry();
		}
		// Buffer the entry when direct in-place writing is not possible (explicit
		// bufferedWrite, unordered mode, writer busy, or no data descriptor —
		// sizes must be known before the header is emitted). USDZ requires
		// direct writes because of its 64-byte alignment padding.
		if ((options.bufferedWrite || !keepOrder || zipWriter.writerLocked || zipWriter.bufferedWrites || !dataDescriptor) && !usdz) {
			if (options.createTempStream) {
				fileWriter = await options.createTempStream();
			} else {
				fileWriter = new TransformStream(UNDEFINED_VALUE, UNDEFINED_VALUE, { highWaterMark: INFINITY_VALUE });
			}
			fileWriter.size = 0;
			bufferedWrite = true;
			zipWriter.bufferedWrites++;
			await initStream(writer);
		} else {
			// Direct write: take exclusive ownership of the shared writer.
			fileWriter = writer;
			await requestLockWriter();
		}
		await initStream(fileWriter);
		const { writable, diskOffset } = writer;
		// First entry of a split (multi-disk) archive starts with a 4-byte signature.
		if (zipWriter.addSplitZipSignature) {
			delete zipWriter.addSplitZipSignature;
			const signatureArray = new Uint8Array(4);
			const signatureArrayView = getDataView(signatureArray);
			setUint32(signatureArrayView, 0, SPLIT_ZIP_FILE_SIGNATURE);
			await writeData(writer, signatureArray);
			zipWriter.offset += 4;
		}
		if (usdz) {
			appendExtraFieldUSDZ(entryInfo, zipWriter.offset - diskOffset);
		}
		const {
			localHeaderView,
			localHeaderArray
		} = headerInfo;
		if (!bufferedWrite) {
			// Wait for the previous entry and move to the next disk if the
			// header would not fit on the current one.
			await lockPreviousFileEntry;
			await skipDiskIfNeeded(writable);
		}
		const { diskNumber } = writer;
		fileEntry.diskNumberStart = diskNumber;
		if (!bufferedWrite) {
			writingEntryData = true;
			await writeData(fileWriter, localHeaderArray);
		}
		fileEntry = await createFileEntry(reader, fileWriter, fileEntry, entryInfo, zipWriter.config, options);
		if (!bufferedWrite) {
			writingEntryData = false;
		}
		files.set(name, fileEntry);
		fileEntry.filename = name;
		if (bufferedWrite) {
			// Flush the temp stream, wait our turn, then replay header + data
			// into the real writer with the now-known sizes patched in.
			await Promise.all([fileWriter.writable.getWriter().close(), lockPreviousFileEntry]);
			await requestLockWriter();
			writingBufferedEntryData = true;
			fileEntry.diskNumberStart = writer.diskNumber;
			fileEntry.offset = zipWriter.offset - writer.diskOffset;
			updateLocalHeader(fileEntry, localHeaderView, options);
			await skipDiskIfNeeded(writable);
			await writeData(writer, localHeaderArray);
			await fileWriter.readable.pipeTo(writable, { preventClose: true, preventAbort: true, signal });
			writer.size += fileWriter.size;
			writingBufferedEntryData = false;
		} else {
			fileEntry.offset = zipWriter.offset - diskOffset;
		}
		zipWriter.offset += fileEntry.size;
		return fileEntry;
	} catch (error) {
		if (writingBufferedEntryData || writingEntryData) {
			// Partial entry bytes hit the archive: mark it corrupted and tag the error.
			zipWriter.hasCorruptedEntries = true;
			if (error) {
				try {
					error.corruptedEntry = true;
				} catch {
					// ignored
				}
			}
			if (bufferedWrite) {
				zipWriter.offset += fileWriter.size;
			} else {
				// Direct write: fileWriter IS the shared writer, whose size
				// reflects everything written so far.
				zipWriter.offset = fileWriter.size;
			}
		}
		files.delete(name);
		throw error;
	} finally {
		if (bufferedWrite) {
			zipWriter.bufferedWrites--;
		}
		if (releaseLockCurrentFileEntry) {
			releaseLockCurrentFileEntry();
		}
		if (releaseLockWriter) {
			releaseLockWriter();
		}
	}

	// Creates the promise later entries await on when keepOrder is set.
	function requestLockCurrentFileEntry() {
		fileEntry.lock = new Promise(resolve => releaseLockCurrentFileEntry = resolve);
	}

	// Queues on the shared-writer lock chain and installs our own release hook.
	async function requestLockWriter() {
		zipWriter.writerLocked = true;
		const { lockWriter } = zipWriter;
		zipWriter.lockWriter = new Promise(resolve => releaseLockWriter = () => {
			zipWriter.writerLocked = false;
			resolve();
		});
		await lockWriter;
	}

	// Split archives: if the local header would overflow the current disk,
	// force a disk change by exhausting availableSize and writing zero bytes.
	async function skipDiskIfNeeded(writable) {
		if (getLength(headerInfo.localHeaderArray) > writer.availableSize) {
			writer.availableSize = 0;
			await writeData(writable, new Uint8Array());
		}
	}
}
|
||
|
|
|
||
|
|
// Compresses/encrypts the entry data (via runWorker), writes the optional data
// descriptor, and assembles the full file entry metadata object.
//
// - reader: wrapped input reader (may be absent for zero-length entries).
// - writer: destination stream (shared writer or buffered temp stream).
// - entryInfo: { headerInfo, dataDescriptorInfo, metadataSize } built upfront.
// - options: fully resolved per-entry options (see the caller).
//
// In passThrough mode the data is copied as-is and sizes/signature come from
// the options instead of being computed here.
async function createFileEntry(reader, writer, { diskNumberStart, lock }, entryInfo, config, options) {
	const {
		headerInfo,
		dataDescriptorInfo,
		metadataSize
	} = entryInfo;
	const {
		headerArray,
		headerView,
		lastModDate,
		rawLastModDate,
		encrypted,
		compressed,
		version,
		compressionMethod,
		rawExtraFieldZip64,
		localExtraFieldZip64Length,
		rawExtraFieldExtendedTimestamp,
		extraFieldExtendedTimestampFlag,
		rawExtraFieldNTFS,
		rawExtraFieldUnix,
		rawExtraFieldAES,
	} = headerInfo;
	const { dataDescriptorArray } = dataDescriptorInfo;
	const {
		rawFilename,
		lastAccessDate,
		creationDate,
		password,
		rawPassword,
		level,
		zip64,
		zip64UncompressedSize,
		zip64CompressedSize,
		zipCrypto,
		dataDescriptor,
		directory,
		executable,
		versionMadeBy,
		rawComment,
		rawExtraField,
		useWebWorkers,
		transferStreams,
		onstart,
		onprogress,
		onend,
		signal,
		encryptionStrength,
		extendedTimestamp,
		msDosCompatible,
		internalFileAttributes,
		externalFileAttributes,
		uid,
		gid,
		unixMode,
		setuid,
		setgid,
		sticky,
		unixExternalUpper,
		msdosAttributesRaw,
		msdosAttributes,
		useCompressionStream,
		passThrough
	} = options;
	// Base metadata object; compression results are merged in at the end.
	const fileEntry = {
		lock,
		versionMadeBy,
		zip64,
		directory: Boolean(directory),
		executable: Boolean(executable),
		filenameUTF8: true,
		rawFilename,
		commentUTF8: true,
		rawComment,
		rawExtraFieldZip64,
		localExtraFieldZip64Length,
		rawExtraFieldExtendedTimestamp,
		rawExtraFieldNTFS,
		rawExtraFieldUnix,
		rawExtraFieldAES,
		rawExtraField,
		extendedTimestamp,
		msDosCompatible,
		internalFileAttributes,
		externalFileAttributes,
		diskNumberStart,
		uid,
		gid,
		unixMode,
		setuid,
		setgid,
		sticky,
		unixExternalUpper,
		msdosAttributesRaw,
		msdosAttributes
	};
	let {
		signature,
		uncompressedSize
	} = options;
	let compressedSize = 0;
	if (!passThrough) {
		// Sizes will be measured from the actual stream below.
		uncompressedSize = 0;
	}
	const { writable } = writer;
	if (reader) {
		reader.chunkSize = getChunkSize(config);
		const readable = reader.readable;
		const size = reader.size;
		const workerOptions = {
			options: {
				codecType: CODEC_DEFLATE,
				level,
				rawPassword,
				password,
				encryptionStrength,
				zipCrypto: encrypted && zipCrypto,
				// Traditional ZipCrypto verifies the password against a byte of
				// the DOS timestamp stored in the header.
				passwordVerification: encrypted && zipCrypto && (rawLastModDate >> 8) & MAX_8_BITS,
				signed: !passThrough,
				compressed: compressed && !passThrough,
				encrypted: encrypted && !passThrough,
				useWebWorkers,
				useCompressionStream,
				transferStreams
			},
			config,
			streamOptions: { signal, size, onstart, onprogress, onend }
		};
		try {
			// Pipes readable -> (deflate/encrypt) -> writable, possibly in a worker.
			const result = await runWorker({ readable, writable }, workerOptions);
			compressedSize = result.outputSize;
			writer.size += compressedSize;
			if (!passThrough) {
				uncompressedSize = result.inputSize;
				signature = result.signature;
			}
		} catch (error) {
			// Account for any bytes emitted before the failure so offsets stay
			// consistent, then propagate.
			if (error.outputSize !== UNDEFINED_VALUE) {
				writer.size += error.outputSize;
			}
			throw error;
		}

	}
	// Patch CRC/sizes into the header and data descriptor buffers.
	setEntryInfo({
		signature,
		compressedSize,
		uncompressedSize,
		headerInfo,
		dataDescriptorInfo
	}, options);
	if (dataDescriptor) {
		await writeData(writer, dataDescriptorArray);
	}
	Object.assign(fileEntry, {
		uncompressedSize,
		compressedSize,
		lastModDate,
		rawLastModDate,
		creationDate,
		lastAccessDate,
		encrypted,
		zipCrypto,
		// Total bytes this entry occupies: header + descriptor + compressed data.
		size: metadataSize + compressedSize,
		compressionMethod,
		version,
		headerArray,
		headerView,
		signature,
		extraFieldExtendedTimestampFlag,
		zip64UncompressedSize,
		zip64CompressedSize
	});
	return fileEntry;
}
|
||
|
|
|
||
|
|
// Builds the local file header for an entry: the fixed header fields plus all
// extra fields (Zip64, AES, extended timestamp, NTFS timestamps, Unix/Info-ZIP
// ids/mode), serialized into `localHeaderArray`. Returns the buffers and the
// derived values (effective version, compression method, flags) that the rest
// of the entry-writing pipeline reads back.
function getHeaderInfo(options) {
	const {
		rawFilename,
		lastModDate,
		lastAccessDate,
		creationDate,
		level,
		zip64,
		zipCrypto,
		useUnicodeFileNames,
		dataDescriptor,
		directory,
		rawExtraField,
		encryptionStrength,
		extendedTimestamp,
		passThrough,
		encrypted,
		zip64UncompressedSize,
		zip64CompressedSize,
		uncompressedSize
	} = options;
	let { version, compressionMethod } = options;
	// Directories are never compressed; otherwise compress unless level or
	// compressionMethod explicitly disable it.
	const compressed = !directory && (level > 0 || (level === UNDEFINED_VALUE && compressionMethod !== 0));
	let rawLocalExtraFieldZip64;
	const uncompressedFile = passThrough || !compressed;
	// True when the Zip64 sizes can already be written into the local header
	// (buffered writes patch them later; otherwise sizes must be known now).
	const zip64ExtraFieldComplete = zip64 && (options.bufferedWrite || ((!zip64UncompressedSize && !zip64CompressedSize) || uncompressedFile));
	if (zip64) {
		// Zip64 extra field: 4-byte header + 8 bytes per 64-bit size present.
		let rawLocalExtraFieldZip64Length = 4;
		if (zip64UncompressedSize) {
			rawLocalExtraFieldZip64Length += 8;
		}
		if (zip64CompressedSize) {
			rawLocalExtraFieldZip64Length += 8;
		}
		rawLocalExtraFieldZip64 = new Uint8Array(rawLocalExtraFieldZip64Length);
		const rawLocalExtraFieldZip64View = getDataView(rawLocalExtraFieldZip64);
		setUint16(rawLocalExtraFieldZip64View, 0, EXTRAFIELD_TYPE_ZIP64);
		setUint16(rawLocalExtraFieldZip64View, 2, getLength(rawLocalExtraFieldZip64) - 4);
		if (zip64ExtraFieldComplete) {
			// NOTE: shadows the outer view intentionally; same backing buffer.
			const rawLocalExtraFieldZip64View = getDataView(rawLocalExtraFieldZip64);
			let rawLocalExtraFieldZip64Offset = 4;
			if (zip64UncompressedSize) {
				setBigUint64(rawLocalExtraFieldZip64View, rawLocalExtraFieldZip64Offset, BigInt(uncompressedSize));
				rawLocalExtraFieldZip64Offset += 8;
			}
			if (zip64CompressedSize && uncompressedFile) {
				// Stored data: compressed size equals uncompressed size.
				setBigUint64(rawLocalExtraFieldZip64View, rawLocalExtraFieldZip64Offset, BigInt(uncompressedSize));
				rawLocalExtraFieldZip64Offset += 8;
			}
			if (rawLocalExtraFieldZip64Offset == 4) {
				// No size was actually written: drop the empty field.
				rawLocalExtraFieldZip64 = new Uint8Array();
			}
		}
	} else {
		rawLocalExtraFieldZip64 = new Uint8Array();
	}
	// AES (WinZip AE-x) extra field, only for AES encryption (not ZipCrypto).
	let rawExtraFieldAES;
	if (encrypted && !zipCrypto) {
		rawExtraFieldAES = new Uint8Array(getLength(EXTRAFIELD_DATA_AES) + 2);
		const extraFieldAESView = getDataView(rawExtraFieldAES);
		setUint16(extraFieldAESView, 0, EXTRAFIELD_TYPE_AES);
		arraySet(rawExtraFieldAES, EXTRAFIELD_DATA_AES, 2);
		setUint8(extraFieldAESView, 8, encryptionStrength);
	} else {
		rawExtraFieldAES = new Uint8Array();
	}
	let rawExtraFieldNTFS;
	let rawExtraFieldExtendedTimestamp;
	let extraFieldExtendedTimestampFlag;
	if (extendedTimestamp) {
		// Extended (Unix epoch seconds) timestamp extra field:
		// 4-byte header + 1 flag byte + 4 bytes per timestamp present.
		rawExtraFieldExtendedTimestamp = new Uint8Array(9 + (lastAccessDate ? 4 : 0) + (creationDate ? 4 : 0));
		const extraFieldExtendedTimestampView = getDataView(rawExtraFieldExtendedTimestamp);
		setUint16(extraFieldExtendedTimestampView, 0, EXTRAFIELD_TYPE_EXTENDED_TIMESTAMP);
		setUint16(extraFieldExtendedTimestampView, 2, getLength(rawExtraFieldExtendedTimestamp) - 4);
		// Flag bits: 0x1 = mtime, 0x2 = atime, 0x4 = ctime.
		extraFieldExtendedTimestampFlag = 0x1 + (lastAccessDate ? 0x2 : 0) + (creationDate ? 0x4 : 0);
		setUint8(extraFieldExtendedTimestampView, 4, extraFieldExtendedTimestampFlag);
		let offset = 5;
		setUint32(extraFieldExtendedTimestampView, offset, Math.floor(lastModDate.getTime() / 1000));
		offset += 4;
		if (lastAccessDate) {
			setUint32(extraFieldExtendedTimestampView, offset, Math.floor(lastAccessDate.getTime() / 1000));
			offset += 4;
		}
		if (creationDate) {
			setUint32(extraFieldExtendedTimestampView, offset, Math.floor(creationDate.getTime() / 1000));
		}
		try {
			// NTFS extra field: three 64-bit FILETIME values (mtime/atime/ctime).
			// Falls back to empty if a date is out of FILETIME range.
			rawExtraFieldNTFS = new Uint8Array(36);
			const extraFieldNTFSView = getDataView(rawExtraFieldNTFS);
			const lastModTimeNTFS = getTimeNTFS(lastModDate);
			setUint16(extraFieldNTFSView, 0, EXTRAFIELD_TYPE_NTFS);
			setUint16(extraFieldNTFSView, 2, 32);
			setUint16(extraFieldNTFSView, 8, EXTRAFIELD_TYPE_NTFS_TAG1);
			setUint16(extraFieldNTFSView, 10, 24);
			setBigUint64(extraFieldNTFSView, 12, lastModTimeNTFS);
			setBigUint64(extraFieldNTFSView, 20, getTimeNTFS(lastAccessDate) || lastModTimeNTFS);
			setBigUint64(extraFieldNTFSView, 28, getTimeNTFS(creationDate) || lastModTimeNTFS);
		} catch {
			rawExtraFieldNTFS = new Uint8Array();
		}
	} else {
		rawExtraFieldNTFS = rawExtraFieldExtendedTimestamp = new Uint8Array();
	}
	// Unix (uid/gid[/mode]) extra field; best-effort, any failure yields empty.
	let rawExtraFieldUnix;
	try {
		const { uid, gid, unixMode, setuid, setgid, sticky, unixExtraFieldType } = options;
		if (unixExtraFieldType && (uid !== UNDEFINED_VALUE || gid !== UNDEFINED_VALUE || unixMode !== UNDEFINED_VALUE)) {
			const uidBytes = packUnixId(uid);
			const gidBytes = packUnixId(gid);
			let modeArray = new Uint8Array();
			// Only the "Unix" variant of the field carries the 16-bit mode.
			if (unixExtraFieldType == UNIX_EXTRA_FIELD_TYPE && unixMode !== UNDEFINED_VALUE) {
				let modeToWrite = unixMode & MAX_16_BITS;
				if (setuid) {
					modeToWrite |= FILE_ATTR_UNIX_SETUID_MASK;
				}
				if (setgid) {
					modeToWrite |= FILE_ATTR_UNIX_SETGID_MASK;
				}
				if (sticky) {
					modeToWrite |= FILE_ATTR_UNIX_STICKY_MASK;
				}
				modeArray = new Uint8Array(2);
				const modeDataView = new DataView(modeArray.buffer);
				modeDataView.setUint16(0, modeToWrite, true);
			}
			// Payload: version byte + uid length byte + uid + gid length byte + gid + mode.
			const payloadLength = 3 + uidBytes.length + gidBytes.length + modeArray.length;
			rawExtraFieldUnix = new Uint8Array(4 + payloadLength);
			const rawExtraFieldUnixView = getDataView(rawExtraFieldUnix);
			setUint16(rawExtraFieldUnixView, 0, unixExtraFieldType == INFOZIP_EXTRA_FIELD_TYPE ? EXTRAFIELD_TYPE_INFOZIP : EXTRAFIELD_TYPE_UNIX);
			setUint16(rawExtraFieldUnixView, 2, payloadLength);
			setUint8(rawExtraFieldUnixView, 4, 1);
			setUint8(rawExtraFieldUnixView, 5, uidBytes.length);
			let offset = 6;
			arraySet(rawExtraFieldUnix, uidBytes, offset);
			offset += uidBytes.length;
			setUint8(rawExtraFieldUnixView, offset, gidBytes.length);
			offset++;
			arraySet(rawExtraFieldUnix, gidBytes, offset);
			offset += gidBytes.length;
			arraySet(rawExtraFieldUnix, modeArray, offset);
		} else {
			rawExtraFieldUnix = new Uint8Array();
		}
	} catch {
		rawExtraFieldUnix = new Uint8Array();
	}
	if (compressionMethod === UNDEFINED_VALUE) {
		compressionMethod = compressed ? COMPRESSION_METHOD_DEFLATE : COMPRESSION_METHOD_STORE;
	}
	// Bump the "version needed to extract" for Zip64 and AES as required.
	if (zip64) {
		version = version > VERSION_ZIP64 ? version : VERSION_ZIP64;
	}
	if (encrypted && !zipCrypto) {
		version = version > VERSION_AES ? version : VERSION_AES;
		// The real method is stored inside the AES field; the header advertises AES.
		rawExtraFieldAES[9] = compressionMethod;
		compressionMethod = COMPRESSION_METHOD_AES;
	}
	const localExtraFieldZip64Length = zip64ExtraFieldComplete ? getLength(rawLocalExtraFieldZip64) : 0;
	const extraFieldLength = localExtraFieldZip64Length + getLength(rawExtraFieldAES, rawExtraFieldExtendedTimestamp, rawExtraFieldNTFS, rawExtraFieldUnix, rawExtraField);
	const {
		headerArray,
		headerView,
		rawLastModDate
	} = getHeaderArrayData({
		version,
		bitFlag: getBitFlag(level, useUnicodeFileNames, dataDescriptor, encrypted, compressionMethod),
		compressionMethod,
		uncompressedSize,
		// Clamp the modification date to the representable DOS date range.
		lastModDate: lastModDate < MIN_DATE ? MIN_DATE : lastModDate > MAX_DATE ? MAX_DATE : lastModDate,
		rawFilename,
		zip64CompressedSize,
		zip64UncompressedSize,
		extraFieldLength
	});
	// Assemble the local header: signature + fixed fields + filename + extra fields.
	let localHeaderOffset = HEADER_SIZE;
	const localHeaderArray = new Uint8Array(localHeaderOffset + getLength(rawFilename) + extraFieldLength);
	const localHeaderView = getDataView(localHeaderArray);
	setUint32(localHeaderView, 0, LOCAL_FILE_HEADER_SIGNATURE);
	arraySet(localHeaderArray, headerArray, 4);
	arraySet(localHeaderArray, rawFilename, localHeaderOffset);
	localHeaderOffset += getLength(rawFilename);
	if (zip64ExtraFieldComplete) {
		arraySet(localHeaderArray, rawLocalExtraFieldZip64, localHeaderOffset);
	}
	localHeaderOffset += localExtraFieldZip64Length;
	arraySet(localHeaderArray, rawExtraFieldAES, localHeaderOffset);
	localHeaderOffset += getLength(rawExtraFieldAES);
	arraySet(localHeaderArray, rawExtraFieldExtendedTimestamp, localHeaderOffset);
	localHeaderOffset += getLength(rawExtraFieldExtendedTimestamp);
	arraySet(localHeaderArray, rawExtraFieldNTFS, localHeaderOffset);
	localHeaderOffset += getLength(rawExtraFieldNTFS);
	arraySet(localHeaderArray, rawExtraFieldUnix, localHeaderOffset);
	localHeaderOffset += getLength(rawExtraFieldUnix);
	arraySet(localHeaderArray, rawExtraField, localHeaderOffset);
	if (dataDescriptor) {
		// Sizes live in the data descriptor; zero them out in the local header.
		setUint32(localHeaderView, HEADER_OFFSET_COMPRESSED_SIZE + 4, 0);
		setUint32(localHeaderView, HEADER_OFFSET_UNCOMPRESSED_SIZE + 4, 0);
	}
	return {
		localHeaderArray,
		localHeaderView,
		headerArray,
		headerView,
		lastModDate,
		rawLastModDate,
		encrypted,
		compressed,
		version,
		compressionMethod,
		extraFieldExtendedTimestampFlag,
		rawExtraFieldZip64: new Uint8Array(),
		localExtraFieldZip64Length,
		rawExtraFieldExtendedTimestamp,
		rawExtraFieldNTFS,
		rawExtraFieldUnix,
		rawExtraFieldAES,
		extraFieldLength
	};
}
|
||
|
|
|
||
|
|
// Pads the local header with a USDZ extra field so the entry data starts on a
// 64-byte boundary (required by the USDZ packaging rules). Mutates
// headerInfo.localHeaderArray and entryInfo.metadataSize in place.
//
// Fix: the original computed a DataView over the pre-padding header array and
// never used it before recomputing it on the rebuilt array; that dead work and
// the associated `let` reassignments are removed. Behavior is unchanged.
function appendExtraFieldUSDZ(entryInfo, zipWriterOffset) {
	const { headerInfo } = entryInfo;
	const { localHeaderArray, extraFieldLength } = headerInfo;
	// Padding needed so (archive offset + header length + padding) % 64 == 0.
	let extraBytesLength = 64 - ((zipWriterOffset + getLength(localHeaderArray)) % 64);
	if (extraBytesLength < 4) {
		// The extra field header itself (type + size) needs 4 bytes.
		extraBytesLength += 64;
	}
	const rawExtraFieldUSDZ = new Uint8Array(extraBytesLength);
	const extraFieldUSDZView = getDataView(rawExtraFieldUSDZ);
	setUint16(extraFieldUSDZView, 0, EXTRAFIELD_TYPE_USDZ);
	setUint16(extraFieldUSDZView, 2, extraBytesLength - 2);
	// Rebuild the local header with the padding field appended.
	const paddedLocalHeaderArray = new Uint8Array(getLength(localHeaderArray) + extraBytesLength);
	arraySet(paddedLocalHeaderArray, localHeaderArray);
	arraySet(paddedLocalHeaderArray, rawExtraFieldUSDZ, getLength(localHeaderArray));
	headerInfo.localHeaderArray = paddedLocalHeaderArray;
	// Patch the "extra field length" field of the local header (offset 28).
	setUint16(getDataView(paddedLocalHeaderArray), 28, extraFieldLength + extraBytesLength);
	entryInfo.metadataSize += extraBytesLength;
}
|
||
|
|
|
||
|
|
// Encodes a Unix uid/gid as the shortest little-endian byte sequence that
// still round-trips the 32-bit value; an undefined id yields an empty array.
function packUnixId(id) {
	if (id === UNDEFINED_VALUE) {
		return new Uint8Array();
	}
	const encoded = new Uint8Array(4);
	getDataView(encoded).setUint32(0, id, true);
	// Strip trailing zero bytes, but keep at least one byte.
	let byteCount = 4;
	while (byteCount > 1 && encoded[byteCount - 1] === 0) {
		byteCount--;
	}
	return encoded.subarray(0, byteCount);
}
|
||
|
|
|
||
|
|
// Reconciles the two MS-DOS attribute representations so both are defined:
// a raw 8-bit bitmask and a boolean-flags object. Whichever is provided wins
// (raw takes precedence); the missing counterpart is derived from it.
function normalizeMsdosAttributes(msdosAttributesRaw, msdosAttributes) {
	if (msdosAttributesRaw !== UNDEFINED_VALUE) {
		// Raw value supplied: clamp it to one byte.
		msdosAttributesRaw &= MAX_8_BITS;
	} else if (msdosAttributes !== UNDEFINED_VALUE) {
		// Derive the raw bitmask from the flags object.
		const { readOnly, hidden, system, directory, archive } = msdosAttributes;
		let bits = 0;
		bits |= readOnly ? FILE_ATTR_MSDOS_READONLY_MASK : 0;
		bits |= hidden ? FILE_ATTR_MSDOS_HIDDEN_MASK : 0;
		bits |= system ? FILE_ATTR_MSDOS_SYSTEM_MASK : 0;
		bits |= directory ? FILE_ATTR_MSDOS_DIR_MASK : 0;
		bits |= archive ? FILE_ATTR_MSDOS_ARCHIVE_MASK : 0;
		msdosAttributesRaw = bits & MAX_8_BITS;
	}
	if (msdosAttributes === UNDEFINED_VALUE) {
		// Derive the flags object from the (possibly still undefined) raw mask.
		msdosAttributes = {
			readOnly: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_READONLY_MASK),
			hidden: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_HIDDEN_MASK),
			system: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_SYSTEM_MASK),
			directory: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_DIR_MASK),
			archive: Boolean(msdosAttributesRaw & FILE_ATTR_MSDOS_ARCHIVE_MASK)
		};
	}
	return { msdosAttributesRaw, msdosAttributes };
}
|
||
|
|
|
||
|
|
// Builds the (possibly empty) data descriptor record written after the entry
// data. The record is only materialized when `dataDescriptor` is set, grows
// for zip64 entries, and may be prefixed with the optional descriptor
// signature. `dataDescriptorOffset` points past the signature prefix (0 when
// there is none); `dataDescriptorView` is undefined when no record is built.
function getDataDescriptorInfo({ zip64, dataDescriptor, dataDescriptorSignature }) {
	let dataDescriptorArray = new Uint8Array();
	let dataDescriptorView;
	let dataDescriptorOffset = 0;
	const signatureLength = dataDescriptorSignature ? DATA_DESCRIPTOR_RECORD_SIGNATURE_LENGTH : 0;
	const recordLength = (zip64 ? DATA_DESCRIPTOR_RECORD_ZIP_64_LENGTH : DATA_DESCRIPTOR_RECORD_LENGTH) + signatureLength;
	if (dataDescriptor) {
		dataDescriptorArray = new Uint8Array(recordLength);
		dataDescriptorView = getDataView(dataDescriptorArray);
		if (dataDescriptorSignature) {
			dataDescriptorOffset = signatureLength;
			setUint32(dataDescriptorView, 0, DATA_DESCRIPTOR_RECORD_SIGNATURE);
		}
	}
	return {
		dataDescriptorArray,
		dataDescriptorView,
		dataDescriptorOffset
	};
}
// Fills the signature (presumably the entry CRC-32 — the field preceding the
// size fields in the header layout) and the sizes into the entry header and,
// when present, into the data descriptor record.
function setEntryInfo(entryInfo, options) {
	const { signature, compressedSize, uncompressedSize, headerInfo, dataDescriptorInfo } = entryInfo;
	const { zip64, zipCrypto, dataDescriptor } = options;
	const { headerView, encrypted } = headerInfo;
	const { dataDescriptorView, dataDescriptorOffset } = dataDescriptorInfo;
	// The signature is only stored for non-encrypted entries (ZipCrypto still
	// exposes it) and only when it has actually been computed.
	if ((!encrypted || zipCrypto) && signature !== UNDEFINED_VALUE) {
		setUint32(headerView, HEADER_OFFSET_SIGNATURE, signature);
		if (dataDescriptor) {
			setUint32(dataDescriptorView, dataDescriptorOffset, signature);
		}
	}
	if (zip64) {
		// zip64 entries store sizes as 64-bit values in the descriptor; the
		// header size fields are handled via the zip64 extra field elsewhere.
		if (dataDescriptor) {
			setBigUint64(dataDescriptorView, dataDescriptorOffset + 4, BigInt(compressedSize));
			setBigUint64(dataDescriptorView, dataDescriptorOffset + 12, BigInt(uncompressedSize));
		}
		return;
	}
	setUint32(headerView, HEADER_OFFSET_COMPRESSED_SIZE, compressedSize);
	setUint32(headerView, HEADER_OFFSET_UNCOMPRESSED_SIZE, uncompressedSize);
	if (dataDescriptor) {
		setUint32(dataDescriptorView, dataDescriptorOffset + 4, compressedSize);
		setUint32(dataDescriptorView, dataDescriptorOffset + 8, uncompressedSize);
	}
}
// Back-patches the local file header once the entry data has been written
// and the signature/sizes are known. Only needed when no data descriptor
// follows the entry data (the descriptor carries those values otherwise).
// Also fills the 64-bit sizes into the local zip64 extra field when present.
function updateLocalHeader(fileEntry, localHeaderView, { dataDescriptor }) {
	const {
		rawFilename,
		encrypted,
		zip64,
		localExtraFieldZip64Length,
		signature,
		compressedSize,
		uncompressedSize,
		zip64UncompressedSize,
		zip64CompressedSize
	} = fileEntry;
	if (!dataDescriptor) {
		// The "+ 4" offsets skip the 4-byte local file header signature
		// prefix (presumably included in this view — confirm against callers).
		if (!encrypted) {
			setUint32(localHeaderView, HEADER_OFFSET_SIGNATURE + 4, signature);
		}
		if (!zip64CompressedSize) {
			setUint32(localHeaderView, HEADER_OFFSET_COMPRESSED_SIZE + 4, compressedSize);
		}
		if (!zip64UncompressedSize) {
			setUint32(localHeaderView, HEADER_OFFSET_UNCOMPRESSED_SIZE + 4, uncompressedSize);
		}
	}
	if (zip64 && localExtraFieldZip64Length) {
		// The zip64 extra field data starts after the fixed header, the
		// filename, and the field's own 4-byte type/size prefix.
		let zip64DataOffset = HEADER_SIZE + getLength(rawFilename) + 4;
		if (zip64UncompressedSize) {
			setBigUint64(localHeaderView, zip64DataOffset, BigInt(uncompressedSize));
			zip64DataOffset += 8;
		}
		if (zip64CompressedSize) {
			setBigUint64(localHeaderView, zip64DataOffset, BigInt(compressedSize));
			zip64DataOffset += 8;
		}
	}
}
// Finalizes the archive: builds and writes the central directory, the
// (optional) zip64 end-of-central-directory record + locator, the classic
// end-of-central-directory record, and the archive comment.
//
// zipWriter - the ZipWriter instance (provides `files` map and `writer`).
// comment   - raw archive comment bytes (length-checked against 16 bits).
// options   - close options (`zip64`, `onprogress`, split-file support...).
// Throws Error(ERR_UNSUPPORTED_FORMAT) when zip64 is required but disabled,
// and Error(ERR_INVALID_COMMENT) when the comment exceeds 64 KiB.
async function closeFile(zipWriter, comment, options) {
	const { files, writer } = zipWriter;
	const { diskOffset } = writer;
	let { diskNumber } = writer;
	let offset = 0;
	let directoryDataLength = 0;
	// Start of the central directory, relative to the current disk.
	let directoryOffset = zipWriter.offset - diskOffset;
	let filesLength = files.size;
	// --- Pass 1: build per-entry zip64 and extended-timestamp extra fields
	// and accumulate the total central directory size.
	for (const [, fileEntry] of files) {
		const {
			rawFilename,
			rawExtraFieldAES,
			rawComment,
			rawExtraFieldNTFS,
			rawExtraFieldUnix,
			rawExtraField,
			extendedTimestamp,
			extraFieldExtendedTimestampFlag,
			lastModDate,
			zip64UncompressedSize,
			zip64CompressedSize,
			uncompressedSize,
			compressedSize
		} = fileEntry;
		// The entry needs zip64 central-directory fields when its offset or
		// starting disk overflows the classic 32-bit/16-bit fields.
		const zip64Offset = fileEntry.offset > MAX_32_BITS;
		const zip64DiskNumberStart = fileEntry.diskNumberStart > MAX_16_BITS;
		let rawExtraFieldZip64;
		if (zip64Offset || zip64DiskNumberStart || zip64UncompressedSize || zip64CompressedSize) {
			// Field layout: 4-byte type/size prefix, then only the values
			// that actually overflow, in spec order (sizes, offset, disk).
			let length = 4;
			if (zip64UncompressedSize) length += 8;
			if (zip64CompressedSize) length += 8;
			if (zip64Offset) length += 8;
			if (zip64DiskNumberStart) length += 4;
			rawExtraFieldZip64 = new Uint8Array(length);
			const zip64View = getDataView(rawExtraFieldZip64);
			setUint16(zip64View, 0, EXTRAFIELD_TYPE_ZIP64);
			setUint16(zip64View, 2, length - 4);
			let zip64FieldOffset = 4;
			if (zip64UncompressedSize) { setBigUint64(zip64View, zip64FieldOffset, BigInt(uncompressedSize)); zip64FieldOffset += 8; }
			if (zip64CompressedSize) { setBigUint64(zip64View, zip64FieldOffset, BigInt(compressedSize)); zip64FieldOffset += 8; }
			if (zip64Offset) { setBigUint64(zip64View, zip64FieldOffset, BigInt(fileEntry.offset)); zip64FieldOffset += 8; }
			if (zip64DiskNumberStart) { setUint32(zip64View, zip64FieldOffset, fileEntry.diskNumberStart); }
		} else {
			rawExtraFieldZip64 = new Uint8Array();
		}
		fileEntry.rawExtraFieldZip64 = rawExtraFieldZip64;
		fileEntry.zip64Offset = zip64Offset;
		fileEntry.zip64DiskNumberStart = zip64DiskNumberStart;
		let rawExtraFieldTimestamp;
		if (extendedTimestamp) {
			// Central-directory form of the extended timestamp field: flags
			// byte plus the modification time only (9 bytes total).
			rawExtraFieldTimestamp = new Uint8Array(9);
			const extraFieldExtendedTimestampView = getDataView(rawExtraFieldTimestamp);
			setUint16(extraFieldExtendedTimestampView, 0, EXTRAFIELD_TYPE_EXTENDED_TIMESTAMP);
			setUint16(extraFieldExtendedTimestampView, 2, 5);
			setUint8(extraFieldExtendedTimestampView, 4, extraFieldExtendedTimestampFlag);
			setUint32(extraFieldExtendedTimestampView, 5, Math.floor(lastModDate.getTime() / 1000));
		} else {
			rawExtraFieldTimestamp = new Uint8Array();
		}
		fileEntry.rawExtraFieldExtendedTimestamp = rawExtraFieldTimestamp;
		// 46 = fixed size of a central directory file header record.
		directoryDataLength += 46 +
			getLength(
				rawFilename,
				rawComment,
				rawExtraFieldZip64,
				rawExtraFieldAES,
				rawExtraFieldNTFS,
				rawExtraFieldUnix,
				rawExtraFieldTimestamp,
				rawExtraField);
	}
	const directoryArray = new Uint8Array(directoryDataLength);
	const directoryView = getDataView(directoryArray);
	await initStream(writer);
	// Offset within directoryArray of the last chunk flushed to the writer
	// (used to split the central directory across disks).
	let directoryDiskOffset = 0;
	// --- Pass 2: serialize each central directory record into directoryArray.
	for (const [indexFileEntry, fileEntry] of Array.from(files.values()).entries()) {
		const {
			offset: fileEntryOffset,
			rawFilename,
			rawExtraFieldZip64,
			rawExtraFieldAES,
			rawExtraFieldExtendedTimestamp,
			rawExtraFieldNTFS,
			rawExtraFieldUnix,
			rawExtraField,
			rawComment,
			versionMadeBy,
			headerArray,
			headerView,
			zip64UncompressedSize,
			zip64CompressedSize,
			zip64DiskNumberStart,
			zip64Offset,
			internalFileAttributes,
			externalFileAttributes,
			diskNumberStart,
			uncompressedSize,
			compressedSize
		} = fileEntry;
		const extraFieldLength = getLength(rawExtraFieldZip64, rawExtraFieldAES, rawExtraFieldExtendedTimestamp, rawExtraFieldNTFS, rawExtraFieldUnix, rawExtraField);
		setUint32(directoryView, offset, CENTRAL_FILE_HEADER_SIGNATURE);
		setUint16(directoryView, offset + 4, versionMadeBy);
		// Patch the real sizes into the shared header array before copying it
		// (overflowing values stay at their MAX_32_BITS sentinel).
		if (!zip64UncompressedSize) {
			setUint32(headerView, HEADER_OFFSET_UNCOMPRESSED_SIZE, uncompressedSize);
		}
		if (!zip64CompressedSize) {
			setUint32(headerView, HEADER_OFFSET_COMPRESSED_SIZE, compressedSize);
		}
		// Reuse the local-header fields (version, flags, method, date, CRC,
		// sizes, name/extra lengths) after the 6 bytes written above.
		arraySet(directoryArray, headerArray, offset + 6);
		let directoryOffset = offset + HEADER_SIZE;
		setUint16(directoryView, directoryOffset, extraFieldLength);
		directoryOffset += 2;
		setUint16(directoryView, directoryOffset, getLength(rawComment));
		directoryOffset += 2;
		// Overflowing values are replaced by the all-ones sentinel that tells
		// readers to look in the zip64 extra field instead.
		setUint16(directoryView, directoryOffset, zip64DiskNumberStart ? MAX_16_BITS : diskNumberStart);
		directoryOffset += 2;
		setUint16(directoryView, directoryOffset, internalFileAttributes);
		directoryOffset += 2;
		if (externalFileAttributes) {
			setUint32(directoryView, directoryOffset, externalFileAttributes);
		}
		directoryOffset += 4;
		setUint32(directoryView, directoryOffset, zip64Offset ? MAX_32_BITS : fileEntryOffset);
		directoryOffset += 4;
		// Variable-length tail: filename, extra fields (zip64 first), comment.
		arraySet(directoryArray, rawFilename, directoryOffset);
		directoryOffset += getLength(rawFilename);
		arraySet(directoryArray, rawExtraFieldZip64, directoryOffset);
		directoryOffset += getLength(rawExtraFieldZip64);
		arraySet(directoryArray, rawExtraFieldAES, directoryOffset);
		directoryOffset += getLength(rawExtraFieldAES);
		arraySet(directoryArray, rawExtraFieldExtendedTimestamp, directoryOffset);
		directoryOffset += getLength(rawExtraFieldExtendedTimestamp);
		arraySet(directoryArray, rawExtraFieldNTFS, directoryOffset);
		directoryOffset += getLength(rawExtraFieldNTFS);
		arraySet(directoryArray, rawExtraFieldUnix, directoryOffset);
		directoryOffset += getLength(rawExtraFieldUnix);
		arraySet(directoryArray, rawExtraField, directoryOffset);
		directoryOffset += getLength(rawExtraField);
		arraySet(directoryArray, rawComment, directoryOffset);
		directoryOffset += getLength(rawComment);
		// Split archives: flush the pending part of the directory when it no
		// longer fits on the current disk, then continue on the next one.
		if (offset - directoryDiskOffset > writer.availableSize) {
			writer.availableSize = 0;
			await writeData(writer, directoryArray.slice(directoryDiskOffset, offset));
			directoryDiskOffset = offset;
		}
		offset = directoryOffset;
		if (options.onprogress) {
			try {
				await options.onprogress(indexFileEntry + 1, files.size, new Entry(fileEntry));
			} catch {
				// ignored
			}
		}
	}
	// Flush the remainder of the central directory.
	await writeData(writer, directoryDiskOffset ? directoryArray.slice(directoryDiskOffset) : directoryArray);
	let lastDiskNumber = writer.diskNumber;
	const { availableSize } = writer;
	// The EOCD record must fit entirely on one disk; if not, it will land on
	// the next one.
	if (availableSize < END_OF_CENTRAL_DIR_LENGTH) {
		lastDiskNumber++;
	}
	let zip64 = getOptionValue(zipWriter, options, PROPERTY_NAME_ZIP64);
	// Force zip64 when any EOCD field overflows; fail if the caller
	// explicitly disabled it.
	if (directoryOffset > MAX_32_BITS || directoryDataLength > MAX_32_BITS || filesLength > MAX_16_BITS || lastDiskNumber > MAX_16_BITS) {
		if (zip64 === false) {
			throw new Error(ERR_UNSUPPORTED_FORMAT);
		} else {
			zip64 = true;
		}
	}
	const endOfdirectoryArray = new Uint8Array(zip64 ? ZIP64_END_OF_CENTRAL_DIR_TOTAL_LENGTH : END_OF_CENTRAL_DIR_LENGTH);
	const endOfdirectoryView = getDataView(endOfdirectoryArray);
	offset = 0;
	if (zip64) {
		// zip64 end-of-central-directory record (56 bytes)...
		setUint32(endOfdirectoryView, 0, ZIP64_END_OF_CENTRAL_DIR_SIGNATURE);
		// 44 = size of the record after the signature and this size field.
		setBigUint64(endOfdirectoryView, 4, BigInt(44));
		// Version made by / version needed to extract: 45 (4.5 = zip64).
		setUint16(endOfdirectoryView, 12, 45);
		setUint16(endOfdirectoryView, 14, 45);
		setUint32(endOfdirectoryView, 16, lastDiskNumber);
		setUint32(endOfdirectoryView, 20, diskNumber);
		setBigUint64(endOfdirectoryView, 24, BigInt(filesLength));
		setBigUint64(endOfdirectoryView, 32, BigInt(filesLength));
		setBigUint64(endOfdirectoryView, 40, BigInt(directoryDataLength));
		setBigUint64(endOfdirectoryView, 48, BigInt(directoryOffset));
		// ...followed by the zip64 EOCD locator (20 bytes) at offset 56.
		setUint32(endOfdirectoryView, 56, ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIGNATURE);
		setBigUint64(endOfdirectoryView, 64, BigInt(directoryOffset) + BigInt(directoryDataLength));
		setUint32(endOfdirectoryView, 72, lastDiskNumber + 1);
		const supportZip64SplitFile = getOptionValue(zipWriter, options, OPTION_SUPPORT_ZIP64_SPLIT_FILE, true);
		if (supportZip64SplitFile) {
			lastDiskNumber = MAX_16_BITS;
			diskNumber = MAX_16_BITS;
		}
		// Cap the values written into the classic EOCD record below so that
		// readers fall back to the zip64 record.
		filesLength = MAX_16_BITS;
		directoryOffset = MAX_32_BITS;
		directoryDataLength = MAX_32_BITS;
		offset += ZIP64_END_OF_CENTRAL_DIR_LENGTH + ZIP64_END_OF_CENTRAL_DIR_LOCATOR_LENGTH;
	}
	// Classic end-of-central-directory record.
	setUint32(endOfdirectoryView, offset, END_OF_CENTRAL_DIR_SIGNATURE);
	setUint16(endOfdirectoryView, offset + 4, lastDiskNumber);
	setUint16(endOfdirectoryView, offset + 6, diskNumber);
	setUint16(endOfdirectoryView, offset + 8, filesLength);
	setUint16(endOfdirectoryView, offset + 10, filesLength);
	setUint32(endOfdirectoryView, offset + 12, directoryDataLength);
	setUint32(endOfdirectoryView, offset + 16, directoryOffset);
	const commentLength = getLength(comment);
	if (commentLength) {
		if (commentLength <= MAX_16_BITS) {
			setUint16(endOfdirectoryView, offset + 20, commentLength);
		} else {
			throw new Error(ERR_INVALID_COMMENT);
		}
	}
	await writeData(writer, endOfdirectoryArray);
	if (commentLength) {
		await writeData(writer, comment);
	}
}
// Writes `array` to the writer's WritableStream and keeps `writer.size` in
// sync. The stream lock is always released, even when the write rejects.
async function writeData(writer, array) {
	const streamWriter = writer.writable.getWriter();
	try {
		await streamWriter.ready;
		writer.size += getLength(array);
		await streamWriter.write(array);
	} finally {
		streamWriter.releaseLock();
	}
}
// Converts a Date to an NTFS timestamp: the number of 100-nanosecond
// intervals since 1601-01-01 UTC (the Unix epoch is 11644473600000 ms after
// the NTFS epoch). Returns undefined when no date is given.
function getTimeNTFS(date) {
	if (!date) {
		return;
	}
	const unixTimeMilliseconds = BigInt(date.getTime());
	const epochDeltaMilliseconds = BigInt(11644473600000);
	return (unixTimeMilliseconds + epochDeltaMilliseconds) * BigInt(10000);
}
// Resolves an option by precedence: per-call `options`, then the ZipWriter
// constructor options, then the supplied default value.
function getOptionValue(zipWriter, options, name, defaultValue) {
	let value = options[name];
	if (value === UNDEFINED_VALUE) {
		value = zipWriter.options[name];
	}
	return value === UNDEFINED_VALUE ? defaultValue : value;
}
// Upper bound on the deflate output size for a given input size: 5 bytes of
// overhead are added for each started 16383-byte chunk of input.
function getMaximumCompressedSize(uncompressedSize) {
	const chunkCount = Math.floor(uncompressedSize / 16383) + 1;
	return uncompressedSize + chunkCount * 5;
}
// Writes an unsigned 8-bit value into `dataView` at `byteOffset`.
function setUint8(dataView, byteOffset, byteValue) {
	dataView.setUint8(byteOffset, byteValue);
}
// Writes an unsigned 16-bit value in little-endian byte order (the byte
// order mandated by the zip format).
function setUint16(dataView, byteOffset, wordValue) {
	dataView.setUint16(byteOffset, wordValue, true);
}
// Writes an unsigned 32-bit value in little-endian byte order (the byte
// order mandated by the zip format).
function setUint32(dataView, byteOffset, dwordValue) {
	dataView.setUint32(byteOffset, dwordValue, true);
}
// Writes an unsigned 64-bit BigInt value in little-endian byte order (the
// byte order mandated by the zip format).
function setBigUint64(dataView, byteOffset, qwordValue) {
	dataView.setBigUint64(byteOffset, qwordValue, true);
}
// Copies the content of `sourceArray` into `targetArray` starting at
// `targetOffset` (defaults to 0 when omitted).
function arraySet(targetArray, sourceArray, targetOffset) {
	targetArray.set(sourceArray, targetOffset);
}
// Returns a DataView over the backing buffer of the given typed array.
// NOTE(review): the view covers the whole buffer from offset 0; callers in
// this file always pass arrays that own their entire buffer — confirm before
// passing a subarray.
function getDataView(typedArray) {
	return new DataView(typedArray.buffer);
}
// Sums the `length` property of every truthy argument (typed arrays, arrays,
// strings); null/undefined arguments contribute 0.
function getLength(...arrayLikes) {
	let total = 0;
	for (const arrayLike of arrayLikes) {
		if (arrayLike) {
			total += arrayLike.length;
		}
	}
	return total;
}
// Builds the fixed-size portion of the entry header (without the leading
// 4-byte signature) shared by the local file header and, via `arraySet`, the
// central directory record. Returns the array, its DataView, and the entry's
// raw MS-DOS date/time value.
function getHeaderArrayData({
	version,
	bitFlag,
	compressionMethod,
	uncompressedSize,
	compressedSize,
	lastModDate,
	rawFilename,
	zip64CompressedSize,
	zip64UncompressedSize,
	extraFieldLength
}) {
	const headerArray = new Uint8Array(HEADER_SIZE - 4);
	const headerView = getDataView(headerArray);
	setUint16(headerView, 0, version);
	setUint16(headerView, 2, bitFlag);
	setUint16(headerView, 4, compressionMethod);
	// Encode the modification date in MS-DOS format: time (2-second
	// resolution) in the low 16 bits, date (years since 1980) in the high 16
	// bits. The two halves are written little-endian and then read back as
	// one 32-bit value through a Uint32Array — this assumes a little-endian
	// host (TODO confirm if big-endian platforms matter).
	const dateArray = new Uint32Array(1);
	const dateView = getDataView(dateArray);
	setUint16(dateView, 0, (((lastModDate.getHours() << 6) | lastModDate.getMinutes()) << 5) | lastModDate.getSeconds() / 2);
	setUint16(dateView, 2, ((((lastModDate.getFullYear() - 1980) << 4) | (lastModDate.getMonth() + 1)) << 5) | lastModDate.getDate());
	const rawLastModDate = dateArray[0];
	setUint32(headerView, 6, rawLastModDate);
	// Sizes: zip64 entries store the MAX_32_BITS sentinel here (the real
	// 64-bit values go into the zip64 extra field); the field is left at 0
	// when the size is not yet known.
	if (zip64CompressedSize || compressedSize !== UNDEFINED_VALUE) {
		setUint32(headerView, HEADER_OFFSET_COMPRESSED_SIZE, zip64CompressedSize ? MAX_32_BITS : compressedSize);
	}
	if (zip64UncompressedSize || uncompressedSize !== UNDEFINED_VALUE) {
		setUint32(headerView, HEADER_OFFSET_UNCOMPRESSED_SIZE, zip64UncompressedSize ? MAX_32_BITS : uncompressedSize);
	}
	setUint16(headerView, 22, getLength(rawFilename));
	setUint16(headerView, 24, extraFieldLength);
	return {
		headerArray,
		headerView,
		rawLastModDate
	};
}
// Computes the general-purpose bit flag of an entry header from the writer
// options: UTF-8 filename flag, data descriptor flag, deflate compression
// level hint, and the encryption flag.
function getBitFlag(level, useUnicodeFileNames, dataDescriptor, encrypted, compressionMethod) {
	let bitFlag = 0;
	if (useUnicodeFileNames) {
		bitFlag |= BITFLAG_LANG_ENCODING_FLAG;
	}
	if (dataDescriptor) {
		bitFlag |= BITFLAG_DATA_DESCRIPTOR;
	}
	const deflated = compressionMethod == COMPRESSION_METHOD_DEFLATE || compressionMethod == COMPRESSION_METHOD_DEFLATE_64;
	if (deflated) {
		// The level ranges are mutually exclusive, so an else-if chain is
		// equivalent to the spec's three independent masks.
		if (level >= 0 && level <= 3) {
			bitFlag |= BITFLAG_LEVEL_SUPER_FAST_MASK;
		} else if (level > 3 && level <= 5) {
			bitFlag |= BITFLAG_LEVEL_FAST_MASK;
		} else if (level == 9) {
			bitFlag |= BITFLAG_LEVEL_MAX_MASK;
		}
	}
	if (encrypted) {
		bitFlag |= BITFLAG_ENCRYPTED;
	}
	return bitFlag;
}
/*
|
||
|
|
Copyright (c) 2022 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Fallback MIME type returned by this build (no MIME type table bundled).
function getMimeType() {
	return "application/octet-stream";
}
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Configure the library with the URI of the current script/module so that it
// can resolve companion resources (e.g. worker scripts) relative to itself.
// The baseURI is resolved in this order: Node.js (no `document`, no
// `location`): the file URL of this module; worker contexts: `location.href`;
// browser documents: the current <script> src, falling back to a URL relative
// to the document base. Failures are deliberately ignored — callers can still
// invoke configure() manually.
try {
	configure({ baseURI: (typeof document === 'undefined' && typeof location === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : typeof document === 'undefined' ? location.href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('zip-legacy.js', document.baseURI).href)) });
} catch {
	// ignored
}
/*
|
||
|
|
Copyright (c) 2025 Gildas Lormeau. All rights reserved.
|
||
|
|
|
||
|
|
Redistribution and use in source and binary forms, with or without
|
||
|
|
modification, are permitted provided that the following conditions are met:
|
||
|
|
|
||
|
|
1. Redistributions of source code must retain the above copyright notice,
|
||
|
|
this list of conditions and the following disclaimer.
|
||
|
|
|
||
|
|
2. Redistributions in binary form must reproduce the above copyright
|
||
|
|
notice, this list of conditions and the following disclaimer in
|
||
|
|
the documentation and/or other materials provided with the distribution.
|
||
|
|
|
||
|
|
3. The names of the authors may not be used to endorse or promote products
|
||
|
|
derived from this software without specific prior written permission.
|
||
|
|
|
||
|
|
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
||
|
|
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||
|
|
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
|
||
|
|
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
|
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||
|
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
|
||
|
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||
|
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||
|
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||
|
|
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||
|
|
*/
|
||
|
|
|
||
|
|
|
||
|
|
// Passes configure() to the file-local `g` helper (defined earlier in the
// bundle — presumably to wire up deflate/inflate codec registration; confirm
// against the full file).
g(configure);

// Public API of the UMD bundle: readers/writers, the ZipReader/ZipWriter
// classes and their stream variants, configuration helpers, and the error
// message constants thrown by the library.
exports.BlobReader = BlobReader;
exports.BlobWriter = BlobWriter;
exports.Data64URIReader = Data64URIReader;
exports.Data64URIWriter = Data64URIWriter;
exports.ERR_BAD_FORMAT = ERR_BAD_FORMAT;
exports.ERR_CENTRAL_DIRECTORY_NOT_FOUND = ERR_CENTRAL_DIRECTORY_NOT_FOUND;
exports.ERR_DUPLICATED_NAME = ERR_DUPLICATED_NAME;
exports.ERR_ENCRYPTED = ERR_ENCRYPTED;
exports.ERR_EOCDR_LOCATOR_ZIP64_NOT_FOUND = ERR_EOCDR_LOCATOR_ZIP64_NOT_FOUND;
exports.ERR_EOCDR_NOT_FOUND = ERR_EOCDR_NOT_FOUND;
exports.ERR_EXTRAFIELD_ZIP64_NOT_FOUND = ERR_EXTRAFIELD_ZIP64_NOT_FOUND;
exports.ERR_HTTP_RANGE = ERR_HTTP_RANGE;
exports.ERR_INVALID_COMMENT = ERR_INVALID_COMMENT;
exports.ERR_INVALID_ENCRYPTION_STRENGTH = ERR_INVALID_ENCRYPTION_STRENGTH;
exports.ERR_INVALID_ENTRY_COMMENT = ERR_INVALID_ENTRY_COMMENT;
exports.ERR_INVALID_ENTRY_NAME = ERR_INVALID_ENTRY_NAME;
exports.ERR_INVALID_EXTRAFIELD_DATA = ERR_INVALID_EXTRAFIELD_DATA;
exports.ERR_INVALID_EXTRAFIELD_TYPE = ERR_INVALID_EXTRAFIELD_TYPE;
exports.ERR_INVALID_PASSWORD = ERR_INVALID_PASSWORD;
exports.ERR_INVALID_SIGNATURE = ERR_INVALID_SIGNATURE;
exports.ERR_INVALID_UNCOMPRESSED_SIZE = ERR_INVALID_UNCOMPRESSED_SIZE;
exports.ERR_INVALID_VERSION = ERR_INVALID_VERSION;
exports.ERR_LOCAL_FILE_HEADER_NOT_FOUND = ERR_LOCAL_FILE_HEADER_NOT_FOUND;
exports.ERR_OVERLAPPING_ENTRY = ERR_OVERLAPPING_ENTRY;
exports.ERR_SPLIT_ZIP_FILE = ERR_SPLIT_ZIP_FILE;
exports.ERR_UNDEFINED_UNCOMPRESSED_SIZE = ERR_UNDEFINED_UNCOMPRESSED_SIZE;
exports.ERR_UNSUPPORTED_COMPRESSION = ERR_UNSUPPORTED_COMPRESSION;
exports.ERR_UNSUPPORTED_ENCRYPTION = ERR_UNSUPPORTED_ENCRYPTION;
exports.ERR_UNSUPPORTED_FORMAT = ERR_UNSUPPORTED_FORMAT;
exports.ERR_ZIP_NOT_EMPTY = ERR_ZIP_NOT_EMPTY;
exports.HttpRangeReader = HttpRangeReader;
exports.HttpReader = HttpReader;
exports.Reader = Reader;
exports.SplitDataReader = SplitDataReader;
exports.SplitDataWriter = SplitDataWriter;
exports.TextReader = TextReader;
exports.TextWriter = TextWriter;
exports.Uint8ArrayReader = Uint8ArrayReader;
exports.Uint8ArrayWriter = Uint8ArrayWriter;
exports.Writer = Writer;
exports.ZipReader = ZipReader;
exports.ZipReaderStream = ZipReaderStream;
exports.ZipWriter = ZipWriter;
exports.ZipWriterStream = ZipWriterStream;
exports.configure = configure;
exports.getMimeType = getMimeType;
exports.terminateWorkers = terminateWorkers;
||
|
|
|
||
|
|
}));
|