Mirror of https://github.com/gchq/CyberChef
Synced 2025-12-27 13:43:30 +00:00
Compare commits
33 Commits
5c35205315
10751934e4
d658f91106
4c3324aea1
ac2fcee90f
94e00115fe
29255d2338
39278cfce7
46cc48cfb9
eb4009949d
57c48a4bd2
45011de494
5e51ed0a5f
875802ef2a
bbc255ef83
fc155ec3fc
3a0c8a199a
9c729c4490
19bdbd66e5
ea090f79ee
1be6c54be2
a4eeb226b1
d136717636
ad0a2e6f58
7c672c5ee9
090bf3f8ec
9f4ef9cdad
b69e4567c0
ff585584f6
8a029e5147
4251089687
dbcd670ca8
462f619f43
CHANGELOG.md

```diff
@@ -2,6 +2,9 @@
 
 All major and minor version changes will be documented in this file. Details of patch-level version changes can be found in [commit messages](https://github.com/gchq/CyberChef/commits/master).
 
+### [9.20.0] - 2020-03-27
+- 'Parse ObjectID Timestamp' operation added [@dmfj] | [#987]
+
 ### [9.19.0] - 2020-03-24
 - Improvements to the 'Magic' operation, allowing it to recognise more data formats and provide more accurate results [@n1073645] [@n1474335] | [#966] [b765534b](https://github.com/gchq/CyberChef/commit/b765534b8b2a0454a5132a0a52d1d8844bcbdaaa)
@@ -221,6 +224,7 @@ All major and minor version changes will be documented in this file. Details of
 
+[9.20.0]: https://github.com/gchq/CyberChef/releases/tag/v9.20.0
 [9.19.0]: https://github.com/gchq/CyberChef/releases/tag/v9.19.0
 [9.18.0]: https://github.com/gchq/CyberChef/releases/tag/v9.18.0
 [9.17.0]: https://github.com/gchq/CyberChef/releases/tag/v9.17.0
@@ -318,6 +322,7 @@ All major and minor version changes will be documented in this file. Details of
 [@Flavsditz]: https://github.com/Flavsditz
 [@pointhi]: https://github.com/pointhi
 [@MarvinJWendt]: https://github.com/MarvinJWendt
+[@dmfj]: https://github.com/dmfj
 
 [#95]: https://github.com/gchq/CyberChef/pull/299
 [#173]: https://github.com/gchq/CyberChef/pull/173
@@ -389,3 +394,4 @@ All major and minor version changes will be documented in this file. Details of
 [#952]: https://github.com/gchq/CyberChef/pull/952
 [#965]: https://github.com/gchq/CyberChef/pull/965
 [#966]: https://github.com/gchq/CyberChef/pull/966
+[#987]: https://github.com/gchq/CyberChef/pull/987
```
Gruntfile.js

```diff
@@ -37,7 +37,7 @@ module.exports = function (grunt) {
     ]);
 
     grunt.registerTask("configTests",
-        "A task which configures config files in preparation for tests to be run. Use `npm tests` to run tests.",
+        "A task which configures config files in preparation for tests to be run. Use `npm test` to run tests.",
         [
             "clean:config", "clean:nodeConfig", "exec:generateConfig", "exec:generateNodeIndex"
         ]);
```
package-lock.json (generated)

4344 lines changed; file diff suppressed because it is too large.
package.json

```diff
@@ -1,6 +1,6 @@
 {
   "name": "cyberchef",
-  "version": "9.19.0",
+  "version": "9.20.4",
   "description": "The Cyber Swiss Army Knife for encryption, encoding, compression and data analysis.",
   "author": "n1474335 <n1474335@gmail.com>",
   "homepage": "https://gchq.github.io/CyberChef",
@@ -43,7 +43,7 @@
     "babel-eslint": "^10.1.0",
     "babel-loader": "^8.0.6",
     "babel-plugin-dynamic-import-node": "^2.3.0",
-    "chromedriver": "^80.0.1",
+    "chromedriver": "^83.0.0",
     "cli-progress": "^3.6.0",
     "colors": "^1.4.0",
     "copy-webpack-plugin": "^5.1.1",
@@ -108,7 +108,6 @@
     "es6-promisify": "^6.1.0",
     "escodegen": "^1.14.1",
     "esm": "^3.2.25",
-    "esmangle": "^1.0.1",
     "esprima": "^4.0.1",
     "exif-parser": "^0.1.12",
     "file-saver": "^2.0.2",
@@ -146,6 +145,7 @@
     "sortablejs": "^1.10.2",
     "split.js": "^1.5.11",
     "ssdeep.js": "0.0.2",
+    "terser": "^4.3.9",
     "tesseract.js": "^2.0.2",
     "ua-parser-js": "^0.7.21",
     "unorm": "^1.6.0",
```
Categories config

```diff
@@ -242,6 +242,7 @@
             "Convert co-ordinate format",
             "Show on map",
             "Parse UNIX file permissions",
+            "Parse ObjectID timestamp",
             "Swap endianness",
             "Parse colour code",
             "Escape string",
```
DishJSON dish type

```diff
@@ -17,7 +17,7 @@ class DishJSON extends DishType {
      */
     static toArrayBuffer() {
         DishJSON.checkForValue(this.value);
-        this.value = this.value ? Utils.strToArrayBuffer(JSON.stringify(this.value, null, 4)) : new ArrayBuffer;
+        this.value = this.value !== undefined ? Utils.strToArrayBuffer(JSON.stringify(this.value, null, 4)) : new ArrayBuffer;
     }
 
     /**
```
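This one-character-class fix matters because a truthiness test throws away valid falsy JSON dishes. A standalone sketch of the behavioural difference (plain JSON.stringify stands in for CyberChef's Utils.strToArrayBuffer here, purely for brevity):

```javascript
// Falsy but perfectly valid JSON values that a dish may hold.
const samples = [0, false, "", null];

for (const value of samples) {
    // Old check: any falsy value collapses to an empty buffer.
    const before = value ? JSON.stringify(value, null, 4) : "(empty ArrayBuffer)";
    // New check: only an unset value (undefined) collapses.
    const after = value !== undefined ? JSON.stringify(value, null, 4) : "(empty ArrayBuffer)";
    console.log(`${String(value)}: before=${before}, after=${after}`);
}
```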
File signatures and extractors (FILE_SIGNATURES)

```diff
@@ -468,6 +468,34 @@ export const FILE_SIGNATURES = {
             ],
             extractor: null
         },
+        {
+            name: "Targa Image",
+            extension: "tga",
+            mime: "image/x-targa",
+            description: "",
+            signature: [
+                { // This signature is not at the beginning of the file. The extractor works backwards.
+                    0: 0x54,
+                    1: 0x52,
+                    2: 0x55,
+                    3: 0x45,
+                    4: 0x56,
+                    5: 0x49,
+                    6: 0x53,
+                    7: 0x49,
+                    8: 0x4f,
+                    9: 0x4e,
+                    10: 0x2d,
+                    11: 0x58,
+                    12: 0x46,
+                    13: 0x49,
+                    14: 0x4c,
+                    15: 0x45,
+                    16: 0x2e
+                }
+            ],
+            extractor: extractTARGA
+        }
     ],
     "Video": [
         { // Place before webm
@@ -780,7 +808,7 @@ export const FILE_SIGNATURES = {
                     1: 0xfb
                 }
             ],
-            extractor: null
+            extractor: extractMP3
         },
         {
             name: "MPEG-4 Part 14 audio",
```
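The seventeen signature bytes are plain ASCII. Decoding them shows the TGA version 2 footer magic, which is why the comment notes that the signature does not sit at the beginning of the file:

```javascript
const sig = [0x54, 0x52, 0x55, 0x45, 0x56, 0x49, 0x53, 0x49, 0x4f,
             0x4e, 0x2d, 0x58, 0x46, 0x49, 0x4c, 0x45, 0x2e];
console.log(String.fromCharCode(...sig)); // "TRUEVISION-XFILE."
```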
```diff
@@ -3047,6 +3075,90 @@ export function extractICO(bytes, offset) {
 }
 
 
+/**
+ * TARGA extractor.
+ *
+ * @param {Uint8Array} bytes
+ * @param {number} offset
+ */
+export function extractTARGA(bytes, offset) {
+    // Need all the bytes since we do not know how far up the image goes.
+    const stream = new Stream(bytes);
+    stream.moveTo(offset - 8);
+
+    // Read in the offsets of the possible areas.
+    const extensionOffset = stream.readInt(4, "le");
+    const developerOffset = stream.readInt(4, "le");
+
+    stream.moveBackwardsBy(8);
+
+    /**
+     * Moves backwards in the stream until it meets a size field whose value equals the number of bytes moved.
+     *
+     * @param {number} maxSize
+     * @param {number} sizeOfSize
+     */
+    function moveBackwardsUntilSize(maxSize, sizeOfSize) {
+        for (let i = 0; i < maxSize; i++) {
+            stream.moveBackwardsBy(1);
+
+            // Read in sizeOfSize bytes.
+            const size = stream.readInt(sizeOfSize, "le") - 1;
+            stream.moveBackwardsBy(sizeOfSize);
+
+            // If the size matches.
+            if (size === i)
+                break;
+        }
+    }
+
+    /**
+     * Moves backwards in the stream until the calculated image size equals the number of bytes moved.
+     */
+    function moveBackwardsUntilImageSize() {
+        stream.moveBackwardsBy(5);
+
+        // The documentation says that 0x100000 is the largest the file can be.
+        for (let i = 0; i < 0x100000; i++) {
+
+            // (Height * Width * pixel depth in bits) / 8
+            const total = (stream.readInt(2, "le") * stream.readInt(2, "le") * stream.readInt(1)) / 8;
+            if (total === i - 1)
+                break;
+
+            stream.moveBackwardsBy(6);
+        }
+    }
+
+    if (extensionOffset || developerOffset) {
+        if (extensionOffset) {
+            // Size is stored in two bytes, hence the maximum is 0xffff.
+            moveBackwardsUntilSize(0xffff, 2);
+
+            // Move to where we think the start of the file is.
+            stream.moveBackwardsBy(extensionOffset);
+        } else if (developerOffset) {
+            // Size is stored in 4 bytes, hence the maximum is 0xffffffff.
+            moveBackwardsUntilSize(0xffffffff, 4);
+
+            // Size is stored in byte position 6, so we have to move back.
+            stream.moveBackwardsBy(6);
+
+            // Move to where we think the start of the file is.
+            stream.moveBackwardsBy(developerOffset);
+        }
+    } else {
+        // Move backwards until size === number of bytes passed.
+        moveBackwardsUntilImageSize();
+
+        // Move backwards over the remainder of the header + the 5 we borrowed in moveBackwardsUntilImageSize().
+        stream.moveBackwardsBy(0xc + 5);
+    }
+
+    return stream.carve(stream.position, offset + 0x12);
+}
+
+
 /**
  * WAV extractor.
  *
```
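For orientation, the structure extractTARGA walks backwards from is the fixed 26-byte TGA v2 footer: a 4-byte little-endian extension-area offset, a 4-byte little-endian developer-directory offset, then the 18-byte signature ("TRUEVISION-XFILE." plus a terminating NUL). A minimal sketch of parsing just that footer, assuming bytes is a Uint8Array ending in a complete footer (parseTgaFooter is a hypothetical helper, not CyberChef code):

```javascript
function parseTgaFooter(bytes) {
    // The footer occupies the last 26 bytes of a TGA v2 file.
    const footer = bytes.slice(bytes.length - 26);
    const view = new DataView(footer.buffer);
    return {
        extensionOffset: view.getUint32(0, true), // 0 means "no extension area"
        developerOffset: view.getUint32(4, true), // 0 means "no developer directory"
        signature: String.fromCharCode(...footer.slice(8, 25)) // "TRUEVISION-XFILE."
    };
}
```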
```diff
@@ -3067,6 +3179,79 @@ export function extractWAV(bytes, offset) {
 }
 
 
+/**
+ * MP3 extractor.
+ *
+ * @param {Uint8Array} bytes
+ * @param {Number} offset
+ * @returns {Uint8Array}
+ */
+export function extractMP3(bytes, offset) {
+    const stream = new Stream(bytes.slice(offset));
+
+    // Constants for flag byte.
+    const bitRateIndexes = ["free", 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 320000, "bad"];
+
+    const samplingRateFrequencyIndex = [44100, 48000, 32000, "reserved"];
+
+    // ID3 tag, move over it.
+    if ((stream.getBytes(3).toString() === [0x49, 0x44, 0x33].toString())) {
+        stream.moveTo(6);
+        const tagSize = (stream.readInt(1) << 21) | (stream.readInt(1) << 14) | (stream.readInt(1) << 7) | stream.readInt(1);
+        stream.moveForwardsBy(tagSize);
+    } else {
+        stream.moveTo(0);
+    }
+
+    // Loop over all the frame headers in the file.
+    while (stream.hasMore()) {
+
+        // If it has an old TAG frame at the end of it, fixed size, 128 bytes.
+        if (stream.getBytes(3).toString() === [0x54, 0x41, 0x47].toString()) {
+            stream.moveForwardsBy(125);
+            break;
+        }
+
+        // If not start of frame.
+        if (stream.getBytes(2).toString() !== [0xff, 0xfb].toString()) {
+            stream.moveBackwardsBy(2);
+            break;
+        }
+
+        // Read flag byte.
+        const flags = stream.readInt(1);
+
+        // Extract frame bit rate from flag byte.
+        const bitRate = bitRateIndexes[flags >> 4];
+
+        // Extract frame sample rate from flag byte.
+        const sampleRate = samplingRateFrequencyIndex[(flags & 0x0f) >> 2];
+
+        // Padding if the frame size is not a multiple of the bitrate.
+        const padding = (flags & 0x02) >> 1;
+
+        // Things that are either not standard or undocumented.
+        if (bitRate === "free" || bitRate === "bad" || sampleRate === "reserved") {
+            stream.moveBackwardsBy(1);
+            break;
+        }
+
+        // Formula: FrameLength = (144 * BitRate / SampleRate) + Padding
+        const frameSize = Math.floor(((144 * bitRate) / sampleRate) + padding);
+
+        // If the next move goes past the end of the bytestream then extract the entire bytestream.
+        // We assume complete frames in the above formula because there is no field that suggests otherwise.
+        if ((stream.position + frameSize) > stream.length) {
+            stream.moveTo(stream.length);
+            break;
+        } else {
+            stream.moveForwardsBy(frameSize - 3);
+        }
+    }
+
+    return stream.carve();
+}
+
+
 /**
  * FLV extractor.
  *
```
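Two of the numeric steps above are easy to sanity-check by hand. The ID3v2 tag size is a 28-bit "synchsafe" integer (four bytes contributing 7 bits each, which is what the shift-and-OR expression decodes), and the frame length follows the MPEG-1 Layer III formula quoted in the code:

```javascript
// Synchsafe ID3v2 size: 0x00 0x00 0x02 0x01 -> (2 << 7) | 1 = 257 bytes of tag to skip.
const id3 = [0x00, 0x00, 0x02, 0x01];
console.log((id3[0] << 21) | (id3[1] << 14) | (id3[2] << 7) | id3[3]); // 257

// Frame length for a 128 kbps, 44.1 kHz frame with no padding:
console.log(Math.floor((144 * 128000) / 44100 + 0)); // 417 bytes
```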
Stream class

```diff
@@ -303,11 +303,13 @@ export default class Stream {
     /**
      * Returns a slice of the stream up to the current position.
      *
+     * @param {number} [start=0]
+     * @param {number} [finish=this.position]
      * @returns {Uint8Array}
      */
-    carve() {
-        if (this.bitPos > 0) this.position++;
-        return this.bytes.slice(0, this.position);
+    carve(start=0, finish=this.position) {
+        if (this.bitPos > 0) finish++;
+        return this.bytes.slice(start, finish);
     }
 
 }
```
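Defaulting start to 0 and finish to this.position keeps every existing carve() call-site working unchanged, while supporting the explicit range that extractTARGA needs after it has walked backwards to the start of the file. A sketch of the two call styles, assuming a Stream constructed over some Uint8Array bytes:

```javascript
const stream = new Stream(bytes);
stream.moveForwardsBy(16);

const head = stream.carve();       // bytes[0..16): the original behaviour
const range = stream.carve(4, 12); // bytes[4..12): the new explicit-range form
```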
JavaScript Minify operation

```diff
@@ -4,10 +4,9 @@
  * @license Apache-2.0
  */
 
+import OperationError from "../errors/OperationError.mjs";
 import Operation from "../Operation.mjs";
-import * as esprima from "esprima";
-import escodegen from "escodegen";
-import esmangle from "esmangle";
+import Terser from "terser";
 
 /**
  * JavaScript Minify operation
@@ -34,22 +33,11 @@ class JavaScriptMinify extends Operation {
      * @returns {string}
      */
     run(input, args) {
-        let result = "";
-        const AST = esprima.parseScript(input),
-            optimisedAST = esmangle.optimize(AST, null),
-            mangledAST = esmangle.mangle(optimisedAST);
-
-        result = escodegen.generate(mangledAST, {
-            format: {
-                renumber: true,
-                hexadecimal: true,
-                escapeless: true,
-                compact: true,
-                semicolons: false,
-                parentheses: false
-            }
-        });
-        return result;
+        const result = Terser.minify(input);
+        if (result.error) {
+            throw new OperationError(`Error minifying JavaScript. (${result.error})`);
+        }
+        return result.code;
     }
 
 }
```
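The rewrite replaces the three-library esprima → esmangle → escodegen pipeline with a single Terser call. This leans on Terser 4's synchronous API, in which minify() returns an object carrying either code or error (Terser 5 later made minify() asynchronous). A quick standalone check of the pattern used in run():

```javascript
import Terser from "terser"; // ^4.x, matching the version added to package.json

const result = Terser.minify("function add (first, second) { return first + second; }");
if (result.error) throw result.error;
console.log(result.code); // e.g. "function add(n,d){return n+d}" (exact mangled names may vary)
```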
src/core/operations/ParseObjectIDTimestamp.mjs (new file, 47 lines)

```diff
@@ -0,0 +1,47 @@
+/**
+ * @author dmfj [dominic@dmfj.io]
+ * @copyright Crown Copyright 2020
+ * @license Apache-2.0
+ */
+
+import Operation from "../Operation.mjs";
+import OperationError from "../errors/OperationError.mjs";
+import BSON from "bson";
+
+/**
+ * Parse ObjectID timestamp operation
+ */
+class ParseObjectIDTimestamp extends Operation {
+
+    /**
+     * ParseObjectIDTimestamp constructor
+     */
+    constructor() {
+        super();
+
+        this.name = "Parse ObjectID timestamp";
+        this.module = "Serialise";
+        this.description = "Parse timestamp from MongoDB/BSON ObjectID hex string.";
+        this.infoURL = "https://docs.mongodb.com/manual/reference/method/ObjectId.getTimestamp/";
+        this.inputType = "string";
+        this.outputType = "string";
+        this.args = [];
+    }
+
+    /**
+     * @param {string} input
+     * @param {Object[]} args
+     * @returns {string}
+     */
+    run(input, args) {
+        try {
+            const objectId = new BSON.ObjectID(input);
+            return objectId.getTimestamp().toISOString();
+        } catch (err) {
+            throw new OperationError(err);
+        }
+    }
+
+}
+
+export default ParseObjectIDTimestamp;
```
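The operation works because a BSON ObjectID embeds its own creation time: the first 4 of its 12 bytes are a big-endian count of seconds since the Unix epoch, which is exactly what getTimestamp() reads. A dependency-free sketch of the same extraction (objectIdTimestamp is a hypothetical helper, not part of the operation):

```javascript
function objectIdTimestamp(hex) {
    const seconds = parseInt(hex.slice(0, 8), 16); // first 4 bytes, big-endian
    return new Date(seconds * 1000).toISOString();
}

console.log(objectIdTimestamp("000000000000000000000000")); // "1970-01-01T00:00:00.000Z"
```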
RecipeWaiter (web UI)

```diff
@@ -51,7 +51,6 @@ class RecipeWaiter {
                 }
             }.bind(this),
             onSort: function(evt) {
-                this.updateZIndices();
                 if (evt.from.id === "rec-list") {
                     document.dispatchEvent(this.manager.statechange);
                 }
@@ -150,19 +149,6 @@ class RecipeWaiter {
     }
 
 
-    /**
-     * Sets the z-index property on each operation to make sure that operations higher in the list
-     * have a higher index, meaning dropdowns are not hidden underneath subsequent operations.
-     */
-    updateZIndices() {
-        const operations = document.getElementById("rec-list").children;
-        for (let i = 0; i < operations.length; i++) {
-            const operation = operations[i];
-            operation.style.zIndex = 100 + operations.length - i;
-        }
-    }
-
-
     /**
      * Handler for favourite dragover events.
      * If the element being dragged is an operation, displays a visual cue so that the user knows it can
@@ -480,7 +466,6 @@ class RecipeWaiter {
         log.debug(`'${e.target.querySelector(".op-title").textContent}' added to recipe`);
 
         this.triggerArgEvents(e.target);
-        this.updateZIndices();
         window.dispatchEvent(this.manager.statechange);
     }
 
```
Operations test index

```diff
@@ -100,6 +100,7 @@ import "./tests/Lorenz.mjs";
 import "./tests/LuhnChecksum.mjs";
 import "./tests/CipherSaber2.mjs";
 import "./tests/Colossus.mjs";
+import "./tests/ParseObjectIDTimestamp.mjs";
 
 
 // Cannot test operations that use the File type yet
@@ -120,4 +121,3 @@ const logOpsTestReport = logTestReport.bind(null, testStatus);
     const results = await TestRegister.runTests();
     logOpsTestReport(results);
 })();
-
```
tests/operations/tests/ParseObjectIDTimestamp.mjs (new file, 24 lines)

```diff
@@ -0,0 +1,24 @@
+/**
+ * Parse ObjectID timestamp tests
+ *
+ * @author dmfj [dominic@dmfj.io]
+ *
+ * @copyright Crown Copyright 2018
+ * @license Apache-2.0
+ */
+import TestRegister from "../../lib/TestRegister.mjs";
+
+
+TestRegister.addTests([
+    {
+        name: "Parse ISO timestamp from ObjectId",
+        input: "000000000000000000000000",
+        expectedOutput: "1970-01-01T00:00:00.000Z",
+        recipeConfig: [
+            {
+                op: "Parse ObjectID timestamp",
+                args: [],
+            }
+        ],
+    }
+]);
```