I am writing a script that parses a Strava bulk export. On my laptop, decoding the FIT data is pretty slow: it takes about 25 seconds for a little over 1000 activities.
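For reference, the decode loop is essentially something along these lines. This is only a sketch: it assumes the @garmin/fitsdk package's Decoder/Stream API and that the .fit files from the export have already been gunzipped into an activities/ folder, so paths and the decompression step will differ depending on how you unpack the export.

import { readdirSync, readFileSync } from "node:fs";
import { join } from "node:path";
import { Decoder, Stream } from "@garmin/fitsdk";

const dir = "activities"; // wherever the unzipped .fit files from the export live
const files = readdirSync(dir).filter((name) => name.endsWith(".fit"));

const start = performance.now();
for (const name of files) {
    // readFileSync returns a Buffer, which is a Uint8Array, so it can feed the SDK stream
    const stream = Stream.fromByteArray(readFileSync(join(dir, name)));
    const decoder = new Decoder(stream);
    const { messages, errors } = decoder.read(); // this call dominates the runtime
    if (errors.length > 0) {
        console.warn(`${name}: ${errors.length} decode error(s)`);
    }
}
console.log(`Decoded ${files.length} activities in ${((performance.now() - start) / 1000).toFixed(1)}s`);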
I found that most of the time is spent copying data in stream.js. After modifying that file to avoid the superfluous copy, decoding now takes about 14 seconds. I am sure there is more that could be optimized, but with this one simple change decoding is already almost twice as fast. I don't know whether Garmin developers read this forum, but just in case, the diff is below; hopefully a similar change can be integrated into the official SDK.
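Before the diff, a quick illustration of why the change helps (the buffer size and offsets here are made-up values): ArrayBuffer.prototype.slice allocates a brand new buffer and copies the requested bytes into it, while a Uint8Array constructed with an explicit byteOffset and length is just a view over the existing memory, so nothing is copied.

const arrayBuffer = new ArrayBuffer(1024);
const position = 16;
const size = 64;

// slice() allocates a fresh ArrayBuffer and copies `size` bytes into it.
const copied = arrayBuffer.slice(position, position + size);

// A (buffer, byteOffset, length) view shares the underlying memory; no copy.
const view = new Uint8Array(arrayBuffer, position, size);

console.log(copied.byteLength, view.byteLength); // 64 64
console.log(view.buffer === arrayBuffer);        // true, same memory

The one caveat with a view is that downstream code has to respect its byteOffset and byteLength instead of assuming the data starts at byte 0, which is exactly what the DataView change in the last hunk takes care of.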
--- stream.js 2024-09-18 16:47:17.735895788 -0700
+++ stream_arno.js 2024-09-18 16:47:09.712617348 -0700
@@ -112,10 +112,10 @@
throw Error(`FIT Runtime Error end of stream at byte ${this.#position}`);
}
- const bytes = this.#arrayBuffer.slice(this.#position, this.#position + size);
+ const bytes = new Uint8Array(this.#arrayBuffer, this.#position, size);
this.#position += size;
- this.#crcCalculator?.addBytes(new Uint8Array(bytes), 0, size);
+ this.#crcCalculator?.addBytes(bytes, 0, size);
return bytes;
}
@@ -168,14 +168,14 @@
const baseTypeSize = FIT.BaseTypeDefinitions[baseType].size;
const baseTypeInvalid = FIT.BaseTypeDefinitions[baseType].invalid;
- const arrayBuffer = this.readBytes(size);
+ const bytes = this.readBytes(size);
if (size % baseTypeSize !== 0) {
return convertInvalidToNull ? null : baseTypeInvalid;
}
if (baseType === FIT.BaseType.STRING) {
- const string = this.#textDecoder.decode(arrayBuffer).replace(/\uFFFD/g, "");
+ const string = this.#textDecoder.decode(bytes).replace(/\uFFFD/g, "");
const strings = string.split('\0');
while (strings[strings.length - 1] === "") {
@@ -189,7 +189,7 @@
return strings.length === 1 ? strings[0] : strings;
}
- const dataView = new DataView(arrayBuffer);
+ const dataView = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);
let values = [];
const count = size / baseTypeSize;