Compare commits

4 commits

| Author | SHA1 | Date |
| --- | --- | --- |
| | 3ea9dbb2f8 | |
| | 9654779f82 | |
| | ee4eee473d | |
| | 9896c4caec | |
README.md

```diff
@@ -26,4 +26,15 @@
 - [ ] FLAC (need tested)
 - [ ] Wrap video element with customElements (Prototyping / Lit-html + Typescript)
 - [ ] Add WebCodecs polyfill with ffmpeg or libav (Todo / WASM)
-- [ ] Danmuku integrated (Todo / Typescript)
+- [x] Chrome/Edge/Android Webview: WebCodecs Native support
+- [ ] FIREFOX
+  - [x] VP8/VP9/AV1 native support
+  - [x] AVC/HEVC 8bit native support
+  - [ ] AVC/HEVC >= 10bit polyfill needed
+  - [ ] Firefox Android not support
+- [ ] Safari
+  - [x] VP8/VP9/AV1 native support
+  - [x] AVC/HEVC 8bit native support
+  - [ ] AVC/HEVC >= 10bit polyfill needed for some devices
+  - [ ] Audio Decoder polyfill needed
+- [ ] Danmuku integration (Todo / Typescript)
```
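The added support matrix can be probed at runtime rather than maintained by hand. A minimal sketch using the standard `VideoDecoder.isConfigSupported()` API; the HEVC Main 10 codec string and dimensions are illustrative assumptions, not values from this repo:

```ts
// Sketch: probe WebCodecs support instead of user-agent sniffing.
// The codec string 'hvc1.2.4.L120.B0' (HEVC Main 10) is an assumed example.
async function needsHevc10BitPolyfill(): Promise<boolean> {
  if (typeof VideoDecoder === 'undefined') {
    return true; // no WebCodecs at all
  }
  const { supported } = await VideoDecoder.isConfigSupported({
    codec: 'hvc1.2.4.L120.B0',
    codedWidth: 1920,
    codedHeight: 1080,
  });
  return !supported;
}

needsHevc10BitPolyfill().then((needed) =>
  console.log(needed ? 'HEVC 10-bit: polyfill needed' : 'HEVC 10-bit: native')
);
```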
Demo page (HTML)

```diff
@@ -3,7 +3,8 @@
 <head></head>
 
 <body>
-<!-- <my-element />-->
-<!-- <video-pipeline-demo src="/api/static/video/test.webm"></video-pipeline-demo>-->
-<video-pipeline-demo src="/api/static/video/huge/[LoliHouse] Amagami-san Chi no Enmusubi - 23 [WebRip 1080p HEVC-10bit AAC SRTx2].mkv" width="800" height="450" />
+<!-- <my-element />-->
+<!-- <video-pipeline-demo src="/api/static/video/test-hevc.mkv" width="800" height="450"></video-pipeline-demo> -->
+<video-pipeline-demo src="/api/static/video/huge/test8.mkv" width="800" height="450"></video-pipeline-demo>
+<!-- <video-pipeline-demo src="/api/static/video/huge/[LoliHouse] Amagami-san Chi no Enmusubi - 23 [WebRip 1080p HEVC-10bit AAC SRTx2].mkv" width="800" height="450" /> -->
 </body>
```
VideoPipelineDemo element (TypeScript)

```diff
@@ -13,7 +13,10 @@ import {
   fromEvent,
   share,
   takeUntil,
-  firstValueFrom, tap, throwIfEmpty, ReplaySubject, finalize, of, interval,
+  firstValueFrom,
+  tap,
+  throwIfEmpty,
+  ReplaySubject,
 } from 'rxjs';
 import { createMatroska } from '@konoplayer/matroska/model';
 import { createRef, ref, type Ref } from 'lit/directives/ref.js';
```
```diff
@@ -78,35 +81,47 @@ export class VideoPipelineDemo extends LitElement {
         videoTrackDecoder,
         audioTrackDecoder,
       },
-      totalSize
+      totalSize,
     } = await firstValueFrom(
       createMatroska({
         url: src,
-      }).pipe(
-        throwIfEmpty(() => new Error("failed to extract matroska"))
-      )
-    )
+      }).pipe(throwIfEmpty(() => new Error('failed to extract matroska')))
+    );
 
-    console.debug(`[MATROSKA]: loaded metadata, total size ${totalSize} bytes`)
+    console.debug(`[MATROSKA]: loaded metadata, total size ${totalSize} bytes`);
 
     const currentCluster$ = this.seeked$.pipe(
       switchMap((seekTime) => seek(seekTime)),
-      share({ resetOnRefCountZero: false, resetOnError: false, resetOnComplete: false }),
+      share({
+        resetOnRefCountZero: false,
+        resetOnError: false,
+        resetOnComplete: false,
+      })
     );
 
     defaultVideoTrack$
-      .pipe(take(1), takeUntil(destroyRef$), tap((track) => console.debug('[MATROSKA]: video track loaded,', track)))
+      .pipe(
+        take(1),
+        takeUntil(destroyRef$),
+        tap((track) => console.debug('[MATROSKA]: video track loaded,', track))
+      )
       .subscribe(this.videoTrack$.next.bind(this.videoTrack$));
 
     defaultAudioTrack$
-      .pipe(take(1), takeUntil(destroyRef$), tap((track) => console.debug('[MATROSKA]: audio track loaded,', track)))
+      .pipe(
+        take(1),
+        takeUntil(destroyRef$),
+        tap((track) => console.debug('[MATROSKA]: audio track loaded,', track))
+      )
       .subscribe(this.audioTrack$.next.bind(this.audioTrack$));
 
     this.videoTrack$
       .pipe(
         takeUntil(this.destroyRef$),
         switchMap((track) =>
-          track?.configuration ? videoTrackDecoder(track, currentCluster$) : EMPTY
+          track?.configuration
+            ? videoTrackDecoder(track, currentCluster$)
+            : EMPTY
         ),
         switchMap(({ frame$ }) => frame$)
       )
```
```diff
@@ -120,7 +135,9 @@ export class VideoPipelineDemo extends LitElement {
       .pipe(
         takeUntil(this.destroyRef$),
         switchMap((track) =>
-          track?.configuration ? audioTrackDecoder(track, currentCluster$) : EMPTY
+          track?.configuration
+            ? audioTrackDecoder(track, currentCluster$)
+            : EMPTY
         ),
         switchMap(({ frame$ }) => frame$)
       )
```
```diff
@@ -144,22 +161,25 @@ export class VideoPipelineDemo extends LitElement {
       ),
     }).pipe(
       takeUntil(this.destroyRef$),
-      map(({ ended, paused, videoBuffered, audioBuffered }) => !paused && !ended && !!(videoBuffered || audioBuffered)),
+      map(
+        ({ ended, paused, videoBuffered, audioBuffered }) =>
+          !paused && !ended && !!(videoBuffered || audioBuffered)
+      ),
       tap((enabled) => {
         if (enabled) {
-          playableStartTime = performance.now()
+          playableStartTime = performance.now();
         }
       }),
       share()
-    )
+    );
 
     let nextAudioStartTime = 0;
     playable
       .pipe(
         tap(() => {
-          nextAudioStartTime = 0
+          nextAudioStartTime = 0;
         }),
-        switchMap((enabled) => (enabled ? animationFrames() : EMPTY)),
+        switchMap((enabled) => (enabled ? animationFrames() : EMPTY))
       )
       .subscribe(() => {
         const audioFrameBuffer = this.audioFrameBuffer$.getValue();
```
```diff
@@ -169,7 +189,7 @@ export class VideoPipelineDemo extends LitElement {
         let audioChanged = false;
         while (audioFrameBuffer.size > 0) {
           const firstAudio = audioFrameBuffer.peek();
-          if (firstAudio && (firstAudio.timestamp / 1000) <= accTime) {
+          if (firstAudio && firstAudio.timestamp / 1000 <= accTime) {
             const audioFrame = audioFrameBuffer.dequeue()!;
             audioChanged = true;
             if (audioContext) {
```
```diff
@@ -187,10 +207,15 @@ export class VideoPipelineDemo extends LitElement {
               const fadeLength = Math.min(50, audioFrame.numberOfFrames);
               for (let channel = 0; channel < numberOfChannels; channel++) {
                 const channelData = new Float32Array(numberOfFrames);
-                audioFrame.copyTo(channelData, { planeIndex: channel, frameCount: numberOfFrames });
+                audioFrame.copyTo(channelData, {
+                  format: 'f32-planar',
+                  planeIndex: channel,
+                  frameCount: numberOfFrames,
+                });
                 for (let i = 0; i < fadeLength; i++) {
                   channelData[i] *= i / fadeLength; // fade-in
-                  channelData[audioFrame.numberOfFrames - 1 - i] *= i / fadeLength; // fade-out
+                  channelData[audioFrame.numberOfFrames - 1 - i] *=
+                    i / fadeLength; // fade-out
                 }
                 audioBuffer.copyToChannel(channelData, channel);
               }
```
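The functional change in this hunk is `format: 'f32-planar'`: without it, `copyTo` copies samples in the decoder's native `AudioData.format`, which may be interleaved or 16-bit integer, while `AudioBuffer.copyToChannel` expects planar float32. A standalone sketch of the same conversion (the function name is illustrative):

```ts
// Sketch: copy a WebCodecs AudioData into a Web Audio AudioBuffer,
// requesting planar float32 regardless of the decoder's native format.
function audioDataToBuffer(ctx: AudioContext, data: AudioData): AudioBuffer {
  const buffer = ctx.createBuffer(
    data.numberOfChannels,
    data.numberOfFrames,
    data.sampleRate
  );
  for (let ch = 0; ch < data.numberOfChannels; ch++) {
    const plane = new Float32Array(data.numberOfFrames);
    // 'f32-planar' forces the layout copyToChannel needs; omitting format
    // would copy whatever AudioData.format happens to be.
    data.copyTo(plane, { format: 'f32-planar', planeIndex: ch });
    buffer.copyToChannel(plane, ch);
  }
  return buffer;
}
```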
```diff
@@ -222,9 +247,7 @@ export class VideoPipelineDemo extends LitElement {
       });
 
     playable
-      .pipe(
-        switchMap((enabled) => (enabled ? animationFrames() : EMPTY)),
-      )
+      .pipe(switchMap((enabled) => (enabled ? animationFrames() : EMPTY)))
       .subscribe(async () => {
         const renderingContext = this.renderingContext;
         const videoFrameBuffer = this.videoFrameBuffer$.getValue();
```
```diff
@@ -233,7 +256,7 @@ export class VideoPipelineDemo extends LitElement {
         const accTime = nowTime - playableStartTime;
         while (videoFrameBuffer.size > 0) {
           const firstVideo = videoFrameBuffer.peek();
-          if (firstVideo && (firstVideo.timestamp / 1000) <= accTime) {
+          if (firstVideo && firstVideo.timestamp / 1000 <= accTime) {
             const videoFrame = videoFrameBuffer.dequeue()!;
             videoChanged = true;
             if (renderingContext) {
```
```diff
@@ -252,12 +275,30 @@ export class VideoPipelineDemo extends LitElement {
 
     fromEvent(document.body, 'click')
       .pipe(takeUntil(this.destroyRef$))
-      .subscribe(() => {
+      .subscribe(async () => {
+        const permissionStatus = await navigator.permissions.query({
+          name: 'microphone',
+        });
+        if (permissionStatus.state === 'prompt') {
+          await navigator.mediaDevices.getUserMedia({
+            audio: true,
+          });
+        }
         this.audioContext.resume();
         this.audioFrameBuffer$.next(this.audioFrameBuffer$.getValue());
       });
 
-    this.seeked$.next(0)
+    const permissionStatus = await navigator.permissions.query({
+      name: 'microphone',
+    });
+    if (permissionStatus.state === 'granted') {
+      await navigator.mediaDevices.getUserMedia({
+        audio: true,
+      });
+      this.audioContext.resume();
+    }
+
+    this.seeked$.next(0);
   }
 
   async connectedCallback() {
```
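Context for the hunk above: browsers create an `AudioContext` in the `suspended` state until a user gesture, so `resume()` has to run inside an input handler, which is what the click subscription does. The gesture-gating in isolation, as a minimal sketch:

```ts
// Sketch: autoplay policy keeps a fresh AudioContext 'suspended';
// resume() must be called from a user-gesture handler such as a click.
const audioContext = new AudioContext();

document.body.addEventListener(
  'click',
  async () => {
    if (audioContext.state === 'suspended') {
      await audioContext.resume();
      console.debug('audio clock running at', audioContext.currentTime);
    }
  },
  { once: true }
);
```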
Proxy rules (whistle-style config)

````diff
@@ -5,7 +5,7 @@
 }
 ```
 
-#^https://konoplayer.com/api/static/*** resSpeed://10240+
+^https://konoplayer.com/api/static/*** resSpeed://10240
 ^https://konoplayer.com/api*** reqHeaders://{x-forwarded.json} http://127.0.0.1:5001/api$1
 ^https://konoplayer.com/*** reqHeaders://{x-forwarded.json} http://127.0.0.1:5000/$1 excludeFilter://^https://konoplayer.com/api weinre://test
 ^wss://konoplayer.com/*** reqHeaders://{x-forwarded.json} ws://127.0.0.1:5000/$1 excludeFilter://^wss://konoplayer.com/api
````
@konoplayer/core errors (TypeScript)

```diff
@@ -23,3 +23,9 @@ export class ParseCodecErrors extends Error {
     super('failed to parse codecs');
   }
 }
+
+export class UnimplementedError extends Error {
+  constructor(detail: string) {
+    super(`unimplemented: ${detail}`);
+  }
+}
```
@konoplayer/matroska model (TypeScript)

```diff
@@ -6,10 +6,16 @@ import {
   shareReplay,
   map,
   combineLatest,
-  of, type Observable, delayWhen, pipe, finalize, tap, throwIfEmpty,
+  of,
+  type Observable,
+  delayWhen,
+  throwIfEmpty,
 } from 'rxjs';
 import { isTagIdPos } from '../util';
-import {createRangedEbmlStream, type CreateRangedEbmlStreamOptions} from './resource';
+import {
+  createRangedEbmlStream,
+  type CreateRangedEbmlStreamOptions,
+} from './resource';
 import { type MatroskaSegmentModel, createMatroskaSegment } from './segment';
 
 export type CreateMatroskaOptions = Omit<
```

```diff
@@ -24,7 +30,9 @@ export interface MatroskaModel {
   segment: MatroskaSegmentModel;
 }
 
-export function createMatroska(options: CreateMatroskaOptions): Observable<MatroskaModel> {
+export function createMatroska(
+  options: CreateMatroskaOptions
+): Observable<MatroskaModel> {
   const metadataRequest$ = createRangedEbmlStream({
     ...options,
     byteStart: 0,
```

```diff
@@ -32,21 +40,20 @@ export function createMatroska(options: CreateMatroskaOptions): Observable<Matro
 
   return metadataRequest$.pipe(
     switchMap(({ totalSize, ebml$, response }) => {
-
       /**
        * while [matroska v4](https://www.matroska.org/technical/elements.html) doc tell that there is only one segment in a file
        * some mkv generated by strange tools will emit several
        */
       const segment$ = ebml$.pipe(
         filter(isTagIdPos(EbmlTagIdEnum.Segment, EbmlTagPosition.Start)),
-        map((startTag) => createMatroskaSegment({
+        map((startTag) =>
+          createMatroskaSegment({
           startTag,
           matroskaOptions: options,
           ebml$,
-        })),
-        delayWhen(
-          ({ loadedMetadata$ }) => loadedMetadata$
-        ),
+          })
+        ),
+        delayWhen(({ loadedMetadata$ }) => loadedMetadata$),
         take(1),
         shareReplay(1)
       );
```

```diff
@@ -55,7 +62,7 @@ export function createMatroska(options: CreateMatroskaOptions): Observable<Matro
     filter(isTagIdPos(EbmlTagIdEnum.EBML, EbmlTagPosition.End)),
     take(1),
     shareReplay(1),
-    throwIfEmpty(() => new Error("failed to find head tag"))
+    throwIfEmpty(() => new Error('failed to find head tag'))
   );
 
   return combineLatest({
```
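For reference, the `VideoPipelineDemo` hunk earlier in this diff consumes `createMatroska` exactly this way: await the first emission and fail loudly if the stream completes empty. A reduced sketch (the URL is a placeholder; the destructured fields follow `MatroskaModel.segment` per the interface above):

```ts
import { firstValueFrom, throwIfEmpty } from 'rxjs';
import { createMatroska } from '@konoplayer/matroska/model';

// Reduced from the VideoPipelineDemo hunk above; the URL is a placeholder.
async function loadMatroska(src: string) {
  const {
    segment: { seek, defaultVideoTrack$, videoTrackDecoder },
    totalSize,
  } = await firstValueFrom(
    createMatroska({ url: src }).pipe(
      throwIfEmpty(() => new Error('failed to extract matroska'))
    )
  );
  console.debug(`[MATROSKA]: loaded metadata, total size ${totalSize} bytes`);
  return { seek, defaultVideoTrack$, videoTrackDecoder };
}
```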
@konoplayer/matroska segment (TypeScript)

```diff
@@ -24,6 +24,7 @@ import {
   finalize,
   delayWhen,
   from,
+  combineLatest,
 } from 'rxjs';
 import type { CreateMatroskaOptions } from '.';
 import { type ClusterType, TrackTypeRestrictionEnum } from '../schema';
```

```diff
@@ -113,7 +114,7 @@ export function createMatroskaSegment({
     filter(({ canComplete }) => canComplete),
     map(({ segment }) => segment),
     take(1),
-    shareReplay(1),
+    shareReplay(1)
   );
 
   const loadedRemoteCues$ = loadedMetadata$.pipe(
```

```diff
@@ -304,22 +305,33 @@ export function createMatroskaSegment({
       map(({ decoder, frame$ }) => {
         const clusterSystem = segment.cluster;
         const infoSystem = segment.info;
+        const trackSystem = segment.track;
         const timestampScale = Number(infoSystem.info.TimestampScale) / 1000;
 
+        const frameProcessing = trackSystem.buildFrameEncodingProcessor(
+          track.trackEntry
+        );
+
         const decodeSubscription = cluster$.subscribe((cluster) => {
           for (const block of clusterSystem.enumerateBlocks(
             cluster,
             track.trackEntry
           )) {
-            const blockTime = (Number(cluster.Timestamp) + block.relTime) * timestampScale;
+            const blockTime =
+              (Number(cluster.Timestamp) + block.relTime) * timestampScale;
             const blockDuration =
-              frames.length > 1 ? track.predictBlockDuration(blockTime) * timestampScale : 0;
+              frames.length > 1
+                ? track.predictBlockDuration(blockTime) * timestampScale
+                : 0;
             const perFrameDuration =
               frames.length > 1 && blockDuration
                 ? blockDuration / block.frames.length
                 : 0;
 
-            for (const frame of block.frames) {
+            for (let frame of block.frames) {
+              if (frameProcessing) {
+                frame = frameProcessing(frame);
+              }
               const chunk = new EncodedVideoChunk({
                 type: block.keyframe ? 'key' : 'delta',
                 data: frame,
```

```diff
@@ -334,13 +346,12 @@ export function createMatroskaSegment({
           return {
             track,
             decoder,
-            frame$: frame$
-              .pipe(
+            frame$: frame$.pipe(
               finalize(() => {
                 decodeSubscription.unsubscribe();
               })
-            )
-          }
+            ),
+          };
         })
       );
     };
```

```diff
@@ -353,14 +364,20 @@ export function createMatroskaSegment({
       map(({ decoder, frame$ }) => {
         const clusterSystem = segment.cluster;
         const infoSystem = segment.info;
+        const trackSystem = segment.track;
         const timestampScale = Number(infoSystem.info.TimestampScale) / 1000;
 
+        const frameProcessing = trackSystem.buildFrameEncodingProcessor(
+          track.trackEntry
+        );
+
         const decodeSubscription = cluster$.subscribe((cluster) => {
           for (const block of clusterSystem.enumerateBlocks(
             cluster,
             track.trackEntry
           )) {
-            const blockTime = (Number(cluster.Timestamp) + block.relTime) * timestampScale;
+            const blockTime =
+              (Number(cluster.Timestamp) + block.relTime) * timestampScale;
             const blockDuration =
               frames.length > 1 ? track.predictBlockDuration(blockTime) : 0;
             const perFrameDuration =
```

```diff
@@ -369,7 +386,10 @@ export function createMatroskaSegment({
                 : 0;
 
             let i = 0;
-            for (const frame of block.frames) {
+            for (let frame of block.frames) {
+              if (frameProcessing) {
+                frame = frameProcessing(frame);
+              }
               const chunk = new EncodedAudioChunk({
                 type: block.keyframe ? 'key' : 'delta',
                 data: frame,
```

```diff
@@ -387,7 +407,8 @@ export function createMatroskaSegment({
             decoder,
             frame$: frame$.pipe(finalize(() => decodeSubscription.unsubscribe())),
           };
-        }));
+        })
+      );
   };
 
   const defaultVideoTrack$ = loadedMetadata$.pipe(
```

```diff
@@ -422,6 +443,6 @@ export function createMatroskaSegment({
     videoTrackDecoder,
     audioTrackDecoder,
     defaultVideoTrack$,
-    defaultAudioTrack$
+    defaultAudioTrack$,
   };
 }
```
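The timing math these hunks reflow follows from the units involved: Matroska's `TimestampScale` is nanoseconds per tick (1,000,000 by default), WebCodecs chunk timestamps are microseconds, hence the `/ 1000`; a block's absolute time is the cluster timestamp plus the block's signed relative time, both in ticks. A self-contained sketch of that conversion (names are illustrative, not the project's API):

```ts
// Sketch: convert Matroska block timing to WebCodecs microseconds.
// TimestampScale is ns per tick (1_000_000n by default), so
// TimestampScale / 1000 is microseconds per tick.
function blockTimestampUs(
  timestampScaleNs: bigint, // from Segment Info
  clusterTimestamp: bigint, // cluster-level ticks
  blockRelTime: number // block's signed relative ticks
): number {
  const usPerTick = Number(timestampScaleNs) / 1000;
  return (Number(clusterTimestamp) + blockRelTime) * usPerTick;
}

// Default scale: (5000 + 40) ticks -> 5040 * 1000 µs = 5_040_000 µs = 5.04 s
console.assert(blockTimestampUs(1_000_000n, 5000n, 40) === 5_040_000);
```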
@konoplayer/matroska cluster system (TypeScript)

```diff
@@ -7,7 +7,7 @@ import {
   type TrackEntryType,
 } from '../schema';
 import { type SegmentComponent } from './segment';
-import {SegmentComponentSystemTrait} from "./segment-component";
+import { SegmentComponentSystemTrait } from './segment-component';
 
 export abstract class BlockViewTrait {
   abstract get keyframe(): boolean;
```

```diff
@@ -82,6 +82,21 @@ export class ClusterSystem extends SegmentComponentSystemTrait<
     cluster: ClusterType,
     track: TrackEntryType
   ): Generator<BlockViewTrait> {
+    if (cluster.BlockGroup && cluster.SimpleBlock) {
+      const blocks = [];
+      for (const block of cluster.BlockGroup) {
+        if (block.Block.track === track.TrackNumber) {
+          blocks.push(new BlockGroupView(block));
+        }
+      }
+      for (const block of cluster.SimpleBlock) {
+        if (block.track === track.TrackNumber) {
+          blocks.push(new SimpleBlockView(block));
+        }
+      }
+      blocks.sort((a, b) => a.relTime - b.relTime);
+      yield* blocks;
+    } else {
       if (cluster.SimpleBlock) {
         for (const block of cluster.SimpleBlock) {
           if (block.track === track.TrackNumber) {
```

```diff
@@ -97,4 +112,5 @@ export class ClusterSystem extends SegmentComponentSystemTrait<
         }
       }
     }
+    }
 }
```
@konoplayer/matroska track system (TypeScript)

```diff
@@ -1,5 +1,6 @@
 import {
   ParseCodecErrors,
+  UnimplementedError,
   UnsupportedCodecError,
 } from '@konoplayer/core/errors';
 import {
```

```diff
@@ -15,13 +16,14 @@ import {
   type VideoDecoderConfigExt,
 } from '../codecs';
 import {
+  ContentCompAlgoRestrictionEnum,
+  ContentEncodingTypeRestrictionEnum,
   TrackEntrySchema,
   type TrackEntryType,
   TrackTypeRestrictionEnum,
 } from '../schema';
 import type { SegmentComponent } from './segment';
-import {SegmentComponentSystemTrait} from "./segment-component";
-import {pick} from "lodash-es";
+import { SegmentComponentSystemTrait } from './segment-component';
 
 export interface GetTrackEntryOptions {
   priority?: (v: SegmentComponent<TrackEntryType>) => number;
```

```diff
@@ -226,4 +228,49 @@ export class TrackSystem extends SegmentComponentSystemTrait<
     }
     return true;
   }
+
+  buildFrameEncodingProcessor(
+    track: TrackEntryType
+  ): undefined | ((source: Uint8Array) => Uint8Array) {
+    let encodings = track.ContentEncodings?.ContentEncoding;
+    if (!encodings?.length) {
+      return undefined;
+    }
+    encodings = encodings.toSorted(
+      (a, b) => Number(b.ContentEncodingOrder) - Number(a.ContentEncodingOrder)
+    );
+    const processors: Array<(source: Uint8Array) => Uint8Array> = [];
+    for (const encoing of encodings) {
+      if (
+        encoing.ContentEncodingType ===
+        ContentEncodingTypeRestrictionEnum.COMPRESSION
+      ) {
+        const compression = encoing.ContentCompression;
+        const algo = compression?.ContentCompAlgo;
+        if (algo === ContentCompAlgoRestrictionEnum.HEADER_STRIPPING) {
+          const settings = compression?.ContentCompSettings;
+          if (settings?.length) {
+            processors.push((source: Uint8Array) => {
+              const dest = new Uint8Array(source.length + settings.length);
+              dest.set(source);
+              dest.set(settings, source.length);
+              return dest;
+            });
+          }
+        } else {
+          // TODO: dynamic import packages to support more compression algos
+          throw new UnimplementedError(
+            `compression algo ${ContentCompAlgoRestrictionEnum[algo as ContentCompAlgoRestrictionEnum]}`
+          );
+        }
+      }
+    }
+    return function processor(source: Uint8Array): Uint8Array<ArrayBufferLike> {
+      let dest = source;
+      for (const processor of processors) {
+        dest = processor(dest);
+      }
+      return dest;
+    };
+  }
 }
```
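One spec note on the new `buildFrameEncodingProcessor`: per the Matroska specification, header stripping (`ContentCompAlgo` 3) removes the octets stored in `ContentCompSettings` from the front of every frame, so a reader reconstructs a frame by prepending those bytes to the stored payload. A minimal sketch of that spec behavior (the helper name is illustrative):

```ts
// Sketch: undo Matroska header stripping per the spec — the bytes in
// ContentCompSettings were stripped from the *front* of each frame, so
// they are prepended back before the frame is handed to the decoder.
function unstripHeader(settings: Uint8Array, frame: Uint8Array): Uint8Array {
  const out = new Uint8Array(settings.length + frame.length);
  out.set(settings, 0); // restored leading octets
  out.set(frame, settings.length); // stored frame payload
  return out;
}
```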