refactor: rewrite playground
@@ -3,9 +3,11 @@ import { ArkErrors, type } from 'arktype';

export const AAC_CODEC_TYPE = 'AAC';

export const AudioObjectTypeSchema = type('1 | 2 | 3 | 4 | 5 | 29 | 67');
export const AudioObjectTypeSchema = type('1 | 2 | 3 | 4 | 5 | 29 | 67 | 23');

export const SamplingFrequencyIndexSchema = type('1|2|3|4|5|6|7|8|9|10|11|12');
export const SamplingFrequencyIndexSchema = type(
  '1 | 2 | 3 | 4 |5|6|7|8|9|10|11|12'
);

export const ChannelConfigurationSchema = type('1 | 2 | 3 | 4 | 5 | 6 | 7');

@@ -108,3 +110,15 @@ export function genCodecIdByAudioSpecificConfig(
) {
  return `mp4a.40.${config.audioObjectType}`;
}

export function samplesPerFrameByAACAudioObjectType(audioObjectType: number) {
  switch (audioObjectType) {
    case 5:
    case 29:
      return 2048;
    case 23:
      return 512;
    default:
      return 1024;
  }
}
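A minimal sketch of how the two helpers above combine; `describeAacPrivateData` is a hypothetical caller, and the result shape of `parseAudioSpecificConfig` is assumed from how it is used elsewhere in this diff.

import {
  genCodecIdByAudioSpecificConfig,
  parseAudioSpecificConfig,
  samplesPerFrameByAACAudioObjectType,
} from './aac';

// Derive the WebCodecs codec string and frame size from AAC codec private data.
export function describeAacPrivateData(codecPrivate: Uint8Array) {
  const config = parseAudioSpecificConfig(codecPrivate); // e.g. { audioObjectType: 2, ... }
  return {
    codec: genCodecIdByAudioSpecificConfig(config), // 'mp4a.40.2' for AAC-LC
    samplesPerFrame: samplesPerFrameByAACAudioObjectType(config.audioObjectType), // 1024 / 2048 / 512
  };
}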
@@ -1,9 +1,13 @@
import {ParseCodecError, UnsupportedCodecError} from '@konoplayer/core/errors';
import {
  ParseCodecError,
  UnsupportedCodecError,
} from '@konoplayer/core/errors';
import { VideoCodec, AudioCodec } from '@konoplayer/core/codecs';
import type { TrackEntryType } from '../schema';
import {
  genCodecIdByAudioSpecificConfig,
  parseAudioSpecificConfig,
  samplesPerFrameByAACAudioObjectType,
} from './aac';
import {
  genCodecStringByAVCDecoderConfigurationRecord,
@@ -19,7 +23,8 @@ import {
} from './hevc.ts';
import {
  genCodecStringByVP9DecoderConfigurationRecord,
  parseVP9DecoderConfigurationRecord, VP9_CODEC_TYPE,
  parseVP9DecoderConfigurationRecord,
  VP9_CODEC_TYPE,
} from './vp9.ts';

export const VideoCodecId = {
@@ -123,7 +128,7 @@ export interface VideoDecoderConfigExt extends VideoDecoderConfig {
}

export function videoCodecIdRequirePeekingKeyframe(codecId: VideoCodecIdType) {
  return codecId === VideoCodecId.VP9
  return codecId === VideoCodecId.VP9;
}

export function videoCodecIdToWebCodecs(
@@ -146,7 +151,10 @@ export function videoCodecIdToWebCodecs(
      };
    case VideoCodecId.VP9:
      if (!keyframe) {
        throw new ParseCodecError(VP9_CODEC_TYPE, 'keyframe is required to parse VP9 codec')
        throw new ParseCodecError(
          VP9_CODEC_TYPE,
          'keyframe is required to parse VP9 codec'
        );
      }
      return {
        ...shareOptions,
@@ -200,11 +208,10 @@ export function videoCodecIdToWebCodecs(

export interface AudioDecoderConfigExt extends AudioDecoderConfig {
  codecType: AudioCodec;
  samplesPerFrame?: number;
}

export function isAudioCodecIdRequirePeekingKeyframe (
  _track: TrackEntryType,
) {
export function isAudioCodecIdRequirePeekingKeyframe(_track: TrackEntryType) {
  return false;
}

@@ -231,6 +238,7 @@ export function audioCodecIdToWebCodecs(
        ...shareOptions,
        codecType: AudioCodec.AAC,
        codec: 'mp4a.40.1',
        samplesPerFrame: 1024,
      };
    case AudioCodecId.AAC_MPEG2_LC:
    case AudioCodecId.AAC_MPEG4_LC:
@@ -238,6 +246,7 @@ export function audioCodecIdToWebCodecs(
        ...shareOptions,
        codecType: AudioCodec.AAC,
        codec: 'mp4a.40.2',
        samplesPerFrame: 1024,
      };
    case AudioCodecId.AAC_MPEG2_SSR:
    case AudioCodecId.AAC_MPEG4_SSR:
@@ -245,12 +254,14 @@ export function audioCodecIdToWebCodecs(
        ...shareOptions,
        codecType: AudioCodec.AAC,
        codec: 'mp4a.40.3',
        samplesPerFrame: 1024,
      };
    case AudioCodecId.AAC_MPEG4_LTP:
      return {
        ...shareOptions,
        codecType: AudioCodec.AAC,
        codec: 'mp4a.40.4',
        samplesPerFrame: 1024,
      };
    case AudioCodecId.AAC_MPEG2_LC_SBR:
    case AudioCodecId.AAC_MPEG4_SBR:
@@ -258,16 +269,25 @@ export function audioCodecIdToWebCodecs(
        ...shareOptions,
        codecType: AudioCodec.AAC,
        codec: 'mp4a.40.5',
        samplesPerFrame: 2048,
      };
    case AudioCodecId.AAC:
      if (codecPrivate) {
        const config = parseAudioSpecificConfig(codecPrivate);
        return {
          ...shareOptions,
          codecType: AudioCodec.AAC,
          codec: genCodecIdByAudioSpecificConfig(config),
          samplesPerFrame: samplesPerFrameByAACAudioObjectType(
            config.audioObjectType
          ),
        };
      }
      return {
        ...shareOptions,
        codecType: AudioCodec.AAC,
        codec: codecPrivate
          ? genCodecIdByAudioSpecificConfig(
              parseAudioSpecificConfig(codecPrivate)
            )
          : 'mp4a.40.2',
        codec: 'mp4a.40.2',
        samplesPerFrame: 1024,
      };
    case AudioCodecId.AC3:
    case AudioCodecId.AC3_BSID9:
@@ -275,6 +295,7 @@ export function audioCodecIdToWebCodecs(
        ...shareOptions,
        codecType: AudioCodec.AC3,
        codec: 'ac-3',
        samplesPerFrame: 1536,
      };
    case AudioCodecId.EAC3:
    case AudioCodecId.AC3_BSID10:
@@ -282,21 +303,75 @@ export function audioCodecIdToWebCodecs(
        ...shareOptions,
        codecType: AudioCodec.EAC3,
        codec: 'ec-3',
        // TODO: FIXME
        // parse frame header
        // samples per frame = numblkscod * 256
        // most of the time numblkscod = 6
        // samplesPerFrame: 1536,
      };
    case AudioCodecId.MPEG_L3:
      return {
        ...shareOptions,
        codecType: AudioCodec.MP3,
        codec: 'mp3',
        samplesPerFrame: 1152,
      };
    case AudioCodecId.VORBIS:
      return { ...shareOptions, codecType: AudioCodec.Vorbis, codec: 'vorbis' };
      return {
        ...shareOptions,
        codecType: AudioCodec.Vorbis,
        codec: 'vorbis',
        /**
         * TODO: FIXME
         * read codec private
         * parse setup header
         * ShortBlockSize = 2 ^ blocksize_0
         * LongBlockSize = 2 ^ blocksize_1
         */
        samplesPerFrame: 2048,
      };
    case AudioCodecId.FLAC:
      return { ...shareOptions, codecType: AudioCodec.FLAC, codec: 'flac' };
      return {
        ...shareOptions,
        codecType: AudioCodec.FLAC,
        codec: 'flac',
        /**
         * TODO: FIXME
         * read codec private
         * get block size
         */
        // samplesPerFrame: 4096,
      };
    case AudioCodecId.OPUS:
      return { ...shareOptions, codecType: AudioCodec.Opus, codec: 'opus' };
      return {
        ...shareOptions,
        codecType: AudioCodec.Opus,
        codec: 'opus',
        /**
         * TODO: FIXME
         * Read TOC header from frame data
         */
        // samplesPerFrame: 960,
      };
    case AudioCodecId.ALAC:
      return { ...shareOptions, codecType: AudioCodec.ALAC, codec: 'alac' };
      return {
        ...shareOptions,
        codecType: AudioCodec.ALAC,
        codec: 'alac',
        /**
         * TODO: FIXME
         * parse private data and get frame length
         * 00 00 10 00 // Frame Length (4096)
         * 00 00 00 00 // Compatible Version (0)
         * 00 10       // Bit Depth (16-bit)
         * 40 00       // PB (like 40)
         * 00 00       // MB (like 0)
         * 00 00       // KB (like 0)
         * 00 02       // Channels (2)
         * 00 00 AC 44 // Sample Rate (44100Hz)
         */
        // samplesPerFrame: 4096,
      };
    case AudioCodecId.PCM_INT_BIG:
      if (bitDepth === 16) {
        return {
packages/matroska/src/model/cluster.ts (new file, 14 lines)
@@ -0,0 +1,14 @@
import type { ClusterType } from '../schema';

export function* clusterBlocks(cluster: ClusterType) {
  if (cluster.SimpleBlock) {
    for (const simpleBlock of cluster.SimpleBlock) {
      yield simpleBlock;
    }
  }
  if (cluster.BlockGroup) {
    for (const block of cluster.BlockGroup) {
      yield block;
    }
  }
}
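A short usage sketch for the generator above (illustrative only; `countBlocks` is a hypothetical helper, and the cluster value is assumed to come from the segment model introduced later in this diff).

import type { ClusterType } from '../schema';
import { clusterBlocks } from './cluster';

// Count every SimpleBlock and BlockGroup child of a single cluster.
export function countBlocks(cluster: ClusterType): number {
  let count = 0;
  for (const _block of clusterBlocks(cluster)) {
    count += 1;
  }
  return count;
}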
packages/matroska/src/model/index.ts (new file, 69 lines)
@@ -0,0 +1,69 @@
import type { CreateRangedStreamOptions } from '@konoplayer/core/data';
import { type EbmlEBMLTagType, EbmlTagIdEnum, EbmlTagPosition } from 'konoebml';
import {
  switchMap,
  filter,
  take,
  shareReplay,
  map,
  combineLatest,
  of,
} from 'rxjs';
import { isTagIdPos } from '../util';
import { createRangedEbmlStream } from './resource';
import { type MatroskaSegmentModel, createMatroskaSegment } from './segment';

export type CreateMatroskaOptions = Omit<
  CreateRangedStreamOptions,
  'byteStart' | 'byteEnd'
>;

export interface MatroskaModel {
  totalSize?: number;
  initResponse: Response;
  head: EbmlEBMLTagType;
  segment: MatroskaSegmentModel;
}

export function createMatroska(options: CreateMatroskaOptions) {
  const metadataRequest$ = createRangedEbmlStream({
    ...options,
    byteStart: 0,
  });

  return metadataRequest$.pipe(
    switchMap(({ totalSize, ebml$, response }) => {
      const head$ = ebml$.pipe(
        filter(isTagIdPos(EbmlTagIdEnum.EBML, EbmlTagPosition.End)),
        take(1),
        shareReplay(1)
      );

      const segmentStart$ = ebml$.pipe(
        filter(isTagIdPos(EbmlTagIdEnum.Segment, EbmlTagPosition.Start))
      );

      /**
       * While the [matroska v4](https://www.matroska.org/technical/elements.html) doc says
       * there is only one segment per file, some mkv files generated by unusual tools emit several.
       */
      const segments$ = segmentStart$.pipe(
        map((startTag) =>
          createMatroskaSegment({
            startTag,
            matroskaOptions: options,
            ebml$,
          })
        )
      );

      return combineLatest({
        segment: segments$.pipe(take(1)),
        head: head$,
        totalSize: of(totalSize),
        initResponse: of(response),
      });
    }),
    shareReplay(1)
  );
}
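A minimal consumption sketch for createMatroska (illustrative only; the URL is a placeholder and the relative import path depends on the caller's location).

import { switchMap } from 'rxjs';
import { createMatroska } from './model';

createMatroska({ url: 'https://example.com/video.mkv' })
  .pipe(switchMap(({ segment }) => segment.loadedMetadata$))
  .subscribe((segmentSystem) => {
    // metadata (Info, Tracks, SeekHead, local Cues) has been scanned at this point
    console.debug('matroska metadata loaded', segmentSystem);
  });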
packages/matroska/src/model/resource.ts (new file, 90 lines)
@@ -0,0 +1,90 @@
import {
  type CreateRangedStreamOptions,
  createRangedStream,
} from '@konoplayer/core/data';
import { type EbmlTagType, EbmlStreamDecoder, EbmlTagIdEnum } from 'konoebml';
import { Observable, from, switchMap, share, defer, EMPTY, of } from 'rxjs';
import { waitTick } from '../util';

export function createRangedEbmlStream({
  url,
  byteStart = 0,
  byteEnd,
}: CreateRangedStreamOptions): Observable<{
  ebml$: Observable<EbmlTagType>;
  totalSize?: number;
  response: Response;
  body: ReadableStream<Uint8Array>;
  controller: AbortController;
}> {
  const stream$ = from(createRangedStream({ url, byteStart, byteEnd }));

  return stream$.pipe(
    switchMap(({ controller, body, totalSize, response }) => {
      let requestCompleted = false;

      const originRequest$ = new Observable<EbmlTagType>((subscriber) => {
        body
          .pipeThrough(
            new EbmlStreamDecoder({
              streamStartOffset: byteStart,
              collectChild: (child) => child.id !== EbmlTagIdEnum.Cluster,
              backpressure: {
                eventLoop: waitTick,
              },
            })
          )
          .pipeTo(
            new WritableStream({
              write: async (tag) => {
                await waitTick();
                subscriber.next(tag);
              },
              close: () => {
                if (!requestCompleted) {
                  requestCompleted = true;
                  subscriber.complete();
                }
              },
            })
          )
          .catch((error) => {
            if (requestCompleted && error?.name === 'AbortError') {
              return;
            }
            requestCompleted = true;
            subscriber.error(error);
          });

        return () => {
          requestCompleted = true;
          controller.abort();
        };
      }).pipe(
        share({
          resetOnComplete: false,
          resetOnError: false,
          resetOnRefCountZero: true,
        })
      );

      const ebml$ = defer(() =>
        requestCompleted ? EMPTY : originRequest$
      ).pipe(
        share({
          resetOnError: false,
          resetOnComplete: true,
          resetOnRefCountZero: true,
        })
      );

      return of({
        ebml$,
        totalSize,
        response,
        body,
        controller,
      });
    })
  );
}
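A usage sketch for the ranged EBML stream above (illustrative only; the URL is a placeholder).

import { EbmlTagIdEnum, EbmlTagPosition } from 'konoebml';
import { filter, switchMap } from 'rxjs';
import { isTagIdPos } from '../util';
import { createRangedEbmlStream } from './resource';

createRangedEbmlStream({ url: 'https://example.com/video.mkv', byteStart: 0 })
  .pipe(
    switchMap(({ ebml$ }) => ebml$),
    filter(isTagIdPos(EbmlTagIdEnum.EBML, EbmlTagPosition.End))
  )
  .subscribe((ebmlHead) => console.debug('EBML header parsed', ebmlHead));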
packages/matroska/src/model/segment.ts (new file, 419 lines)
@@ -0,0 +1,419 @@
import { createAudioDecodeStream } from '@konoplayer/core/audition';
import { createVideoDecodeStream } from '@konoplayer/core/graphics';
import {
  type EbmlSegmentTagType,
  type EbmlTagType,
  EbmlTagIdEnum,
  EbmlTagPosition,
} from 'konoebml';
import {
  type Observable,
  scan,
  takeWhile,
  share,
  map,
  last,
  switchMap,
  shareReplay,
  EMPTY,
  filter,
  withLatestFrom,
  take,
  of,
  merge,
  isEmpty,
  finalize,
} from 'rxjs';
import type { CreateMatroskaOptions } from '.';
import { type ClusterType, TrackTypeRestrictionEnum } from '../schema';
import {
  SegmentSystem,
  type SegmentComponent,
  type VideoTrackContext,
  type AudioTrackContext,
  SEEK_ID_KAX_CUES,
  SEEK_ID_KAX_TAGS,
  type CueSystem,
} from '../systems';
import {
  standardTrackPredicate,
  standardTrackPriority,
} from '../systems/track';
import { isTagIdPos } from '../util';
import { createRangedEbmlStream } from './resource';

export interface CreateMatroskaSegmentOptions {
  matroskaOptions: CreateMatroskaOptions;
  startTag: EbmlSegmentTagType;
  ebml$: Observable<EbmlTagType>;
}

export interface MatroskaSegmentModel {
  startTag: EbmlSegmentTagType;
  segment: SegmentSystem;
  metadataTags$: Observable<EbmlTagType>;
  loadedMetadata$: Observable<SegmentSystem>;
  loadedTags$: Observable<SegmentSystem>;
  loadedCues$: Observable<SegmentSystem>;
  seek: (seekTime: number) => Observable<SegmentComponent<ClusterType>>;
  videoTrackDecoder: (
    track: VideoTrackContext,
    cluster$: Observable<ClusterType>
  ) => {
    track: VideoTrackContext;
    decoder: VideoDecoder;
    frame$: Observable<VideoFrame>;
  };
  audioTrackDecoder: (
    track: AudioTrackContext,
    cluster$: Observable<ClusterType>
  ) => {
    track: AudioTrackContext;
    decoder: AudioDecoder;
    frame$: Observable<AudioData>;
  };
  defaultVideoTrack$: Observable<VideoTrackContext | undefined>;
  defaultAudioTrack$: Observable<AudioTrackContext | undefined>;
}

export function createMatroskaSegment({
  matroskaOptions,
  startTag,
  ebml$,
}: CreateMatroskaSegmentOptions): MatroskaSegmentModel {
  const segment = new SegmentSystem(startTag);
  const clusterSystem = segment.cluster;
  const seekSystem = segment.seek;

  const metaScan$ = ebml$.pipe(
    scan(
      (acc, tag) => {
        acc.segment.scanMeta(tag);
        acc.tag = tag;
        return acc;
      },
      {
        segment,
        tag: undefined as unknown as EbmlTagType,
      }
    ),
    takeWhile((acc) => acc.segment.canCompleteMeta(), true),
    share({
      resetOnComplete: false,
      resetOnError: false,
      resetOnRefCountZero: true,
    })
  );

  const metadataTags$ = metaScan$.pipe(map(({ tag }) => tag));

  const loadedMetadata$ = metaScan$.pipe(
    last(),
    switchMap(({ segment }) => segment.completeMeta()),
    shareReplay(1)
  );

  const loadedRemoteCues$ = loadedMetadata$.pipe(
    switchMap((s) => {
      const cueSystem = s.cue;
      const seekSystem = s.seek;
      if (cueSystem.prepared) {
        return EMPTY;
      }
      const remoteCuesTagStartOffset =
        seekSystem.seekOffsetBySeekId(SEEK_ID_KAX_CUES);
      if (remoteCuesTagStartOffset! >= 0) {
        return createRangedEbmlStream({
          ...matroskaOptions,
          byteStart: remoteCuesTagStartOffset,
        }).pipe(
          switchMap((req) => req.ebml$),
          filter(isTagIdPos(EbmlTagIdEnum.Cues, EbmlTagPosition.End)),
          withLatestFrom(loadedMetadata$),
          map(([cues, withMeta]) => {
            withMeta.cue.prepareCuesWithTag(cues);
            return withMeta;
          })
        );
      }
      return EMPTY;
    }),
    take(1),
    shareReplay(1)
  );

  const loadedLocalCues$ = loadedMetadata$.pipe(
    switchMap((s) => (s.cue.prepared ? of(s) : EMPTY)),
    shareReplay(1)
  );

  const loadedEmptyCues$ = merge(loadedLocalCues$, loadedRemoteCues$).pipe(
    isEmpty(),
    switchMap((empty) => (empty ? loadedMetadata$ : EMPTY))
  );

  const loadedCues$ = merge(
    loadedLocalCues$,
    loadedRemoteCues$,
    loadedEmptyCues$
  ).pipe(take(1));

  const loadedRemoteTags$ = loadedMetadata$.pipe(
    switchMap((s) => {
      const tagSystem = s.tag;
      const seekSystem = s.seek;
      if (tagSystem.prepared) {
        return EMPTY;
      }

      const remoteTagsTagStartOffset =
        seekSystem.seekOffsetBySeekId(SEEK_ID_KAX_TAGS);
      if (remoteTagsTagStartOffset! >= 0) {
        return createRangedEbmlStream({
          ...matroskaOptions,
          byteStart: remoteTagsTagStartOffset,
        }).pipe(
          switchMap((req) => req.ebml$),
          filter(isTagIdPos(EbmlTagIdEnum.Tags, EbmlTagPosition.End)),
          withLatestFrom(loadedMetadata$),
          map(([tags, withMeta]) => {
            withMeta.tag.prepareTagsWithTag(tags);
            return withMeta;
          })
        );
      }
      return EMPTY;
    }),
    take(1),
    shareReplay(1)
  );

  const loadedLocalTags$ = loadedMetadata$.pipe(
    switchMap((s) => (s.tag.prepared ? of(s) : EMPTY)),
    shareReplay(1)
  );

  const loadedEmptyTags$ = merge(loadedRemoteTags$, loadedLocalTags$).pipe(
    isEmpty(),
    switchMap((empty) => (empty ? loadedMetadata$ : EMPTY))
  );

  const loadedTags$ = merge(
    loadedLocalTags$,
    loadedRemoteTags$,
    loadedEmptyTags$
  ).pipe(take(1));

  const seekWithoutCues = (
    seekTime: number
  ): Observable<SegmentComponent<ClusterType>> => {
    const request$ = loadedMetadata$.pipe(
      switchMap(() =>
        createRangedEbmlStream({
          ...matroskaOptions,
          byteStart: seekSystem.firstClusterOffset,
        })
      )
    );
    const cluster$ = request$.pipe(
      switchMap((req) => req.ebml$),
      filter(isTagIdPos(EbmlTagIdEnum.Cluster, EbmlTagPosition.End)),
      map((tag) => clusterSystem.addClusterWithTag(tag))
    );

    if (seekTime === 0) {
      return cluster$;
    }

    return cluster$.pipe(
      scan(
        (acc, curr) => {
          // avoid object recreation
          acc.prev = acc.next;
          acc.next = curr;
          return acc;
        },
        {
          prev: undefined as SegmentComponent<ClusterType> | undefined,
          next: undefined as SegmentComponent<ClusterType> | undefined,
        }
      ),
      filter((c) => c.next?.Timestamp! > seekTime),
      map((c) => c.prev ?? c.next!)
    );
  };

  const seekWithCues = (
    cueSystem: CueSystem,
    seekTime: number
  ): Observable<SegmentComponent<ClusterType>> => {
    if (seekTime === 0) {
      return seekWithoutCues(seekTime);
    }

    const cuePoint = cueSystem.findClosestCue(seekTime);

    if (!cuePoint) {
      return seekWithoutCues(seekTime);
    }

    return createRangedEbmlStream({
      ...matroskaOptions,
      byteStart: seekSystem.offsetFromSeekPosition(
        cueSystem.getCueTrackPositions(cuePoint).CueClusterPosition as number
      ),
    }).pipe(
      switchMap((req) => req.ebml$),
      filter(isTagIdPos(EbmlTagIdEnum.Cluster, EbmlTagPosition.End)),
      map(clusterSystem.addClusterWithTag.bind(clusterSystem))
    );
  };

  const seek = (
    seekTime: number
  ): Observable<SegmentComponent<ClusterType>> => {
    if (seekTime === 0) {
      const subscription = loadedCues$.subscribe();

      // if seekTime equals 0 at start, reuse the initial stream
      return seekWithoutCues(seekTime).pipe(
        finalize(() => {
          subscription.unsubscribe();
        })
      );
    }
    return loadedCues$.pipe(
      switchMap((segment) => {
        const cueSystem = segment.cue;
        if (cueSystem.prepared) {
          return seekWithCues(cueSystem, seekTime);
        }
        return seekWithoutCues(seekTime);
      })
    );
  };

  const videoTrackDecoder = (
    track: VideoTrackContext,
    cluster$: Observable<ClusterType>
  ) => {
    const { decoder, frame$ } = createVideoDecodeStream(track.configuration);

    const clusterSystem = segment.cluster;

    const decodeSubscription = cluster$.subscribe((cluster) => {
      for (const block of clusterSystem.enumerateBlocks(
        cluster,
        track.trackEntry
      )) {
        const blockTime = Number(cluster.Timestamp) + block.relTime;
        const blockDuration =
          block.frames.length > 1 ? track.predictBlockDuration(blockTime) : 0;
        const perFrameDuration =
          block.frames.length > 1 && blockDuration
            ? blockDuration / block.frames.length
            : 0;

        for (const frame of block.frames) {
          const chunk = new EncodedVideoChunk({
            type: block.keyframe ? 'key' : 'delta',
            data: frame,
            timestamp: blockTime + perFrameDuration,
          });

          decoder.decode(chunk);
        }
      }
    });

    return {
      track,
      decoder,
      frame$: frame$
        .pipe(
          finalize(() => {
            decodeSubscription.unsubscribe();
          })
        )
        .pipe(share()),
    };
  };

  const audioTrackDecoder = (
    track: AudioTrackContext,
    cluster$: Observable<ClusterType>
  ) => {
    const { decoder, frame$ } = createAudioDecodeStream(track.configuration);

    const clusterSystem = segment.cluster;

    const decodeSubscription = cluster$.subscribe((cluster) => {
      for (const block of clusterSystem.enumerateBlocks(
        cluster,
        track.trackEntry
      )) {
        const blockTime = Number(cluster.Timestamp) + block.relTime;
        const blockDuration =
          block.frames.length > 1 ? track.predictBlockDuration(blockTime) : 0;
        const perFrameDuration =
          block.frames.length > 1 && blockDuration
            ? blockDuration / block.frames.length
            : 0;

        let i = 0;
        for (const frame of block.frames) {
          const chunk = new EncodedAudioChunk({
            type: block.keyframe ? 'key' : 'delta',
            data: frame,
            timestamp: blockTime + perFrameDuration * i,
          });
          i++;

          decoder.decode(chunk);
        }
      }
    });

    return {
      track,
      decoder,
      frame$: frame$.pipe(finalize(() => decodeSubscription.unsubscribe())),
    };
  };

  const defaultVideoTrack$ = loadedMetadata$.pipe(
    map((segment) =>
      segment.track.getTrackContext<VideoTrackContext>({
        predicate: (track) =>
          track.TrackType === TrackTypeRestrictionEnum.VIDEO &&
          standardTrackPredicate(track),
        priority: standardTrackPriority,
      })
    )
  );

  const defaultAudioTrack$ = loadedMetadata$.pipe(
    map((segment) =>
      segment.track.getTrackContext<AudioTrackContext>({
        predicate: (track) =>
          track.TrackType === TrackTypeRestrictionEnum.AUDIO &&
          standardTrackPredicate(track),
        priority: standardTrackPriority,
      })
    )
  );

  return {
    startTag,
    segment,
    metadataTags$,
    loadedMetadata$,
    loadedTags$,
    loadedCues$,
    seek,
    videoTrackDecoder,
    audioTrackDecoder,
    defaultVideoTrack$,
    defaultAudioTrack$,
  };
}
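A minimal end-to-end sketch of the segment model above (illustrative only; the URL is a placeholder and the relative import path depends on the caller's location).

import { take } from 'rxjs';
import { createMatroska } from '.';

createMatroska({ url: 'https://example.com/video.mkv' })
  .pipe(take(1))
  .subscribe(({ segment }) => {
    segment.defaultVideoTrack$.pipe(take(1)).subscribe((videoTrack) => {
      if (!videoTrack) {
        return;
      }
      // seek(0) reuses the initial cluster stream; videoTrackDecoder feeds it into WebCodecs.
      const cluster$ = segment.seek(0);
      const { frame$ } = segment.videoTrackDecoder(videoTrack, cluster$);
      frame$.subscribe((frame) => {
        // render or enqueue the VideoFrame, then release it
        frame.close();
      });
    });
  });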
@@ -1,399 +0,0 @@
import {
  EbmlStreamDecoder,
  EbmlTagIdEnum,
  EbmlTagPosition,
  type EbmlTagType,
} from 'konoebml';
import {
  defer,
  EMPTY,
  filter,
  finalize,
  from,
  isEmpty, last,
  map,
  merge,
  Observable,
  of,
  scan,
  share,
  shareReplay,
  switchMap,
  take,
  takeWhile,
  withLatestFrom,
} from 'rxjs';
import {
  createRangedStream,
  type CreateRangedStreamOptions,
} from '@konoplayer/core/data';
import { isTagIdPos, waitTick } from './util';
import type { ClusterType } from './schema';
import {SEEK_ID_KAX_CUES, SEEK_ID_KAX_TAGS, type CueSystem, type SegmentComponent, SegmentSystem} from "./systems";

export interface CreateRangedEbmlStreamOptions
  extends CreateRangedStreamOptions {
}

export function createRangedEbmlStream({
  url,
  byteStart = 0,
  byteEnd,
}: CreateRangedEbmlStreamOptions): Observable<{
  ebml$: Observable<EbmlTagType>;
  totalSize?: number;
  response: Response;
  body: ReadableStream<Uint8Array>;
  controller: AbortController;
}> {
  const stream$ = from(createRangedStream({ url, byteStart, byteEnd }));

  return stream$.pipe(
    switchMap(({ controller, body, totalSize, response }) => {
      let requestCompleted = false;

      const originRequest$ = new Observable<EbmlTagType>((subscriber) => {
        body
          .pipeThrough(
            new EbmlStreamDecoder({
              streamStartOffset: byteStart,
              collectChild: (child) => child.id !== EbmlTagIdEnum.Cluster,
              backpressure: {
                eventLoop: waitTick,
              },
            })
          )
          .pipeTo(
            new WritableStream({
              write: async (tag) => {
                await waitTick();
                subscriber.next(tag);
              },
              close: () => {
                if (!requestCompleted) {
                  requestCompleted = true;
                  subscriber.complete();
                }
              },
            })
          )
          .catch((error) => {
            if (requestCompleted && error?.name === 'AbortError') {
              return;
            }
            requestCompleted = true;
            subscriber.error(error);
          });

        return () => {
          requestCompleted = true;
          controller.abort();
        };
      }).pipe(
        share({
          resetOnComplete: false,
          resetOnError: false,
          resetOnRefCountZero: true,
        })
      );

      const ebml$ = defer(() =>
        requestCompleted ? EMPTY : originRequest$
      ).pipe(
        share({
          resetOnError: false,
          resetOnComplete: true,
          resetOnRefCountZero: true,
        })
      );

      return of({
        ebml$,
        totalSize,
        response,
        body,
        controller,
      });
    })
  );
}

export interface CreateEbmlControllerOptions
  extends Omit<CreateRangedEbmlStreamOptions, 'byteStart' | 'byteEnd'> {}

export function createEbmlController({
  url,
  ...options
}: CreateEbmlControllerOptions) {
  const metaRequest$ = createRangedEbmlStream({
    ...options,
    url,
    byteStart: 0,
  });

  const controller$ = metaRequest$.pipe(
    map(({ totalSize, ebml$, response, controller }) => {
      const head$ = ebml$.pipe(
        filter(isTagIdPos(EbmlTagIdEnum.EBML, EbmlTagPosition.End)),
        take(1),
        shareReplay(1)
      );

      console.debug(
        `stream of video "${url}" created, total size is ${totalSize ?? 'unknown'}`
      );

      const segmentStart$ = ebml$.pipe(
        filter(isTagIdPos(EbmlTagIdEnum.Segment, EbmlTagPosition.Start))
      );

      /**
       * while [matroska v4](https://www.matroska.org/technical/elements.html) doc tell that there is only one segment in a file
       * some mkv generated by strange tools will emit several
       */
      const segments$ = segmentStart$.pipe(
        map((startTag) => {
          const segment = new SegmentSystem(startTag);
          const clusterSystem = segment.cluster;
          const seekSystem = segment.seek;

          const metaScan$ = ebml$.pipe(
            scan(
              (acc, tag) => {
                acc.segment.scanMeta(tag);
                acc.tag = tag;
                return acc;
              },
              {
                segment,
                tag: undefined as unknown as EbmlTagType,
              }
            ),
            takeWhile((acc) => acc.segment.canCompleteMeta(), true),
            share({
              resetOnComplete: false,
              resetOnError: false,
              resetOnRefCountZero: true,
            })
          );

          const meta$ = metaScan$.pipe(
            map(({ tag }) => tag)
          );

          const withMeta$ = metaScan$.pipe(
            last(),
            switchMap(({ segment }) => segment.completeMeta()),
            shareReplay(1)
          );

          const withRemoteCues$ = withMeta$.pipe(
            switchMap((s) => {
              const cueSystem = s.cue;
              const seekSystem = s.seek;
              if (cueSystem.prepared) {
                return EMPTY;
              }
              const remoteCuesTagStartOffset =
                seekSystem.seekOffsetBySeekId(SEEK_ID_KAX_CUES);
              if (remoteCuesTagStartOffset! >= 0) {
                return createRangedEbmlStream({
                  ...options,
                  url,
                  byteStart: remoteCuesTagStartOffset,
                }).pipe(
                  switchMap((req) => req.ebml$),
                  filter(isTagIdPos(EbmlTagIdEnum.Cues, EbmlTagPosition.End)),
                  withLatestFrom(withMeta$),
                  map(([cues, withMeta]) => {
                    withMeta.cue.prepareCuesWithTag(cues);
                    return withMeta;
                  })
                );
              }
              return EMPTY;
            }),
            take(1),
            shareReplay(1)
          );

          const withLocalCues$ = withMeta$.pipe(
            switchMap((s) => (s.cue.prepared ? of(s) : EMPTY)),
            shareReplay(1)
          );

          const withRemoteTags$ = withMeta$.pipe(
            switchMap((s) => {
              const tagSystem = s.tag;
              const seekSystem = s.seek;
              if (tagSystem.prepared) {
                return EMPTY;
              }

              const remoteTagsTagStartOffset =
                seekSystem.seekOffsetBySeekId(SEEK_ID_KAX_TAGS);
              if (remoteTagsTagStartOffset! >= 0) {
                return createRangedEbmlStream({
                  ...options,
                  url,
                  byteStart: remoteTagsTagStartOffset,
                }).pipe(
                  switchMap((req) => req.ebml$),
                  filter(isTagIdPos(EbmlTagIdEnum.Tags, EbmlTagPosition.End)),
                  withLatestFrom(withMeta$),
                  map(([tags, withMeta]) => {
                    withMeta.tag.prepareTagsWithTag(tags);
                    return withMeta;
                  })
                );
              }
              return EMPTY;
            }),
            take(1),
            shareReplay(1)
          );

          const withLocalTags$ = withMeta$.pipe(
            switchMap((s) => (s.tag.prepared ? of(s) : EMPTY)),
            shareReplay(1)
          );

          const withCues$ = merge(withLocalCues$, withRemoteCues$).pipe(
            take(1)
          );

          const withoutCues$ = withCues$.pipe(
            isEmpty(),
            switchMap((empty) => (empty ? withMeta$ : EMPTY))
          );

          const withTags$ = merge(withLocalTags$, withRemoteTags$).pipe(
            take(1)
          );

          const withoutTags$ = withTags$.pipe(
            isEmpty(),
            switchMap((empty) => (empty ? withMeta$ : EMPTY))
          );

          const seekWithoutCues = (
            seekTime: number
          ): Observable<SegmentComponent<ClusterType>> => {
            const request$ = withMeta$.pipe(
              switchMap(() =>
                createRangedEbmlStream({
                  ...options,
                  url,
                  byteStart: seekSystem.firstClusterOffset,
                })
              )
            );
            const cluster$ = request$.pipe(
              switchMap((req) => req.ebml$),
              filter(isTagIdPos(EbmlTagIdEnum.Cluster, EbmlTagPosition.End)),
              map((tag) => clusterSystem.addClusterWithTag(tag))
            );

            if (seekTime === 0) {
              return cluster$;
            }

            return cluster$.pipe(
              scan(
                (acc, curr) => {
                  // avoid object recreation
                  acc.prev = acc.next;
                  acc.next = curr;
                  return acc;
                },
                {
                  prev: undefined as SegmentComponent<ClusterType> | undefined,
                  next: undefined as SegmentComponent<ClusterType> | undefined,
                }
              ),
              filter((c) => c.next?.Timestamp! > seekTime),
              map((c) => c.prev ?? c.next!)
            );
          };

          const seekWithCues = (
            cueSystem: CueSystem,
            seekTime: number
          ): Observable<SegmentComponent<ClusterType>> => {
            if (seekTime === 0) {
              return seekWithoutCues(seekTime);
            }

            const cuePoint = cueSystem.findClosestCue(seekTime);

            if (!cuePoint) {
              return seekWithoutCues(seekTime);
            }

            return createRangedEbmlStream({
              ...options,
              url,
              byteStart: seekSystem.offsetFromSeekPosition(
                cueSystem.getCueTrackPositions(cuePoint)
                  .CueClusterPosition as number
              ),
            }).pipe(
              switchMap((req) => req.ebml$),
              filter(isTagIdPos(EbmlTagIdEnum.Cluster, EbmlTagPosition.End)),
              map(clusterSystem.addClusterWithTag.bind(clusterSystem))
            );
          };

          const seek = (
            seekTime: number
          ): Observable<SegmentComponent<ClusterType>> => {
            if (seekTime === 0) {
              const subscription = merge(withCues$, withoutCues$).subscribe();

              // if seekTime equals to 0 at start, reuse the initialize stream
              return seekWithoutCues(seekTime).pipe(
                finalize(() => {
                  subscription.unsubscribe();
                })
              );
            }
            return merge(
              withCues$.pipe(switchMap((s) => seekWithCues(s.cue, seekTime))),
              withoutCues$.pipe(switchMap((_) => seekWithoutCues(seekTime)))
            );
          };

          return {
            startTag,
            head$,
            segment,
            meta$,
            withMeta$,
            withCues$,
            withoutCues$,
            withTags$,
            withoutTags$,
            seekWithCues,
            seekWithoutCues,
            seek,
          };
        })
      );

      return {
        segments$,
        head$,
        totalSize,
        ebml$,
        controller,
        response,
      };
    }),
    shareReplay(1)
  );

  return {
    controller$,
    request$: metaRequest$,
  };
}
@@ -2,8 +2,11 @@ import { type, match } from 'arktype';
import { EbmlTagIdEnum, EbmlSimpleBlockTag, EbmlBlockTag } from 'konoebml';

export const BinarySchema = type.instanceOf(Uint8Array);
export type BinaryType = typeof BinarySchema.infer;
export const SimpleBlockSchema = type.instanceOf(EbmlSimpleBlockTag);
export const BlockSchema = type.instanceOf(EbmlBlockTag);
export type SimpleBlockType = typeof SimpleBlockSchema.infer;
export type BlockType = typeof BlockSchema.infer;

export const DocTypeExtensionSchema = type({
  DocTypeExtensionName: type.string,

@@ -1,6 +1,65 @@
import type {EbmlClusterTagType} from "konoebml";
import {ClusterSchema, type ClusterType} from "../schema";
import {type SegmentComponent, SegmentComponentSystemTrait} from "./segment";
import type { EbmlClusterTagType } from 'konoebml';
import {
  ClusterSchema,
  type SimpleBlockType,
  type ClusterType,
  type BlockGroupType,
  type TrackEntryType,
} from '../schema';
import { type SegmentComponent, SegmentComponentSystemTrait } from './segment';

export abstract class BlockViewTrait {
  abstract get keyframe(): boolean;

  abstract get frames(): Uint8Array[];

  abstract get trackNum(): number | bigint;

  abstract get relTime(): number;
}

export class SimpleBlockView extends BlockViewTrait {
  constructor(public readonly block: SimpleBlockType) {
    super();
  }

  get keyframe() {
    return !!this.block.keyframe;
  }

  get frames(): Uint8Array<ArrayBufferLike>[] {
    return this.block.frames;
  }

  get trackNum() {
    return this.block.track;
  }

  get relTime() {
    return this.block.value;
  }
}

export class BlockGroupView extends BlockViewTrait {
  constructor(public readonly block: BlockGroupType) {
    super();
  }

  get keyframe() {
    return !this.block.ReferenceBlock;
  }

  get frames(): Uint8Array<ArrayBufferLike>[] {
    return this.block.Block.frames;
  }
  get trackNum() {
    return this.block.Block.track;
  }

  get relTime() {
    return this.block.Block.value;
  }
}

export class ClusterSystem extends SegmentComponentSystemTrait<
  EbmlClusterTagType,
@@ -14,7 +73,27 @@ export class ClusterSystem extends SegmentComponentSystemTrait<

  addClusterWithTag(tag: EbmlClusterTagType) {
    const cluster = this.componentFromTag(tag);
    this.clustersBuffer.push(cluster);
    // this.clustersBuffer.push(cluster);
    return cluster;
  }
}

  *enumerateBlocks(
    cluster: ClusterType,
    track: TrackEntryType
  ): Generator<BlockViewTrait> {
    if (cluster.SimpleBlock) {
      for (const block of cluster.SimpleBlock) {
        if (block.track === track.TrackNumber) {
          yield new SimpleBlockView(block);
        }
      }
    }
    if (cluster.BlockGroup) {
      for (const block of cluster.BlockGroup) {
        if (block.Block.track === track.TrackNumber) {
          yield new BlockGroupView(block);
        }
      }
    }
  }
}
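A small illustrative sketch of how the block views above are typically consumed (`keyframeTimes` is a hypothetical helper; the `ClusterSystem` export from this module is assumed).

import type { ClusterType, TrackEntryType } from '../schema';
import type { ClusterSystem } from './cluster';

// Collect keyframe timestamps for one track: SimpleBlockView reads the keyframe
// flag, while BlockGroupView treats a missing ReferenceBlock as a keyframe.
export function keyframeTimes(
  clusterSystem: ClusterSystem,
  cluster: ClusterType,
  track: TrackEntryType
): number[] {
  const times: number[] = [];
  for (const block of clusterSystem.enumerateBlocks(cluster, track)) {
    if (block.keyframe) {
      times.push(Number(cluster.Timestamp) + block.relTime);
    }
  }
  return times;
}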
@@ -1,44 +1,69 @@
import {ParseCodecErrors, UnsupportedCodecError} from "@konoplayer/core/errors.ts";
import {
  ParseCodecErrors,
  UnsupportedCodecError,
} from '@konoplayer/core/errors.ts';
import {
  EbmlTagIdEnum,
  type EbmlTrackEntryTagType,
  type EbmlTracksTagType
} from "konoebml";
  type EbmlTracksTagType,
} from 'konoebml';
import {
  audioCodecIdToWebCodecs,
  videoCodecIdRequirePeekingKeyframe,
  videoCodecIdToWebCodecs, type AudioDecoderConfigExt, type VideoDecoderConfigExt
} from "../codecs";
import {TrackEntrySchema, type TrackEntryType, TrackTypeRestrictionEnum} from "../schema";
import {type SegmentComponent, SegmentComponentSystemTrait} from "./segment";
  videoCodecIdToWebCodecs,
  type AudioDecoderConfigExt,
  type VideoDecoderConfigExt,
} from '../codecs';
import {
  TrackEntrySchema,
  type TrackEntryType,
  TrackTypeRestrictionEnum,
} from '../schema';
import { type SegmentComponent, SegmentComponentSystemTrait } from './segment';

export interface GetTrackEntryOptions {
  priority?: (v: SegmentComponent<TrackEntryType>) => number;
  predicate?: (v: SegmentComponent<TrackEntryType>) => boolean;
  predicate: (v: SegmentComponent<TrackEntryType>) => boolean;
}

export abstract class TrackContext {
  peekingKeyframe?: Uint8Array;
  trackEntry: TrackEntryType
  trackEntry: TrackEntryType;
  timecodeScale: number;
  lastBlockTimestamp = Number.NaN;
  averageBlockDuration = Number.NaN;

  constructor(trackEntry: TrackEntryType) {
  constructor(trackEntry: TrackEntryType, timecodeScale: number) {
    this.trackEntry = trackEntry;
    this.timecodeScale = timecodeScale;
  }

  peekKeyframe (payload: Uint8Array) {
  peekKeyframe(payload: Uint8Array) {
    this.peekingKeyframe = payload;
  }

  preparedToConfigure () {
  preparedToConfigure() {
    if (this.requirePeekKeyframe()) {
      return !!this.peekingKeyframe;
    }
    return true;
  }

  abstract requirePeekKeyframe (): boolean;
  abstract requirePeekKeyframe(): boolean;

  abstract buildConfiguration (): Promise<void>;
  abstract buildConfiguration(): Promise<void>;

  predictBlockDuration(blockTimestamp: number): number {
    if (this.trackEntry.DefaultDuration) {
      return Number(this.trackEntry.DefaultDuration);
    }
    const delta = blockTimestamp - this.lastBlockTimestamp;
    this.lastBlockTimestamp = blockTimestamp;
    this.averageBlockDuration = this.averageBlockDuration
      ? this.averageBlockDuration * 0.5 + delta * 0.5
      : delta;
    return this.averageBlockDuration;
  }
}

export class DefaultTrackContext extends TrackContext {
@@ -46,18 +71,22 @@ export class DefaultTrackContext extends TrackContext {
    return false;
  }

  // biome-ignore lint/suspicious/noEmptyBlockStatements: <explanation>
  override async buildConfiguration(): Promise<void> {}
}

export class VideoTrackContext extends TrackContext {
  configuration!: VideoDecoderConfigExt;

  override requirePeekKeyframe (): boolean {
  override requirePeekKeyframe(): boolean {
    return videoCodecIdRequirePeekingKeyframe(this.trackEntry.CodecID);
  }

  async buildConfiguration () {
    const configuration = videoCodecIdToWebCodecs(this.trackEntry, this.peekingKeyframe);
  async buildConfiguration() {
    const configuration = videoCodecIdToWebCodecs(
      this.trackEntry,
      this.peekingKeyframe
    );
    if (await VideoDecoder.isConfigSupported(configuration)) {
      throw new UnsupportedCodecError(configuration.codec, 'video decoder');
    }
@@ -68,21 +97,50 @@ export class VideoTrackContext extends TrackContext {
export class AudioTrackContext extends TrackContext {
  configuration!: AudioDecoderConfigExt;

  override requirePeekKeyframe (): boolean {
  override requirePeekKeyframe(): boolean {
    return videoCodecIdRequirePeekingKeyframe(this.trackEntry.CodecID);
  }

  async buildConfiguration () {
    const configuration = audioCodecIdToWebCodecs(this.trackEntry, this.peekingKeyframe);
  async buildConfiguration() {
    const configuration = audioCodecIdToWebCodecs(
      this.trackEntry,
      this.peekingKeyframe
    );
    if (await AudioDecoder.isConfigSupported(configuration)) {
      throw new UnsupportedCodecError(configuration.codec, 'audio decoder');
    }

    this.configuration = configuration;
  }

  override predictBlockDuration(blockTimestamp: number): number {
    if (this.trackEntry.DefaultDuration) {
      return Number(this.trackEntry.DefaultDuration);
    }
    if (this.configuration.samplesPerFrame) {
      return (
        Number(
          this.configuration.samplesPerFrame / this.configuration.sampleRate
        ) *
        (1_000_000_000 / Number(this.timecodeScale))
      );
    }
    const delta = blockTimestamp - this.lastBlockTimestamp;
    this.lastBlockTimestamp = blockTimestamp;
    this.averageBlockDuration = this.averageBlockDuration
      ? this.averageBlockDuration * 0.5 + delta * 0.5
      : delta;
    return this.averageBlockDuration;
  }
}
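// Worked example for the samplesPerFrame branch above (assumed values):
// with samplesPerFrame = 1024, sampleRate = 48000, and TimestampScale = 1_000_000 ns,
// (1024 / 48000) * (1_000_000_000 / 1_000_000) ≈ 21.33 timestamp ticks ≈ 21.33 ms per block,
// which is the predicted block duration whenever DefaultDuration is absent.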

export function standardTrackPredicate(track: TrackEntryType) {
  return track.FlagEnabled !== 0;
}

export function standardTrackPriority(track: TrackEntryType) {
  return (Number(!!track.FlagForced) << 8) + (Number(!!track.FlagDefault) << 4);
}

export class TrackSystem extends SegmentComponentSystemTrait<
  EbmlTrackEntryTagType,
@@ -96,37 +154,45 @@ export class TrackSystem extends SegmentComponentSystemTrait<
  trackContexts: Map<number | bigint, TrackContext> = new Map();

  getTrackEntry({
    priority = (track) =>
      (Number(!!track.FlagForced) << 4) + Number(!!track.FlagDefault),
    predicate = (track) => track.FlagEnabled !== 0,
  }: GetTrackEntryOptions) {
    priority = standardTrackPriority,
    predicate,
  }: GetTrackEntryOptions) {
    return this.tracks
      .filter(predicate)
      .toSorted((a, b) => priority(b) - priority(a))
      .at(0);
  }

  getTrackContext <T extends TrackContext>(options: GetTrackEntryOptions): T | undefined {
  getTrackContext<T extends TrackContext>(
    options: GetTrackEntryOptions
  ): T | undefined {
    const trackEntry = this.getTrackEntry(options);
    const trackNum = trackEntry?.TrackNumber!;
    return this.trackContexts.get(trackNum) as T | undefined;
  }

  prepareTracksWithTag(tag: EbmlTracksTagType) {
    const infoSystem = this.segment.info;
    this.tracks = tag.children
      .filter((c) => c.id === EbmlTagIdEnum.TrackEntry)
      .map((c) => this.componentFromTag(c));
    for (const track of this.tracks) {
      if (track.TrackType === TrackTypeRestrictionEnum.VIDEO) {
        this.trackContexts.set(track.TrackNumber, new VideoTrackContext(track))
        this.trackContexts.set(
          track.TrackNumber,
          new VideoTrackContext(track, Number(infoSystem.info.TimestampScale))
        );
      } else if (track.TrackType === TrackTypeRestrictionEnum.AUDIO) {
        this.trackContexts.set(track.TrackNumber, new AudioTrackContext(track))
        this.trackContexts.set(
          track.TrackNumber,
          new AudioTrackContext(track, Number(infoSystem.info.TimestampScale))
        );
      }
    }
    return this;
  }

  async buildTracksConfiguration () {
  async buildTracksConfiguration() {
    const parseErrors = new ParseCodecErrors();

    for (const context of this.trackContexts.values()) {
@@ -141,15 +207,15 @@ export class TrackSystem extends SegmentComponentSystemTrait<
    }
  }

  tryPeekKeyframe (tag: { track: number | bigint, frames: Uint8Array[] }) {
  tryPeekKeyframe(tag: { track: number | bigint; frames: Uint8Array[] }) {
    for (const c of this.trackContexts.values()) {
      if (c.trackEntry.TrackNumber === tag.track) {
        c.peekKeyframe(tag.frames?.[0])
        c.peekKeyframe(tag.frames?.[0]);
      }
    }
  }

  preparedToConfigureTracks (): boolean {
  preparedToConfigureTracks(): boolean {
    for (const c of this.trackContexts.values()) {
      if (!c.preparedToConfigure()) {
        return false;
@@ -157,4 +223,4 @@ export class TrackSystem extends SegmentComponentSystemTrait<
      }
    }
    return true;
  }
}