fix: fix audio issues

This commit is contained in:
master 2025-03-26 06:55:37 +08:00
parent 39e17eb6a5
commit 8cc1a2bab1
25 changed files with 371 additions and 306 deletions

View File

@ -1 +1,29 @@
# konoplayer
**A project launched just for fun: watching anime in browsers in the widely used but poorly supported MKV format.**
## State of the Prototype
- [x] Matroska support
  - [x] Parse EBML and demux (Done / TypeScript)
  - [x] Validate data against the Matroska v4 spec (Done / TypeScript)
  - [x] WebCodecs decoding + Canvas rendering (Prototyping / TypeScript); see the sketch after this list
  - [x] Parse track CodecID/CodecPrivate and generate codec strings (Partial / TypeScript)
    - Video:
      - [x] VP9
      - [x] VP8
      - [x] AVC
      - [x] HEVC
      - [x] AV1
    - Audio:
      - [x] AAC
      - [x] MP3
      - [x] AC3
      - [ ] OPUS (untested, needs more work)
      - [ ] VORBIS (needs fixing)
      - [ ] EAC-3 (needs fixing)
      - [ ] PCM (needs testing)
      - [ ] ALAC (needs testing)
      - [ ] FLAC (needs testing)
- [ ] Wrap the video element with customElements (Prototyping / Lit-html + TypeScript)
- [ ] Add a WebCodecs polyfill via ffmpeg or libav (Todo / WASM)
- [ ] Danmaku integration (Todo / TypeScript)
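
For orientation, a minimal sketch of the WebCodecs decoding + canvas rendering path (browser-only APIs; `renderChunks`, `codecString`, and `chunks` are hypothetical names for illustration, while the real pipeline wires these stages together with RxJS):

```ts
// Sketch only: decode demuxed video chunks with WebCodecs and paint them
// onto a canvas. Inputs are assumed to come from the Matroska demuxer and
// the CodecID/CodecPrivate -> codec-string step listed above.
async function renderChunks(
  canvas: HTMLCanvasElement,
  codecString: string, // e.g. 'vp09.00.10.08' for VP9
  chunks: EncodedVideoChunk[], // keyframe first
): Promise<void> {
  const ctx = canvas.getContext('2d');
  if (!ctx) throw new Error('2d context unavailable');
  const decoder = new VideoDecoder({
    output: (frame: VideoFrame) => {
      ctx.drawImage(frame, 0, 0, canvas.width, canvas.height);
      frame.close(); // frames hold GPU memory, release as soon as drawn
    },
    error: (e) => console.error('[decode]', e),
  });
  const support = await VideoDecoder.isConfigSupported({ codec: codecString });
  if (!support.supported) throw new Error(`unsupported codec: ${codecString}`);
  decoder.configure({ codec: codecString });
  for (const chunk of chunks) decoder.decode(chunk);
  await decoder.flush();
}
```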

View File

@ -21,6 +21,6 @@
"node_modules",
"dist",
"test",
"**/*spec.ts"
"**/*spec"
]
}

View File

@ -3,7 +3,7 @@
<head></head>
<body>
<my-element />
<video-pipeline-demo src="/api/static/video/test.webm"></video-pipeline-demo>
<!-- <video-pipeline-demo src="/api/static/video/huge/[LoliHouse] Amagami-san Chi no Enmusubi - 23 [WebRip 1080p HEVC-10bit AAC SRTx2].mkv" width="800" height="450" /> -->
<!-- <my-element />-->
<!-- <video-pipeline-demo src="/api/static/video/test.webm"></video-pipeline-demo>-->
<video-pipeline-demo src="/api/static/video/huge/[LoliHouse] Amagami-san Chi no Enmusubi - 23 [WebRip 1080p HEVC-10bit AAC SRTx2].mkv" width="800" height="450" />
</body>

View File

@ -13,7 +13,7 @@ import {
fromEvent,
share,
takeUntil,
firstValueFrom,
firstValueFrom, tap, throwIfEmpty, ReplaySubject, finalize, of, interval,
} from 'rxjs';
import { createMatroska } from '@konoplayer/matroska/model';
import { createRef, ref, type Ref } from 'lit/directives/ref.js';
@ -45,14 +45,12 @@ export class VideoPipelineDemo extends LitElement {
videoRef: Ref<HTMLVideoElement> = createRef();
renderingContext = createRenderingContext();
audioContext = new AudioContext();
canvasSource = new MediaSource();
audioContext = new AudioContext({});
seeked$ = new Subject<number>();
seeked$ = new ReplaySubject<number>(1);
videoFrameBuffer$ = new BehaviorSubject(new Queue<VideoFrame>());
audioFrameBuffer$ = new BehaviorSubject(new Queue<AudioData>());
private startTime = 0;
paused$ = new BehaviorSubject<boolean>(false);
ended$ = new BehaviorSubject<boolean>(false);
@ -80,37 +78,37 @@ export class VideoPipelineDemo extends LitElement {
videoTrackDecoder,
audioTrackDecoder,
},
totalSize
} = await firstValueFrom(
createMatroska({
url: src,
})
);
}).pipe(
throwIfEmpty(() => new Error("failed to extract matroska"))
)
)
console.debug(`[MATROSKA]: loaded metadata, total size ${totalSize} bytes`)
const currentCluster$ = this.seeked$.pipe(
switchMap((seekTime) => seek(seekTime)),
share()
share({ resetOnRefCountZero: false, resetOnError: false, resetOnComplete: false }),
);
defaultVideoTrack$
.pipe(takeUntil(destroyRef$), take(1))
.subscribe(this.videoTrack$);
.pipe(take(1), takeUntil(destroyRef$), tap((track) => console.debug('[MATROSKA]: video track loaded,', track)))
.subscribe(this.videoTrack$.next.bind(this.videoTrack$));
defaultAudioTrack$
.pipe(takeUntil(destroyRef$), take(1))
.subscribe(this.audioTrack$);
.pipe(take(1), takeUntil(destroyRef$), tap((track) => console.debug('[MATROSKA]: audio track loaded,', track)))
.subscribe(this.audioTrack$.next.bind(this.audioTrack$));
this.videoTrack$
.pipe(
takeUntil(this.destroyRef$),
map((track) =>
track ? videoTrackDecoder(track, currentCluster$) : undefined
switchMap((track) =>
track?.configuration ? videoTrackDecoder(track, currentCluster$) : EMPTY
),
switchMap((decoder) => {
if (!decoder) {
return EMPTY;
}
return decoder.frame$;
})
switchMap(({ frame$ }) => frame$)
)
.subscribe((frame) => {
const buffer = this.videoFrameBuffer$.value;
@ -121,15 +119,10 @@ export class VideoPipelineDemo extends LitElement {
this.audioTrack$
.pipe(
takeUntil(this.destroyRef$),
map((track) =>
track ? audioTrackDecoder(track, currentCluster$) : undefined
switchMap((track) =>
track?.configuration ? audioTrackDecoder(track, currentCluster$) : EMPTY
),
switchMap((decoder) => {
if (!decoder) {
return EMPTY;
}
return decoder.frame$;
})
switchMap(({ frame$ }) => frame$)
)
.subscribe((frame) => {
const buffer = this.audioFrameBuffer$.value;
@ -137,39 +130,52 @@ export class VideoPipelineDemo extends LitElement {
this.audioFrameBuffer$.next(buffer);
});
combineLatest({
let playableStartTime = 0;
const playable = combineLatest({
paused: this.paused$,
ended: this.ended$,
buffered: this.audioFrameBuffer$.pipe(
audioBuffered: this.audioFrameBuffer$.pipe(
map((q) => q.size >= 1),
distinctUntilChanged()
),
})
videoBuffered: this.videoFrameBuffer$.pipe(
map((q) => q.size >= 1),
distinctUntilChanged()
),
}).pipe(
takeUntil(this.destroyRef$),
map(({ ended, paused, videoBuffered, audioBuffered }) => !paused && !ended && !!(videoBuffered || audioBuffered)),
tap((enabled) => {
if (enabled) {
playableStartTime = performance.now()
}
}),
share()
)
let nextAudioStartTime = 0;
playable
.pipe(
takeUntil(this.destroyRef$),
map(({ ended, paused, buffered }) => !paused && !ended && !!buffered),
switchMap((enabled) => (enabled ? animationFrames() : EMPTY))
tap(() => {
nextAudioStartTime = 0
}),
switchMap((enabled) => (enabled ? animationFrames() : EMPTY)),
)
.subscribe(() => {
const audioFrameBuffer = this.audioFrameBuffer$.getValue();
const audioContext = this.audioContext;
const nowTime = performance.now();
const accTime = nowTime - this.startTime;
const accTime = nowTime - playableStartTime;
let audioChanged = false;
while (audioFrameBuffer.size > 0) {
const firstAudio = audioFrameBuffer.peek();
if (firstAudio && firstAudio.timestamp <= accTime * 1000) {
if (firstAudio && (firstAudio.timestamp / 1000) <= accTime) {
const audioFrame = audioFrameBuffer.dequeue()!;
audioChanged = true;
const audioContext = this.audioContext;
if (audioContext) {
const numberOfChannels = audioFrame.numberOfChannels;
const sampleRate = audioFrame.sampleRate;
const numberOfFrames = audioFrame.numberOfFrames;
const data = new Float32Array(numberOfFrames * numberOfChannels);
audioFrame.copyTo(data, {
planeIndex: 0,
});
const audioBuffer = audioContext.createBuffer(
numberOfChannels,
@ -177,14 +183,22 @@ export class VideoPipelineDemo extends LitElement {
sampleRate
);
// add a short fade-in/out to soften chunk boundaries
const fadeLength = Math.min(50, audioFrame.numberOfFrames);
for (let channel = 0; channel < numberOfChannels; channel++) {
const channelData = audioBuffer.getChannelData(channel);
for (let i = 0; i < numberOfFrames; i++) {
channelData[i] = data[i * numberOfChannels + channel];
const channelData = new Float32Array(numberOfFrames);
audioFrame.copyTo(channelData, { planeIndex: channel, frameCount: numberOfFrames });
for (let i = 0; i < fadeLength; i++) {
channelData[i] *= i / fadeLength; // fade-in
channelData[audioFrame.numberOfFrames - 1 - i] *= i / fadeLength; // fade-out
}
audioBuffer.copyToChannel(channelData, channel);
}
const audioTime = audioFrame.timestamp / 1000000;
/**
* @TODO: ADD TIME SYNC
*/
const audioTime = audioFrame.timestamp / 1_000_000;
audioFrame.close();
@ -192,11 +206,10 @@ export class VideoPipelineDemo extends LitElement {
const audioSource = audioContext.createBufferSource();
audioSource.buffer = audioBuffer;
audioSource.connect(audioContext.destination);
audioSource.start(
audioContext.currentTime +
Math.max(0, audioTime - accTime / 1000)
);
const currentTime = audioContext.currentTime;
nextAudioStartTime = Math.max(nextAudioStartTime, currentTime); // make sure playback is never scheduled earlier than the current time
audioSource.start(nextAudioStartTime);
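// advance the cursor by this buffer's duration so the next buffer queues back-to-back, without gaps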
nextAudioStartTime += audioBuffer.duration;
}
}
} else {
@ -208,35 +221,26 @@ export class VideoPipelineDemo extends LitElement {
}
});
combineLatest({
paused: this.paused$,
ended: this.ended$,
buffered: this.videoFrameBuffer$.pipe(
map((q) => q.size >= 1),
distinctUntilChanged()
),
})
playable
.pipe(
takeUntil(this.destroyRef$),
map(({ ended, paused, buffered }) => !paused && !ended && !!buffered),
switchMap((enabled) => (enabled ? animationFrames() : EMPTY))
switchMap((enabled) => (enabled ? animationFrames() : EMPTY)),
)
.subscribe(async () => {
const renderingContext = this.renderingContext;
const videoFrameBuffer = this.videoFrameBuffer$.getValue();
let videoChanged = false;
const nowTime = performance.now();
const accTime = nowTime - this.startTime;
const accTime = nowTime - playableStartTime;
while (videoFrameBuffer.size > 0) {
const firstVideo = videoFrameBuffer.peek();
if (firstVideo && firstVideo.timestamp <= accTime * 1000) {
if (firstVideo && (firstVideo.timestamp / 1000) <= accTime) {
const videoFrame = videoFrameBuffer.dequeue()!;
const renderingContext = this.renderingContext;
videoChanged = true;
if (renderingContext) {
const bitmap = await createImageBitmap(videoFrame);
renderBitmapAtRenderingContext(renderingContext, bitmap);
videoFrame.close();
videoChanged = true;
}
videoFrame.close();
} else {
break;
}
@ -252,22 +256,18 @@ export class VideoPipelineDemo extends LitElement {
this.audioContext.resume();
this.audioFrameBuffer$.next(this.audioFrameBuffer$.getValue());
});
this.seeked$.next(0)
}
connectedCallback(): void {
async connectedCallback() {
super.connectedCallback();
this.preparePipeline();
await this.preparePipeline();
}
disconnectedCallback(): void {
super.disconnectedCallback();
this.destroyRef$.next();
}
render() {
return html`
<video ref=${ref(this.videoRef)}></video>
`;
this.destroyRef$.next(undefined);
}
firstUpdated() {
@ -303,8 +303,16 @@ export class VideoPipelineDemo extends LitElement {
frameRate$
.pipe(takeUntil(destroyRef$), distinctUntilChanged())
.subscribe((frameRate) =>
captureCanvasAsVideoSrcObject(video, canvas, frameRate)
);
.subscribe((frameRate) => {
canvas.width = this.width || 1;
canvas.height = this.height || 1;
captureCanvasAsVideoSrcObject(video, canvas, frameRate);
});
}
render() {
return html`
<video ref=${ref(this.videoRef)} width=${this.width} height=${this.height} autoplay muted></video>
`;
}
}
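
The essence of the audio fix in this file, condensed into a standalone sketch (assumes planar f32 `AudioData`, as the code above does; `scheduleAudioFrame` is a hypothetical name): rather than starting each buffer at `currentTime` plus a computed offset, keep a running `nextAudioStartTime` cursor on the `AudioContext` timeline and advance it by each buffer's duration, so consecutive buffers play back-to-back.

```ts
// Sketch of the gapless scheduling scheme this commit introduces.
let nextAudioStartTime = 0; // cursor on the AudioContext timeline, in seconds

function scheduleAudioFrame(audioContext: AudioContext, frame: AudioData): void {
  const { numberOfChannels, numberOfFrames, sampleRate } = frame;
  const buffer = audioContext.createBuffer(numberOfChannels, numberOfFrames, sampleRate);
  for (let channel = 0; channel < numberOfChannels; channel++) {
    const channelData = new Float32Array(numberOfFrames);
    frame.copyTo(channelData, { planeIndex: channel, frameCount: numberOfFrames });
    buffer.copyToChannel(channelData, channel);
  }
  frame.close();

  const source = audioContext.createBufferSource();
  source.buffer = buffer;
  source.connect(audioContext.destination);
  // never schedule in the past, then chain this buffer right after the last one
  nextAudioStartTime = Math.max(nextAudioStartTime, audioContext.currentTime);
  source.start(nextAudioStartTime);
  nextAudioStartTime += buffer.duration;
}
```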

View File

@ -5,7 +5,7 @@
}
```
^https://konoplayer.com/api/static/*** resSpeed://10240
#^https://konoplayer.com/api/static/*** resSpeed://10240+
^https://konoplayer.com/api*** reqHeaders://{x-forwarded.json} http://127.0.0.1:5001/api$1
^https://konoplayer.com/*** reqHeaders://{x-forwarded.json} http://127.0.0.1:5000/$1 excludeFilter://^https://konoplayer.com/api
^https://konoplayer.com/*** reqHeaders://{x-forwarded.json} http://127.0.0.1:5000/$1 excludeFilter://^https://konoplayer.com/api weinre://test
^wss://konoplayer.com/*** reqHeaders://{x-forwarded.json} ws://127.0.0.1:5000/$1 excludeFilter://^wss://konoplayer.com/api

View File

@ -1 +1 @@
{"version":"3.0.9","results":[[":src/matroska/codecs/av1.spec.ts",{"duration":52.71331099999952,"failed":false}]]}
{"version":"3.0.9","results":[[":src/matroska/codecs/av1.spec",{"duration":52.71331099999952,"failed":false}]]}

View File

@ -5,9 +5,9 @@ import { defineConfig } from 'vitest/config';
export default defineConfig({
cacheDir: '.vitest',
test: {
setupFiles: ['src/init-test.ts'],
setupFiles: ['src/init-test'],
environment: 'happy-dom',
include: ['src/**/*.spec.ts'],
include: ['src/**/*.spec'],
globals: true,
restoreMocks: true,
coverage: {

View File

@ -3,8 +3,8 @@
"version": "0.0.1",
"description": "A strange player, like the dumtruck, taking you to Isekai.",
"scripts": {
"codegen-mkv": "tsx --tsconfig=./tsconfig.scripts.json ./scripts/codegen-mkv.ts",
"download-samples": "tsx --tsconfig=./tsconfig.scripts.json ./scripts/download-samples.ts"
"codegen-mkv": "tsx --tsconfig=./tsconfig.scripts.json ./scripts/codegen-mkv",
"download-samples": "tsx --tsconfig=./tsconfig.scripts.json ./scripts/download-samples"
},
"keywords": [],
"author": "lonelyhentxi",

View File

@ -1,18 +1,20 @@
import { Observable } from 'rxjs';
import {map, Observable, Subject} from 'rxjs';
// biome-ignore lint/correctness/noUndeclaredVariables: <explanation>
export function createAudioDecodeStream(configuration: AudioDecoderConfig): {
export function createAudioDecodeStream(configuration: AudioDecoderConfig): Observable<{
decoder: AudioDecoder;
frame$: Observable<AudioData>;
} {
let decoder!: VideoDecoder;
const frame$ = new Observable<AudioData>((subscriber) => {
}> {
const frame$ = new Subject<AudioData>()
const decoder$ = new Observable<AudioDecoder>((subscriber) => {
let isFinalized = false;
decoder = new AudioDecoder({
output: (frame) => subscriber.next(frame),
const decoder = new AudioDecoder({
output: (frame) => frame$.next(frame),
error: (e) => {
if (!isFinalized) {
isFinalized = true;
frame$.error(e);
subscriber.error(e);
}
},
@ -20,16 +22,19 @@ export function createAudioDecodeStream(configuration: AudioDecoderConfig): {
decoder.configure(configuration);
subscriber.next(decoder);
return () => {
if (!isFinalized) {
isFinalized = true;
frame$.complete();
decoder.close();
}
};
});
})
return {
return decoder$.pipe(map((decoder) => ({
decoder,
frame$,
};
frame$
})));
}
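
A hedged usage sketch of the refactored factory: the decoder is now created lazily per subscription and torn down on unsubscribe (the `config` values below are placeholders; in this commit the real wiring lives in `audioTrackDecoder`):

```ts
// Each subscription gets a fresh AudioDecoder whose lifetime is tied to the
// subscription: unsubscribing runs the Observable teardown, which closes the
// decoder and completes frame$.
const config: AudioDecoderConfig = {
  codec: 'mp4a.40.2', // AAC-LC, as a placeholder
  sampleRate: 48_000,
  numberOfChannels: 2,
};

const subscription = createAudioDecodeStream(config).subscribe(({ decoder, frame$ }) => {
  frame$.subscribe((frame) => {
    // hand the AudioData to the playback queue, then release it
    frame.close();
  });
  // feed demuxed packets: decoder.decode(new EncodedAudioChunk({ ... }));
});
// later: subscription.unsubscribe(); // closes the decoder
```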

View File

@ -1,4 +1,4 @@
import { Observable } from 'rxjs';
import {map, Observable, Subject} from 'rxjs';
export type RenderingContext =
| ImageBitmapRenderingContext
@ -42,18 +42,19 @@ export function captureCanvasAsVideoSrcObject(
video.srcObject = canvas.captureStream(frameRate);
}
export function createVideoDecodeStream(configuration: VideoDecoderConfig): {
export function createVideoDecodeStream(configuration: VideoDecoderConfig): Observable<{
decoder: VideoDecoder;
frame$: Observable<VideoFrame>;
} {
let decoder!: VideoDecoder;
const frame$ = new Observable<VideoFrame>((subscriber) => {
}> {
const frame$ = new Subject<VideoFrame>()
const decoder$ = new Observable<VideoDecoder>((subscriber) => {
let isFinalized = false;
decoder = new VideoDecoder({
output: (frame) => subscriber.next(frame),
const decoder = new VideoDecoder({
output: (frame) => frame$.next(frame),
error: (e) => {
if (!isFinalized) {
isFinalized = true;
frame$.error(e);
subscriber.error(e);
}
},
@ -61,16 +62,19 @@ export function createVideoDecodeStream(configuration: VideoDecoderConfig): {
decoder.configure(configuration);
subscriber.next(decoder);
return () => {
if (!isFinalized) {
isFinalized = true;
frame$.complete();
decoder.close();
}
};
});
})
return {
return decoder$.pipe(map((decoder) => ({
decoder,
frame$,
};
frame$
})));
}

View File

@ -16,16 +16,16 @@ import {
import {
genCodecStringByAV1DecoderConfigurationRecord,
parseAV1DecoderConfigurationRecord,
} from './av1.ts';
} from './av1';
import {
genCodecStringByHEVCDecoderConfigurationRecord,
parseHEVCDecoderConfigurationRecord,
} from './hevc.ts';
} from './hevc';
import {
genCodecStringByVP9DecoderConfigurationRecord,
parseVP9DecoderConfigurationRecord,
VP9_CODEC_TYPE,
} from './vp9.ts';
} from './vp9';
export const VideoCodecId = {
VCM: 'V_MS/VFW/FOURCC',

View File

@ -1,4 +1,3 @@
import type { CreateRangedStreamOptions } from '@konoplayer/core/data';
import { type EbmlEBMLTagType, EbmlTagIdEnum, EbmlTagPosition } from 'konoebml';
import {
switchMap,
@ -7,14 +6,14 @@ import {
shareReplay,
map,
combineLatest,
of,
of, type Observable, delayWhen, pipe, finalize, tap, throwIfEmpty,
} from 'rxjs';
import { isTagIdPos } from '../util';
import { createRangedEbmlStream } from './resource';
import {createRangedEbmlStream, type CreateRangedEbmlStreamOptions} from './resource';
import { type MatroskaSegmentModel, createMatroskaSegment } from './segment';
export type CreateMatroskaOptions = Omit<
CreateRangedStreamOptions,
CreateRangedEbmlStreamOptions,
'byteStart' | 'byteEnd'
>;
@ -25,7 +24,7 @@ export interface MatroskaModel {
segment: MatroskaSegmentModel;
}
export function createMatroska(options: CreateMatroskaOptions) {
export function createMatroska(options: CreateMatroskaOptions): Observable<MatroskaModel> {
const metadataRequest$ = createRangedEbmlStream({
...options,
byteStart: 0,
@ -33,32 +32,34 @@ export function createMatroska(options: CreateMatroskaOptions) {
return metadataRequest$.pipe(
switchMap(({ totalSize, ebml$, response }) => {
const head$ = ebml$.pipe(
filter(isTagIdPos(EbmlTagIdEnum.EBML, EbmlTagPosition.End)),
take(1),
shareReplay(1)
);
const segmentStart$ = ebml$.pipe(
filter(isTagIdPos(EbmlTagIdEnum.Segment, EbmlTagPosition.Start))
);
/**
although the [Matroska v4](https://www.matroska.org/technical/elements.html) doc says a file contains only one segment,
some MKV files generated by odd tools emit several
*/
const segments$ = segmentStart$.pipe(
map((startTag) =>
createMatroskaSegment({
startTag,
matroskaOptions: options,
ebml$,
})
)
const segment$ = ebml$.pipe(
filter(isTagIdPos(EbmlTagIdEnum.Segment, EbmlTagPosition.Start)),
map((startTag) => createMatroskaSegment({
startTag,
matroskaOptions: options,
ebml$,
})),
delayWhen(
({ loadedMetadata$ }) => loadedMetadata$
),
take(1),
shareReplay(1)
);
const head$ = ebml$.pipe(
filter(isTagIdPos(EbmlTagIdEnum.EBML, EbmlTagPosition.End)),
take(1),
shareReplay(1),
throwIfEmpty(() => new Error("failed to find head tag"))
);
return combineLatest({
segment: segments$.pipe(take(1)),
segment: segment$,
head: head$,
totalSize: of(totalSize),
initResponse: of(response),

View File

@ -3,14 +3,18 @@ import {
createRangedStream,
} from '@konoplayer/core/data';
import { type EbmlTagType, EbmlStreamDecoder, EbmlTagIdEnum } from 'konoebml';
import { Observable, from, switchMap, share, defer, EMPTY, of } from 'rxjs';
import {Observable, from, switchMap, share, defer, EMPTY, of, tap} from 'rxjs';
import { waitTick } from '../util';
export interface CreateRangedEbmlStreamOptions extends CreateRangedStreamOptions {
refCount?: boolean
}
export function createRangedEbmlStream({
url,
byteStart = 0,
byteEnd,
}: CreateRangedStreamOptions): Observable<{
byteEnd
}: CreateRangedEbmlStreamOptions): Observable<{
ebml$: Observable<EbmlTagType>;
totalSize?: number;
response: Response;
@ -23,7 +27,10 @@ export function createRangedEbmlStream({
switchMap(({ controller, body, totalSize, response }) => {
let requestCompleted = false;
const originRequest$ = new Observable<EbmlTagType>((subscriber) => {
const ebml$ = new Observable<EbmlTagType>((subscriber) => {
if (requestCompleted) {
subscriber.complete();
}
body
.pipeThrough(
new EbmlStreamDecoder({
@ -57,8 +64,10 @@ export function createRangedEbmlStream({
});
return () => {
requestCompleted = true;
controller.abort();
if (!requestCompleted) {
requestCompleted = true;
controller.abort();
}
};
}).pipe(
share({
@ -68,22 +77,12 @@ export function createRangedEbmlStream({
})
);
const ebml$ = defer(() =>
requestCompleted ? EMPTY : originRequest$
).pipe(
share({
resetOnError: false,
resetOnComplete: true,
resetOnRefCountZero: true,
})
);
return of({
ebml$,
totalSize,
response,
body,
controller,
ebml$
});
})
);

View File

@ -12,7 +12,6 @@ import {
takeWhile,
share,
map,
last,
switchMap,
shareReplay,
EMPTY,
@ -23,6 +22,8 @@ import {
merge,
isEmpty,
finalize,
delayWhen,
from,
} from 'rxjs';
import type { CreateMatroskaOptions } from '.';
import { type ClusterType, TrackTypeRestrictionEnum } from '../schema';
@ -51,7 +52,6 @@ export interface CreateMatroskaSegmentOptions {
export interface MatroskaSegmentModel {
startTag: EbmlSegmentTagType;
segment: SegmentSystem;
metadataTags$: Observable<EbmlTagType>;
loadedMetadata$: Observable<SegmentSystem>;
loadedTags$: Observable<SegmentSystem>;
loadedCues$: Observable<SegmentSystem>;
@ -59,19 +59,19 @@ export interface MatroskaSegmentModel {
videoTrackDecoder: (
track: VideoTrackContext,
cluster$: Observable<ClusterType>
) => {
) => Observable<{
track: VideoTrackContext;
decoder: VideoDecoder;
frame$: Observable<VideoFrame>;
};
}>;
audioTrackDecoder: (
track: AudioTrackContext,
cluster$: Observable<ClusterType>
) => {
) => Observable<{
track: AudioTrackContext;
decoder: AudioDecoder;
frame$: Observable<AudioData>;
};
}>;
defaultVideoTrack$: Observable<VideoTrackContext | undefined>;
defaultAudioTrack$: Observable<AudioTrackContext | undefined>;
}
@ -88,16 +88,20 @@ export function createMatroskaSegment({
const metaScan$ = ebml$.pipe(
scan(
(acc, tag) => {
acc.segment.scanMeta(tag);
const segment = acc.segment;
segment.scanMeta(tag);
acc.tag = tag;
acc.canComplete = segment.canCompleteMeta();
return acc;
},
{
segment,
tag: undefined as unknown as EbmlTagType,
canComplete: false,
}
),
takeWhile((acc) => acc.segment.canCompleteMeta(), true),
takeWhile(({ canComplete }) => !canComplete, true),
delayWhen(({ segment }) => from(segment.completeMeta())),
share({
resetOnComplete: false,
resetOnError: false,
@ -105,12 +109,11 @@ export function createMatroskaSegment({
})
);
const metadataTags$ = metaScan$.pipe(map(({ tag }) => tag));
const loadedMetadata$ = metaScan$.pipe(
last(),
switchMap(({ segment }) => segment.completeMeta()),
shareReplay(1)
filter(({ canComplete }) => canComplete),
map(({ segment }) => segment),
take(1),
shareReplay(1),
);
const loadedRemoteCues$ = loadedMetadata$.pipe(
@ -297,88 +300,94 @@ export function createMatroskaSegment({
track: VideoTrackContext,
cluster$: Observable<ClusterType>
) => {
const { decoder, frame$ } = createVideoDecodeStream(track.configuration);
return createVideoDecodeStream(track.configuration).pipe(
map(({ decoder, frame$ }) => {
const clusterSystem = segment.cluster;
const infoSystem = segment.info;
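// Matroska TimestampScale is nanoseconds per tick (1_000_000 by default); dividing by 1000 converts tick-scaled times to the microseconds WebCodecs timestamps expect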
const timestampScale = Number(infoSystem.info.TimestampScale) / 1000;
const clusterSystem = segment.cluster;
const decodeSubscription = cluster$.subscribe((cluster) => {
for (const block of clusterSystem.enumerateBlocks(
cluster,
track.trackEntry
)) {
const blockTime = (Number(cluster.Timestamp) + block.relTime) * timestampScale;
const blockDuration =
block.frames.length > 1 ? track.predictBlockDuration(blockTime) * timestampScale : 0;
const perFrameDuration =
block.frames.length > 1 && blockDuration
? blockDuration / block.frames.length
: 0;
const decodeSubscription = cluster$.subscribe((cluster) => {
for (const block of clusterSystem.enumerateBlocks(
cluster,
track.trackEntry
)) {
const blockTime = Number(cluster.Timestamp) + block.relTime;
const blockDuration =
block.frames.length > 1 ? track.predictBlockDuration(blockTime) : 0;
const perFrameDuration =
block.frames.length > 1 && blockDuration
? blockDuration / block.frames.length
: 0;
for (const frame of block.frames) {
const chunk = new EncodedVideoChunk({
type: block.keyframe ? 'key' : 'delta',
data: frame,
timestamp: blockTime + perFrameDuration,
});
for (const frame of block.frames) {
const chunk = new EncodedVideoChunk({
type: block.keyframe ? 'key' : 'delta',
data: frame,
timestamp: blockTime + perFrameDuration,
});
decoder.decode(chunk);
}
}
});
decoder.decode(chunk);
return {
track,
decoder,
frame$: frame$
.pipe(
finalize(() => {
decodeSubscription.unsubscribe();
})
)
}
}
});
return {
track,
decoder,
frame$: frame$
.pipe(
finalize(() => {
decodeSubscription.unsubscribe();
})
)
.pipe(share()),
};
})
);
};
const audioTrackDecoder = (
track: AudioTrackContext,
cluster$: Observable<ClusterType>
) => {
const { decoder, frame$ } = createAudioDecodeStream(track.configuration);
return createAudioDecodeStream(track.configuration).pipe(
map(({ decoder, frame$ }) => {
const clusterSystem = segment.cluster;
const infoSystem = segment.info;
const timestampScale = Number(infoSystem.info.TimestampScale) / 1000;
const clusterSystem = segment.cluster;
const decodeSubscription = cluster$.subscribe((cluster) => {
for (const block of clusterSystem.enumerateBlocks(
cluster,
track.trackEntry
)) {
const blockTime = (Number(cluster.Timestamp) + block.relTime) * timestampScale;
const blockDuration =
block.frames.length > 1 ? track.predictBlockDuration(blockTime) : 0;
const perFrameDuration =
block.frames.length > 1 && blockDuration
? blockDuration / block.frames.length
: 0;
const decodeSubscription = cluster$.subscribe((cluster) => {
for (const block of clusterSystem.enumerateBlocks(
cluster,
track.trackEntry
)) {
const blockTime = Number(cluster.Timestamp) + block.relTime;
const blockDuration =
block.frames.length > 1 ? track.predictBlockDuration(blockTime) : 0;
const perFrameDuration =
block.frames.length > 1 && blockDuration
? blockDuration / block.frames.length
: 0;
let i = 0;
for (const frame of block.frames) {
const chunk = new EncodedAudioChunk({
type: block.keyframe ? 'key' : 'delta',
data: frame,
timestamp: blockTime + perFrameDuration * i,
});
i++;
let i = 0;
for (const frame of block.frames) {
const chunk = new EncodedAudioChunk({
type: block.keyframe ? 'key' : 'delta',
data: frame,
timestamp: blockTime + perFrameDuration * i,
});
i++;
decoder.decode(chunk);
}
}
});
decoder.decode(chunk);
}
}
});
return {
track,
decoder,
frame$: frame$.pipe(finalize(() => decodeSubscription.unsubscribe())),
};
return {
track,
decoder,
frame$: frame$.pipe(finalize(() => decodeSubscription.unsubscribe())),
};
}));
};
const defaultVideoTrack$ = loadedMetadata$.pipe(
@ -406,7 +415,6 @@ export function createMatroskaSegment({
return {
startTag,
segment,
metadataTags$,
loadedMetadata$,
loadedTags$,
loadedCues$,
@ -414,6 +422,6 @@ export function createMatroskaSegment({
videoTrackDecoder,
audioTrackDecoder,
defaultVideoTrack$,
defaultAudioTrack$,
defaultAudioTrack$
};
}

View File

@ -6,7 +6,8 @@ import {
type BlockGroupType,
type TrackEntryType,
} from '../schema';
import { type SegmentComponent, SegmentComponentSystemTrait } from './segment';
import { type SegmentComponent } from './segment';
import {SegmentComponentSystemTrait} from "./segment-component";
export abstract class BlockViewTrait {
abstract get keyframe(): boolean;

View File

@ -1,7 +1,8 @@
import {type EbmlCuePointTagType, type EbmlCuesTagType, EbmlTagIdEnum} from "konoebml";
import {CuePointSchema, type CuePointType, type CueTrackPositionsType} from "../schema.ts";
import {CuePointSchema, type CuePointType, type CueTrackPositionsType} from "../schema";
import {maxBy} from "lodash-es";
import {type SegmentComponent, SegmentComponentSystemTrait} from "./segment.ts";
import type {SegmentComponent} from "./segment";
import {SegmentComponentSystemTrait} from "./segment-component";
export class CueSystem extends SegmentComponentSystemTrait<
EbmlCuePointTagType,

View File

@ -3,5 +3,6 @@ export { CueSystem } from './cue';
export { TagSystem } from './tag';
export { ClusterSystem } from './cluster';
export { InfoSystem } from './info';
export { type SegmentComponent, SegmentSystem, SegmentComponentSystemTrait, withSegment } from './segment';
export { type SegmentComponent, SegmentSystem, withSegment } from './segment';
export { SeekSystem, SEEK_ID_KAX_CUES, SEEK_ID_KAX_INFO, SEEK_ID_KAX_TAGS, SEEK_ID_KAX_TRACKS } from './seek';
export {SegmentComponentSystemTrait} from "./segment-component";

View File

@ -1,6 +1,7 @@
import type {EbmlInfoTagType} from "konoebml";
import {InfoSchema, type InfoType} from "../schema.ts";
import {type SegmentComponent, SegmentComponentSystemTrait} from "./segment.ts";
import {InfoSchema, type InfoType} from "../schema";
import type {SegmentComponent} from "./segment";
import {SegmentComponentSystemTrait} from "./segment-component";
export class InfoSystem extends SegmentComponentSystemTrait<
EbmlInfoTagType,

View File

@ -1,9 +1,8 @@
import type {EbmlSeekHeadTagType, EbmlTagType} from "konoebml";
import {SeekHeadSchema, type SeekHeadType} from "../schema.ts";
import {SeekHeadSchema, type SeekHeadType} from "../schema";
import {isEqual} from "lodash-es";
import {UnreachableOrLogicError} from "@konoplayer/core/errors.ts";
import {SegmentComponentSystemTrait} from "./segment.ts";
import {UnreachableOrLogicError} from "@konoplayer/core/errors";
import {SegmentComponentSystemTrait} from "./segment-component";
export const SEEK_ID_KAX_INFO = new Uint8Array([0x15, 0x49, 0xa9, 0x66]);
export const SEEK_ID_KAX_TRACKS = new Uint8Array([0x16, 0x54, 0xae, 0x6b]);

View File

@ -0,0 +1,37 @@
import type {EbmlMasterTagType} from "konoebml";
import {ArkErrors, type Type} from "arktype";
import {convertEbmlTagToComponent, type InferType} from "../util";
import type {SegmentComponent, SegmentSystem} from "./segment";
export class SegmentComponentSystemTrait<
E extends EbmlMasterTagType,
S extends Type<any>,
> {
segment: SegmentSystem;
get schema(): S {
throw new Error('unimplemented!');
}
constructor(segment: SegmentSystem) {
this.segment = segment;
}
componentFromTag(tag: E): SegmentComponent<InferType<S>> {
const extracted = convertEbmlTagToComponent(tag);
const result = this.schema(extracted) as
| (InferType<S> & { segment: SegmentSystem })
| ArkErrors;
if (result instanceof ArkErrors) {
const errors = result;
console.error(
'Parse component from tag error:',
tag.toDebugRecord(),
errors.flatProblemsByPath
);
throw errors;
}
result.segment = this.segment;
return result;
}
}

View File

@ -1,20 +1,18 @@
import {
type EbmlClusterTagType,
type EbmlMasterTagType,
type EbmlSegmentTagType,
EbmlTagIdEnum,
EbmlTagPosition,
type EbmlTagType
} from "konoebml";
import {ArkErrors, type Type} from "arktype";
import {convertEbmlTagToComponent, type InferType} from "../util.ts";
import {CueSystem} from "./cue.ts";
import {ClusterSystem} from "./cluster.ts";
import {SEEK_ID_KAX_CUES, SEEK_ID_KAX_INFO, SEEK_ID_KAX_TAGS, SEEK_ID_KAX_TRACKS, SeekSystem} from "./seek.ts";
import {InfoSystem} from "./info.ts";
import {TrackSystem} from "./track.ts";
import {TagSystem} from "./tag.ts";
import type {BlockGroupType} from "../schema.ts";
import {convertEbmlTagToComponent} from "../util";
import {CueSystem} from "./cue";
import {ClusterSystem} from "./cluster";
import {SEEK_ID_KAX_CUES, SEEK_ID_KAX_INFO, SEEK_ID_KAX_TAGS, SEEK_ID_KAX_TRACKS, SeekSystem} from "./seek";
import {InfoSystem} from "./info";
import {TrackSystem} from "./track";
import {TagSystem} from "./tag";
import type {BlockGroupType} from "../schema";
export class SegmentSystem {
startTag: EbmlSegmentTagType;
@ -70,7 +68,9 @@ export class SegmentSystem {
this.seek.addSeekHeadTag(tag);
}
this.metaTags.push(tag);
this.seek.memoOffset(tag);
if (tag.position !== EbmlTagPosition.Start) {
this.seek.memoOffset(tag);
}
if (tag.id === EbmlTagIdEnum.Cluster && !this.firstCluster) {
this.firstCluster = tag;
this.seekLocal();
@ -97,7 +97,7 @@ export class SegmentSystem {
if (lastTag.id === EbmlTagIdEnum.Segment && lastTag.position === EbmlTagPosition.End) {
return true;
}
return !!(this.firstCluster && this.track.preparedToConfigureTracks());
return (!!this.firstCluster && this.track.preparedToConfigureTracks());
}
async completeMeta() {
@ -122,35 +122,3 @@ export function withSegment<T extends object>(
return component_;
}
export class SegmentComponentSystemTrait<
E extends EbmlMasterTagType,
S extends Type<any>,
> {
segment: SegmentSystem;
get schema(): S {
throw new Error('unimplemented!');
}
constructor(segment: SegmentSystem) {
this.segment = segment;
}
componentFromTag(tag: E): SegmentComponent<InferType<S>> {
const extracted = convertEbmlTagToComponent(tag);
const result = this.schema(extracted) as
| (InferType<S> & { segment: SegmentSystem })
| ArkErrors;
if (result instanceof ArkErrors) {
const errors = result;
console.error(
'Parse component from tag error:',
tag.toDebugRecord(),
errors.flatProblemsByPath
);
throw errors;
}
result.segment = this.segment;
return result;
}
}

View File

@ -1,7 +1,8 @@
import {EbmlTagIdEnum, type EbmlTagsTagType, type EbmlTagTagType} from "konoebml";
import {TagSchema, type TagType} from "../schema.ts";
import {TagSchema, type TagType} from "../schema";
import {type SegmentComponent, SegmentComponentSystemTrait} from "./segment.ts";
import type {SegmentComponent} from "./segment";
import {SegmentComponentSystemTrait} from "./segment-component";
export class TagSystem extends SegmentComponentSystemTrait<
EbmlTagTagType,

View File

@ -1,7 +1,7 @@
import {
ParseCodecErrors,
UnsupportedCodecError,
} from '@konoplayer/core/errors.ts';
} from '@konoplayer/core/errors';
import {
EbmlTagIdEnum,
type EbmlTrackEntryTagType,
@ -19,7 +19,9 @@ import {
type TrackEntryType,
TrackTypeRestrictionEnum,
} from '../schema';
import { type SegmentComponent, SegmentComponentSystemTrait } from './segment';
import type { SegmentComponent } from './segment';
import {SegmentComponentSystemTrait} from "./segment-component";
import {pick} from "lodash-es";
export interface GetTrackEntryOptions {
priority?: (v: SegmentComponent<TrackEntryType>) => number;
@ -29,13 +31,13 @@ export interface GetTrackEntryOptions {
export abstract class TrackContext {
peekingKeyframe?: Uint8Array;
trackEntry: TrackEntryType;
timecodeScale: number;
timestampScale: number;
lastBlockTimestamp = Number.NaN;
averageBlockDuration = Number.NaN;
constructor(trackEntry: TrackEntryType, timecodeScale: number) {
constructor(trackEntry: TrackEntryType, timestampScale: number) {
this.trackEntry = trackEntry;
this.timecodeScale = timecodeScale;
this.timestampScale = Number(timestampScale);
}
peekKeyframe(payload: Uint8Array) {
@ -87,7 +89,8 @@ export class VideoTrackContext extends TrackContext {
this.trackEntry,
this.peekingKeyframe
);
if (await VideoDecoder.isConfigSupported(configuration)) {
const checkResult = await VideoDecoder?.isConfigSupported?.(configuration);
if (!checkResult?.supported) {
throw new UnsupportedCodecError(configuration.codec, 'video decoder');
}
this.configuration = configuration;
@ -106,7 +109,8 @@ export class AudioTrackContext extends TrackContext {
this.trackEntry,
this.peekingKeyframe
);
if (await AudioDecoder.isConfigSupported(configuration)) {
const checkResult = await AudioDecoder?.isConfigSupported?.(configuration);
if (!checkResult?.supported) {
throw new UnsupportedCodecError(configuration.codec, 'audio decoder');
}
@ -121,8 +125,7 @@ export class AudioTrackContext extends TrackContext {
return (
Number(
this.configuration.samplesPerFrame / this.configuration.sampleRate
) *
(1_000_000_000 / Number(this.timecodeScale))
) * this.timestampScale
);
}
const delta = blockTimestamp - this.lastBlockTimestamp;
@ -203,7 +206,7 @@ export class TrackSystem extends SegmentComponentSystemTrait<
}
}
if (parseErrors.cause.length > 0) {
console.error(parseErrors);
console.error(parseErrors, parseErrors.cause);
}
}

View File

@ -437,7 +437,7 @@ function main() {
const elementSchemas = extractElementAll();
const files = {
'schema.ts': [
'schema': [
generateMkvSchemaImports(elementSchemas),
generateMkvSchemaHierarchy(elementSchemas),
],

View File

@ -22,7 +22,7 @@
"moduleDetection": "force",
"moduleResolution": "bundler",
"resolveJsonModule": true,
"allowImportingTsExtensions": true,
"allowImportingTsExtensions": false,
"emitDeclarationOnly": true,
"skipLibCheck": true,
"target": "ES2021",