src/controller/timeline-controller.ts

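// TimelineController coordinates text-track handling for hls.js: it feeds
// CEA-608/708 closed-caption bytes from parsed fragments into the 608 parsers,
// parses WebVTT and IMSC1 subtitle fragments, and renders the results either
// natively (TextTrack cues) or via CUES_PARSED / NON_NATIVE_TEXT_TRACKS_FOUND.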
import { Events } from '../events';
import Cea608Parser, { CaptionScreen } from '../utils/cea-608-parser';
import OutputFilter from '../utils/output-filter';
import { parseWebVTT } from '../utils/webvtt-parser';
import {
  sendAddTrackEvent,
  clearCurrentCues,
  addCueToTrack,
  removeCuesInRange,
} from '../utils/texttrack-utils';
import { parseIMSC1, IMSC1_CODEC } from '../utils/imsc1-ttml-parser';
import { appendUint8Array } from '../utils/mp4-tools';
import { PlaylistLevelType } from '../types/loader';
import { Fragment } from '../loader/fragment';
import {
  FragParsingUserdataData,
  FragLoadedData,
  FragDecryptedData,
  MediaAttachingData,
  ManifestLoadedData,
  InitPTSFoundData,
  SubtitleTracksUpdatedData,
  BufferFlushingData,
  FragLoadingData,
} from '../types/events';
import { logger } from '../utils/logger';
import type Hls from '../hls';
import type { ComponentAPI } from '../types/component-api';
import type { HlsConfig } from '../config';
import type { CuesInterface } from '../utils/cues';
import type { MediaPlaylist } from '../types/media-playlist';
import type { VTTCCs } from '../types/vtt';

type TrackProperties = {
  label: string;
  languageCode: string;
  media?: MediaPlaylist;
};

type NonNativeCaptionsTrack = {
  _id?: string;
  label: string;
  kind: string;
  default: boolean;
  closedCaptions?: MediaPlaylist;
  subtitleTrack?: MediaPlaylist;
};

export class TimelineController implements ComponentAPI {
  private hls: Hls;
  private media: HTMLMediaElement | null = null;
  private config: HlsConfig;
  private enabled: boolean = true;
  private Cues: CuesInterface;
  private textTracks: Array<TextTrack> = [];
  private tracks: Array<MediaPlaylist> = [];
  private initPTS: Array<number> = [];
  private timescale: Array<number> = [];
  private unparsedVttFrags: Array<FragLoadedData | FragDecryptedData> = [];
  private captionsTracks: Record<string, TextTrack> = {};
  private nonNativeCaptionsTracks: Record<string, NonNativeCaptionsTrack> = {};
  private cea608Parser1!: Cea608Parser;
  private cea608Parser2!: Cea608Parser;
  private lastSn: number = -1;
  private lastPartIndex: number = -1;
  private prevCC: number = -1;
  private vttCCs: VTTCCs = newVTTCCs();
  private captionsProperties: {
    textTrack1: TrackProperties;
    textTrack2: TrackProperties;
    textTrack3: TrackProperties;
    textTrack4: TrackProperties;
  };

  constructor(hls: Hls) {
    this.hls = hls;
    this.config = hls.config;
    this.Cues = hls.config.cueHandler;

    this.captionsProperties = {
      textTrack1: {
        label: this.config.captionsTextTrack1Label,
        languageCode: this.config.captionsTextTrack1LanguageCode,
      },
      textTrack2: {
        label: this.config.captionsTextTrack2Label,
        languageCode: this.config.captionsTextTrack2LanguageCode,
      },
      textTrack3: {
        label: this.config.captionsTextTrack3Label,
        languageCode: this.config.captionsTextTrack3LanguageCode,
      },
      textTrack4: {
        label: this.config.captionsTextTrack4Label,
        languageCode: this.config.captionsTextTrack4LanguageCode,
      },
    };

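    // CEA-608 carries up to four channels across two fields: parser 1 handles
    // field 1 (CC1/CC2), parser 2 handles field 2 (CC3/CC4), each channel
    // writing to its own OutputFilter and thus its own text track.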
    if (this.config.enableCEA708Captions) {
      const channel1 = new OutputFilter(this, 'textTrack1');
      const channel2 = new OutputFilter(this, 'textTrack2');
      const channel3 = new OutputFilter(this, 'textTrack3');
      const channel4 = new OutputFilter(this, 'textTrack4');
      this.cea608Parser1 = new Cea608Parser(1, channel1, channel2);
      this.cea608Parser2 = new Cea608Parser(3, channel3, channel4);
    }

    hls.on(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.on(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.on(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.on(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.on(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.on(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.on(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.on(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.on(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.on(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.on(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.on(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
  }

  public destroy(): void {
    const { hls } = this;
    hls.off(Events.MEDIA_ATTACHING, this.onMediaAttaching, this);
    hls.off(Events.MEDIA_DETACHING, this.onMediaDetaching, this);
    hls.off(Events.MANIFEST_LOADING, this.onManifestLoading, this);
    hls.off(Events.MANIFEST_LOADED, this.onManifestLoaded, this);
    hls.off(Events.SUBTITLE_TRACKS_UPDATED, this.onSubtitleTracksUpdated, this);
    hls.off(Events.FRAG_LOADING, this.onFragLoading, this);
    hls.off(Events.FRAG_LOADED, this.onFragLoaded, this);
    hls.off(Events.FRAG_PARSING_USERDATA, this.onFragParsingUserdata, this);
    hls.off(Events.FRAG_DECRYPTED, this.onFragDecrypted, this);
    hls.off(Events.INIT_PTS_FOUND, this.onInitPtsFound, this);
    hls.off(Events.SUBTITLE_TRACKS_CLEARED, this.onSubtitleTracksCleared, this);
    hls.off(Events.BUFFER_FLUSHING, this.onBufferFlushing, this);
    // @ts-ignore
    this.hls = this.config = this.cea608Parser1 = this.cea608Parser2 = null;
  }

  public addCues(
    trackName: string,
    startTime: number,
    endTime: number,
    screen: CaptionScreen,
    cueRanges: Array<[number, number]>
  ) {
    // skip cues which overlap more than 50% with previously parsed time ranges
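    // e.g. given an existing range [0, 10], a new cue [2, 8] overlaps by 6s;
    // 6 / (8 - 2) > 0.5, so the cue is absorbed into the range and skipped.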
    let merged = false;
    for (let i = cueRanges.length; i--; ) {
      const cueRange = cueRanges[i];
      const overlap = intersection(
        cueRange[0],
        cueRange[1],
        startTime,
        endTime
      );
      if (overlap >= 0) {
        cueRange[0] = Math.min(cueRange[0], startTime);
        cueRange[1] = Math.max(cueRange[1], endTime);
        merged = true;
        if (overlap / (endTime - startTime) > 0.5) {
          return;
        }
      }
    }
    if (!merged) {
      cueRanges.push([startTime, endTime]);
    }

    if (this.config.renderTextTracksNatively) {
      const track = this.captionsTracks[trackName];
      this.Cues.newCue(track, startTime, endTime, screen);
    } else {
      const cues = this.Cues.newCue(null, startTime, endTime, screen);
      this.hls.trigger(Events.CUES_PARSED, {
        type: 'captions',
        cues,
        track: trackName,
      });
    }
  }

  // Triggered when an initial PTS is found; used for synchronisation of WebVTT.
  private onInitPtsFound(
    event: Events.INIT_PTS_FOUND,
    { frag, id, initPTS, timescale }: InitPTSFoundData
  ) {
    const { unparsedVttFrags } = this;
    if (id === 'main') {
      this.initPTS[frag.cc] = initPTS;
      this.timescale[frag.cc] = timescale;
    }

    // Due to asynchronous processing, the initial PTS may arrive after the
    // first VTT fragments have been loaded. Parse any stored fragments now.
    if (unparsedVttFrags.length) {
      this.unparsedVttFrags = [];
      unparsedVttFrags.forEach((frag) => {
        this.onFragLoaded(Events.FRAG_LOADED, frag as FragLoadedData);
      });
    }
  }

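  // Look up a TextTrack previously flagged by createNativeTrack (via the
  // `textTrack[trackName] = true` marker) so it can be reused across loads.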
  private getExistingTrack(trackName: string): TextTrack | null {
    const { media } = this;
    if (media) {
      for (let i = 0; i < media.textTracks.length; i++) {
        const textTrack = media.textTracks[i];
        if (textTrack[trackName]) {
          return textTrack;
        }
      }
    }
    return null;
  }

  public createCaptionsTrack(trackName: string) {
    if (this.config.renderTextTracksNatively) {
      this.createNativeTrack(trackName);
    } else {
      this.createNonNativeTrack(trackName);
    }
  }

  private createNativeTrack(trackName: string) {
    if (this.captionsTracks[trackName]) {
      return;
    }
    const { captionsProperties, captionsTracks, media } = this;
    const { label, languageCode } = captionsProperties[trackName];
    // Enable reuse of existing text track.
    const existingTrack = this.getExistingTrack(trackName);
    if (!existingTrack) {
      const textTrack = this.createTextTrack('captions', label, languageCode);
      if (textTrack) {
        // Set a special property on the track so we know it's managed by Hls.js
        textTrack[trackName] = true;
        captionsTracks[trackName] = textTrack;
      }
    } else {
      captionsTracks[trackName] = existingTrack;
      clearCurrentCues(captionsTracks[trackName]);
      sendAddTrackEvent(captionsTracks[trackName], media as HTMLMediaElement);
    }
  }

  private createNonNativeTrack(trackName: string) {
    if (this.nonNativeCaptionsTracks[trackName]) {
      return;
    }
    // Create a single-track list for the provider to consume
    const trackProperties: TrackProperties = this.captionsProperties[trackName];
    if (!trackProperties) {
      return;
    }
    const label = trackProperties.label as string;
    const track = {
      _id: trackName,
      label,
      kind: 'captions',
      default: trackProperties.media ? !!trackProperties.media.default : false,
      closedCaptions: trackProperties.media,
    };
    this.nonNativeCaptionsTracks[trackName] = track;
    this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, { tracks: [track] });
  }

  private createTextTrack(
    kind: TextTrackKind,
    label: string,
    lang?: string
  ): TextTrack | undefined {
    const media = this.media;
    if (!media) {
      return;
    }
    return media.addTextTrack(kind, label, lang);
  }

  private onMediaAttaching(
    event: Events.MEDIA_ATTACHING,
    data: MediaAttachingData
  ) {
    this.media = data.media;
    this._cleanTracks();
  }

  private onMediaDetaching() {
    const { captionsTracks } = this;
    Object.keys(captionsTracks).forEach((trackName) => {
      clearCurrentCues(captionsTracks[trackName]);
      delete captionsTracks[trackName];
    });
    this.nonNativeCaptionsTracks = {};
  }

  private onManifestLoading() {
    this.lastSn = -1; // Detect discontinuity in fragment parsing
    this.lastPartIndex = -1;
    this.prevCC = -1;
    this.vttCCs = newVTTCCs(); // Detect discontinuity in subtitle manifests
    this._cleanTracks();
    this.tracks = [];
    this.captionsTracks = {};
    this.nonNativeCaptionsTracks = {};
    this.textTracks = [];
    this.unparsedVttFrags = this.unparsedVttFrags || [];
    this.initPTS = [];
    this.timescale = [];
    if (this.cea608Parser1 && this.cea608Parser2) {
      this.cea608Parser1.reset();
      this.cea608Parser2.reset();
    }
  }

  private _cleanTracks() {
    // clear outdated subtitles
    const { media } = this;
    if (!media) {
      return;
    }
    const textTracks = media.textTracks;
    if (textTracks) {
      for (let i = 0; i < textTracks.length; i++) {
        clearCurrentCues(textTracks[i]);
      }
    }
  }

  private onSubtitleTracksUpdated(
    event: Events.SUBTITLE_TRACKS_UPDATED,
    data: SubtitleTracksUpdatedData
  ) {
    this.textTracks = [];
    const tracks: Array<MediaPlaylist> = data.subtitleTracks || [];
    const hasIMSC1 = tracks.some((track) => track.textCodec === IMSC1_CODEC);
    if (this.config.enableWebVTT || (hasIMSC1 && this.config.enableIMSC1)) {
      const sameTracks =
        this.tracks && tracks && this.tracks.length === tracks.length;
      this.tracks = tracks || [];

      if (this.config.renderTextTracksNatively) {
        const inUseTracks = this.media ? this.media.textTracks : [];

        this.tracks.forEach((track, index) => {
          let textTrack: TextTrack | undefined;
          if (index < inUseTracks.length) {
            let inUseTrack: TextTrack | null = null;

            for (let i = 0; i < inUseTracks.length; i++) {
              if (canReuseVttTextTrack(inUseTracks[i], track)) {
                inUseTrack = inUseTracks[i];
                break;
              }
            }

            // Reuse tracks with the same label, but do not reuse 608/708 tracks
            if (inUseTrack) {
              textTrack = inUseTrack;
            }
          }
          if (textTrack) {
            clearCurrentCues(textTrack);
          } else {
            const textTrackKind =
              this._captionsOrSubtitlesFromCharacteristics(track);
            textTrack = this.createTextTrack(
              textTrackKind,
              track.name,
              track.lang
            );
            if (textTrack) {
              textTrack.mode = 'disabled';
            }
          }
          if (textTrack) {
            (textTrack as any).groupId = track.groupId;
            this.textTracks.push(textTrack);
          }
        });
      } else if (!sameTracks && this.tracks && this.tracks.length) {
        // Create a list of tracks for the provider to consume
        const tracksList = this.tracks.map((track) => {
          return {
            label: track.name,
            kind: track.type.toLowerCase(),
            default: track.default,
            subtitleTrack: track,
          };
        });
        this.hls.trigger(Events.NON_NATIVE_TEXT_TRACKS_FOUND, {
          tracks: tracksList,
        });
      }
    }
  }

  private _captionsOrSubtitlesFromCharacteristics(
    track: MediaPlaylist
  ): TextTrackKind {
    if (track.attrs?.CHARACTERISTICS) {
      const transcribesSpokenDialog = /transcribes-spoken-dialog/gi.test(
        track.attrs.CHARACTERISTICS
      );
      const describesMusicAndSound = /describes-music-and-sound/gi.test(
        track.attrs.CHARACTERISTICS
      );

      if (transcribesSpokenDialog && describesMusicAndSound) {
        return 'captions';
      }
    }

    return 'subtitles';
  }

  private onManifestLoaded(
    event: Events.MANIFEST_LOADED,
    data: ManifestLoadedData
  ) {
    if (this.config.enableCEA708Captions && data.captions) {
      data.captions.forEach((captionsTrack) => {
        const instreamIdMatch = /(?:CC|SERVICE)([1-4])/.exec(
          captionsTrack.instreamId as string
        );
        if (!instreamIdMatch) {
          return;
        }
        const trackName = `textTrack${instreamIdMatch[1]}`;
        const trackProperties: TrackProperties =
          this.captionsProperties[trackName];
        if (!trackProperties) {
          return;
        }
        trackProperties.label = captionsTrack.name;
        if (captionsTrack.lang) {
          // optional attribute
          trackProperties.languageCode = captionsTrack.lang;
        }
        trackProperties.media = captionsTrack;
      });
    }
  }

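  // CLOSED-CAPTIONS attribute of the level a fragment belongs to; a value of
  // 'NONE' disables CEA-608 extraction in onFragParsingUserdata below.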
  private closedCaptionsForLevel(frag: Fragment): string | undefined {
    const level = this.hls.levels[frag.level];
    return level?.attrs['CLOSED-CAPTIONS'];
  }

  private onFragLoading(event: Events.FRAG_LOADING, data: FragLoadingData) {
    const { cea608Parser1, cea608Parser2, lastSn, lastPartIndex } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }
    // if this frag isn't contiguous, clear the parser so cues with bad start/end times aren't added to the textTrack
    if (data.frag.type === PlaylistLevelType.MAIN) {
      const sn = data.frag.sn;
      const partIndex = data?.part?.index ?? -1;
      if (
        !(
          sn === lastSn + 1 ||
          (sn === lastSn && partIndex === lastPartIndex + 1)
        )
      ) {
        cea608Parser1.reset();
        cea608Parser2.reset();
      }
      this.lastSn = sn as number;
      this.lastPartIndex = partIndex;
    }
  }

  private onFragLoaded(
    event: Events.FRAG_LOADED,
    data: FragDecryptedData | FragLoadedData
  ) {
    const { frag, payload } = data;
    const { initPTS, unparsedVttFrags } = this;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      // If fragment is subtitle type, parse as WebVTT.
      if (payload.byteLength) {
        // We need an initial synchronisation PTS. Store fragments as long as none has arrived.
        if (!Number.isFinite(initPTS[frag.cc])) {
          unparsedVttFrags.push(data);
          if (initPTS.length) {
            // finish unsuccessfully, otherwise the subtitle-stream-controller could be blocked from loading new frags.
            this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
              success: false,
              frag,
              error: new Error('Missing initial subtitle PTS'),
            });
          }
          return;
        }

        const decryptData = frag.decryptdata;
        // fragment after decryption has a stats object
        const decrypted = 'stats' in data;
        // If the subtitles are not encrypted, parse VTTs now. Otherwise, we need to wait.
        if (
          decryptData == null ||
          decryptData.key == null ||
          decryptData.method !== 'AES-128' ||
          decrypted
        ) {
          const trackPlaylistMedia = this.tracks[frag.level];
          const vttCCs = this.vttCCs;
          if (!vttCCs[frag.cc]) {
            vttCCs[frag.cc] = {
              start: frag.start,
              prevCC: this.prevCC,
              new: true,
            };
            this.prevCC = frag.cc;
          }
          if (
            trackPlaylistMedia &&
            trackPlaylistMedia.textCodec === IMSC1_CODEC
          ) {
            this._parseIMSC1(frag, payload);
          } else {
            this._parseVTTs(frag, payload, vttCCs);
          }
        }
      } else {
        // In case there is no payload, finish unsuccessfully.
        this.hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag,
          error: new Error('Empty subtitle payload'),
        });
      }
    }
  }

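  // Parse an IMSC1 (TTML) subtitle fragment, append the resulting cues, and
  // report the outcome via SUBTITLE_FRAG_PROCESSED.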
  private _parseIMSC1(frag: Fragment, payload: ArrayBuffer) {
    const hls = this.hls;
    parseIMSC1(
      payload,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        logger.log(`Failed to parse IMSC1: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _parseVTTs(frag: Fragment, payload: ArrayBuffer, vttCCs: any) {
    const hls = this.hls;
    // Parse the WebVTT file contents.
    const payloadWebVTT = frag.initSegment?.data
      ? appendUint8Array(frag.initSegment.data, new Uint8Array(payload))
      : payload;
    parseWebVTT(
      payloadWebVTT,
      this.initPTS[frag.cc],
      this.timescale[frag.cc],
      vttCCs,
      frag.cc,
      frag.start,
      (cues) => {
        this._appendCues(cues, frag.level);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: true,
          frag: frag,
        });
      },
      (error) => {
        this._fallbackToIMSC1(frag, payload);
        // Something went wrong while parsing. Trigger event with success false.
        logger.log(`Failed to parse VTT cue: ${error}`);
        hls.trigger(Events.SUBTITLE_FRAG_PROCESSED, {
          success: false,
          frag: frag,
          error,
        });
      }
    );
  }

  private _fallbackToIMSC1(frag: Fragment, payload: ArrayBuffer) {
    // If textCodec is unknown, try parsing as IMSC1. Set textCodec based on the result
    const trackPlaylistMedia = this.tracks[frag.level];
    if (!trackPlaylistMedia.textCodec) {
      parseIMSC1(
        payload,
        this.initPTS[frag.cc],
        this.timescale[frag.cc],
        () => {
          trackPlaylistMedia.textCodec = IMSC1_CODEC;
          this._parseIMSC1(frag, payload);
        },
        () => {
          trackPlaylistMedia.textCodec = 'wvtt';
        }
      );
    }
  }

  private _appendCues(cues: VTTCue[], fragLevel: number) {
    const hls = this.hls;
    if (this.config.renderTextTracksNatively) {
      const textTrack = this.textTracks[fragLevel];
      // WebVTTParser.parse is asynchronous; if the selected text track's mode
      // is set to "disabled" before parsing finishes, its `cues` list becomes
      // null and accessing cues.getCueById would throw. Bailing out on a
      // disabled mode here guarantees `cues` is non-null when cues are added.
      if (!textTrack || textTrack.mode === 'disabled') {
        return;
      }
      cues.forEach((cue) => addCueToTrack(textTrack, cue));
    } else {
      const currentTrack = this.tracks[fragLevel];
      if (!currentTrack) {
        return;
      }
      const track = currentTrack.default ? 'default' : 'subtitles' + fragLevel;
      hls.trigger(Events.CUES_PARSED, { type: 'subtitles', cues, track });
    }
  }

  private onFragDecrypted(
    event: Events.FRAG_DECRYPTED,
    data: FragDecryptedData
  ) {
    const { frag } = data;
    if (frag.type === PlaylistLevelType.SUBTITLE) {
      if (!Number.isFinite(this.initPTS[frag.cc])) {
        this.unparsedVttFrags.push(data as unknown as FragLoadedData);
        return;
      }
      this.onFragLoaded(Events.FRAG_LOADED, data as unknown as FragLoadedData);
    }
  }

  private onSubtitleTracksCleared() {
    this.tracks = [];
    this.captionsTracks = {};
  }

  private onFragParsingUserdata(
    event: Events.FRAG_PARSING_USERDATA,
    data: FragParsingUserdataData
  ) {
    const { cea608Parser1, cea608Parser2 } = this;
    if (!this.enabled || !(cea608Parser1 && cea608Parser2)) {
      return;
    }

    const { frag, samples } = data;
    if (
      frag.type === PlaylistLevelType.MAIN &&
      this.closedCaptionsForLevel(frag) === 'NONE'
    ) {
      return;
    }
    // If the event contains captions (found in the bytes property), push all bytes into the parser immediately
    // It will create the proper timestamps based on the PTS value
    for (let i = 0; i < samples.length; i++) {
      const ccBytes = samples[i].bytes;
      if (ccBytes) {
        const ccdatas = this.extractCea608Data(ccBytes);
        cea608Parser1.addData(samples[i].pts, ccdatas[0]);
        cea608Parser2.addData(samples[i].pts, ccdatas[1]);
      }
    }
  }

  onBufferFlushing(
    event: Events.BUFFER_FLUSHING,
    { startOffset, endOffset, endOffsetSubtitles, type }: BufferFlushingData
  ) {
    const { media } = this;
    if (!media || media.currentTime < endOffset) {
      return;
    }
    // Clear 608 caption cues from the captions TextTracks when the video back buffer is flushed
    // Forward cues are never removed because we can lose streamed 608 content from recent fragments
    if (!type || type === 'video') {
      const { captionsTracks } = this;
      Object.keys(captionsTracks).forEach((trackName) =>
        removeCuesInRange(captionsTracks[trackName], startOffset, endOffset)
      );
    }
    if (this.config.renderTextTracksNatively) {
      // Clear VTT/IMSC1 subtitle cues from the subtitle TextTracks when the back buffer is flushed
      if (startOffset === 0 && endOffsetSubtitles !== undefined) {
        const { textTracks } = this;
        Object.keys(textTracks).forEach((trackName) =>
          removeCuesInRange(
            textTracks[trackName],
            startOffset,
            endOffsetSubtitles
          )
        );
      }
    }
  }

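  // Split a cc_data block into CEA-608 field 1 and field 2 byte pairs. The low
  // five bits of the first byte carry the packet count; each subsequent 3-byte
  // packet is [flags, data1, data2], where bit 2 of flags marks the packet as
  // valid and the low two bits select the type (0 = field 1, 1 = field 2;
  // types 2-3 carry CEA-708 DTVCC data and are excluded here).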
  private extractCea608Data(byteArray: Uint8Array): number[][] {
    const actualCCBytes: number[][] = [[], []];
    const count = byteArray[0] & 0x1f;
    let position = 2;

    for (let j = 0; j < count; j++) {
      const tmpByte = byteArray[position++];
      const ccbyte1 = 0x7f & byteArray[position++];
      const ccbyte2 = 0x7f & byteArray[position++];
      if (ccbyte1 === 0 && ccbyte2 === 0) {
        continue;
      }
      const ccValid = (0x04 & tmpByte) !== 0; // Support all four channels
      if (ccValid) {
        const ccType = 0x03 & tmpByte;
        if (
          0x00 /* CEA608 field1*/ === ccType ||
          0x01 /* CEA608 field2*/ === ccType
        ) {
          // Exclude CEA708 CC data.
          actualCCBytes[ccType].push(ccbyte1);
          actualCCBytes[ccType].push(ccbyte2);
        }
      }
    }
    return actualCCBytes;
  }
}

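// Reuse a DOM TextTrack only when its label matches the manifest track name and
// it is not one of the CEA-608 caption tracks flagged by createNativeTrack.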
function canReuseVttTextTrack(inUseTrack, manifestTrack): boolean {
  return (
    inUseTrack &&
    inUseTrack.label === manifestTrack.name &&
    !(inUseTrack.textTrack1 || inUseTrack.textTrack2)
  );
}

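// Length of the overlap between [x1, x2] and [y1, y2]; negative when the
// ranges are disjoint, e.g. intersection(0, 4, 2, 6) === 2 while
// intersection(0, 1, 2, 3) === -1.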
function intersection(x1: number, x2: number, y1: number, y2: number): number {
  return Math.min(x2, y2) - Math.max(x1, y1);
}

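// Fresh discontinuity (CC) map, seeded with a root entry for CC 0; the WebVTT
// parser uses these entries to track time offsets across discontinuities.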
function newVTTCCs(): VTTCCs {
  return {
    ccOffset: 0,
    presentationOffset: 0,
    0: {
      start: 0,
      prevCC: -1,
      new: true,
    },
  };
}