`\n // and attach a portal programmatically in the parent component. When Angular does the first CD\n // round, it will fire the setter with empty string, causing the user's content to be cleared.\n if (this.hasAttached() && !portal && !this._isInitialized) {\n return;\n }\n if (this.hasAttached()) {\n super.detach();\n }\n if (portal) {\n super.attach(portal);\n }\n this._attachedPortal = portal || null;\n }\n /** Component or view reference that is attached to the portal. */\n get attachedRef() {\n return this._attachedRef;\n }\n ngOnInit() {\n this._isInitialized = true;\n }\n ngOnDestroy() {\n super.dispose();\n this._attachedRef = this._attachedPortal = null;\n }\n /**\n * Attach the given ComponentPortal to this PortalOutlet using the ComponentFactoryResolver.\n *\n * @param portal Portal to be attached to the portal outlet.\n * @returns Reference to the created component.\n */\n attachComponentPortal(portal) {\n portal.setAttachedHost(this);\n // If the portal specifies an origin, use that as the logical location of the component\n // in the application tree. Otherwise use the location of this PortalOutlet.\n const viewContainerRef = portal.viewContainerRef != null ? portal.viewContainerRef : this._viewContainerRef;\n const resolver = portal.componentFactoryResolver || this._componentFactoryResolver;\n const componentFactory = resolver.resolveComponentFactory(portal.component);\n const ref = viewContainerRef.createComponent(componentFactory, viewContainerRef.length, portal.injector || viewContainerRef.injector, portal.projectableNodes || undefined);\n // If we're using a view container that's different from the injected one (e.g. when the portal\n // specifies its own) we need to move the component into the outlet, otherwise it'll be rendered\n // inside of the alternate view container.\n if (viewContainerRef !== this._viewContainerRef) {\n this._getRootNode().appendChild(ref.hostView.rootNodes[0]);\n }\n super.setDisposeFn(() => ref.destroy());\n this._attachedPortal = portal;\n this._attachedRef = ref;\n this.attached.emit(ref);\n return ref;\n }\n /**\n * Attach the given TemplatePortal to this PortalHost as an embedded View.\n * @param portal Portal to be attached.\n * @returns Reference to the created embedded view.\n */\n attachTemplatePortal(portal) {\n portal.setAttachedHost(this);\n const viewRef = this._viewContainerRef.createEmbeddedView(portal.templateRef, portal.context, {\n injector: portal.injector\n });\n super.setDisposeFn(() => this._viewContainerRef.clear());\n this._attachedPortal = portal;\n this._attachedRef = viewRef;\n this.attached.emit(viewRef);\n return viewRef;\n }\n /** Gets the root node of the portal outlet. */\n _getRootNode() {\n const nativeElement = this._viewContainerRef.element.nativeElement;\n // The directive could be set on a template which will result in a comment\n // node being the root. Use the comment's parent node if that is the case.\n return nativeElement.nodeType === nativeElement.ELEMENT_NODE ? 
nativeElement : nativeElement.parentNode;\n }\n static {\n this.ɵfac = function CdkPortalOutlet_Factory(t) {\n return new (t || CdkPortalOutlet)(i0.ɵɵdirectiveInject(i0.ComponentFactoryResolver), i0.ɵɵdirectiveInject(i0.ViewContainerRef), i0.ɵɵdirectiveInject(DOCUMENT));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkPortalOutlet,\n selectors: [[\"\", \"cdkPortalOutlet\", \"\"]],\n inputs: {\n portal: [i0.ɵɵInputFlags.None, \"cdkPortalOutlet\", \"portal\"]\n },\n outputs: {\n attached: \"attached\"\n },\n exportAs: [\"cdkPortalOutlet\"],\n standalone: true,\n features: [i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkPortalOutlet, [{\n type: Directive,\n args: [{\n selector: '[cdkPortalOutlet]',\n exportAs: 'cdkPortalOutlet',\n standalone: true\n }]\n }], () => [{\n type: i0.ComponentFactoryResolver\n }, {\n type: i0.ViewContainerRef\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }], {\n portal: [{\n type: Input,\n args: ['cdkPortalOutlet']\n }],\n attached: [{\n type: Output\n }]\n });\n})();\n/**\n * @deprecated Use `CdkPortalOutlet` instead.\n * @breaking-change 9.0.0\n */\nclass PortalHostDirective extends CdkPortalOutlet {\n static {\n this.ɵfac = /* @__PURE__ */(() => {\n let ɵPortalHostDirective_BaseFactory;\n return function PortalHostDirective_Factory(t) {\n return (ɵPortalHostDirective_BaseFactory || (ɵPortalHostDirective_BaseFactory = i0.ɵɵgetInheritedFactory(PortalHostDirective)))(t || PortalHostDirective);\n };\n })();\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: PortalHostDirective,\n selectors: [[\"\", \"cdkPortalHost\", \"\"], [\"\", \"portalHost\", \"\"]],\n inputs: {\n portal: [i0.ɵɵInputFlags.None, \"cdkPortalHost\", \"portal\"]\n },\n exportAs: [\"cdkPortalHost\"],\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: CdkPortalOutlet,\n useExisting: PortalHostDirective\n }]), i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(PortalHostDirective, [{\n type: Directive,\n args: [{\n selector: '[cdkPortalHost], [portalHost]',\n exportAs: 'cdkPortalHost',\n inputs: [{\n name: 'portal',\n alias: 'cdkPortalHost'\n }],\n providers: [{\n provide: CdkPortalOutlet,\n useExisting: PortalHostDirective\n }],\n standalone: true\n }]\n }], null, null);\n})();\nclass PortalModule {\n static {\n this.ɵfac = function PortalModule_Factory(t) {\n return new (t || PortalModule)();\n };\n }\n static {\n this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: PortalModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({});\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(PortalModule, [{\n type: NgModule,\n args: [{\n imports: [CdkPortal, CdkPortalOutlet, TemplatePortalDirective, PortalHostDirective],\n exports: [CdkPortal, CdkPortalOutlet, TemplatePortalDirective, PortalHostDirective]\n }]\n }], null, null);\n})();\n\n/**\n * Custom injector to be used when providing custom\n * injection tokens to components inside a portal.\n * @docs-private\n * @deprecated Use `Injector.create` instead.\n * @breaking-change 11.0.0\n */\nclass PortalInjector {\n constructor(_parentInjector, _customTokens) {\n this._parentInjector = _parentInjector;\n this._customTokens = _customTokens;\n }\n get(token, notFoundValue) {\n const value = 
this._customTokens.get(token);\n if (typeof value !== 'undefined') {\n return value;\n }\n return this._parentInjector.get(token, notFoundValue);\n }\n}\n\n/**\n * Generated bundle index. Do not edit.\n */\n\nexport { BasePortalHost, BasePortalOutlet, CdkPortal, CdkPortalOutlet, ComponentPortal, DomPortal, DomPortalHost, DomPortalOutlet, Portal, PortalHostDirective, PortalInjector, PortalModule, TemplatePortal, TemplatePortalDirective };\n","import { CdkAccordionItem, CdkAccordion, CdkAccordionModule } from '@angular/cdk/accordion';\nimport { TemplatePortal, CdkPortalOutlet, PortalModule } from '@angular/cdk/portal';\nimport * as i0 from '@angular/core';\nimport { InjectionToken, Directive, Inject, Optional, EventEmitter, ANIMATION_MODULE_TYPE, booleanAttribute, Component, ViewEncapsulation, ChangeDetectionStrategy, SkipSelf, Input, Output, ContentChild, ViewChild, numberAttribute, Host, Attribute, QueryList, ContentChildren, NgModule } from '@angular/core';\nimport { MatCommonModule } from '@angular/material/core';\nimport * as i2 from '@angular/cdk/a11y';\nimport { FocusKeyManager } from '@angular/cdk/a11y';\nimport { startWith, filter, take } from 'rxjs/operators';\nimport { ENTER, hasModifierKey, SPACE } from '@angular/cdk/keycodes';\nimport { Subject, Subscription, EMPTY, merge } from 'rxjs';\nimport { trigger, state, style, transition, animate } from '@angular/animations';\nimport * as i1 from '@angular/cdk/collections';\nimport { DOCUMENT } from '@angular/common';\n\n/**\n * Token used to provide a `MatAccordion` to `MatExpansionPanel`.\n * Used primarily to avoid circular imports between `MatAccordion` and `MatExpansionPanel`.\n */\nconst _c0 = [\"body\"];\nconst _c1 = [[[\"mat-expansion-panel-header\"]], \"*\", [[\"mat-action-row\"]]];\nconst _c2 = [\"mat-expansion-panel-header\", \"*\", \"mat-action-row\"];\nfunction MatExpansionPanel_ng_template_5_Template(rf, ctx) {}\nconst _c3 = [[[\"mat-panel-title\"]], [[\"mat-panel-description\"]], \"*\"];\nconst _c4 = [\"mat-panel-title\", \"mat-panel-description\", \"*\"];\nfunction MatExpansionPanelHeader_Conditional_4_Template(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵelementStart(0, \"span\", 1);\n i0.ɵɵnamespaceSVG();\n i0.ɵɵelementStart(1, \"svg\", 2);\n i0.ɵɵelement(2, \"path\", 3);\n i0.ɵɵelementEnd()();\n }\n if (rf & 2) {\n const ctx_r0 = i0.ɵɵnextContext();\n i0.ɵɵproperty(\"@indicatorRotate\", ctx_r0._getExpandedState());\n }\n}\nconst MAT_ACCORDION = new InjectionToken('MAT_ACCORDION');\n\n/** Time and timing curve for expansion panel animations. */\n// Note: Keep this in sync with the Sass variable for the panel header animation.\nconst EXPANSION_PANEL_ANIMATION_TIMING = '225ms cubic-bezier(0.4,0.0,0.2,1)';\n/**\n * Animations used by the Material expansion panel.\n *\n * A bug in angular animation's `state` when ViewContainers are moved using ViewContainerRef.move()\n * causes the animation state of moved components to become `void` upon exit, and not update again\n * upon reentry into the DOM. This can lead a to situation for the expansion panel where the state\n * of the panel is `expanded` or `collapsed` but the animation state is `void`.\n *\n * To correctly handle animating to the next state, we animate between `void` and `collapsed` which\n * are defined to have the same styles. 
Since angular animates from the current styles to the\n * destination state's style definition, in situations where we are moving from `void`'s styles to\n * `collapsed` this acts a noop since no style values change.\n *\n * In the case where angular's animation state is out of sync with the expansion panel's state, the\n * expansion panel being `expanded` and angular animations being `void`, the animation from the\n * `expanded`'s effective styles (though in a `void` animation state) to the collapsed state will\n * occur as expected.\n *\n * Angular Bug: https://github.com/angular/angular/issues/18847\n *\n * @docs-private\n */\nconst matExpansionAnimations = {\n /** Animation that rotates the indicator arrow. */\n indicatorRotate: trigger('indicatorRotate', [state('collapsed, void', style({\n transform: 'rotate(0deg)'\n })), state('expanded', style({\n transform: 'rotate(180deg)'\n })), transition('expanded <=> collapsed, void => collapsed', animate(EXPANSION_PANEL_ANIMATION_TIMING))]),\n /** Animation that expands and collapses the panel content. */\n bodyExpansion: trigger('bodyExpansion', [state('collapsed, void', style({\n height: '0px',\n visibility: 'hidden'\n })),\n // Clear the `visibility` while open, otherwise the content will be visible when placed in\n // a parent that's `visibility: hidden`, because `visibility` doesn't apply to descendants\n // that have a `visibility` of their own (see #27436).\n state('expanded', style({\n height: '*',\n visibility: ''\n })), transition('expanded <=> collapsed, void => collapsed', animate(EXPANSION_PANEL_ANIMATION_TIMING))])\n};\n\n/**\n * Token used to provide a `MatExpansionPanel` to `MatExpansionPanelContent`.\n * Used to avoid circular imports between `MatExpansionPanel` and `MatExpansionPanelContent`.\n */\nconst MAT_EXPANSION_PANEL = new InjectionToken('MAT_EXPANSION_PANEL');\n\n/**\n * Expansion panel content that will be rendered lazily\n * after the panel is opened for the first time.\n */\nclass MatExpansionPanelContent {\n constructor(_template, _expansionPanel) {\n this._template = _template;\n this._expansionPanel = _expansionPanel;\n }\n static {\n this.ɵfac = function MatExpansionPanelContent_Factory(t) {\n return new (t || MatExpansionPanelContent)(i0.ɵɵdirectiveInject(i0.TemplateRef), i0.ɵɵdirectiveInject(MAT_EXPANSION_PANEL, 8));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatExpansionPanelContent,\n selectors: [[\"ng-template\", \"matExpansionPanelContent\", \"\"]],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionPanelContent, [{\n type: Directive,\n args: [{\n selector: 'ng-template[matExpansionPanelContent]',\n standalone: true\n }]\n }], () => [{\n type: i0.TemplateRef\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [MAT_EXPANSION_PANEL]\n }, {\n type: Optional\n }]\n }], null);\n})();\n\n/** Counter for generating unique element ids. */\nlet uniqueId = 0;\n/**\n * Injection token that can be used to configure the default\n * options for the expansion panel component.\n */\nconst MAT_EXPANSION_PANEL_DEFAULT_OPTIONS = new InjectionToken('MAT_EXPANSION_PANEL_DEFAULT_OPTIONS');\n/**\n * This component can be used as a single element to show expandable content, or as one of\n * multiple children of an element with the MatAccordion directive attached.\n */\nclass MatExpansionPanel extends CdkAccordionItem {\n /** Whether the toggle indicator should be hidden. 
*/\n get hideToggle() {\n return this._hideToggle || this.accordion && this.accordion.hideToggle;\n }\n set hideToggle(value) {\n this._hideToggle = value;\n }\n /** The position of the expansion indicator. */\n get togglePosition() {\n return this._togglePosition || this.accordion && this.accordion.togglePosition;\n }\n set togglePosition(value) {\n this._togglePosition = value;\n }\n constructor(accordion, _changeDetectorRef, _uniqueSelectionDispatcher, _viewContainerRef, _document, _animationMode, defaultOptions) {\n super(accordion, _changeDetectorRef, _uniqueSelectionDispatcher);\n this._viewContainerRef = _viewContainerRef;\n this._animationMode = _animationMode;\n this._hideToggle = false;\n /** An event emitted after the body's expansion animation happens. */\n this.afterExpand = new EventEmitter();\n /** An event emitted after the body's collapse animation happens. */\n this.afterCollapse = new EventEmitter();\n /** Stream that emits for changes in `@Input` properties. */\n this._inputChanges = new Subject();\n /** ID for the associated header element. Used for a11y labelling. */\n this._headerId = `mat-expansion-panel-header-${uniqueId++}`;\n this.accordion = accordion;\n this._document = _document;\n this._animationsDisabled = _animationMode === 'NoopAnimations';\n if (defaultOptions) {\n this.hideToggle = defaultOptions.hideToggle;\n }\n }\n /** Determines whether the expansion panel should have spacing between it and its siblings. */\n _hasSpacing() {\n if (this.accordion) {\n return this.expanded && this.accordion.displayMode === 'default';\n }\n return false;\n }\n /** Gets the expanded state string. */\n _getExpandedState() {\n return this.expanded ? 'expanded' : 'collapsed';\n }\n /** Toggles the expanded state of the expansion panel. */\n toggle() {\n this.expanded = !this.expanded;\n }\n /** Sets the expanded state of the expansion panel to false. */\n close() {\n this.expanded = false;\n }\n /** Sets the expanded state of the expansion panel to true. */\n open() {\n this.expanded = true;\n }\n ngAfterContentInit() {\n if (this._lazyContent && this._lazyContent._expansionPanel === this) {\n // Render the content as soon as the panel becomes open.\n this.opened.pipe(startWith(null), filter(() => this.expanded && !this._portal), take(1)).subscribe(() => {\n this._portal = new TemplatePortal(this._lazyContent._template, this._viewContainerRef);\n });\n }\n }\n ngOnChanges(changes) {\n this._inputChanges.next(changes);\n }\n ngOnDestroy() {\n super.ngOnDestroy();\n this._inputChanges.complete();\n }\n /** Checks whether the expansion panel's content contains the currently-focused element. */\n _containsFocus() {\n if (this._body) {\n const focusedElement = this._document.activeElement;\n const bodyElement = this._body.nativeElement;\n return focusedElement === bodyElement || bodyElement.contains(focusedElement);\n }\n return false;\n }\n /** Called when the expansion animation has started. */\n _animationStarted(event) {\n if (!isInitialAnimation(event) && !this._animationsDisabled && this._body) {\n // Prevent the user from tabbing into the content while it's animating.\n // TODO(crisbeto): maybe use `inert` to prevent focus from entering while closed as well\n // instead of `visibility`? Will allow us to clean up some code but needs more testing.\n this._body?.nativeElement.setAttribute('inert', '');\n }\n }\n /** Called when the expansion animation has finished. 
*/\n _animationDone(event) {\n if (!isInitialAnimation(event)) {\n if (event.toState === 'expanded') {\n this.afterExpand.emit();\n } else if (event.toState === 'collapsed') {\n this.afterCollapse.emit();\n }\n // Re-enable tabbing once the animation is finished.\n if (!this._animationsDisabled && this._body) {\n this._body.nativeElement.removeAttribute('inert');\n }\n }\n }\n static {\n this.ɵfac = function MatExpansionPanel_Factory(t) {\n return new (t || MatExpansionPanel)(i0.ɵɵdirectiveInject(MAT_ACCORDION, 12), i0.ɵɵdirectiveInject(i0.ChangeDetectorRef), i0.ɵɵdirectiveInject(i1.UniqueSelectionDispatcher), i0.ɵɵdirectiveInject(i0.ViewContainerRef), i0.ɵɵdirectiveInject(DOCUMENT), i0.ɵɵdirectiveInject(ANIMATION_MODULE_TYPE, 8), i0.ɵɵdirectiveInject(MAT_EXPANSION_PANEL_DEFAULT_OPTIONS, 8));\n };\n }\n static {\n this.ɵcmp = /* @__PURE__ */i0.ɵɵdefineComponent({\n type: MatExpansionPanel,\n selectors: [[\"mat-expansion-panel\"]],\n contentQueries: function MatExpansionPanel_ContentQueries(rf, ctx, dirIndex) {\n if (rf & 1) {\n i0.ɵɵcontentQuery(dirIndex, MatExpansionPanelContent, 5);\n }\n if (rf & 2) {\n let _t;\n i0.ɵɵqueryRefresh(_t = i0.ɵɵloadQuery()) && (ctx._lazyContent = _t.first);\n }\n },\n viewQuery: function MatExpansionPanel_Query(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵviewQuery(_c0, 5);\n }\n if (rf & 2) {\n let _t;\n i0.ɵɵqueryRefresh(_t = i0.ɵɵloadQuery()) && (ctx._body = _t.first);\n }\n },\n hostAttrs: [1, \"mat-expansion-panel\"],\n hostVars: 6,\n hostBindings: function MatExpansionPanel_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵclassProp(\"mat-expanded\", ctx.expanded)(\"_mat-animation-noopable\", ctx._animationsDisabled)(\"mat-expansion-panel-spacing\", ctx._hasSpacing());\n }\n },\n inputs: {\n hideToggle: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"hideToggle\", \"hideToggle\", booleanAttribute],\n togglePosition: \"togglePosition\"\n },\n outputs: {\n afterExpand: \"afterExpand\",\n afterCollapse: \"afterCollapse\"\n },\n exportAs: [\"matExpansionPanel\"],\n standalone: true,\n features: [i0.ɵɵProvidersFeature([\n // Provide MatAccordion as undefined to prevent nested expansion panels from registering\n // to the same accordion.\n {\n provide: MAT_ACCORDION,\n useValue: undefined\n }, {\n provide: MAT_EXPANSION_PANEL,\n useExisting: MatExpansionPanel\n }]), i0.ɵɵInputTransformsFeature, i0.ɵɵInheritDefinitionFeature, i0.ɵɵNgOnChangesFeature, i0.ɵɵStandaloneFeature],\n ngContentSelectors: _c2,\n decls: 7,\n vars: 4,\n consts: [[\"body\", \"\"], [\"role\", \"region\", 1, \"mat-expansion-panel-content\", 3, \"id\"], [1, \"mat-expansion-panel-body\"], [3, \"cdkPortalOutlet\"]],\n template: function MatExpansionPanel_Template(rf, ctx) {\n if (rf & 1) {\n const _r1 = i0.ɵɵgetCurrentView();\n i0.ɵɵprojectionDef(_c1);\n i0.ɵɵprojection(0);\n i0.ɵɵelementStart(1, \"div\", 1, 0);\n i0.ɵɵlistener(\"@bodyExpansion.start\", function MatExpansionPanel_Template_div_animation_bodyExpansion_start_1_listener($event) {\n i0.ɵɵrestoreView(_r1);\n return i0.ɵɵresetView(ctx._animationStarted($event));\n })(\"@bodyExpansion.done\", function MatExpansionPanel_Template_div_animation_bodyExpansion_done_1_listener($event) {\n i0.ɵɵrestoreView(_r1);\n return i0.ɵɵresetView(ctx._animationDone($event));\n });\n i0.ɵɵelementStart(3, \"div\", 2);\n i0.ɵɵprojection(4, 1);\n i0.ɵɵtemplate(5, MatExpansionPanel_ng_template_5_Template, 0, 0, \"ng-template\", 3);\n i0.ɵɵelementEnd();\n i0.ɵɵprojection(6, 2);\n i0.ɵɵelementEnd();\n }\n if (rf & 2) {\n i0.ɵɵadvance();\n i0.ɵɵproperty(\"@bodyExpansion\", 
ctx._getExpandedState())(\"id\", ctx.id);\n i0.ɵɵattribute(\"aria-labelledby\", ctx._headerId);\n i0.ɵɵadvance(4);\n i0.ɵɵproperty(\"cdkPortalOutlet\", ctx._portal);\n }\n },\n dependencies: [CdkPortalOutlet],\n styles: [\".mat-expansion-panel{box-sizing:content-box;display:block;margin:0;overflow:hidden;transition:margin 225ms cubic-bezier(0.4, 0, 0.2, 1),box-shadow 280ms cubic-bezier(0.4, 0, 0.2, 1);position:relative;background:var(--mat-expansion-container-background-color);color:var(--mat-expansion-container-text-color);border-radius:var(--mat-expansion-container-shape)}.mat-expansion-panel:not([class*=mat-elevation-z]){box-shadow:0px 3px 1px -2px rgba(0, 0, 0, 0.2), 0px 2px 2px 0px rgba(0, 0, 0, 0.14), 0px 1px 5px 0px rgba(0, 0, 0, 0.12)}.mat-accordion .mat-expansion-panel:not(.mat-expanded),.mat-accordion .mat-expansion-panel:not(.mat-expansion-panel-spacing){border-radius:0}.mat-accordion .mat-expansion-panel:first-of-type{border-top-right-radius:var(--mat-expansion-container-shape);border-top-left-radius:var(--mat-expansion-container-shape)}.mat-accordion .mat-expansion-panel:last-of-type{border-bottom-right-radius:var(--mat-expansion-container-shape);border-bottom-left-radius:var(--mat-expansion-container-shape)}.cdk-high-contrast-active .mat-expansion-panel{outline:solid 1px}.mat-expansion-panel.ng-animate-disabled,.ng-animate-disabled .mat-expansion-panel,.mat-expansion-panel._mat-animation-noopable{transition:none}.mat-expansion-panel-content{display:flex;flex-direction:column;overflow:visible;font-family:var(--mat-expansion-container-text-font);font-size:var(--mat-expansion-container-text-size);font-weight:var(--mat-expansion-container-text-weight);line-height:var(--mat-expansion-container-text-line-height);letter-spacing:var(--mat-expansion-container-text-tracking)}.mat-expansion-panel-content[style*=\\\"visibility: hidden\\\"] *{visibility:hidden !important}.mat-expansion-panel-body{padding:0 24px 16px}.mat-expansion-panel-spacing{margin:16px 0}.mat-accordion>.mat-expansion-panel-spacing:first-child,.mat-accordion>*:first-child:not(.mat-expansion-panel) .mat-expansion-panel-spacing{margin-top:0}.mat-accordion>.mat-expansion-panel-spacing:last-child,.mat-accordion>*:last-child:not(.mat-expansion-panel) .mat-expansion-panel-spacing{margin-bottom:0}.mat-action-row{border-top-style:solid;border-top-width:1px;display:flex;flex-direction:row;justify-content:flex-end;padding:16px 8px 16px 24px;border-top-color:var(--mat-expansion-actions-divider-color)}.mat-action-row .mat-button-base,.mat-action-row .mat-mdc-button-base{margin-left:8px}[dir=rtl] .mat-action-row .mat-button-base,[dir=rtl] .mat-action-row .mat-mdc-button-base{margin-left:0;margin-right:8px}\"],\n encapsulation: 2,\n data: {\n animation: [matExpansionAnimations.bodyExpansion]\n },\n changeDetection: 0\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionPanel, [{\n type: Component,\n args: [{\n selector: 'mat-expansion-panel',\n exportAs: 'matExpansionPanel',\n encapsulation: ViewEncapsulation.None,\n changeDetection: ChangeDetectionStrategy.OnPush,\n animations: [matExpansionAnimations.bodyExpansion],\n providers: [\n // Provide MatAccordion as undefined to prevent nested expansion panels from registering\n // to the same accordion.\n {\n provide: MAT_ACCORDION,\n useValue: undefined\n }, {\n provide: MAT_EXPANSION_PANEL,\n useExisting: MatExpansionPanel\n }],\n host: {\n 'class': 'mat-expansion-panel',\n '[class.mat-expanded]': 'expanded',\n 
'[class._mat-animation-noopable]': '_animationsDisabled',\n '[class.mat-expansion-panel-spacing]': '_hasSpacing()'\n },\n standalone: true,\n imports: [CdkPortalOutlet],\n template: \"
\\n
\\n\",\n styles: [\".mat-expansion-panel{box-sizing:content-box;display:block;margin:0;overflow:hidden;transition:margin 225ms cubic-bezier(0.4, 0, 0.2, 1),box-shadow 280ms cubic-bezier(0.4, 0, 0.2, 1);position:relative;background:var(--mat-expansion-container-background-color);color:var(--mat-expansion-container-text-color);border-radius:var(--mat-expansion-container-shape)}.mat-expansion-panel:not([class*=mat-elevation-z]){box-shadow:0px 3px 1px -2px rgba(0, 0, 0, 0.2), 0px 2px 2px 0px rgba(0, 0, 0, 0.14), 0px 1px 5px 0px rgba(0, 0, 0, 0.12)}.mat-accordion .mat-expansion-panel:not(.mat-expanded),.mat-accordion .mat-expansion-panel:not(.mat-expansion-panel-spacing){border-radius:0}.mat-accordion .mat-expansion-panel:first-of-type{border-top-right-radius:var(--mat-expansion-container-shape);border-top-left-radius:var(--mat-expansion-container-shape)}.mat-accordion .mat-expansion-panel:last-of-type{border-bottom-right-radius:var(--mat-expansion-container-shape);border-bottom-left-radius:var(--mat-expansion-container-shape)}.cdk-high-contrast-active .mat-expansion-panel{outline:solid 1px}.mat-expansion-panel.ng-animate-disabled,.ng-animate-disabled .mat-expansion-panel,.mat-expansion-panel._mat-animation-noopable{transition:none}.mat-expansion-panel-content{display:flex;flex-direction:column;overflow:visible;font-family:var(--mat-expansion-container-text-font);font-size:var(--mat-expansion-container-text-size);font-weight:var(--mat-expansion-container-text-weight);line-height:var(--mat-expansion-container-text-line-height);letter-spacing:var(--mat-expansion-container-text-tracking)}.mat-expansion-panel-content[style*=\\\"visibility: hidden\\\"] *{visibility:hidden !important}.mat-expansion-panel-body{padding:0 24px 16px}.mat-expansion-panel-spacing{margin:16px 0}.mat-accordion>.mat-expansion-panel-spacing:first-child,.mat-accordion>*:first-child:not(.mat-expansion-panel) .mat-expansion-panel-spacing{margin-top:0}.mat-accordion>.mat-expansion-panel-spacing:last-child,.mat-accordion>*:last-child:not(.mat-expansion-panel) .mat-expansion-panel-spacing{margin-bottom:0}.mat-action-row{border-top-style:solid;border-top-width:1px;display:flex;flex-direction:row;justify-content:flex-end;padding:16px 8px 16px 24px;border-top-color:var(--mat-expansion-actions-divider-color)}.mat-action-row .mat-button-base,.mat-action-row .mat-mdc-button-base{margin-left:8px}[dir=rtl] .mat-action-row .mat-button-base,[dir=rtl] .mat-action-row .mat-mdc-button-base{margin-left:0;margin-right:8px}\"]\n }]\n }], () => [{\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: SkipSelf\n }, {\n type: Inject,\n args: [MAT_ACCORDION]\n }]\n }, {\n type: i0.ChangeDetectorRef\n }, {\n type: i1.UniqueSelectionDispatcher\n }, {\n type: i0.ViewContainerRef\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [ANIMATION_MODULE_TYPE]\n }]\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [MAT_EXPANSION_PANEL_DEFAULT_OPTIONS]\n }, {\n type: Optional\n }]\n }], {\n hideToggle: [{\n type: Input,\n args: [{\n transform: booleanAttribute\n }]\n }],\n togglePosition: [{\n type: Input\n }],\n afterExpand: [{\n type: Output\n }],\n afterCollapse: [{\n type: Output\n }],\n _lazyContent: [{\n type: ContentChild,\n args: [MatExpansionPanelContent]\n }],\n _body: [{\n type: ViewChild,\n args: ['body']\n }]\n });\n})();\n/** Checks whether an animation is the initial setup animation. 
*/\nfunction isInitialAnimation(event) {\n return event.fromState === 'void';\n}\n/**\n * Actions of a `<mat-expansion-panel>
`.\n */\nclass MatExpansionPanelActionRow {\n static {\n this.ɵfac = function MatExpansionPanelActionRow_Factory(t) {\n return new (t || MatExpansionPanelActionRow)();\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatExpansionPanelActionRow,\n selectors: [[\"mat-action-row\"]],\n hostAttrs: [1, \"mat-action-row\"],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionPanelActionRow, [{\n type: Directive,\n args: [{\n selector: 'mat-action-row',\n host: {\n class: 'mat-action-row'\n },\n standalone: true\n }]\n }], null, null);\n})();\n\n/**\n * Header element of a ``.\n */\nclass MatExpansionPanelHeader {\n constructor(panel, _element, _focusMonitor, _changeDetectorRef, defaultOptions, _animationMode, tabIndex) {\n this.panel = panel;\n this._element = _element;\n this._focusMonitor = _focusMonitor;\n this._changeDetectorRef = _changeDetectorRef;\n this._animationMode = _animationMode;\n this._parentChangeSubscription = Subscription.EMPTY;\n /** Tab index of the header. */\n this.tabIndex = 0;\n const accordionHideToggleChange = panel.accordion ? panel.accordion._stateChanges.pipe(filter(changes => !!(changes['hideToggle'] || changes['togglePosition']))) : EMPTY;\n this.tabIndex = parseInt(tabIndex || '') || 0;\n // Since the toggle state depends on an @Input on the panel, we\n // need to subscribe and trigger change detection manually.\n this._parentChangeSubscription = merge(panel.opened, panel.closed, accordionHideToggleChange, panel._inputChanges.pipe(filter(changes => {\n return !!(changes['hideToggle'] || changes['disabled'] || changes['togglePosition']);\n }))).subscribe(() => this._changeDetectorRef.markForCheck());\n // Avoids focus being lost if the panel contained the focused element and was closed.\n panel.closed.pipe(filter(() => panel._containsFocus())).subscribe(() => _focusMonitor.focusVia(_element, 'program'));\n if (defaultOptions) {\n this.expandedHeight = defaultOptions.expandedHeight;\n this.collapsedHeight = defaultOptions.collapsedHeight;\n }\n }\n /**\n * Whether the associated panel is disabled. Implemented as a part of `FocusableOption`.\n * @docs-private\n */\n get disabled() {\n return this.panel.disabled;\n }\n /** Toggles the expanded state of the panel. */\n _toggle() {\n if (!this.disabled) {\n this.panel.toggle();\n }\n }\n /** Gets whether the panel is expanded. */\n _isExpanded() {\n return this.panel.expanded;\n }\n /** Gets the expanded state string of the panel. */\n _getExpandedState() {\n return this.panel._getExpandedState();\n }\n /** Gets the panel id. */\n _getPanelId() {\n return this.panel.id;\n }\n /** Gets the toggle position for the header. */\n _getTogglePosition() {\n return this.panel.togglePosition;\n }\n /** Gets whether the expand indicator should be shown. */\n _showToggle() {\n return !this.panel.hideToggle && !this.panel.disabled;\n }\n /**\n * Gets the current height of the header. Null if no custom height has been\n * specified, and if the default height from the stylesheet should be used.\n */\n _getHeaderHeight() {\n const isExpanded = this._isExpanded();\n if (isExpanded && this.expandedHeight) {\n return this.expandedHeight;\n } else if (!isExpanded && this.collapsedHeight) {\n return this.collapsedHeight;\n }\n return null;\n }\n /** Handle keydown event calling to toggle() if appropriate. 
*/\n _keydown(event) {\n switch (event.keyCode) {\n // Toggle for space and enter keys.\n case SPACE:\n case ENTER:\n if (!hasModifierKey(event)) {\n event.preventDefault();\n this._toggle();\n }\n break;\n default:\n if (this.panel.accordion) {\n this.panel.accordion._handleHeaderKeydown(event);\n }\n return;\n }\n }\n /**\n * Focuses the panel header. Implemented as a part of `FocusableOption`.\n * @param origin Origin of the action that triggered the focus.\n * @docs-private\n */\n focus(origin, options) {\n if (origin) {\n this._focusMonitor.focusVia(this._element, origin, options);\n } else {\n this._element.nativeElement.focus(options);\n }\n }\n ngAfterViewInit() {\n this._focusMonitor.monitor(this._element).subscribe(origin => {\n if (origin && this.panel.accordion) {\n this.panel.accordion._handleHeaderFocus(this);\n }\n });\n }\n ngOnDestroy() {\n this._parentChangeSubscription.unsubscribe();\n this._focusMonitor.stopMonitoring(this._element);\n }\n static {\n this.ɵfac = function MatExpansionPanelHeader_Factory(t) {\n return new (t || MatExpansionPanelHeader)(i0.ɵɵdirectiveInject(MatExpansionPanel, 1), i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(i2.FocusMonitor), i0.ɵɵdirectiveInject(i0.ChangeDetectorRef), i0.ɵɵdirectiveInject(MAT_EXPANSION_PANEL_DEFAULT_OPTIONS, 8), i0.ɵɵdirectiveInject(ANIMATION_MODULE_TYPE, 8), i0.ɵɵinjectAttribute('tabindex'));\n };\n }\n static {\n this.ɵcmp = /* @__PURE__ */i0.ɵɵdefineComponent({\n type: MatExpansionPanelHeader,\n selectors: [[\"mat-expansion-panel-header\"]],\n hostAttrs: [\"role\", \"button\", 1, \"mat-expansion-panel-header\", \"mat-focus-indicator\"],\n hostVars: 15,\n hostBindings: function MatExpansionPanelHeader_HostBindings(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵlistener(\"click\", function MatExpansionPanelHeader_click_HostBindingHandler() {\n return ctx._toggle();\n })(\"keydown\", function MatExpansionPanelHeader_keydown_HostBindingHandler($event) {\n return ctx._keydown($event);\n });\n }\n if (rf & 2) {\n i0.ɵɵattribute(\"id\", ctx.panel._headerId)(\"tabindex\", ctx.disabled ? -1 : ctx.tabIndex)(\"aria-controls\", ctx._getPanelId())(\"aria-expanded\", ctx._isExpanded())(\"aria-disabled\", ctx.panel.disabled);\n i0.ɵɵstyleProp(\"height\", ctx._getHeaderHeight());\n i0.ɵɵclassProp(\"mat-expanded\", ctx._isExpanded())(\"mat-expansion-toggle-indicator-after\", ctx._getTogglePosition() === \"after\")(\"mat-expansion-toggle-indicator-before\", ctx._getTogglePosition() === \"before\")(\"_mat-animation-noopable\", ctx._animationMode === \"NoopAnimations\");\n }\n },\n inputs: {\n expandedHeight: \"expandedHeight\",\n collapsedHeight: \"collapsedHeight\",\n tabIndex: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"tabIndex\", \"tabIndex\", value => value == null ? 
0 : numberAttribute(value)]\n },\n standalone: true,\n features: [i0.ɵɵInputTransformsFeature, i0.ɵɵStandaloneFeature],\n ngContentSelectors: _c4,\n decls: 5,\n vars: 3,\n consts: [[1, \"mat-content\"], [1, \"mat-expansion-indicator\"], [\"xmlns\", \"http://www.w3.org/2000/svg\", \"viewBox\", \"0 -960 960 960\", \"aria-hidden\", \"true\", \"focusable\", \"false\"], [\"d\", \"M480-345 240-585l56-56 184 184 184-184 56 56-240 240Z\"]],\n template: function MatExpansionPanelHeader_Template(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵprojectionDef(_c3);\n i0.ɵɵelementStart(0, \"span\", 0);\n i0.ɵɵprojection(1);\n i0.ɵɵprojection(2, 1);\n i0.ɵɵprojection(3, 2);\n i0.ɵɵelementEnd();\n i0.ɵɵtemplate(4, MatExpansionPanelHeader_Conditional_4_Template, 3, 1, \"span\", 1);\n }\n if (rf & 2) {\n i0.ɵɵclassProp(\"mat-content-hide-toggle\", !ctx._showToggle());\n i0.ɵɵadvance(4);\n i0.ɵɵconditional(4, ctx._showToggle() ? 4 : -1);\n }\n },\n styles: [\".mat-expansion-panel-header{display:flex;flex-direction:row;align-items:center;padding:0 24px;border-radius:inherit;transition:height 225ms cubic-bezier(0.4, 0, 0.2, 1);height:var(--mat-expansion-header-collapsed-state-height);font-family:var(--mat-expansion-header-text-font);font-size:var(--mat-expansion-header-text-size);font-weight:var(--mat-expansion-header-text-weight);line-height:var(--mat-expansion-header-text-line-height);letter-spacing:var(--mat-expansion-header-text-tracking)}.mat-expansion-panel-header.mat-expanded{height:var(--mat-expansion-header-expanded-state-height)}.mat-expansion-panel-header[aria-disabled=true]{color:var(--mat-expansion-header-disabled-state-text-color)}.mat-expansion-panel-header:not([aria-disabled=true]){cursor:pointer}.mat-expansion-panel:not(.mat-expanded) .mat-expansion-panel-header:not([aria-disabled=true]):hover{background:var(--mat-expansion-header-hover-state-layer-color)}@media(hover: none){.mat-expansion-panel:not(.mat-expanded) .mat-expansion-panel-header:not([aria-disabled=true]):hover{background:var(--mat-expansion-container-background-color)}}.mat-expansion-panel .mat-expansion-panel-header:not([aria-disabled=true]).cdk-keyboard-focused,.mat-expansion-panel .mat-expansion-panel-header:not([aria-disabled=true]).cdk-program-focused{background:var(--mat-expansion-header-focus-state-layer-color)}.mat-expansion-panel-header._mat-animation-noopable{transition:none}.mat-expansion-panel-header:focus,.mat-expansion-panel-header:hover{outline:none}.mat-expansion-panel-header.mat-expanded:focus,.mat-expansion-panel-header.mat-expanded:hover{background:inherit}.mat-expansion-panel-header.mat-expansion-toggle-indicator-before{flex-direction:row-reverse}.mat-expansion-panel-header.mat-expansion-toggle-indicator-before .mat-expansion-indicator{margin:0 16px 0 0}[dir=rtl] .mat-expansion-panel-header.mat-expansion-toggle-indicator-before .mat-expansion-indicator{margin:0 0 0 16px}.mat-content{display:flex;flex:1;flex-direction:row;overflow:hidden}.mat-content.mat-content-hide-toggle{margin-right:8px}[dir=rtl] .mat-content.mat-content-hide-toggle{margin-right:0;margin-left:8px}.mat-expansion-toggle-indicator-before .mat-content.mat-content-hide-toggle{margin-left:24px;margin-right:0}[dir=rtl] .mat-expansion-toggle-indicator-before .mat-content.mat-content-hide-toggle{margin-right:24px;margin-left:0}.mat-expansion-panel-header-title{color:var(--mat-expansion-header-text-color)}.mat-expansion-panel-header-title,.mat-expansion-panel-header-description{display:flex;flex-grow:1;flex-basis:0;margin-right:16px;align-items:center}[dir=rtl] 
.mat-expansion-panel-header-title,[dir=rtl] .mat-expansion-panel-header-description{margin-right:0;margin-left:16px}.mat-expansion-panel-header[aria-disabled=true] .mat-expansion-panel-header-title,.mat-expansion-panel-header[aria-disabled=true] .mat-expansion-panel-header-description{color:inherit}.mat-expansion-panel-header-description{flex-grow:2;color:var(--mat-expansion-header-description-color)}.mat-expansion-indicator::after{border-style:solid;border-width:0 2px 2px 0;content:\\\"\\\";display:inline-block;padding:3px;transform:rotate(45deg);vertical-align:middle;color:var(--mat-expansion-header-indicator-color);display:inline-block;display:var(--mat-expansion-legacy-header-indicator-display, inline-block)}.mat-expansion-indicator svg{width:24px;height:24px;margin:0 -8px;vertical-align:middle;fill:var(--mat-expansion-header-indicator-color);display:none;display:var(--mat-expansion-header-indicator-display, none)}.cdk-high-contrast-active .mat-expansion-panel-content{border-top:1px solid;border-top-left-radius:0;border-top-right-radius:0}\"],\n encapsulation: 2,\n data: {\n animation: [matExpansionAnimations.indicatorRotate]\n },\n changeDetection: 0\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionPanelHeader, [{\n type: Component,\n args: [{\n selector: 'mat-expansion-panel-header',\n encapsulation: ViewEncapsulation.None,\n changeDetection: ChangeDetectionStrategy.OnPush,\n animations: [matExpansionAnimations.indicatorRotate],\n host: {\n 'class': 'mat-expansion-panel-header mat-focus-indicator',\n 'role': 'button',\n '[attr.id]': 'panel._headerId',\n '[attr.tabindex]': 'disabled ? -1 : tabIndex',\n '[attr.aria-controls]': '_getPanelId()',\n '[attr.aria-expanded]': '_isExpanded()',\n '[attr.aria-disabled]': 'panel.disabled',\n '[class.mat-expanded]': '_isExpanded()',\n '[class.mat-expansion-toggle-indicator-after]': `_getTogglePosition() === 'after'`,\n '[class.mat-expansion-toggle-indicator-before]': `_getTogglePosition() === 'before'`,\n '[class._mat-animation-noopable]': '_animationMode === \"NoopAnimations\"',\n '[style.height]': '_getHeaderHeight()',\n '(click)': '_toggle()',\n '(keydown)': '_keydown($event)'\n },\n standalone: true,\n template: \"\\n \\n \\n \\n \\n\\n@if (_showToggle()) {\\n \\n \\n \\n \\n \\n}\\n\",\n styles: [\".mat-expansion-panel-header{display:flex;flex-direction:row;align-items:center;padding:0 24px;border-radius:inherit;transition:height 225ms cubic-bezier(0.4, 0, 0.2, 1);height:var(--mat-expansion-header-collapsed-state-height);font-family:var(--mat-expansion-header-text-font);font-size:var(--mat-expansion-header-text-size);font-weight:var(--mat-expansion-header-text-weight);line-height:var(--mat-expansion-header-text-line-height);letter-spacing:var(--mat-expansion-header-text-tracking)}.mat-expansion-panel-header.mat-expanded{height:var(--mat-expansion-header-expanded-state-height)}.mat-expansion-panel-header[aria-disabled=true]{color:var(--mat-expansion-header-disabled-state-text-color)}.mat-expansion-panel-header:not([aria-disabled=true]){cursor:pointer}.mat-expansion-panel:not(.mat-expanded) .mat-expansion-panel-header:not([aria-disabled=true]):hover{background:var(--mat-expansion-header-hover-state-layer-color)}@media(hover: none){.mat-expansion-panel:not(.mat-expanded) .mat-expansion-panel-header:not([aria-disabled=true]):hover{background:var(--mat-expansion-container-background-color)}}.mat-expansion-panel 
.mat-expansion-panel-header:not([aria-disabled=true]).cdk-keyboard-focused,.mat-expansion-panel .mat-expansion-panel-header:not([aria-disabled=true]).cdk-program-focused{background:var(--mat-expansion-header-focus-state-layer-color)}.mat-expansion-panel-header._mat-animation-noopable{transition:none}.mat-expansion-panel-header:focus,.mat-expansion-panel-header:hover{outline:none}.mat-expansion-panel-header.mat-expanded:focus,.mat-expansion-panel-header.mat-expanded:hover{background:inherit}.mat-expansion-panel-header.mat-expansion-toggle-indicator-before{flex-direction:row-reverse}.mat-expansion-panel-header.mat-expansion-toggle-indicator-before .mat-expansion-indicator{margin:0 16px 0 0}[dir=rtl] .mat-expansion-panel-header.mat-expansion-toggle-indicator-before .mat-expansion-indicator{margin:0 0 0 16px}.mat-content{display:flex;flex:1;flex-direction:row;overflow:hidden}.mat-content.mat-content-hide-toggle{margin-right:8px}[dir=rtl] .mat-content.mat-content-hide-toggle{margin-right:0;margin-left:8px}.mat-expansion-toggle-indicator-before .mat-content.mat-content-hide-toggle{margin-left:24px;margin-right:0}[dir=rtl] .mat-expansion-toggle-indicator-before .mat-content.mat-content-hide-toggle{margin-right:24px;margin-left:0}.mat-expansion-panel-header-title{color:var(--mat-expansion-header-text-color)}.mat-expansion-panel-header-title,.mat-expansion-panel-header-description{display:flex;flex-grow:1;flex-basis:0;margin-right:16px;align-items:center}[dir=rtl] .mat-expansion-panel-header-title,[dir=rtl] .mat-expansion-panel-header-description{margin-right:0;margin-left:16px}.mat-expansion-panel-header[aria-disabled=true] .mat-expansion-panel-header-title,.mat-expansion-panel-header[aria-disabled=true] .mat-expansion-panel-header-description{color:inherit}.mat-expansion-panel-header-description{flex-grow:2;color:var(--mat-expansion-header-description-color)}.mat-expansion-indicator::after{border-style:solid;border-width:0 2px 2px 0;content:\\\"\\\";display:inline-block;padding:3px;transform:rotate(45deg);vertical-align:middle;color:var(--mat-expansion-header-indicator-color);display:inline-block;display:var(--mat-expansion-legacy-header-indicator-display, inline-block)}.mat-expansion-indicator svg{width:24px;height:24px;margin:0 -8px;vertical-align:middle;fill:var(--mat-expansion-header-indicator-color);display:none;display:var(--mat-expansion-header-indicator-display, none)}.cdk-high-contrast-active .mat-expansion-panel-content{border-top:1px solid;border-top-left-radius:0;border-top-right-radius:0}\"]\n }]\n }], () => [{\n type: MatExpansionPanel,\n decorators: [{\n type: Host\n }]\n }, {\n type: i0.ElementRef\n }, {\n type: i2.FocusMonitor\n }, {\n type: i0.ChangeDetectorRef\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [MAT_EXPANSION_PANEL_DEFAULT_OPTIONS]\n }, {\n type: Optional\n }]\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [ANIMATION_MODULE_TYPE]\n }]\n }, {\n type: undefined,\n decorators: [{\n type: Attribute,\n args: ['tabindex']\n }]\n }], {\n expandedHeight: [{\n type: Input\n }],\n collapsedHeight: [{\n type: Input\n }],\n tabIndex: [{\n type: Input,\n args: [{\n transform: value => value == null ? 
0 : numberAttribute(value)\n }]\n }]\n });\n})();\n/**\n * Description element of a ``.\n */\nclass MatExpansionPanelDescription {\n static {\n this.ɵfac = function MatExpansionPanelDescription_Factory(t) {\n return new (t || MatExpansionPanelDescription)();\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatExpansionPanelDescription,\n selectors: [[\"mat-panel-description\"]],\n hostAttrs: [1, \"mat-expansion-panel-header-description\"],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionPanelDescription, [{\n type: Directive,\n args: [{\n selector: 'mat-panel-description',\n host: {\n class: 'mat-expansion-panel-header-description'\n },\n standalone: true\n }]\n }], null, null);\n})();\n/**\n * Title element of a ``.\n */\nclass MatExpansionPanelTitle {\n static {\n this.ɵfac = function MatExpansionPanelTitle_Factory(t) {\n return new (t || MatExpansionPanelTitle)();\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatExpansionPanelTitle,\n selectors: [[\"mat-panel-title\"]],\n hostAttrs: [1, \"mat-expansion-panel-header-title\"],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionPanelTitle, [{\n type: Directive,\n args: [{\n selector: 'mat-panel-title',\n host: {\n class: 'mat-expansion-panel-header-title'\n },\n standalone: true\n }]\n }], null, null);\n})();\n\n/**\n * Directive for a Material Design Accordion.\n */\nclass MatAccordion extends CdkAccordion {\n constructor() {\n super(...arguments);\n /** Headers belonging to this accordion. */\n this._ownHeaders = new QueryList();\n /** Whether the expansion indicator should be hidden. */\n this.hideToggle = false;\n /**\n * Display mode used for all expansion panels in the accordion. Currently two display\n * modes exist:\n * default - a gutter-like spacing is placed around any expanded panel, placing the expanded\n * panel at a different elevation from the rest of the accordion.\n * flat - no spacing is placed around expanded panels, showing all panels at the same\n * elevation.\n */\n this.displayMode = 'default';\n /** The position of the expansion indicator. */\n this.togglePosition = 'after';\n }\n ngAfterContentInit() {\n this._headers.changes.pipe(startWith(this._headers)).subscribe(headers => {\n this._ownHeaders.reset(headers.filter(header => header.panel.accordion === this));\n this._ownHeaders.notifyOnChanges();\n });\n this._keyManager = new FocusKeyManager(this._ownHeaders).withWrap().withHomeAndEnd();\n }\n /** Handles keyboard events coming in from the panel headers. 
*/\n _handleHeaderKeydown(event) {\n this._keyManager.onKeydown(event);\n }\n _handleHeaderFocus(header) {\n this._keyManager.updateActiveItem(header);\n }\n ngOnDestroy() {\n super.ngOnDestroy();\n this._keyManager?.destroy();\n this._ownHeaders.destroy();\n }\n static {\n this.ɵfac = /* @__PURE__ */(() => {\n let ɵMatAccordion_BaseFactory;\n return function MatAccordion_Factory(t) {\n return (ɵMatAccordion_BaseFactory || (ɵMatAccordion_BaseFactory = i0.ɵɵgetInheritedFactory(MatAccordion)))(t || MatAccordion);\n };\n })();\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatAccordion,\n selectors: [[\"mat-accordion\"]],\n contentQueries: function MatAccordion_ContentQueries(rf, ctx, dirIndex) {\n if (rf & 1) {\n i0.ɵɵcontentQuery(dirIndex, MatExpansionPanelHeader, 5);\n }\n if (rf & 2) {\n let _t;\n i0.ɵɵqueryRefresh(_t = i0.ɵɵloadQuery()) && (ctx._headers = _t);\n }\n },\n hostAttrs: [1, \"mat-accordion\"],\n hostVars: 2,\n hostBindings: function MatAccordion_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵclassProp(\"mat-accordion-multi\", ctx.multi);\n }\n },\n inputs: {\n hideToggle: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"hideToggle\", \"hideToggle\", booleanAttribute],\n displayMode: \"displayMode\",\n togglePosition: \"togglePosition\"\n },\n exportAs: [\"matAccordion\"],\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: MAT_ACCORDION,\n useExisting: MatAccordion\n }]), i0.ɵɵInputTransformsFeature, i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatAccordion, [{\n type: Directive,\n args: [{\n selector: 'mat-accordion',\n exportAs: 'matAccordion',\n providers: [{\n provide: MAT_ACCORDION,\n useExisting: MatAccordion\n }],\n host: {\n class: 'mat-accordion',\n // Class binding which is only used by the test harness as there is no other\n // way for the harness to detect if multiple panel support is enabled.\n '[class.mat-accordion-multi]': 'this.multi'\n },\n standalone: true\n }]\n }], null, {\n _headers: [{\n type: ContentChildren,\n args: [MatExpansionPanelHeader, {\n descendants: true\n }]\n }],\n hideToggle: [{\n type: Input,\n args: [{\n transform: booleanAttribute\n }]\n }],\n displayMode: [{\n type: Input\n }],\n togglePosition: [{\n type: Input\n }]\n });\n})();\nclass MatExpansionModule {\n static {\n this.ɵfac = function MatExpansionModule_Factory(t) {\n return new (t || MatExpansionModule)();\n };\n }\n static {\n this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: MatExpansionModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({\n imports: [MatCommonModule, CdkAccordionModule, PortalModule]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatExpansionModule, [{\n type: NgModule,\n args: [{\n imports: [MatCommonModule, CdkAccordionModule, PortalModule, MatAccordion, MatExpansionPanel, MatExpansionPanelActionRow, MatExpansionPanelHeader, MatExpansionPanelTitle, MatExpansionPanelDescription, MatExpansionPanelContent],\n exports: [MatAccordion, MatExpansionPanel, MatExpansionPanelActionRow, MatExpansionPanelHeader, MatExpansionPanelTitle, MatExpansionPanelDescription, MatExpansionPanelContent]\n }]\n }], null, null);\n})();\n\n/**\n * Generated bundle index. 
Do not edit.\n */\n\nexport { EXPANSION_PANEL_ANIMATION_TIMING, MAT_ACCORDION, MAT_EXPANSION_PANEL, MAT_EXPANSION_PANEL_DEFAULT_OPTIONS, MatAccordion, MatExpansionModule, MatExpansionPanel, MatExpansionPanelActionRow, MatExpansionPanelContent, MatExpansionPanelDescription, MatExpansionPanelHeader, MatExpansionPanelTitle, matExpansionAnimations };\n","/**\n * @license\n * Video.js 8.12.0 \n * Copyright Brightcove, Inc. \n * Available under Apache License Version 2.0\n * \n *\n * Includes vtt.js \n * Available under Apache License Version 2.0\n * \n */\n\nimport window$1 from 'global/window';\nimport document from 'global/document';\nimport keycode from 'keycode';\nimport safeParseTuple from 'safe-json-parse/tuple';\nimport XHR from '@videojs/xhr';\nimport vtt from 'videojs-vtt.js';\nimport _extends from '@babel/runtime/helpers/extends';\nimport _resolveUrl from '@videojs/vhs-utils/es/resolve-url.js';\nimport { Parser } from 'm3u8-parser';\nimport { DEFAULT_VIDEO_CODEC, DEFAULT_AUDIO_CODEC, parseCodecs, muxerSupportsCodec, browserSupportsCodec, translateLegacyCodec, codecsFromDefault, isAudioCodec, getMimeForCodec } from '@videojs/vhs-utils/es/codecs.js';\nimport { simpleTypeFromSourceType } from '@videojs/vhs-utils/es/media-types.js';\nimport { isArrayBufferView, concatTypedArrays, stringToBytes, toUint8 } from '@videojs/vhs-utils/es/byte-helpers';\nimport { generateSidxKey, parseUTCTiming, parse, addSidxSegmentsToPlaylist } from 'mpd-parser';\nimport parseSidx from 'mux.js/lib/tools/parse-sidx';\nimport { getId3Offset } from '@videojs/vhs-utils/es/id3-helpers';\nimport { detectContainerForBytes, isLikelyFmp4MediaSegment } from '@videojs/vhs-utils/es/containers';\nimport { ONE_SECOND_IN_TS } from 'mux.js/lib/utils/clock';\n\nvar version$6 = \"8.12.0\";\n\n/**\n * An Object that contains lifecycle hooks as keys which point to an array\n * of functions that are run when a lifecycle is triggered\n *\n * @private\n */\nconst hooks_ = {};\n\n/**\n * Get a list of hooks for a specific lifecycle\n *\n * @param {string} type\n * the lifecycle to get hooks from\n *\n * @param {Function|Function[]} [fn]\n * Optionally add a hook (or hooks) to the lifecycle that your are getting.\n *\n * @return {Array}\n * an array of hooks, or an empty array if there are none.\n */\nconst hooks = function (type, fn) {\n hooks_[type] = hooks_[type] || [];\n if (fn) {\n hooks_[type] = hooks_[type].concat(fn);\n }\n return hooks_[type];\n};\n\n/**\n * Add a function hook to a specific videojs lifecycle.\n *\n * @param {string} type\n * the lifecycle to hook the function to.\n *\n * @param {Function|Function[]}\n * The function or array of functions to attach.\n */\nconst hook = function (type, fn) {\n hooks(type, fn);\n};\n\n/**\n * Remove a hook from a specific videojs lifecycle.\n *\n * @param {string} type\n * the lifecycle that the function hooked to\n *\n * @param {Function} fn\n * The hooked function to remove\n *\n * @return {boolean}\n * The function that was removed or undef\n */\nconst removeHook = function (type, fn) {\n const index = hooks(type).indexOf(fn);\n if (index <= -1) {\n return false;\n }\n hooks_[type] = hooks_[type].slice();\n hooks_[type].splice(index, 1);\n return true;\n};\n\n/**\n * Add a function hook that will only run once to a specific videojs lifecycle.\n *\n * @param {string} type\n * the lifecycle to hook the function to.\n *\n * @param {Function|Function[]}\n * The function or array of functions to attach.\n */\nconst hookOnce = function (type, fn) {\n hooks(type, 
[].concat(fn).map(original => {\n const wrapper = (...args) => {\n removeHook(type, wrapper);\n return original(...args);\n };\n return wrapper;\n }));\n};\n\n/**\n * @file fullscreen-api.js\n * @module fullscreen-api\n */\n\n/**\n * Store the browser-specific methods for the fullscreen API.\n *\n * @type {Object}\n * @see [Specification]{@link https://fullscreen.spec.whatwg.org}\n * @see [Map Approach From Screenfull.js]{@link https://github.com/sindresorhus/screenfull.js}\n */\nconst FullscreenApi = {\n prefixed: true\n};\n\n// browser API methods\nconst apiMap = [['requestFullscreen', 'exitFullscreen', 'fullscreenElement', 'fullscreenEnabled', 'fullscreenchange', 'fullscreenerror', 'fullscreen'],\n// WebKit\n['webkitRequestFullscreen', 'webkitExitFullscreen', 'webkitFullscreenElement', 'webkitFullscreenEnabled', 'webkitfullscreenchange', 'webkitfullscreenerror', '-webkit-full-screen']];\nconst specApi = apiMap[0];\nlet browserApi;\n\n// determine the supported set of functions\nfor (let i = 0; i < apiMap.length; i++) {\n // check for exitFullscreen function\n if (apiMap[i][1] in document) {\n browserApi = apiMap[i];\n break;\n }\n}\n\n// map the browser API names to the spec API names\nif (browserApi) {\n for (let i = 0; i < browserApi.length; i++) {\n FullscreenApi[specApi[i]] = browserApi[i];\n }\n FullscreenApi.prefixed = browserApi[0] !== specApi[0];\n}\n\n/**\n * @file create-logger.js\n * @module create-logger\n */\n\n// This is the private tracking variable for the logging history.\nlet history = [];\n\n/**\n * Log messages to the console and history based on the type of message\n *\n * @private\n * @param {string} name\n * The name of the console method to use.\n *\n * @param {Object} log\n * The arguments to be passed to the matching console method.\n *\n * @param {string} [styles]\n * styles for name\n */\nconst LogByTypeFactory = (name, log, styles) => (type, level, args) => {\n const lvl = log.levels[level];\n const lvlRegExp = new RegExp(`^(${lvl})$`);\n let resultName = name;\n if (type !== 'log') {\n // Add the type to the front of the message when it's not \"log\".\n args.unshift(type.toUpperCase() + ':');\n }\n if (styles) {\n resultName = `%c${name}`;\n args.unshift(styles);\n }\n\n // Add console prefix after adding to history.\n args.unshift(resultName + ':');\n\n // Add a clone of the args at this point to history.\n if (history) {\n history.push([].concat(args));\n\n // only store 1000 history entries\n const splice = history.length - 1000;\n history.splice(0, splice > 0 ? splice : 0);\n }\n\n // If there's no console then don't try to output messages, but they will\n // still be stored in history.\n if (!window$1.console) {\n return;\n }\n\n // Was setting these once outside of this function, but containing them\n // in the function makes it easier to test cases where console doesn't exist\n // when the module is executed.\n let fn = window$1.console[type];\n if (!fn && type === 'debug') {\n // Certain browsers don't have support for console.debug. For those, we\n // should default to the closest comparable log.\n fn = window$1.console.info || window$1.console.log;\n }\n\n // Bail out if there's no console or if this type is not allowed by the\n // current logging level.\n if (!fn || !lvl || !lvlRegExp.test(type)) {\n return;\n }\n fn[Array.isArray(args) ? 
'apply' : 'call'](window$1.console, args);\n};\nfunction createLogger$1(name, delimiter = ':', styles = '') {\n // This is the private tracking variable for logging level.\n let level = 'info';\n\n // the curried logByType bound to the specific log and history\n let logByType;\n\n /**\n * Logs plain debug messages. Similar to `console.log`.\n *\n * Due to [limitations](https://github.com/jsdoc3/jsdoc/issues/955#issuecomment-313829149)\n * of our JSDoc template, we cannot properly document this as both a function\n * and a namespace, so its function signature is documented here.\n *\n * #### Arguments\n * ##### *args\n * *[]\n *\n * Any combination of values that could be passed to `console.log()`.\n *\n * #### Return Value\n *\n * `undefined`\n *\n * @namespace\n * @param {...*} args\n * One or more messages or objects that should be logged.\n */\n const log = function (...args) {\n logByType('log', level, args);\n };\n\n // This is the logByType helper that the logging methods below use\n logByType = LogByTypeFactory(name, log, styles);\n\n /**\n * Create a new subLogger which chains the old name to the new name.\n *\n * For example, doing `videojs.log.createLogger('player')` and then using that logger will log the following:\n * ```js\n * mylogger('foo');\n * // > VIDEOJS: player: foo\n * ```\n *\n * @param {string} subName\n * The name to add call the new logger\n * @param {string} [subDelimiter]\n * Optional delimiter\n * @param {string} [subStyles]\n * Optional styles\n * @return {Object}\n */\n log.createLogger = (subName, subDelimiter, subStyles) => {\n const resultDelimiter = subDelimiter !== undefined ? subDelimiter : delimiter;\n const resultStyles = subStyles !== undefined ? subStyles : styles;\n const resultName = `${name} ${resultDelimiter} ${subName}`;\n return createLogger$1(resultName, resultDelimiter, resultStyles);\n };\n\n /**\n * Create a new logger.\n *\n * @param {string} newName\n * The name for the new logger\n * @param {string} [newDelimiter]\n * Optional delimiter\n * @param {string} [newStyles]\n * Optional styles\n * @return {Object}\n */\n log.createNewLogger = (newName, newDelimiter, newStyles) => {\n return createLogger$1(newName, newDelimiter, newStyles);\n };\n\n /**\n * Enumeration of available logging levels, where the keys are the level names\n * and the values are `|`-separated strings containing logging methods allowed\n * in that logging level. These strings are used to create a regular expression\n * matching the function name being called.\n *\n * Levels provided by Video.js are:\n *\n * - `off`: Matches no calls. Any value that can be cast to `false` will have\n * this effect. 
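The lifecycle registry above is just a map of hook arrays keyed by name. A minimal usage sketch follows, assuming the module-local `hook`, `hooks`, `hookOnce` and `removeHook` helpers are in scope (the library exposes them on the public `videojs` function elsewhere, outside this excerpt) and treating `'beforesetup'`/`'setup'` purely as example keys:

```js
const onBeforeSetup = () => {};

hook('beforesetup', onBeforeSetup);       // stays registered until removed
hookOnce('setup', () => {});              // wrapper removes itself after its first call

hooks('beforesetup').length;              // -> 1
removeHook('beforesetup', onBeforeSetup); // -> true
hooks('beforesetup').length;              // -> 0
```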
The most restrictive.\n * - `all`: Matches only Video.js-provided functions (`debug`, `log`,\n * `log.warn`, and `log.error`).\n * - `debug`: Matches `log.debug`, `log`, `log.warn`, and `log.error` calls.\n * - `info` (default): Matches `log`, `log.warn`, and `log.error` calls.\n * - `warn`: Matches `log.warn` and `log.error` calls.\n * - `error`: Matches only `log.error` calls.\n *\n * @type {Object}\n */\n log.levels = {\n all: 'debug|log|warn|error',\n off: '',\n debug: 'debug|log|warn|error',\n info: 'log|warn|error',\n warn: 'warn|error',\n error: 'error',\n DEFAULT: level\n };\n\n /**\n * Get or set the current logging level.\n *\n * If a string matching a key from {@link module:log.levels} is provided, acts\n * as a setter.\n *\n * @param {'all'|'debug'|'info'|'warn'|'error'|'off'} [lvl]\n * Pass a valid level to set a new logging level.\n *\n * @return {string}\n * The current logging level.\n */\n log.level = lvl => {\n if (typeof lvl === 'string') {\n if (!log.levels.hasOwnProperty(lvl)) {\n throw new Error(`\"${lvl}\" in not a valid log level`);\n }\n level = lvl;\n }\n return level;\n };\n\n /**\n * Returns an array containing everything that has been logged to the history.\n *\n * This array is a shallow clone of the internal history record. However, its\n * contents are _not_ cloned; so, mutating objects inside this array will\n * mutate them in history.\n *\n * @return {Array}\n */\n log.history = () => history ? [].concat(history) : [];\n\n /**\n * Allows you to filter the history by the given logger name\n *\n * @param {string} fname\n * The name to filter by\n *\n * @return {Array}\n * The filtered list to return\n */\n log.history.filter = fname => {\n return (history || []).filter(historyItem => {\n // if the first item in each historyItem includes `fname`, then it's a match\n return new RegExp(`.*${fname}.*`).test(historyItem[0]);\n });\n };\n\n /**\n * Clears the internal history tracking, but does not prevent further history\n * tracking.\n */\n log.history.clear = () => {\n if (history) {\n history.length = 0;\n }\n };\n\n /**\n * Disable history tracking if it is currently enabled.\n */\n log.history.disable = () => {\n if (history !== null) {\n history.length = 0;\n history = null;\n }\n };\n\n /**\n * Enable history tracking if it is currently disabled.\n */\n log.history.enable = () => {\n if (history === null) {\n history = [];\n }\n };\n\n /**\n * Logs error messages. Similar to `console.error`.\n *\n * @param {...*} args\n * One or more messages or objects that should be logged as an error\n */\n log.error = (...args) => logByType('error', level, args);\n\n /**\n * Logs warning messages. Similar to `console.warn`.\n *\n * @param {...*} args\n * One or more messages or objects that should be logged as a warning.\n */\n log.warn = (...args) => logByType('warn', level, args);\n\n /**\n * Logs debug messages. 
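A short sketch of the level and history API defined above, assuming a logger produced by `createLogger$1`; the messages themselves are illustrative:

```js
const log = createLogger$1('VIDEOJS');

log.level('warn');              // only warn/error calls reach the console now
log('hidden at this level');    // still recorded in history, just not printed
log.warn('shown');              // printed roughly as "VIDEOJS: WARN: shown"

log.history().length;           // every call above was recorded
log.history.filter('VIDEOJS');  // entries for this logger name only
log.level(log.levels.DEFAULT);  // back to the default 'info' level
log.history.clear();
```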
Similar to `console.debug`, but may also act as a comparable\n * log if `console.debug` is not available\n *\n * @param {...*} args\n * One or more messages or objects that should be logged as debug.\n */\n log.debug = (...args) => logByType('debug', level, args);\n return log;\n}\n\n/**\n * @file log.js\n * @module log\n */\nconst log$1 = createLogger$1('VIDEOJS');\nconst createLogger = log$1.createLogger;\n\n/**\n * @file obj.js\n * @module obj\n */\n\n/**\n * @callback obj:EachCallback\n *\n * @param {*} value\n * The current key for the object that is being iterated over.\n *\n * @param {string} key\n * The current key-value for object that is being iterated over\n */\n\n/**\n * @callback obj:ReduceCallback\n *\n * @param {*} accum\n * The value that is accumulating over the reduce loop.\n *\n * @param {*} value\n * The current key for the object that is being iterated over.\n *\n * @param {string} key\n * The current key-value for object that is being iterated over\n *\n * @return {*}\n * The new accumulated value.\n */\nconst toString = Object.prototype.toString;\n\n/**\n * Get the keys of an Object\n *\n * @param {Object}\n * The Object to get the keys from\n *\n * @return {string[]}\n * An array of the keys from the object. Returns an empty array if the\n * object passed in was invalid or had no keys.\n *\n * @private\n */\nconst keys = function (object) {\n return isObject(object) ? Object.keys(object) : [];\n};\n\n/**\n * Array-like iteration for objects.\n *\n * @param {Object} object\n * The object to iterate over\n *\n * @param {obj:EachCallback} fn\n * The callback function which is called for each key in the object.\n */\nfunction each(object, fn) {\n keys(object).forEach(key => fn(object[key], key));\n}\n\n/**\n * Array-like reduce for objects.\n *\n * @param {Object} object\n * The Object that you want to reduce.\n *\n * @param {Function} fn\n * A callback function which is called for each key in the object. It\n * receives the accumulated value and the per-iteration value and key\n * as arguments.\n *\n * @param {*} [initial = 0]\n * Starting value\n *\n * @return {*}\n * The final accumulated value.\n */\nfunction reduce(object, fn, initial = 0) {\n return keys(object).reduce((accum, key) => fn(accum, object[key], key), initial);\n}\n\n/**\n * Returns whether a value is an object of any kind - including DOM nodes,\n * arrays, regular expressions, etc. 
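The object-iteration helpers above mirror `Array.prototype.forEach`/`reduce` for plain objects. A small sketch, assuming `each` and `reduce` are in scope (the data is illustrative):

```js
const renditions = {low: 360, sd: 480, hd: 720};

each(renditions, (height, label) => console.log(`${label}: ${height}p`));

const tallest = reduce(renditions, (max, height) => Math.max(max, height), 0);
// tallest === 720
```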
Not functions, though.\n *\n * This avoids the gotcha where using `typeof` on a `null` value\n * results in `'object'`.\n *\n * @param {Object} value\n * @return {boolean}\n */\nfunction isObject(value) {\n return !!value && typeof value === 'object';\n}\n\n/**\n * Returns whether an object appears to be a \"plain\" object - that is, a\n * direct instance of `Object`.\n *\n * @param {Object} value\n * @return {boolean}\n */\nfunction isPlain(value) {\n return isObject(value) && toString.call(value) === '[object Object]' && value.constructor === Object;\n}\n\n/**\n * Merge two objects recursively.\n *\n * Performs a deep merge like\n * {@link https://lodash.com/docs/4.17.10#merge|lodash.merge}, but only merges\n * plain objects (not arrays, elements, or anything else).\n *\n * Non-plain object values will be copied directly from the right-most\n * argument.\n *\n * @param {Object[]} sources\n * One or more objects to merge into a new object.\n *\n * @return {Object}\n * A new object that is the merged result of all sources.\n */\nfunction merge$1(...sources) {\n const result = {};\n sources.forEach(source => {\n if (!source) {\n return;\n }\n each(source, (value, key) => {\n if (!isPlain(value)) {\n result[key] = value;\n return;\n }\n if (!isPlain(result[key])) {\n result[key] = {};\n }\n result[key] = merge$1(result[key], value);\n });\n });\n return result;\n}\n\n/**\n * Returns an array of values for a given object\n *\n * @param {Object} source - target object\n * @return {Array} - object values\n */\nfunction values(source = {}) {\n const result = [];\n for (const key in source) {\n if (source.hasOwnProperty(key)) {\n const value = source[key];\n result.push(value);\n }\n }\n return result;\n}\n\n/**\n * Object.defineProperty but \"lazy\", which means that the value is only set after\n * it is retrieved the first time, rather than being set right away.\n *\n * @param {Object} obj the object to set the property on\n * @param {string} key the key for the property to set\n * @param {Function} getValue the function used to get the value when it is needed.\n * @param {boolean} setter whether a setter should be allowed or not\n */\nfunction defineLazyProperty(obj, key, getValue, setter = true) {\n const set = value => Object.defineProperty(obj, key, {\n value,\n enumerable: true,\n writable: true\n });\n const options = {\n configurable: true,\n enumerable: true,\n get() {\n const value = getValue();\n set(value);\n return value;\n }\n };\n if (setter) {\n options.set = set;\n }\n return Object.defineProperty(obj, key, options);\n}\n\nvar Obj = /*#__PURE__*/Object.freeze({\n __proto__: null,\n each: each,\n reduce: reduce,\n isObject: isObject,\n isPlain: isPlain,\n merge: merge$1,\n values: values,\n defineLazyProperty: defineLazyProperty\n});\n\n/**\n * @file browser.js\n * @module browser\n */\n\n/**\n * Whether or not this device is an iPod.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_IPOD = false;\n\n/**\n * The detected iOS version - or `null`.\n *\n * @static\n * @type {string|null}\n */\nlet IOS_VERSION = null;\n\n/**\n * Whether or not this is an Android device.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_ANDROID = false;\n\n/**\n * The detected Android version - or `null` if not Android or indeterminable.\n *\n * @static\n * @type {number|string|null}\n */\nlet ANDROID_VERSION;\n\n/**\n * Whether or not this is Mozilla Firefox.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_FIREFOX = false;\n\n/**\n * Whether or not this is Microsoft Edge.\n *\n * @static\n * @type 
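A sketch of `merge$1` and `defineLazyProperty` from above; the option names are illustrative and only plain objects are merged deeply:

```js
const defaults = {controlBar: {volumePanel: {inline: true}}, techOrder: ['html5']};
const overrides = {controlBar: {volumePanel: {inline: false}}, techOrder: ['youtube', 'html5']};

const options = merge$1(defaults, overrides);
// options.controlBar.volumePanel.inline === false  (plain objects are merged recursively)
// options.techOrder === overrides.techOrder        (arrays are copied by reference, not merged)

const cache = {};
defineLazyProperty(cache, 'stamp', () => ({createdAt: Date.now()}));
// The getter runs once on first access; the result is then written back as a plain writable property.
```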
{Boolean}\n */\nlet IS_EDGE = false;\n\n/**\n * Whether or not this is any Chromium Browser\n *\n * @static\n * @type {Boolean}\n */\nlet IS_CHROMIUM = false;\n\n/**\n * Whether or not this is any Chromium browser that is not Edge.\n *\n * This will also be `true` for Chrome on iOS, which will have different support\n * as it is actually Safari under the hood.\n *\n * Deprecated, as the behaviour to not match Edge was to prevent Legacy Edge's UA matching.\n * IS_CHROMIUM should be used instead.\n * \"Chromium but not Edge\" could be explicitly tested with IS_CHROMIUM && !IS_EDGE\n *\n * @static\n * @deprecated\n * @type {Boolean}\n */\nlet IS_CHROME = false;\n\n/**\n * The detected Chromium version - or `null`.\n *\n * @static\n * @type {number|null}\n */\nlet CHROMIUM_VERSION = null;\n\n/**\n * The detected Google Chrome version - or `null`.\n * This has always been the _Chromium_ version, i.e. would return on Chromium Edge.\n * Deprecated, use CHROMIUM_VERSION instead.\n *\n * @static\n * @deprecated\n * @type {number|null}\n */\nlet CHROME_VERSION = null;\n\n/**\n * The detected Internet Explorer version - or `null`.\n *\n * @static\n * @deprecated\n * @type {number|null}\n */\nlet IE_VERSION = null;\n\n/**\n * Whether or not this is desktop Safari.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_SAFARI = false;\n\n/**\n * Whether or not this is a Windows machine.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_WINDOWS = false;\n\n/**\n * Whether or not this device is an iPad.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_IPAD = false;\n\n/**\n * Whether or not this device is an iPhone.\n *\n * @static\n * @type {Boolean}\n */\n// The Facebook app's UIWebView identifies as both an iPhone and iPad, so\n// to identify iPhones, we need to exclude iPads.\n// http://artsy.github.io/blog/2012/10/18/the-perils-of-ios-user-agent-sniffing/\nlet IS_IPHONE = false;\n\n/**\n * Whether or not this is a Tizen device.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_TIZEN = false;\n\n/**\n * Whether or not this is a WebOS device.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_WEBOS = false;\n\n/**\n * Whether or not this is a Smart TV (Tizen or WebOS) device.\n *\n * @static\n * @type {Boolean}\n */\nlet IS_SMART_TV = false;\n\n/**\n * Whether or not this device is touch-enabled.\n *\n * @static\n * @const\n * @type {Boolean}\n */\nconst TOUCH_ENABLED = Boolean(isReal() && ('ontouchstart' in window$1 || window$1.navigator.maxTouchPoints || window$1.DocumentTouch && window$1.document instanceof window$1.DocumentTouch));\nconst UAD = window$1.navigator && window$1.navigator.userAgentData;\nif (UAD && UAD.platform && UAD.brands) {\n // If userAgentData is present, use it instead of userAgent to avoid warnings\n // Currently only implemented on Chromium\n // userAgentData does not expose Android version, so ANDROID_VERSION remains `null`\n\n IS_ANDROID = UAD.platform === 'Android';\n IS_EDGE = Boolean(UAD.brands.find(b => b.brand === 'Microsoft Edge'));\n IS_CHROMIUM = Boolean(UAD.brands.find(b => b.brand === 'Chromium'));\n IS_CHROME = !IS_EDGE && IS_CHROMIUM;\n CHROMIUM_VERSION = CHROME_VERSION = (UAD.brands.find(b => b.brand === 'Chromium') || {}).version || null;\n IS_WINDOWS = UAD.platform === 'Windows';\n}\n\n// If the browser is not Chromium, either userAgentData is not present which could be an old Chromium browser,\n// or it's a browser that has added userAgentData since that we don't have tests for yet. 
In either case,\n// the checks need to be made agiainst the regular userAgent string.\nif (!IS_CHROMIUM) {\n const USER_AGENT = window$1.navigator && window$1.navigator.userAgent || '';\n IS_IPOD = /iPod/i.test(USER_AGENT);\n IOS_VERSION = function () {\n const match = USER_AGENT.match(/OS (\\d+)_/i);\n if (match && match[1]) {\n return match[1];\n }\n return null;\n }();\n IS_ANDROID = /Android/i.test(USER_AGENT);\n ANDROID_VERSION = function () {\n // This matches Android Major.Minor.Patch versions\n // ANDROID_VERSION is Major.Minor as a Number, if Minor isn't available, then only Major is returned\n const match = USER_AGENT.match(/Android (\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))*/i);\n if (!match) {\n return null;\n }\n const major = match[1] && parseFloat(match[1]);\n const minor = match[2] && parseFloat(match[2]);\n if (major && minor) {\n return parseFloat(match[1] + '.' + match[2]);\n } else if (major) {\n return major;\n }\n return null;\n }();\n IS_FIREFOX = /Firefox/i.test(USER_AGENT);\n IS_EDGE = /Edg/i.test(USER_AGENT);\n IS_CHROMIUM = /Chrome/i.test(USER_AGENT) || /CriOS/i.test(USER_AGENT);\n IS_CHROME = !IS_EDGE && IS_CHROMIUM;\n CHROMIUM_VERSION = CHROME_VERSION = function () {\n const match = USER_AGENT.match(/(Chrome|CriOS)\\/(\\d+)/);\n if (match && match[2]) {\n return parseFloat(match[2]);\n }\n return null;\n }();\n IE_VERSION = function () {\n const result = /MSIE\\s(\\d+)\\.\\d/.exec(USER_AGENT);\n let version = result && parseFloat(result[1]);\n if (!version && /Trident\\/7.0/i.test(USER_AGENT) && /rv:11.0/.test(USER_AGENT)) {\n // IE 11 has a different user agent string than other IE versions\n version = 11.0;\n }\n return version;\n }();\n IS_TIZEN = /Tizen/i.test(USER_AGENT);\n IS_WEBOS = /Web0S/i.test(USER_AGENT);\n IS_SMART_TV = IS_TIZEN || IS_WEBOS;\n IS_SAFARI = /Safari/i.test(USER_AGENT) && !IS_CHROME && !IS_ANDROID && !IS_EDGE && !IS_SMART_TV;\n IS_WINDOWS = /Windows/i.test(USER_AGENT);\n IS_IPAD = /iPad/i.test(USER_AGENT) || IS_SAFARI && TOUCH_ENABLED && !/iPhone/i.test(USER_AGENT);\n IS_IPHONE = /iPhone/i.test(USER_AGENT) && !IS_IPAD;\n}\n\n/**\n * Whether or not this is an iOS device.\n *\n * @static\n * @const\n * @type {Boolean}\n */\nconst IS_IOS = IS_IPHONE || IS_IPAD || IS_IPOD;\n\n/**\n * Whether or not this is any flavor of Safari - including iOS.\n *\n * @static\n * @const\n * @type {Boolean}\n */\nconst IS_ANY_SAFARI = (IS_SAFARI || IS_IOS) && !IS_CHROME;\n\nvar browser = /*#__PURE__*/Object.freeze({\n __proto__: null,\n get IS_IPOD () { return IS_IPOD; },\n get IOS_VERSION () { return IOS_VERSION; },\n get IS_ANDROID () { return IS_ANDROID; },\n get ANDROID_VERSION () { return ANDROID_VERSION; },\n get IS_FIREFOX () { return IS_FIREFOX; },\n get IS_EDGE () { return IS_EDGE; },\n get IS_CHROMIUM () { return IS_CHROMIUM; },\n get IS_CHROME () { return IS_CHROME; },\n get CHROMIUM_VERSION () { return CHROMIUM_VERSION; },\n get CHROME_VERSION () { return CHROME_VERSION; },\n get IE_VERSION () { return IE_VERSION; },\n get IS_SAFARI () { return IS_SAFARI; },\n get IS_WINDOWS () { return IS_WINDOWS; },\n get IS_IPAD () { return IS_IPAD; },\n get IS_IPHONE () { return IS_IPHONE; },\n get IS_TIZEN () { return IS_TIZEN; },\n get IS_WEBOS () { return IS_WEBOS; },\n get IS_SMART_TV () { return IS_SMART_TV; },\n TOUCH_ENABLED: TOUCH_ENABLED,\n IS_IOS: IS_IOS,\n IS_ANY_SAFARI: IS_ANY_SAFARI\n});\n\n/**\n * @file dom.js\n * @module dom\n */\n\n/**\n * Detect if a value is a string with any non-whitespace characters.\n *\n * @private\n * @param {string} str\n * The 
string to check\n *\n * @return {boolean}\n * Will be `true` if the string is non-blank, `false` otherwise.\n *\n */\nfunction isNonBlankString(str) {\n // we use str.trim as it will trim any whitespace characters\n // from the front or back of non-whitespace characters. aka\n // Any string that contains non-whitespace characters will\n // still contain them after `trim` but whitespace only strings\n // will have a length of 0, failing this check.\n return typeof str === 'string' && Boolean(str.trim());\n}\n\n/**\n * Throws an error if the passed string has whitespace. This is used by\n * class methods to be relatively consistent with the classList API.\n *\n * @private\n * @param {string} str\n * The string to check for whitespace.\n *\n * @throws {Error}\n * Throws an error if there is whitespace in the string.\n */\nfunction throwIfWhitespace(str) {\n // str.indexOf instead of regex because str.indexOf is faster performance wise.\n if (str.indexOf(' ') >= 0) {\n throw new Error('class has illegal whitespace characters');\n }\n}\n\n/**\n * Whether the current DOM interface appears to be real (i.e. not simulated).\n *\n * @return {boolean}\n * Will be `true` if the DOM appears to be real, `false` otherwise.\n */\nfunction isReal() {\n // Both document and window will never be undefined thanks to `global`.\n return document === window$1.document;\n}\n\n/**\n * Determines, via duck typing, whether or not a value is a DOM element.\n *\n * @param {*} value\n * The value to check.\n *\n * @return {boolean}\n * Will be `true` if the value is a DOM element, `false` otherwise.\n */\nfunction isEl(value) {\n return isObject(value) && value.nodeType === 1;\n}\n\n/**\n * Determines if the current DOM is embedded in an iframe.\n *\n * @return {boolean}\n * Will be `true` if the DOM is embedded in an iframe, `false`\n * otherwise.\n */\nfunction isInFrame() {\n // We need a try/catch here because Safari will throw errors when attempting\n // to get either `parent` or `self`\n try {\n return window$1.parent !== window$1.self;\n } catch (x) {\n return true;\n }\n}\n\n/**\n * Creates functions to query the DOM using a given method.\n *\n * @private\n * @param {string} method\n * The method to create the query with.\n *\n * @return {Function}\n * The query method\n */\nfunction createQuerier(method) {\n return function (selector, context) {\n if (!isNonBlankString(selector)) {\n return document[method](null);\n }\n if (isNonBlankString(context)) {\n context = document.querySelector(context);\n }\n const ctx = isEl(context) ? 
context : document;\n return ctx[method] && ctx[method](selector);\n };\n}\n\n/**\n * Creates an element and applies properties, attributes, and inserts content.\n *\n * @param {string} [tagName='div']\n * Name of tag to be created.\n *\n * @param {Object} [properties={}]\n * Element properties to be applied.\n *\n * @param {Object} [attributes={}]\n * Element attributes to be applied.\n *\n * @param {ContentDescriptor} [content]\n * A content descriptor object.\n *\n * @return {Element}\n * The element that was created.\n */\nfunction createEl(tagName = 'div', properties = {}, attributes = {}, content) {\n const el = document.createElement(tagName);\n Object.getOwnPropertyNames(properties).forEach(function (propName) {\n const val = properties[propName];\n\n // Handle textContent since it's not supported everywhere and we have a\n // method for it.\n if (propName === 'textContent') {\n textContent(el, val);\n } else if (el[propName] !== val || propName === 'tabIndex') {\n el[propName] = val;\n }\n });\n Object.getOwnPropertyNames(attributes).forEach(function (attrName) {\n el.setAttribute(attrName, attributes[attrName]);\n });\n if (content) {\n appendContent(el, content);\n }\n return el;\n}\n\n/**\n * Injects text into an element, replacing any existing contents entirely.\n *\n * @param {HTMLElement} el\n * The element to add text content into\n *\n * @param {string} text\n * The text content to add.\n *\n * @return {Element}\n * The element with added text content.\n */\nfunction textContent(el, text) {\n if (typeof el.textContent === 'undefined') {\n el.innerText = text;\n } else {\n el.textContent = text;\n }\n return el;\n}\n\n/**\n * Insert an element as the first child node of another\n *\n * @param {Element} child\n * Element to insert\n *\n * @param {Element} parent\n * Element to insert child into\n */\nfunction prependTo(child, parent) {\n if (parent.firstChild) {\n parent.insertBefore(child, parent.firstChild);\n } else {\n parent.appendChild(child);\n }\n}\n\n/**\n * Check if an element has a class name.\n *\n * @param {Element} element\n * Element to check\n *\n * @param {string} classToCheck\n * Class name to check for\n *\n * @return {boolean}\n * Will be `true` if the element has a class, `false` otherwise.\n *\n * @throws {Error}\n * Throws an error if `classToCheck` has white space.\n */\nfunction hasClass(element, classToCheck) {\n throwIfWhitespace(classToCheck);\n return element.classList.contains(classToCheck);\n}\n\n/**\n * Add a class name to an element.\n *\n * @param {Element} element\n * Element to add class name to.\n *\n * @param {...string} classesToAdd\n * One or more class name to add.\n *\n * @return {Element}\n * The DOM element with the added class name.\n */\nfunction addClass(element, ...classesToAdd) {\n element.classList.add(...classesToAdd.reduce((prev, current) => prev.concat(current.split(/\\s+/)), []));\n return element;\n}\n\n/**\n * Remove a class name from an element.\n *\n * @param {Element} element\n * Element to remove a class name from.\n *\n * @param {...string} classesToRemove\n * One or more class name to remove.\n *\n * @return {Element}\n * The DOM element with class name removed.\n */\nfunction removeClass(element, ...classesToRemove) {\n // Protect in case the player gets disposed\n if (!element) {\n log$1.warn(\"removeClass was called with an element that doesn't exist\");\n return null;\n }\n element.classList.remove(...classesToRemove.reduce((prev, current) => prev.concat(current.split(/\\s+/)), []));\n return 
element;\n}\n\n/**\n * The callback definition for toggleClass.\n *\n * @callback module:dom~PredicateCallback\n * @param {Element} element\n * The DOM element of the Component.\n *\n * @param {string} classToToggle\n * The `className` that wants to be toggled\n *\n * @return {boolean|undefined}\n * If `true` is returned, the `classToToggle` will be added to the\n * `element`. If `false`, the `classToToggle` will be removed from\n * the `element`. If `undefined`, the callback will be ignored.\n */\n\n/**\n * Adds or removes a class name to/from an element depending on an optional\n * condition or the presence/absence of the class name.\n *\n * @param {Element} element\n * The element to toggle a class name on.\n *\n * @param {string} classToToggle\n * The class that should be toggled.\n *\n * @param {boolean|module:dom~PredicateCallback} [predicate]\n * See the return value for {@link module:dom~PredicateCallback}\n *\n * @return {Element}\n * The element with a class that has been toggled.\n */\nfunction toggleClass(element, classToToggle, predicate) {\n if (typeof predicate === 'function') {\n predicate = predicate(element, classToToggle);\n }\n if (typeof predicate !== 'boolean') {\n predicate = undefined;\n }\n classToToggle.split(/\\s+/).forEach(className => element.classList.toggle(className, predicate));\n return element;\n}\n\n/**\n * Apply attributes to an HTML element.\n *\n * @param {Element} el\n * Element to add attributes to.\n *\n * @param {Object} [attributes]\n * Attributes to be applied.\n */\nfunction setAttributes(el, attributes) {\n Object.getOwnPropertyNames(attributes).forEach(function (attrName) {\n const attrValue = attributes[attrName];\n if (attrValue === null || typeof attrValue === 'undefined' || attrValue === false) {\n el.removeAttribute(attrName);\n } else {\n el.setAttribute(attrName, attrValue === true ? '' : attrValue);\n }\n });\n}\n\n/**\n * Get an element's attribute values, as defined on the HTML tag.\n *\n * Attributes are not the same as properties. They're defined on the tag\n * or with setAttribute.\n *\n * @param {Element} tag\n * Element from which to get tag attributes.\n *\n * @return {Object}\n * All attributes of the element. Boolean attributes will be `true` or\n * `false`, others will be strings.\n */\nfunction getAttributes(tag) {\n const obj = {};\n\n // known boolean attributes\n // we can check for matching boolean properties, but not all browsers\n // and not all tags know about these attributes, so, we still want to check them manually\n const knownBooleans = ['autoplay', 'controls', 'playsinline', 'loop', 'muted', 'default', 'defaultMuted'];\n if (tag && tag.attributes && tag.attributes.length > 0) {\n const attrs = tag.attributes;\n for (let i = attrs.length - 1; i >= 0; i--) {\n const attrName = attrs[i].name;\n /** @type {boolean|string} */\n let attrVal = attrs[i].value;\n\n // check for known booleans\n // the matching element property will return a value for typeof\n if (knownBooleans.includes(attrName)) {\n // the value of an included boolean attribute is typically an empty\n // string ('') which would equal false if we just check for a false value.\n // we also don't want support bad code like autoplay='false'\n attrVal = attrVal !== null ? 
true : false;\n }\n obj[attrName] = attrVal;\n }\n }\n return obj;\n}\n\n/**\n * Get the value of an element's attribute.\n *\n * @param {Element} el\n * A DOM element.\n *\n * @param {string} attribute\n * Attribute to get the value of.\n *\n * @return {string}\n * The value of the attribute.\n */\nfunction getAttribute(el, attribute) {\n return el.getAttribute(attribute);\n}\n\n/**\n * Set the value of an element's attribute.\n *\n * @param {Element} el\n * A DOM element.\n *\n * @param {string} attribute\n * Attribute to set.\n *\n * @param {string} value\n * Value to set the attribute to.\n */\nfunction setAttribute(el, attribute, value) {\n el.setAttribute(attribute, value);\n}\n\n/**\n * Remove an element's attribute.\n *\n * @param {Element} el\n * A DOM element.\n *\n * @param {string} attribute\n * Attribute to remove.\n */\nfunction removeAttribute(el, attribute) {\n el.removeAttribute(attribute);\n}\n\n/**\n * Attempt to block the ability to select text.\n */\nfunction blockTextSelection() {\n document.body.focus();\n document.onselectstart = function () {\n return false;\n };\n}\n\n/**\n * Turn off text selection blocking.\n */\nfunction unblockTextSelection() {\n document.onselectstart = function () {\n return true;\n };\n}\n\n/**\n * Identical to the native `getBoundingClientRect` function, but ensures that\n * the method is supported at all (it is in all browsers we claim to support)\n * and that the element is in the DOM before continuing.\n *\n * This wrapper function also shims properties which are not provided by some\n * older browsers (namely, IE8).\n *\n * Additionally, some browsers do not support adding properties to a\n * `ClientRect`/`DOMRect` object; so, we shallow-copy it with the standard\n * properties (except `x` and `y` which are not widely supported). 
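A usage sketch for the element helpers defined above, assuming a real DOM is available; the class and attribute names are illustrative:

```js
const button = createEl('button', {className: 'vjs-control', textContent: 'Play'}, {type: 'button'});

addClass(button, 'vjs-button vjs-playing'); // whitespace-separated names are split and added
toggleClass(button, 'vjs-playing');         // no predicate: simply toggled off again
hasClass(button, 'vjs-button');             // -> true

setAttributes(button, {'aria-disabled': true, title: null}); // true -> "", null/false removes

const videoEl = createEl('video', {}, {controls: '', preload: 'auto'});
getAttributes(videoEl); // -> {preload: 'auto', controls: true} - known boolean attrs come back as booleans
```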
This helps\n * avoid implementations where keys are non-enumerable.\n *\n * @param {Element} el\n * Element whose `ClientRect` we want to calculate.\n *\n * @return {Object|undefined}\n * Always returns a plain object - or `undefined` if it cannot.\n */\nfunction getBoundingClientRect(el) {\n if (el && el.getBoundingClientRect && el.parentNode) {\n const rect = el.getBoundingClientRect();\n const result = {};\n ['bottom', 'height', 'left', 'right', 'top', 'width'].forEach(k => {\n if (rect[k] !== undefined) {\n result[k] = rect[k];\n }\n });\n if (!result.height) {\n result.height = parseFloat(computedStyle(el, 'height'));\n }\n if (!result.width) {\n result.width = parseFloat(computedStyle(el, 'width'));\n }\n return result;\n }\n}\n\n/**\n * Represents the position of a DOM element on the page.\n *\n * @typedef {Object} module:dom~Position\n *\n * @property {number} left\n * Pixels to the left.\n *\n * @property {number} top\n * Pixels from the top.\n */\n\n/**\n * Get the position of an element in the DOM.\n *\n * Uses `getBoundingClientRect` technique from John Resig.\n *\n * @see http://ejohn.org/blog/getboundingclientrect-is-awesome/\n *\n * @param {Element} el\n * Element from which to get offset.\n *\n * @return {module:dom~Position}\n * The position of the element that was passed in.\n */\nfunction findPosition(el) {\n if (!el || el && !el.offsetParent) {\n return {\n left: 0,\n top: 0,\n width: 0,\n height: 0\n };\n }\n const width = el.offsetWidth;\n const height = el.offsetHeight;\n let left = 0;\n let top = 0;\n while (el.offsetParent && el !== document[FullscreenApi.fullscreenElement]) {\n left += el.offsetLeft;\n top += el.offsetTop;\n el = el.offsetParent;\n }\n return {\n left,\n top,\n width,\n height\n };\n}\n\n/**\n * Represents x and y coordinates for a DOM element or mouse pointer.\n *\n * @typedef {Object} module:dom~Coordinates\n *\n * @property {number} x\n * x coordinate in pixels\n *\n * @property {number} y\n * y coordinate in pixels\n */\n\n/**\n * Get the pointer position within an element.\n *\n * The base on the coordinates are the bottom left of the element.\n *\n * @param {Element} el\n * Element on which to get the pointer position on.\n *\n * @param {Event} event\n * Event object.\n *\n * @return {module:dom~Coordinates}\n * A coordinates object corresponding to the mouse position.\n *\n */\nfunction getPointerPosition(el, event) {\n const translated = {\n x: 0,\n y: 0\n };\n if (IS_IOS) {\n let item = el;\n while (item && item.nodeName.toLowerCase() !== 'html') {\n const transform = computedStyle(item, 'transform');\n if (/^matrix/.test(transform)) {\n const values = transform.slice(7, -1).split(/,\\s/).map(Number);\n translated.x += values[4];\n translated.y += values[5];\n } else if (/^matrix3d/.test(transform)) {\n const values = transform.slice(9, -1).split(/,\\s/).map(Number);\n translated.x += values[12];\n translated.y += values[13];\n }\n item = item.parentNode;\n }\n }\n const position = {};\n const boxTarget = findPosition(event.target);\n const box = findPosition(el);\n const boxW = box.width;\n const boxH = box.height;\n let offsetY = event.offsetY - (box.top - boxTarget.top);\n let offsetX = event.offsetX - (box.left - boxTarget.left);\n if (event.changedTouches) {\n offsetX = event.changedTouches[0].pageX - box.left;\n offsetY = event.changedTouches[0].pageY + box.top;\n if (IS_IOS) {\n offsetX -= translated.x;\n offsetY -= translated.y;\n }\n }\n position.y = 1 - Math.max(0, Math.min(1, offsetY / boxH));\n position.x = Math.max(0, 
Math.min(1, offsetX / boxW));\n return position;\n}\n\n/**\n * Determines, via duck typing, whether or not a value is a text node.\n *\n * @param {*} value\n * Check if this value is a text node.\n *\n * @return {boolean}\n * Will be `true` if the value is a text node, `false` otherwise.\n */\nfunction isTextNode(value) {\n return isObject(value) && value.nodeType === 3;\n}\n\n/**\n * Empties the contents of an element.\n *\n * @param {Element} el\n * The element to empty children from\n *\n * @return {Element}\n * The element with no children\n */\nfunction emptyEl(el) {\n while (el.firstChild) {\n el.removeChild(el.firstChild);\n }\n return el;\n}\n\n/**\n * This is a mixed value that describes content to be injected into the DOM\n * via some method. It can be of the following types:\n *\n * Type | Description\n * -----------|-------------\n * `string` | The value will be normalized into a text node.\n * `Element` | The value will be accepted as-is.\n * `Text` | A TextNode. The value will be accepted as-is.\n * `Array` | A one-dimensional array of strings, elements, text nodes, or functions. These functions should return a string, element, or text node (any other return value, like an array, will be ignored).\n * `Function` | A function, which is expected to return a string, element, text node, or array - any of the other possible values described above. This means that a content descriptor could be a function that returns an array of functions, but those second-level functions must return strings, elements, or text nodes.\n *\n * @typedef {string|Element|Text|Array|Function} ContentDescriptor\n */\n\n/**\n * Normalizes content for eventual insertion into the DOM.\n *\n * This allows a wide range of content definition methods, but helps protect\n * from falling into the trap of simply writing to `innerHTML`, which could\n * be an XSS concern.\n *\n * The content for an element can be passed in multiple types and\n * combinations, whose behavior is as follows:\n *\n * @param {ContentDescriptor} content\n * A content descriptor value.\n *\n * @return {Array}\n * All of the content that was passed in, normalized to an array of\n * elements or text nodes.\n */\nfunction normalizeContent(content) {\n // First, invoke content if it is a function. If it produces an array,\n // that needs to happen before normalization.\n if (typeof content === 'function') {\n content = content();\n }\n\n // Next up, normalize to an array, so one or many items can be normalized,\n // filtered, and returned.\n return (Array.isArray(content) ? 
content : [content]).map(value => {\n // First, invoke value if it is a function to produce a new value,\n // which will be subsequently normalized to a Node of some kind.\n if (typeof value === 'function') {\n value = value();\n }\n if (isEl(value) || isTextNode(value)) {\n return value;\n }\n if (typeof value === 'string' && /\\S/.test(value)) {\n return document.createTextNode(value);\n }\n }).filter(value => value);\n}\n\n/**\n * Normalizes and appends content to an element.\n *\n * @param {Element} el\n * Element to append normalized content to.\n *\n * @param {ContentDescriptor} content\n * A content descriptor value.\n *\n * @return {Element}\n * The element with appended normalized content.\n */\nfunction appendContent(el, content) {\n normalizeContent(content).forEach(node => el.appendChild(node));\n return el;\n}\n\n/**\n * Normalizes and inserts content into an element; this is identical to\n * `appendContent()`, except it empties the element first.\n *\n * @param {Element} el\n * Element to insert normalized content into.\n *\n * @param {ContentDescriptor} content\n * A content descriptor value.\n *\n * @return {Element}\n * The element with inserted normalized content.\n */\nfunction insertContent(el, content) {\n return appendContent(emptyEl(el), content);\n}\n\n/**\n * Check if an event was a single left click.\n *\n * @param {MouseEvent} event\n * Event object.\n *\n * @return {boolean}\n * Will be `true` if a single left click, `false` otherwise.\n */\nfunction isSingleLeftClick(event) {\n // Note: if you create something draggable, be sure to\n // call it on both `mousedown` and `mousemove` event,\n // otherwise `mousedown` should be enough for a button\n\n if (event.button === undefined && event.buttons === undefined) {\n // Why do we need `buttons` ?\n // Because, middle mouse sometimes have this:\n // e.button === 0 and e.buttons === 4\n // Furthermore, we want to prevent combination click, something like\n // HOLD middlemouse then left click, that would be\n // e.button === 0, e.buttons === 5\n // just `button` is not gonna work\n\n // Alright, then what this block does ?\n // this is for chrome `simulate mobile devices`\n // I want to support this as well\n\n return true;\n }\n if (event.button === 0 && event.buttons === undefined) {\n // Touch screen, sometimes on some specific device, `buttons`\n // doesn't have anything (safari on ios, blackberry...)\n\n return true;\n }\n\n // `mouseup` event on a single left click has\n // `button` and `buttons` equal to 0\n if (event.type === 'mouseup' && event.button === 0 && event.buttons === 0) {\n return true;\n }\n if (event.button !== 0 || event.buttons !== 1) {\n // This is the reason we have those if else block above\n // if any special case we can catch and let it slide\n // we do it above, when get to here, this definitely\n // is-not-left-click\n\n return false;\n }\n return true;\n}\n\n/**\n * Finds a single DOM element matching `selector` within the optional\n * `context` of another DOM element (defaulting to `document`).\n *\n * @param {string} selector\n * A valid CSS selector, which will be passed to `querySelector`.\n *\n * @param {Element|String} [context=document]\n * A DOM element within which to query. Can also be a selector\n * string in which case the first matching element will be used\n * as context. 
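The ContentDescriptor handling above is easiest to see in a short sketch, assuming `createEl`, `appendContent` and `insertContent` are in scope (the strings are illustrative):

```js
const wrapper = createEl('div');

// A content descriptor can mix strings, elements, text nodes and functions.
appendContent(wrapper, [
  'Now playing: ',
  createEl('strong', {textContent: 'Oceans'}),
  () => document.createTextNode(' (preview)')
]);

// insertContent() behaves the same, but empties the element first.
insertContent(wrapper, 'Replaced everything above');
```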
If missing (or no element matches selector), falls\n * back to `document`.\n *\n * @return {Element|null}\n * The element that was found or null.\n */\nconst $ = createQuerier('querySelector');\n\n/**\n * Finds a all DOM elements matching `selector` within the optional\n * `context` of another DOM element (defaulting to `document`).\n *\n * @param {string} selector\n * A valid CSS selector, which will be passed to `querySelectorAll`.\n *\n * @param {Element|String} [context=document]\n * A DOM element within which to query. Can also be a selector\n * string in which case the first matching element will be used\n * as context. If missing (or no element matches selector), falls\n * back to `document`.\n *\n * @return {NodeList}\n * A element list of elements that were found. Will be empty if none\n * were found.\n *\n */\nconst $$ = createQuerier('querySelectorAll');\n\n/**\n * A safe getComputedStyle.\n *\n * This is needed because in Firefox, if the player is loaded in an iframe with\n * `display:none`, then `getComputedStyle` returns `null`, so, we do a\n * null-check to make sure that the player doesn't break in these cases.\n *\n * @param {Element} el\n * The element you want the computed style of\n *\n * @param {string} prop\n * The property name you want\n *\n * @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397\n */\nfunction computedStyle(el, prop) {\n if (!el || !prop) {\n return '';\n }\n if (typeof window$1.getComputedStyle === 'function') {\n let computedStyleValue;\n try {\n computedStyleValue = window$1.getComputedStyle(el);\n } catch (e) {\n return '';\n }\n return computedStyleValue ? computedStyleValue.getPropertyValue(prop) || computedStyleValue[prop] : '';\n }\n return '';\n}\n\n/**\n * Copy document style sheets to another window.\n *\n * @param {Window} win\n * The window element you want to copy the document style sheets to.\n *\n */\nfunction copyStyleSheetsToWindow(win) {\n [...document.styleSheets].forEach(styleSheet => {\n try {\n const cssRules = [...styleSheet.cssRules].map(rule => rule.cssText).join('');\n const style = document.createElement('style');\n style.textContent = cssRules;\n win.document.head.appendChild(style);\n } catch (e) {\n const link = document.createElement('link');\n link.rel = 'stylesheet';\n link.type = styleSheet.type;\n // For older Safari this has to be the string; on other browsers setting the MediaList works\n link.media = styleSheet.media.mediaText;\n link.href = styleSheet.href;\n win.document.head.appendChild(link);\n }\n });\n}\n\nvar Dom = /*#__PURE__*/Object.freeze({\n __proto__: null,\n isReal: isReal,\n isEl: isEl,\n isInFrame: isInFrame,\n createEl: createEl,\n textContent: textContent,\n prependTo: prependTo,\n hasClass: hasClass,\n addClass: addClass,\n removeClass: removeClass,\n toggleClass: toggleClass,\n setAttributes: setAttributes,\n getAttributes: getAttributes,\n getAttribute: getAttribute,\n setAttribute: setAttribute,\n removeAttribute: removeAttribute,\n blockTextSelection: blockTextSelection,\n unblockTextSelection: unblockTextSelection,\n getBoundingClientRect: getBoundingClientRect,\n findPosition: findPosition,\n getPointerPosition: getPointerPosition,\n isTextNode: isTextNode,\n emptyEl: emptyEl,\n normalizeContent: normalizeContent,\n appendContent: appendContent,\n insertContent: insertContent,\n isSingleLeftClick: isSingleLeftClick,\n $: $,\n $$: $$,\n computedStyle: computedStyle,\n copyStyleSheetsToWindow: copyStyleSheetsToWindow\n});\n\n/**\n * @file setup.js - Functions for setting up a player 
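A quick sketch of the query and style helpers defined above, assuming a DOM is available; the selectors are illustrative:

```js
const playerEl = $('.video-js');            // first match, or null
const buttons = $$('button', '.video-js');  // context may be an element or a selector string
const width = parseFloat(computedStyle(playerEl, 'width')) || 0; // computedStyle returns '' when unavailable
```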
without\n * user interaction based on the data-setup `attribute` of the video tag.\n *\n * @module setup\n */\nlet _windowLoaded = false;\nlet videojs$1;\n\n/**\n * Set up any tags that have a data-setup `attribute` when the player is started.\n */\nconst autoSetup = function () {\n if (videojs$1.options.autoSetup === false) {\n return;\n }\n const vids = Array.prototype.slice.call(document.getElementsByTagName('video'));\n const audios = Array.prototype.slice.call(document.getElementsByTagName('audio'));\n const divs = Array.prototype.slice.call(document.getElementsByTagName('video-js'));\n const mediaEls = vids.concat(audios, divs);\n\n // Check if any media elements exist\n if (mediaEls && mediaEls.length > 0) {\n for (let i = 0, e = mediaEls.length; i < e; i++) {\n const mediaEl = mediaEls[i];\n\n // Check if element exists, has getAttribute func.\n if (mediaEl && mediaEl.getAttribute) {\n // Make sure this player hasn't already been set up.\n if (mediaEl.player === undefined) {\n const options = mediaEl.getAttribute('data-setup');\n\n // Check if data-setup attr exists.\n // We only auto-setup if they've added the data-setup attr.\n if (options !== null) {\n // Create new video.js instance.\n videojs$1(mediaEl);\n }\n }\n\n // If getAttribute isn't defined, we need to wait for the DOM.\n } else {\n autoSetupTimeout(1);\n break;\n }\n }\n\n // No videos were found, so keep looping unless page is finished loading.\n } else if (!_windowLoaded) {\n autoSetupTimeout(1);\n }\n};\n\n/**\n * Wait until the page is loaded before running autoSetup. This will be called in\n * autoSetup if `hasLoaded` returns false.\n *\n * @param {number} wait\n * How long to wait in ms\n *\n * @param {module:videojs} [vjs]\n * The videojs library function\n */\nfunction autoSetupTimeout(wait, vjs) {\n // Protect against breakage in non-browser environments\n if (!isReal()) {\n return;\n }\n if (vjs) {\n videojs$1 = vjs;\n }\n window$1.setTimeout(autoSetup, wait);\n}\n\n/**\n * Used to set the internal tracking of window loaded state to true.\n *\n * @private\n */\nfunction setWindowLoaded() {\n _windowLoaded = true;\n window$1.removeEventListener('load', setWindowLoaded);\n}\nif (isReal()) {\n if (document.readyState === 'complete') {\n setWindowLoaded();\n } else {\n /**\n * Listen for the load event on window, and set _windowLoaded to true.\n *\n * We use a standard event listener here to avoid incrementing the GUID\n * before any players are created.\n *\n * @listens load\n */\n window$1.addEventListener('load', setWindowLoaded);\n }\n}\n\n/**\n * @file stylesheet.js\n * @module stylesheet\n */\n\n/**\n * Create a DOM style element given a className for it.\n *\n * @param {string} className\n * The className to add to the created style element.\n *\n * @return {Element}\n * The element that was created.\n */\nconst createStyleElement = function (className) {\n const style = document.createElement('style');\n style.className = className;\n return style;\n};\n\n/**\n * Add text to a DOM element.\n *\n * @param {Element} el\n * The Element to add text content to.\n *\n * @param {string} content\n * The text to add to the element.\n */\nconst setTextContent = function (el, content) {\n if (el.styleSheet) {\n el.styleSheet.cssText = content;\n } else {\n el.textContent = content;\n }\n};\n\n/**\n * @file dom-data.js\n * @module dom-data\n */\n\n/**\n * Element Data Store.\n *\n * Allows for binding data to an element without putting it directly on the\n * element. Ex. 
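`autoSetup()` above only initializes media elements that opt in through a `data-setup` attribute. A sketch of the manual equivalent, assuming the `videojs$1` factory has already been supplied via `autoSetupTimeout()` and using an illustrative selector:

```js
// The markup autoSetup() reacts to looks like:
//   <video class="video-js" data-setup='{}'></video>
// For an element without data-setup, the manual equivalent of what autoSetup() does is:
const mediaEl = document.querySelector('video.video-js');
if (mediaEl && mediaEl.player === undefined && videojs$1) {
  videojs$1(mediaEl); // same call autoSetup() issues for data-setup elements
}
```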
Event listeners are stored here.\n * (also from jsninja.com, slightly modified and updated for closure compiler)\n *\n * @type {Object}\n * @private\n */\nvar DomData = new WeakMap();\n\n/**\n * @file guid.js\n * @module guid\n */\n\n// Default value for GUIDs. This allows us to reset the GUID counter in tests.\n//\n// The initial GUID is 3 because some users have come to rely on the first\n// default player ID ending up as `vjs_video_3`.\n//\n// See: https://github.com/videojs/video.js/pull/6216\nconst _initialGuid = 3;\n\n/**\n * Unique ID for an element or function\n *\n * @type {Number}\n */\nlet _guid = _initialGuid;\n\n/**\n * Get a unique auto-incrementing ID by number that has not been returned before.\n *\n * @return {number}\n * A new unique ID.\n */\nfunction newGUID() {\n return _guid++;\n}\n\n/**\n * @file events.js. An Event System (John Resig - Secrets of a JS Ninja http://jsninja.com/)\n * (Original book version wasn't completely usable, so fixed some things and made Closure Compiler compatible)\n * This should work very similarly to jQuery's events, however it's based off the book version which isn't as\n * robust as jquery's, so there's probably some differences.\n *\n * @file events.js\n * @module events\n */\n\n/**\n * Clean up the listener cache and dispatchers\n *\n * @param {Element|Object} elem\n * Element to clean up\n *\n * @param {string} type\n * Type of event to clean up\n */\nfunction _cleanUpEvents(elem, type) {\n if (!DomData.has(elem)) {\n return;\n }\n const data = DomData.get(elem);\n\n // Remove the events of a particular type if there are none left\n if (data.handlers[type].length === 0) {\n delete data.handlers[type];\n // data.handlers[type] = null;\n // Setting to null was causing an error with data.handlers\n\n // Remove the meta-handler from the element\n if (elem.removeEventListener) {\n elem.removeEventListener(type, data.dispatcher, false);\n } else if (elem.detachEvent) {\n elem.detachEvent('on' + type, data.dispatcher);\n }\n }\n\n // Remove the events object if there are no types left\n if (Object.getOwnPropertyNames(data.handlers).length <= 0) {\n delete data.handlers;\n delete data.dispatcher;\n delete data.disabled;\n }\n\n // Finally remove the element data if there is no data left\n if (Object.getOwnPropertyNames(data).length === 0) {\n DomData.delete(elem);\n }\n}\n\n/**\n * Loops through an array of event types and calls the requested method for each type.\n *\n * @param {Function} fn\n * The event method we want to use.\n *\n * @param {Element|Object} elem\n * Element or object to bind listeners to\n *\n * @param {string[]} types\n * Type of event to bind to.\n *\n * @param {Function} callback\n * Event listener.\n */\nfunction _handleMultipleEvents(fn, elem, types, callback) {\n types.forEach(function (type) {\n // Call the event method for each one of the types\n fn(elem, type, callback);\n });\n}\n\n/**\n * Fix a native event to have standard property values\n *\n * @param {Object} event\n * Event object to fix.\n *\n * @return {Object}\n * Fixed event object.\n */\nfunction fixEvent(event) {\n if (event.fixed_) {\n return event;\n }\n function returnTrue() {\n return true;\n }\n function returnFalse() {\n return false;\n }\n\n // Test if fixing up is needed\n // Used to check if !event.stopPropagation instead of isPropagationStopped\n // But native events return true for stopPropagation, but don't have\n // other expected methods like isPropagationStopped. Seems to be a problem\n // with the Javascript Ninja code. 
So we're just overriding all events now.\n if (!event || !event.isPropagationStopped || !event.isImmediatePropagationStopped) {\n const old = event || window$1.event;\n event = {};\n // Clone the old object so that we can modify the values event = {};\n // IE8 Doesn't like when you mess with native event properties\n // Firefox returns false for event.hasOwnProperty('type') and other props\n // which makes copying more difficult.\n // TODO: Probably best to create a whitelist of event props\n for (const key in old) {\n // Safari 6.0.3 warns you if you try to copy deprecated layerX/Y\n // Chrome warns you if you try to copy deprecated keyboardEvent.keyLocation\n // and webkitMovementX/Y\n // Lighthouse complains if Event.path is copied\n if (key !== 'layerX' && key !== 'layerY' && key !== 'keyLocation' && key !== 'webkitMovementX' && key !== 'webkitMovementY' && key !== 'path') {\n // Chrome 32+ warns if you try to copy deprecated returnValue, but\n // we still want to if preventDefault isn't supported (IE8).\n if (!(key === 'returnValue' && old.preventDefault)) {\n event[key] = old[key];\n }\n }\n }\n\n // The event occurred on this element\n if (!event.target) {\n event.target = event.srcElement || document;\n }\n\n // Handle which other element the event is related to\n if (!event.relatedTarget) {\n event.relatedTarget = event.fromElement === event.target ? event.toElement : event.fromElement;\n }\n\n // Stop the default browser action\n event.preventDefault = function () {\n if (old.preventDefault) {\n old.preventDefault();\n }\n event.returnValue = false;\n old.returnValue = false;\n event.defaultPrevented = true;\n };\n event.defaultPrevented = false;\n\n // Stop the event from bubbling\n event.stopPropagation = function () {\n if (old.stopPropagation) {\n old.stopPropagation();\n }\n event.cancelBubble = true;\n old.cancelBubble = true;\n event.isPropagationStopped = returnTrue;\n };\n event.isPropagationStopped = returnFalse;\n\n // Stop the event from bubbling and executing other handlers\n event.stopImmediatePropagation = function () {\n if (old.stopImmediatePropagation) {\n old.stopImmediatePropagation();\n }\n event.isImmediatePropagationStopped = returnTrue;\n event.stopPropagation();\n };\n event.isImmediatePropagationStopped = returnFalse;\n\n // Handle mouse position\n if (event.clientX !== null && event.clientX !== undefined) {\n const doc = document.documentElement;\n const body = document.body;\n event.pageX = event.clientX + (doc && doc.scrollLeft || body && body.scrollLeft || 0) - (doc && doc.clientLeft || body && body.clientLeft || 0);\n event.pageY = event.clientY + (doc && doc.scrollTop || body && body.scrollTop || 0) - (doc && doc.clientTop || body && body.clientTop || 0);\n }\n\n // Handle key presses\n event.which = event.charCode || event.keyCode;\n\n // Fix button for mouse clicks:\n // 0 == left; 1 == middle; 2 == right\n if (event.button !== null && event.button !== undefined) {\n // The following is disabled because it does not pass videojs-standard\n // and... yikes.\n /* eslint-disable */\n event.button = event.button & 1 ? 0 : event.button & 4 ? 1 : event.button & 2 ? 
2 : 0;\n /* eslint-enable */\n }\n }\n\n event.fixed_ = true;\n // Returns fixed-up instance\n return event;\n}\n\n/**\n * Whether passive event listeners are supported\n */\nlet _supportsPassive;\nconst supportsPassive = function () {\n if (typeof _supportsPassive !== 'boolean') {\n _supportsPassive = false;\n try {\n const opts = Object.defineProperty({}, 'passive', {\n get() {\n _supportsPassive = true;\n }\n });\n window$1.addEventListener('test', null, opts);\n window$1.removeEventListener('test', null, opts);\n } catch (e) {\n // disregard\n }\n }\n return _supportsPassive;\n};\n\n/**\n * Touch events Chrome expects to be passive\n */\nconst passiveEvents = ['touchstart', 'touchmove'];\n\n/**\n * Add an event listener to element\n * It stores the handler function in a separate cache object\n * and adds a generic handler to the element's event,\n * along with a unique id (guid) to the element.\n *\n * @param {Element|Object} elem\n * Element or object to bind listeners to\n *\n * @param {string|string[]} type\n * Type of event to bind to.\n *\n * @param {Function} fn\n * Event listener.\n */\nfunction on(elem, type, fn) {\n if (Array.isArray(type)) {\n return _handleMultipleEvents(on, elem, type, fn);\n }\n if (!DomData.has(elem)) {\n DomData.set(elem, {});\n }\n const data = DomData.get(elem);\n\n // We need a place to store all our handler data\n if (!data.handlers) {\n data.handlers = {};\n }\n if (!data.handlers[type]) {\n data.handlers[type] = [];\n }\n if (!fn.guid) {\n fn.guid = newGUID();\n }\n data.handlers[type].push(fn);\n if (!data.dispatcher) {\n data.disabled = false;\n data.dispatcher = function (event, hash) {\n if (data.disabled) {\n return;\n }\n event = fixEvent(event);\n const handlers = data.handlers[event.type];\n if (handlers) {\n // Copy handlers so if handlers are added/removed during the process it doesn't throw everything off.\n const handlersCopy = handlers.slice(0);\n for (let m = 0, n = handlersCopy.length; m < n; m++) {\n if (event.isImmediatePropagationStopped()) {\n break;\n } else {\n try {\n handlersCopy[m].call(elem, event, hash);\n } catch (e) {\n log$1.error(e);\n }\n }\n }\n }\n };\n }\n if (data.handlers[type].length === 1) {\n if (elem.addEventListener) {\n let options = false;\n if (supportsPassive() && passiveEvents.indexOf(type) > -1) {\n options = {\n passive: true\n };\n }\n elem.addEventListener(type, data.dispatcher, options);\n } else if (elem.attachEvent) {\n elem.attachEvent('on' + type, data.dispatcher);\n }\n }\n}\n\n/**\n * Removes event listeners from an element\n *\n * @param {Element|Object} elem\n * Object to remove listeners from.\n *\n * @param {string|string[]} [type]\n * Type of listener to remove. Don't include to remove all events from element.\n *\n * @param {Function} [fn]\n * Specific listener to remove. 
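A sketch of the `on` helper above (its counterpart `off` is defined just below); it works for DOM elements and plain objects alike, and the handler names are illustrative:

```js
const el = document.createElement('div');
const onClick = e => console.log('clicked', e.target);

on(el, 'click', onClick);                      // first handler installs the shared dispatcher
on(el, ['touchstart', 'touchmove'], () => {}); // array form; registered passively where supported
off(el, 'click', onClick);                     // removes just this handler (matched by its guid)
off(el);                                       // removes everything and lets the DomData entry be cleaned up
```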
Don't include to remove listeners for an event\n * type.\n */\nfunction off(elem, type, fn) {\n // Don't want to add a cache object through getElData if not needed\n if (!DomData.has(elem)) {\n return;\n }\n const data = DomData.get(elem);\n\n // If no events exist, nothing to unbind\n if (!data.handlers) {\n return;\n }\n if (Array.isArray(type)) {\n return _handleMultipleEvents(off, elem, type, fn);\n }\n\n // Utility function\n const removeType = function (el, t) {\n data.handlers[t] = [];\n _cleanUpEvents(el, t);\n };\n\n // Are we removing all bound events?\n if (type === undefined) {\n for (const t in data.handlers) {\n if (Object.prototype.hasOwnProperty.call(data.handlers || {}, t)) {\n removeType(elem, t);\n }\n }\n return;\n }\n const handlers = data.handlers[type];\n\n // If no handlers exist, nothing to unbind\n if (!handlers) {\n return;\n }\n\n // If no listener was provided, remove all listeners for type\n if (!fn) {\n removeType(elem, type);\n return;\n }\n\n // We're only removing a single handler\n if (fn.guid) {\n for (let n = 0; n < handlers.length; n++) {\n if (handlers[n].guid === fn.guid) {\n handlers.splice(n--, 1);\n }\n }\n }\n _cleanUpEvents(elem, type);\n}\n\n/**\n * Trigger an event for an element\n *\n * @param {Element|Object} elem\n * Element to trigger an event on\n *\n * @param {EventTarget~Event|string} event\n * A string (the type) or an event object with a type attribute\n *\n * @param {Object} [hash]\n * data hash to pass along with the event\n *\n * @return {boolean|undefined}\n * Returns the opposite of `defaultPrevented` if default was\n * prevented. Otherwise, returns `undefined`\n */\nfunction trigger(elem, event, hash) {\n // Fetches element data and a reference to the parent (for bubbling).\n // Don't want to add a data object to cache for every parent,\n // so checking hasElData first.\n const elemData = DomData.has(elem) ? DomData.get(elem) : {};\n const parent = elem.parentNode || elem.ownerDocument;\n // type = event.type || event,\n // handler;\n\n // If an event name was passed as a string, creates an event out of it\n if (typeof event === 'string') {\n event = {\n type: event,\n target: elem\n };\n } else if (!event.target) {\n event.target = elem;\n }\n\n // Normalizes the event properties.\n event = fixEvent(event);\n\n // If the passed element has a dispatcher, executes the established handlers.\n if (elemData.dispatcher) {\n elemData.dispatcher.call(elem, event, hash);\n }\n\n // Unless explicitly stopped or the event does not bubble (e.g. 
media events)\n // recursively calls this function to bubble the event up the DOM.\n if (parent && !event.isPropagationStopped() && event.bubbles === true) {\n trigger.call(null, parent, event, hash);\n\n // If at the top of the DOM, triggers the default action unless disabled.\n } else if (!parent && !event.defaultPrevented && event.target && event.target[event.type]) {\n if (!DomData.has(event.target)) {\n DomData.set(event.target, {});\n }\n const targetData = DomData.get(event.target);\n\n // Checks if the target has a default action for this event.\n if (event.target[event.type]) {\n // Temporarily disables event dispatching on the target as we have already executed the handler.\n targetData.disabled = true;\n // Executes the default action.\n if (typeof event.target[event.type] === 'function') {\n event.target[event.type]();\n }\n // Re-enables event dispatching.\n targetData.disabled = false;\n }\n }\n\n // Inform the triggerer if the default was prevented by returning false\n return !event.defaultPrevented;\n}\n\n/**\n * Trigger a listener only once for an event.\n *\n * @param {Element|Object} elem\n * Element or object to bind to.\n *\n * @param {string|string[]} type\n * Name/type of event\n *\n * @param {Event~EventListener} fn\n * Event listener function\n */\nfunction one(elem, type, fn) {\n if (Array.isArray(type)) {\n return _handleMultipleEvents(one, elem, type, fn);\n }\n const func = function () {\n off(elem, type, func);\n fn.apply(this, arguments);\n };\n\n // copy the guid to the new function so it can removed using the original function's ID\n func.guid = fn.guid = fn.guid || newGUID();\n on(elem, type, func);\n}\n\n/**\n * Trigger a listener only once and then turn if off for all\n * configured events\n *\n * @param {Element|Object} elem\n * Element or object to bind to.\n *\n * @param {string|string[]} type\n * Name/type of event\n *\n * @param {Event~EventListener} fn\n * Event listener function\n */\nfunction any(elem, type, fn) {\n const func = function () {\n off(elem, type, func);\n fn.apply(this, arguments);\n };\n\n // copy the guid to the new function so it can removed using the original function's ID\n func.guid = fn.guid = fn.guid || newGUID();\n\n // multiple ons, but one off for everything\n on(elem, type, func);\n}\n\nvar Events = /*#__PURE__*/Object.freeze({\n __proto__: null,\n fixEvent: fixEvent,\n on: on,\n off: off,\n trigger: trigger,\n one: one,\n any: any\n});\n\n/**\n * @file fn.js\n * @module fn\n */\nconst UPDATE_REFRESH_INTERVAL = 30;\n\n/**\n * A private, internal-only function for changing the context of a function.\n *\n * It also stores a unique id on the function so it can be easily removed from\n * events.\n *\n * @private\n * @function\n * @param {*} context\n * The object to bind as scope.\n *\n * @param {Function} fn\n * The function to be bound to a scope.\n *\n * @param {number} [uid]\n * An optional unique ID for the function to be set\n *\n * @return {Function}\n * The new function that will be bound into the context given\n */\nconst bind_ = function (context, fn, uid) {\n // Make sure the function has a unique ID\n if (!fn.guid) {\n fn.guid = newGUID();\n }\n\n // Create the new function that changes the context\n const bound = fn.bind(context);\n\n // Allow for the ability to individualize this function\n // Needed in the case where multiple objects might share the same prototype\n // IF both items add an event listener with the same function, then you try to remove just one\n // it will remove both because they both have 
the same guid.\n // when using this, you need to use the bind method when you remove the listener as well.\n // currently used in text tracks\n bound.guid = uid ? uid + '_' + fn.guid : fn.guid;\n return bound;\n};\n\n/**\n * Wraps the given function, `fn`, with a new function that only invokes `fn`\n * at most once per every `wait` milliseconds.\n *\n * @function\n * @param {Function} fn\n * The function to be throttled.\n *\n * @param {number} wait\n * The number of milliseconds by which to throttle.\n *\n * @return {Function}\n */\nconst throttle = function (fn, wait) {\n let last = window$1.performance.now();\n const throttled = function (...args) {\n const now = window$1.performance.now();\n if (now - last >= wait) {\n fn(...args);\n last = now;\n }\n };\n return throttled;\n};\n\n/**\n * Creates a debounced function that delays invoking `func` until after `wait`\n * milliseconds have elapsed since the last time the debounced function was\n * invoked.\n *\n * Inspired by lodash and underscore implementations.\n *\n * @function\n * @param {Function} func\n * The function to wrap with debounce behavior.\n *\n * @param {number} wait\n * The number of milliseconds to wait after the last invocation.\n *\n * @param {boolean} [immediate]\n * Whether or not to invoke the function immediately upon creation.\n *\n * @param {Object} [context=window]\n * The \"context\" in which the debounced function should debounce. For\n * example, if this function should be tied to a Video.js player,\n * the player can be passed here. Alternatively, defaults to the\n * global `window` object.\n *\n * @return {Function}\n * A debounced function.\n */\nconst debounce = function (func, wait, immediate, context = window$1) {\n let timeout;\n const cancel = () => {\n context.clearTimeout(timeout);\n timeout = null;\n };\n\n /* eslint-disable consistent-this */\n const debounced = function () {\n const self = this;\n const args = arguments;\n let later = function () {\n timeout = null;\n later = null;\n if (!immediate) {\n func.apply(self, args);\n }\n };\n if (!timeout && immediate) {\n func.apply(self, args);\n }\n context.clearTimeout(timeout);\n timeout = context.setTimeout(later, wait);\n };\n /* eslint-enable consistent-this */\n\n debounced.cancel = cancel;\n return debounced;\n};\n\nvar Fn = /*#__PURE__*/Object.freeze({\n __proto__: null,\n UPDATE_REFRESH_INTERVAL: UPDATE_REFRESH_INTERVAL,\n bind_: bind_,\n throttle: throttle,\n debounce: debounce\n});\n\n/**\n * @file src/js/event-target.js\n */\nlet EVENT_MAP;\n\n/**\n * `EventTarget` is a class that can have the same API as the DOM `EventTarget`. It\n * adds shorthand functions that wrap around lengthy functions. For example:\n * the `on` function is a wrapper around `addEventListener`.\n *\n * @see [EventTarget Spec]{@link https://www.w3.org/TR/DOM-Level-2-Events/events.html#Events-EventTarget}\n * @class EventTarget\n */\nclass EventTarget$2 {\n /**\n * Adds an `event listener` to an instance of an `EventTarget`. 
An `event listener` is a\n * function that will get called when an event with a certain name gets triggered.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to call with `EventTarget`s\n */\n on(type, fn) {\n // Remove the addEventListener alias before calling Events.on\n // so we don't get into an infinite type loop\n const ael = this.addEventListener;\n this.addEventListener = () => {};\n on(this, type, fn);\n this.addEventListener = ael;\n }\n /**\n * Removes an `event listener` for a specific event from an instance of `EventTarget`.\n * This makes it so that the `event listener` will no longer get called when the\n * named event happens.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to remove.\n */\n off(type, fn) {\n off(this, type, fn);\n }\n /**\n * This function will add an `event listener` that gets triggered only once. After the\n * first trigger it will get removed. This is like adding an `event listener`\n * with {@link EventTarget#on} that calls {@link EventTarget#off} on itself.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to be called once for each event name.\n */\n one(type, fn) {\n // Remove the addEventListener aliasing Events.on\n // so we don't get into an infinite type loop\n const ael = this.addEventListener;\n this.addEventListener = () => {};\n one(this, type, fn);\n this.addEventListener = ael;\n }\n /**\n * This function will add an `event listener` that gets triggered only once and is\n * removed from all events. This is like adding an array of `event listener`s\n * with {@link EventTarget#on} that calls {@link EventTarget#off} on all events the\n * first time it is triggered.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to be called once for each event name.\n */\n any(type, fn) {\n // Remove the addEventListener aliasing Events.on\n // so we don't get into an infinite type loop\n const ael = this.addEventListener;\n this.addEventListener = () => {};\n any(this, type, fn);\n this.addEventListener = ael;\n }\n /**\n * This function causes an event to happen. This will then cause any `event listeners`\n * that are waiting for that event, to get called. If there are no `event listeners`\n * for an event then nothing will happen.\n *\n * If the name of the `Event` that is being triggered is in `EventTarget.allowedEvents_`.\n * Trigger will also call the `on` + `uppercaseEventName` function.\n *\n * Example:\n * 'click' is in `EventTarget.allowedEvents_`, so, trigger will attempt to call\n * `onClick` if it exists.\n *\n * @param {string|EventTarget~Event|Object} event\n * The name of the event, an `Event`, or an object with a key of type set to\n * an event name.\n */\n trigger(event) {\n const type = event.type || event;\n\n // deprecation\n // In a future version we should default target to `this`\n // similar to how we default the target to `elem` in\n // `Events.trigger`. 
Right now the default `target` will be\n // `document` due to the `Event.fixEvent` call.\n if (typeof event === 'string') {\n event = {\n type\n };\n }\n event = fixEvent(event);\n if (this.allowedEvents_[type] && this['on' + type]) {\n this['on' + type](event);\n }\n trigger(this, event);\n }\n queueTrigger(event) {\n // only set up EVENT_MAP if it'll be used\n if (!EVENT_MAP) {\n EVENT_MAP = new Map();\n }\n const type = event.type || event;\n let map = EVENT_MAP.get(this);\n if (!map) {\n map = new Map();\n EVENT_MAP.set(this, map);\n }\n const oldTimeout = map.get(type);\n map.delete(type);\n window$1.clearTimeout(oldTimeout);\n const timeout = window$1.setTimeout(() => {\n map.delete(type);\n // if we cleared out all timeouts for the current target, delete its map\n if (map.size === 0) {\n map = null;\n EVENT_MAP.delete(this);\n }\n this.trigger(event);\n }, 0);\n map.set(type, timeout);\n }\n}\n\n/**\n * A Custom DOM event.\n *\n * @typedef {CustomEvent} Event\n * @see [Properties]{@link https://developer.mozilla.org/en-US/docs/Web/API/CustomEvent}\n */\n\n/**\n * All event listeners should follow the following format.\n *\n * @callback EventListener\n * @this {EventTarget}\n *\n * @param {Event} event\n * the event that triggered this function\n *\n * @param {Object} [hash]\n * hash of data sent during the event\n */\n\n/**\n * An object containing event names as keys and booleans as values.\n *\n * > NOTE: If an event name is set to a true value here {@link EventTarget#trigger}\n * will have extra functionality. See that function for more information.\n *\n * @property EventTarget.prototype.allowedEvents_\n * @protected\n */\nEventTarget$2.prototype.allowedEvents_ = {};\n\n/**\n * An alias of {@link EventTarget#on}. Allows `EventTarget` to mimic\n * the standard DOM API.\n *\n * @function\n * @see {@link EventTarget#on}\n */\nEventTarget$2.prototype.addEventListener = EventTarget$2.prototype.on;\n\n/**\n * An alias of {@link EventTarget#off}. Allows `EventTarget` to mimic\n * the standard DOM API.\n *\n * @function\n * @see {@link EventTarget#off}\n */\nEventTarget$2.prototype.removeEventListener = EventTarget$2.prototype.off;\n\n/**\n * An alias of {@link EventTarget#trigger}. 
Allows `EventTarget` to mimic\n * the standard DOM API.\n *\n * @function\n * @see {@link EventTarget#trigger}\n */\nEventTarget$2.prototype.dispatchEvent = EventTarget$2.prototype.trigger;\n\n/**\n * @file mixins/evented.js\n * @module evented\n */\nconst objName = obj => {\n if (typeof obj.name === 'function') {\n return obj.name();\n }\n if (typeof obj.name === 'string') {\n return obj.name;\n }\n if (obj.name_) {\n return obj.name_;\n }\n if (obj.constructor && obj.constructor.name) {\n return obj.constructor.name;\n }\n return typeof obj;\n};\n\n/**\n * Returns whether or not an object has had the evented mixin applied.\n *\n * @param {Object} object\n * An object to test.\n *\n * @return {boolean}\n * Whether or not the object appears to be evented.\n */\nconst isEvented = object => object instanceof EventTarget$2 || !!object.eventBusEl_ && ['on', 'one', 'off', 'trigger'].every(k => typeof object[k] === 'function');\n\n/**\n * Adds a callback to run after the evented mixin applied.\n *\n * @param {Object} target\n * An object to Add\n * @param {Function} callback\n * The callback to run.\n */\nconst addEventedCallback = (target, callback) => {\n if (isEvented(target)) {\n callback();\n } else {\n if (!target.eventedCallbacks) {\n target.eventedCallbacks = [];\n }\n target.eventedCallbacks.push(callback);\n }\n};\n\n/**\n * Whether a value is a valid event type - non-empty string or array.\n *\n * @private\n * @param {string|Array} type\n * The type value to test.\n *\n * @return {boolean}\n * Whether or not the type is a valid event type.\n */\nconst isValidEventType = type =>\n// The regex here verifies that the `type` contains at least one non-\n// whitespace character.\ntypeof type === 'string' && /\\S/.test(type) || Array.isArray(type) && !!type.length;\n\n/**\n * Validates a value to determine if it is a valid event target. Throws if not.\n *\n * @private\n * @throws {Error}\n * If the target does not appear to be a valid event target.\n *\n * @param {Object} target\n * The object to test.\n *\n * @param {Object} obj\n * The evented object we are validating for\n *\n * @param {string} fnName\n * The name of the evented mixin function that called this.\n */\nconst validateTarget = (target, obj, fnName) => {\n if (!target || !target.nodeName && !isEvented(target)) {\n throw new Error(`Invalid target for ${objName(obj)}#${fnName}; must be a DOM node or evented object.`);\n }\n};\n\n/**\n * Validates a value to determine if it is a valid event target. Throws if not.\n *\n * @private\n * @throws {Error}\n * If the type does not appear to be a valid event type.\n *\n * @param {string|Array} type\n * The type to test.\n *\n * @param {Object} obj\n* The evented object we are validating for\n *\n * @param {string} fnName\n * The name of the evented mixin function that called this.\n */\nconst validateEventType = (type, obj, fnName) => {\n if (!isValidEventType(type)) {\n throw new Error(`Invalid event type for ${objName(obj)}#${fnName}; must be a non-empty string or array.`);\n }\n};\n\n/**\n * Validates a value to determine if it is a valid listener. 
Throws if not.\n *\n * @private\n * @throws {Error}\n * If the listener is not a function.\n *\n * @param {Function} listener\n * The listener to test.\n *\n * @param {Object} obj\n * The evented object we are validating for\n *\n * @param {string} fnName\n * The name of the evented mixin function that called this.\n */\nconst validateListener = (listener, obj, fnName) => {\n if (typeof listener !== 'function') {\n throw new Error(`Invalid listener for ${objName(obj)}#${fnName}; must be a function.`);\n }\n};\n\n/**\n * Takes an array of arguments given to `on()` or `one()`, validates them, and\n * normalizes them into an object.\n *\n * @private\n * @param {Object} self\n * The evented object on which `on()` or `one()` was called. This\n * object will be bound as the `this` value for the listener.\n *\n * @param {Array} args\n * An array of arguments passed to `on()` or `one()`.\n *\n * @param {string} fnName\n * The name of the evented mixin function that called this.\n *\n * @return {Object}\n * An object containing useful values for `on()` or `one()` calls.\n */\nconst normalizeListenArgs = (self, args, fnName) => {\n // If the number of arguments is less than 3, the target is always the\n // evented object itself.\n const isTargetingSelf = args.length < 3 || args[0] === self || args[0] === self.eventBusEl_;\n let target;\n let type;\n let listener;\n if (isTargetingSelf) {\n target = self.eventBusEl_;\n\n // Deal with cases where we got 3 arguments, but we are still listening to\n // the evented object itself.\n if (args.length >= 3) {\n args.shift();\n }\n [type, listener] = args;\n } else {\n [target, type, listener] = args;\n }\n validateTarget(target, self, fnName);\n validateEventType(type, self, fnName);\n validateListener(listener, self, fnName);\n listener = bind_(self, listener);\n return {\n isTargetingSelf,\n target,\n type,\n listener\n };\n};\n\n/**\n * Adds the listener to the event type(s) on the target, normalizing for\n * the type of target.\n *\n * @private\n * @param {Element|Object} target\n * A DOM node or evented object.\n *\n * @param {string} method\n * The event binding method to use (\"on\" or \"one\").\n *\n * @param {string|Array} type\n * One or more event type(s).\n *\n * @param {Function} listener\n * A listener function.\n */\nconst listen = (target, method, type, listener) => {\n validateTarget(target, target, method);\n if (target.nodeName) {\n Events[method](target, type, listener);\n } else {\n target[method](type, listener);\n }\n};\n\n/**\n * Contains methods that provide event capabilities to an object which is passed\n * to {@link module:evented|evented}.\n *\n * @mixin EventedMixin\n */\nconst EventedMixin = {\n /**\n * Add a listener to an event (or events) on this object or another evented\n * object.\n *\n * @param {string|Array|Element|Object} targetOrType\n * If this is a string or array, it represents the event type(s)\n * that will trigger the listener.\n *\n * Another evented object can be passed here instead, which will\n * cause the listener to listen for events on _that_ object.\n *\n * In either case, the listener's `this` value will be bound to\n * this object.\n *\n * @param {string|Array|Function} typeOrListener\n * If the first argument was a string or array, this should be the\n * listener function. 
Otherwise, this is a string or array of event\n * type(s).\n *\n * @param {Function} [listener]\n * If the first argument was another evented object, this will be\n * the listener function.\n */\n on(...args) {\n const {\n isTargetingSelf,\n target,\n type,\n listener\n } = normalizeListenArgs(this, args, 'on');\n listen(target, 'on', type, listener);\n\n // If this object is listening to another evented object.\n if (!isTargetingSelf) {\n // If this object is disposed, remove the listener.\n const removeListenerOnDispose = () => this.off(target, type, listener);\n\n // Use the same function ID as the listener so we can remove it later it\n // using the ID of the original listener.\n removeListenerOnDispose.guid = listener.guid;\n\n // Add a listener to the target's dispose event as well. This ensures\n // that if the target is disposed BEFORE this object, we remove the\n // removal listener that was just added. Otherwise, we create a memory leak.\n const removeRemoverOnTargetDispose = () => this.off('dispose', removeListenerOnDispose);\n\n // Use the same function ID as the listener so we can remove it later\n // it using the ID of the original listener.\n removeRemoverOnTargetDispose.guid = listener.guid;\n listen(this, 'on', 'dispose', removeListenerOnDispose);\n listen(target, 'on', 'dispose', removeRemoverOnTargetDispose);\n }\n },\n /**\n * Add a listener to an event (or events) on this object or another evented\n * object. The listener will be called once per event and then removed.\n *\n * @param {string|Array|Element|Object} targetOrType\n * If this is a string or array, it represents the event type(s)\n * that will trigger the listener.\n *\n * Another evented object can be passed here instead, which will\n * cause the listener to listen for events on _that_ object.\n *\n * In either case, the listener's `this` value will be bound to\n * this object.\n *\n * @param {string|Array|Function} typeOrListener\n * If the first argument was a string or array, this should be the\n * listener function. Otherwise, this is a string or array of event\n * type(s).\n *\n * @param {Function} [listener]\n * If the first argument was another evented object, this will be\n * the listener function.\n */\n one(...args) {\n const {\n isTargetingSelf,\n target,\n type,\n listener\n } = normalizeListenArgs(this, args, 'one');\n\n // Targeting this evented object.\n if (isTargetingSelf) {\n listen(target, 'one', type, listener);\n\n // Targeting another evented object.\n } else {\n // TODO: This wrapper is incorrect! It should only\n // remove the wrapper for the event type that called it.\n // Instead all listeners are removed on the first trigger!\n // see https://github.com/videojs/video.js/issues/5962\n const wrapper = (...largs) => {\n this.off(target, type, wrapper);\n listener.apply(null, largs);\n };\n\n // Use the same function ID as the listener so we can remove it later\n // it using the ID of the original listener.\n wrapper.guid = listener.guid;\n listen(target, 'one', type, wrapper);\n }\n },\n /**\n * Add a listener to an event (or events) on this object or another evented\n * object. 
The listener will only be called once for the first event that is triggered\n * then removed.\n *\n * @param {string|Array|Element|Object} targetOrType\n * If this is a string or array, it represents the event type(s)\n * that will trigger the listener.\n *\n * Another evented object can be passed here instead, which will\n * cause the listener to listen for events on _that_ object.\n *\n * In either case, the listener's `this` value will be bound to\n * this object.\n *\n * @param {string|Array|Function} typeOrListener\n * If the first argument was a string or array, this should be the\n * listener function. Otherwise, this is a string or array of event\n * type(s).\n *\n * @param {Function} [listener]\n * If the first argument was another evented object, this will be\n * the listener function.\n */\n any(...args) {\n const {\n isTargetingSelf,\n target,\n type,\n listener\n } = normalizeListenArgs(this, args, 'any');\n\n // Targeting this evented object.\n if (isTargetingSelf) {\n listen(target, 'any', type, listener);\n\n // Targeting another evented object.\n } else {\n const wrapper = (...largs) => {\n this.off(target, type, wrapper);\n listener.apply(null, largs);\n };\n\n // Use the same function ID as the listener so we can remove it later\n // it using the ID of the original listener.\n wrapper.guid = listener.guid;\n listen(target, 'any', type, wrapper);\n }\n },\n /**\n * Removes listener(s) from event(s) on an evented object.\n *\n * @param {string|Array|Element|Object} [targetOrType]\n * If this is a string or array, it represents the event type(s).\n *\n * Another evented object can be passed here instead, in which case\n * ALL 3 arguments are _required_.\n *\n * @param {string|Array|Function} [typeOrListener]\n * If the first argument was a string or array, this may be the\n * listener function. Otherwise, this is a string or array of event\n * type(s).\n *\n * @param {Function} [listener]\n * If the first argument was another evented object, this will be\n * the listener function; otherwise, _all_ listeners bound to the\n * event type(s) will be removed.\n */\n off(targetOrType, typeOrListener, listener) {\n // Targeting this evented object.\n if (!targetOrType || isValidEventType(targetOrType)) {\n off(this.eventBusEl_, targetOrType, typeOrListener);\n\n // Targeting another evented object.\n } else {\n const target = targetOrType;\n const type = typeOrListener;\n\n // Fail fast and in a meaningful way!\n validateTarget(target, this, 'off');\n validateEventType(type, this, 'off');\n validateListener(listener, this, 'off');\n\n // Ensure there's at least a guid, even if the function hasn't been used\n listener = bind_(this, listener);\n\n // Remove the dispose listener on this evented object, which was given\n // the same guid as the event listener in on().\n this.off('dispose', listener);\n if (target.nodeName) {\n off(target, type, listener);\n off(target, 'dispose', listener);\n } else if (isEvented(target)) {\n target.off(type, listener);\n target.off('dispose', listener);\n }\n }\n },\n /**\n * Fire an event on this evented object, causing its listeners to be called.\n *\n * @param {string|Object} event\n * An event type or an object with a type property.\n *\n * @param {Object} [hash]\n * An additional object to pass along to listeners.\n *\n * @return {boolean}\n * Whether or not the default behavior was prevented.\n */\n trigger(event, hash) {\n validateTarget(this.eventBusEl_, this, 'trigger');\n const type = event && typeof event !== 'string' ? 
event.type : event;\n if (!isValidEventType(type)) {\n throw new Error(`Invalid event type for ${objName(this)}#trigger; ` + 'must be a non-empty string or object with a type key that has a non-empty value.');\n }\n return trigger(this.eventBusEl_, event, hash);\n }\n};\n\n/**\n * Applies {@link module:evented~EventedMixin|EventedMixin} to a target object.\n *\n * @param {Object} target\n * The object to which to add event methods.\n *\n * @param {Object} [options={}]\n * Options for customizing the mixin behavior.\n *\n * @param {string} [options.eventBusKey]\n * By default, adds a `eventBusEl_` DOM element to the target object,\n * which is used as an event bus. If the target object already has a\n * DOM element that should be used, pass its key here.\n *\n * @return {Object}\n * The target object.\n */\nfunction evented(target, options = {}) {\n const {\n eventBusKey\n } = options;\n\n // Set or create the eventBusEl_.\n if (eventBusKey) {\n if (!target[eventBusKey].nodeName) {\n throw new Error(`The eventBusKey \"${eventBusKey}\" does not refer to an element.`);\n }\n target.eventBusEl_ = target[eventBusKey];\n } else {\n target.eventBusEl_ = createEl('span', {\n className: 'vjs-event-bus'\n });\n }\n Object.assign(target, EventedMixin);\n if (target.eventedCallbacks) {\n target.eventedCallbacks.forEach(callback => {\n callback();\n });\n }\n\n // When any evented object is disposed, it removes all its listeners.\n target.on('dispose', () => {\n target.off();\n [target, target.el_, target.eventBusEl_].forEach(function (val) {\n if (val && DomData.has(val)) {\n DomData.delete(val);\n }\n });\n window$1.setTimeout(() => {\n target.eventBusEl_ = null;\n }, 0);\n });\n return target;\n}\n\n/**\n * @file mixins/stateful.js\n * @module stateful\n */\n\n/**\n * Contains methods that provide statefulness to an object which is passed\n * to {@link module:stateful}.\n *\n * @mixin StatefulMixin\n */\nconst StatefulMixin = {\n /**\n * A hash containing arbitrary keys and values representing the state of\n * the object.\n *\n * @type {Object}\n */\n state: {},\n /**\n * Set the state of an object by mutating its\n * {@link module:stateful~StatefulMixin.state|state} object in place.\n *\n * @fires module:stateful~StatefulMixin#statechanged\n * @param {Object|Function} stateUpdates\n * A new set of properties to shallow-merge into the plugin state.\n * Can be a plain object or a function returning a plain object.\n *\n * @return {Object|undefined}\n * An object containing changes that occurred. If no changes\n * occurred, returns `undefined`.\n */\n setState(stateUpdates) {\n // Support providing the `stateUpdates` state as a function.\n if (typeof stateUpdates === 'function') {\n stateUpdates = stateUpdates();\n }\n let changes;\n each(stateUpdates, (value, key) => {\n // Record the change if the value is different from what's in the\n // current state.\n if (this.state[key] !== value) {\n changes = changes || {};\n changes[key] = {\n from: this.state[key],\n to: value\n };\n }\n this.state[key] = value;\n });\n\n // Only trigger \"statechange\" if there were changes AND we have a trigger\n // function. 
This allows us to not require that the target object be an\n // evented object.\n if (changes && isEvented(this)) {\n /**\n * An event triggered on an object that is both\n * {@link module:stateful|stateful} and {@link module:evented|evented}\n * indicating that its state has changed.\n *\n * @event module:stateful~StatefulMixin#statechanged\n * @type {Object}\n * @property {Object} changes\n * A hash containing the properties that were changed and\n * the values they were changed `from` and `to`.\n */\n this.trigger({\n changes,\n type: 'statechanged'\n });\n }\n return changes;\n }\n};\n\n/**\n * Applies {@link module:stateful~StatefulMixin|StatefulMixin} to a target\n * object.\n *\n * If the target object is {@link module:evented|evented} and has a\n * `handleStateChanged` method, that method will be automatically bound to the\n * `statechanged` event on itself.\n *\n * @param {Object} target\n * The object to be made stateful.\n *\n * @param {Object} [defaultState]\n * A default set of properties to populate the newly-stateful object's\n * `state` property.\n *\n * @return {Object}\n * Returns the `target`.\n */\nfunction stateful(target, defaultState) {\n Object.assign(target, StatefulMixin);\n\n // This happens after the mixing-in because we need to replace the `state`\n // added in that step.\n target.state = Object.assign({}, target.state, defaultState);\n\n // Auto-bind the `handleStateChanged` method of the target object if it exists.\n if (typeof target.handleStateChanged === 'function' && isEvented(target)) {\n target.on('statechanged', target.handleStateChanged);\n }\n return target;\n}\n\n/**\n * @file str.js\n * @module to-lower-case\n */\n\n/**\n * Lowercase the first letter of a string.\n *\n * @param {string} string\n * String to be lowercased\n *\n * @return {string}\n * The string with a lowercased first letter\n */\nconst toLowerCase = function (string) {\n if (typeof string !== 'string') {\n return string;\n }\n return string.replace(/./, w => w.toLowerCase());\n};\n\n/**\n * Uppercase the first letter of a string.\n *\n * @param {string} string\n * String to be uppercased\n *\n * @return {string}\n * The string with an uppercased first letter\n */\nconst toTitleCase$1 = function (string) {\n if (typeof string !== 'string') {\n return string;\n }\n return string.replace(/./, w => w.toUpperCase());\n};\n\n/**\n * Compares the TitleCase versions of the two strings for equality.\n *\n * @param {string} str1\n * The first string to compare\n *\n * @param {string} str2\n * The second string to compare\n *\n * @return {boolean}\n * Whether the TitleCase versions of the strings are equal\n */\nconst titleCaseEquals = function (str1, str2) {\n return toTitleCase$1(str1) === toTitleCase$1(str2);\n};\n\nvar Str = /*#__PURE__*/Object.freeze({\n __proto__: null,\n toLowerCase: toLowerCase,\n toTitleCase: toTitleCase$1,\n titleCaseEquals: titleCaseEquals\n});\n\n/**\n * Player Component - Base class for all UI objects\n *\n * @file component.js\n */\n\n/**\n * Base class for all UI Components.\n * Components are UI objects which represent both a javascript object and an element\n * in the DOM. They can be children of other components, and can have\n * children themselves.\n *\n * Components can also use methods from {@link EventTarget}\n */\nclass Component$1 {\n /**\n * A callback that is called when a component is ready. 
Does not have any\n * parameters and any callback value will be ignored.\n *\n * @callback ReadyCallback\n * @this Component\n */\n\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of component options.\n *\n * @param {Object[]} [options.children]\n * An array of children objects to initialize this component with. Children objects have\n * a name property that will be used if more than one component of the same type needs to be\n * added.\n *\n * @param {string} [options.className]\n * A class or space separated list of classes to add the component\n *\n * @param {ReadyCallback} [ready]\n * Function that gets called when the `Component` is ready.\n */\n constructor(player, options, ready) {\n // The component might be the player itself and we can't pass `this` to super\n if (!player && this.play) {\n this.player_ = player = this; // eslint-disable-line\n } else {\n this.player_ = player;\n }\n this.isDisposed_ = false;\n\n // Hold the reference to the parent component via `addChild` method\n this.parentComponent_ = null;\n\n // Make a copy of prototype.options_ to protect against overriding defaults\n this.options_ = merge$1({}, this.options_);\n\n // Updated options with supplied options\n options = this.options_ = merge$1(this.options_, options);\n\n // Get ID from options or options element if one is supplied\n this.id_ = options.id || options.el && options.el.id;\n\n // If there was no ID from the options, generate one\n if (!this.id_) {\n // Don't require the player ID function in the case of mock players\n const id = player && player.id && player.id() || 'no_player';\n this.id_ = `${id}_component_${newGUID()}`;\n }\n this.name_ = options.name || null;\n\n // Create element if one wasn't provided in options\n if (options.el) {\n this.el_ = options.el;\n } else if (options.createEl !== false) {\n this.el_ = this.createEl();\n }\n if (options.className && this.el_) {\n options.className.split(' ').forEach(c => this.addClass(c));\n }\n\n // Remove the placeholder event methods. If the component is evented, the\n // real methods are added next\n ['on', 'off', 'one', 'any', 'trigger'].forEach(fn => {\n this[fn] = undefined;\n });\n\n // if evented is anything except false, we want to mixin in evented\n if (options.evented !== false) {\n // Make this an evented object and use `el_`, if available, as its event bus\n evented(this, {\n eventBusKey: this.el_ ? 
'el_' : null\n });\n this.handleLanguagechange = this.handleLanguagechange.bind(this);\n this.on(this.player_, 'languagechange', this.handleLanguagechange);\n }\n stateful(this, this.constructor.defaultState);\n this.children_ = [];\n this.childIndex_ = {};\n this.childNameIndex_ = {};\n this.setTimeoutIds_ = new Set();\n this.setIntervalIds_ = new Set();\n this.rafIds_ = new Set();\n this.namedRafs_ = new Map();\n this.clearingTimersOnDispose_ = false;\n\n // Add any child components in options\n if (options.initChildren !== false) {\n this.initChildren();\n }\n\n // Don't want to trigger ready here or it will go before init is actually\n // finished for all children that run this constructor\n this.ready(ready);\n if (options.reportTouchActivity !== false) {\n this.enableTouchActivity();\n }\n }\n\n // `on`, `off`, `one`, `any` and `trigger` are here so tsc includes them in definitions.\n // They are replaced or removed in the constructor\n\n /**\n * Adds an `event listener` to an instance of an `EventTarget`. An `event listener` is a\n * function that will get called when an event with a certain name gets triggered.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to call with `EventTarget`s\n */\n on(type, fn) {}\n\n /**\n * Removes an `event listener` for a specific event from an instance of `EventTarget`.\n * This makes it so that the `event listener` will no longer get called when the\n * named event happens.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} [fn]\n * The function to remove. If not specified, all listeners managed by Video.js will be removed.\n */\n off(type, fn) {}\n\n /**\n * This function will add an `event listener` that gets triggered only once. After the\n * first trigger it will get removed. This is like adding an `event listener`\n * with {@link EventTarget#on} that calls {@link EventTarget#off} on itself.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to be called once for each event name.\n */\n one(type, fn) {}\n\n /**\n * This function will add an `event listener` that gets triggered only once and is\n * removed from all events. This is like adding an array of `event listener`s\n * with {@link EventTarget#on} that calls {@link EventTarget#off} on all events the\n * first time it is triggered.\n *\n * @param {string|string[]} type\n * An event name or an array of event names.\n *\n * @param {Function} fn\n * The function to be called once for each event name.\n */\n any(type, fn) {}\n\n /**\n * This function causes an event to happen. This will then cause any `event listeners`\n * that are waiting for that event, to get called. 
If there are no `event listeners`\n * for an event then nothing will happen.\n *\n * If the name of the `Event` that is being triggered is in `EventTarget.allowedEvents_`.\n * Trigger will also call the `on` + `uppercaseEventName` function.\n *\n * Example:\n * 'click' is in `EventTarget.allowedEvents_`, so, trigger will attempt to call\n * `onClick` if it exists.\n *\n * @param {string|Event|Object} event\n * The name of the event, an `Event`, or an object with a key of type set to\n * an event name.\n *\n * @param {Object} [hash]\n * Optionally extra argument to pass through to an event listener\n */\n trigger(event, hash) {}\n\n /**\n * Dispose of the `Component` and all child components.\n *\n * @fires Component#dispose\n *\n * @param {Object} options\n * @param {Element} options.originalEl element with which to replace player element\n */\n dispose(options = {}) {\n // Bail out if the component has already been disposed.\n if (this.isDisposed_) {\n return;\n }\n if (this.readyQueue_) {\n this.readyQueue_.length = 0;\n }\n\n /**\n * Triggered when a `Component` is disposed.\n *\n * @event Component#dispose\n * @type {Event}\n *\n * @property {boolean} [bubbles=false]\n * set to false so that the dispose event does not\n * bubble up\n */\n this.trigger({\n type: 'dispose',\n bubbles: false\n });\n this.isDisposed_ = true;\n\n // Dispose all children.\n if (this.children_) {\n for (let i = this.children_.length - 1; i >= 0; i--) {\n if (this.children_[i].dispose) {\n this.children_[i].dispose();\n }\n }\n }\n\n // Delete child references\n this.children_ = null;\n this.childIndex_ = null;\n this.childNameIndex_ = null;\n this.parentComponent_ = null;\n if (this.el_) {\n // Remove element from DOM\n if (this.el_.parentNode) {\n if (options.restoreEl) {\n this.el_.parentNode.replaceChild(options.restoreEl, this.el_);\n } else {\n this.el_.parentNode.removeChild(this.el_);\n }\n }\n this.el_ = null;\n }\n\n // remove reference to the player after disposing of the element\n this.player_ = null;\n }\n\n /**\n * Determine whether or not this component has been disposed.\n *\n * @return {boolean}\n * If the component has been disposed, will be `true`. Otherwise, `false`.\n */\n isDisposed() {\n return Boolean(this.isDisposed_);\n }\n\n /**\n * Return the {@link Player} that the `Component` has attached to.\n *\n * @return { import('./player').default }\n * The player that this `Component` has attached to.\n */\n player() {\n return this.player_;\n }\n\n /**\n * Deep merge of options objects with new options.\n * > Note: When both `obj` and `options` contain properties whose values are objects.\n * The two properties get merged using {@link module:obj.merge}\n *\n * @param {Object} obj\n * The object that contains new options.\n *\n * @return {Object}\n * A new object of `this.options_` and `obj` merged together.\n */\n options(obj) {\n if (!obj) {\n return this.options_;\n }\n this.options_ = merge$1(this.options_, obj);\n return this.options_;\n }\n\n /**\n * Get the `Component`s DOM element\n *\n * @return {Element}\n * The DOM element for this `Component`.\n */\n el() {\n return this.el_;\n }\n\n /**\n * Create the `Component`s DOM element.\n *\n * @param {string} [tagName]\n * Element's DOM node type. e.g. 
'div'\n *\n * @param {Object} [properties]\n * An object of properties that should be set.\n *\n * @param {Object} [attributes]\n * An object of attributes that should be set.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl(tagName, properties, attributes) {\n return createEl(tagName, properties, attributes);\n }\n\n /**\n * Localize a string given the string in english.\n *\n * If tokens are provided, it'll try and run a simple token replacement on the provided string.\n * The tokens it looks for look like `{1}` with the index being 1-indexed into the tokens array.\n *\n * If a `defaultValue` is provided, it'll use that over `string`,\n * if a value isn't found in provided language files.\n * This is useful if you want to have a descriptive key for token replacement\n * but have a succinct localized string and not require `en.json` to be included.\n *\n * Currently, it is used for the progress bar timing.\n * ```js\n * {\n * \"progress bar timing: currentTime={1} duration={2}\": \"{1} of {2}\"\n * }\n * ```\n * It is then used like so:\n * ```js\n * this.localize('progress bar timing: currentTime={1} duration{2}',\n * [this.player_.currentTime(), this.player_.duration()],\n * '{1} of {2}');\n * ```\n *\n * Which outputs something like: `01:23 of 24:56`.\n *\n *\n * @param {string} string\n * The string to localize and the key to lookup in the language files.\n * @param {string[]} [tokens]\n * If the current item has token replacements, provide the tokens here.\n * @param {string} [defaultValue]\n * Defaults to `string`. Can be a default value to use for token replacement\n * if the lookup key is needed to be separate.\n *\n * @return {string}\n * The localized string or if no localization exists the english string.\n */\n localize(string, tokens, defaultValue = string) {\n const code = this.player_.language && this.player_.language();\n const languages = this.player_.languages && this.player_.languages();\n const language = languages && languages[code];\n const primaryCode = code && code.split('-')[0];\n const primaryLang = languages && languages[primaryCode];\n let localizedString = defaultValue;\n if (language && language[string]) {\n localizedString = language[string];\n } else if (primaryLang && primaryLang[string]) {\n localizedString = primaryLang[string];\n }\n if (tokens) {\n localizedString = localizedString.replace(/\\{(\\d+)\\}/g, function (match, index) {\n const value = tokens[index - 1];\n let ret = value;\n if (typeof value === 'undefined') {\n ret = match;\n }\n return ret;\n });\n }\n return localizedString;\n }\n\n /**\n * Handles language change for the player in components. Should be overridden by sub-components.\n *\n * @abstract\n */\n handleLanguagechange() {}\n\n /**\n * Return the `Component`s DOM element. This is where children get inserted.\n * This will usually be the the same as the element returned in {@link Component#el}.\n *\n * @return {Element}\n * The content element for this `Component`.\n */\n contentEl() {\n return this.contentEl_ || this.el_;\n }\n\n /**\n * Get this `Component`s ID\n *\n * @return {string}\n * The id of this `Component`\n */\n id() {\n return this.id_;\n }\n\n /**\n * Get the `Component`s name. 
The name gets used to reference the `Component`\n * and is set during registration.\n *\n * @return {string}\n * The name of this `Component`.\n */\n name() {\n return this.name_;\n }\n\n /**\n * Get an array of all child components\n *\n * @return {Array}\n * The children\n */\n children() {\n return this.children_;\n }\n\n /**\n * Returns the child `Component` with the given `id`.\n *\n * @param {string} id\n * The id of the child `Component` to get.\n *\n * @return {Component|undefined}\n * The child `Component` with the given `id` or undefined.\n */\n getChildById(id) {\n return this.childIndex_[id];\n }\n\n /**\n * Returns the child `Component` with the given `name`.\n *\n * @param {string} name\n * The name of the child `Component` to get.\n *\n * @return {Component|undefined}\n * The child `Component` with the given `name` or undefined.\n */\n getChild(name) {\n if (!name) {\n return;\n }\n return this.childNameIndex_[name];\n }\n\n /**\n * Returns the descendant `Component` following the given\n * descendant `names`. For instance ['foo', 'bar', 'baz'] would\n * try to get 'foo' on the current component, 'bar' on the 'foo'\n * component and 'baz' on the 'bar' component and return undefined\n * if any of those don't exist.\n *\n * @param {...string[]|...string} names\n * The name of the child `Component` to get.\n *\n * @return {Component|undefined}\n * The descendant `Component` following the given descendant\n * `names` or undefined.\n */\n getDescendant(...names) {\n // flatten array argument into the main array\n names = names.reduce((acc, n) => acc.concat(n), []);\n let currentChild = this;\n for (let i = 0; i < names.length; i++) {\n currentChild = currentChild.getChild(names[i]);\n if (!currentChild || !currentChild.getChild) {\n return;\n }\n }\n return currentChild;\n }\n\n /**\n * Adds an SVG icon element to another element or component.\n *\n * @param {string} iconName\n * The name of the icon. A list of all the icon names can be found at 'sandbox/svg-icons.html'\n *\n * @param {Element} [el=this.el()]\n * Element to add the icon to. Defaults to the current Component's element.\n *\n * @return {Element}\n * The newly created icon element.\n */\n setIcon(iconName, el = this.el()) {\n // TODO: In v9 of video.js, we will want to remove font icons entirely.\n // This means this check, as well as the others throughout the code, and\n // the unnecessary CSS for font icons, will need to be removed.\n // See https://github.com/videojs/video.js/pull/8260 as to which components\n // need updating.\n if (!this.player_.options_.experimentalSvgIcons) {\n return;\n }\n const xmlnsURL = 'http://www.w3.org/2000/svg';\n\n // The below creates an element in the format of:\n // <span aria-hidden="true" class="vjs-icon-placeholder vjs-svg-icon"><svg viewBox="0 0 512 512"><use href="#vjs-icon-{iconName}"></use></svg></span>
\n const iconContainer = createEl('span', {\n className: 'vjs-icon-placeholder vjs-svg-icon'\n }, {\n 'aria-hidden': 'true'\n });\n const svgEl = document.createElementNS(xmlnsURL, 'svg');\n svgEl.setAttributeNS(null, 'viewBox', '0 0 512 512');\n const useEl = document.createElementNS(xmlnsURL, 'use');\n svgEl.appendChild(useEl);\n useEl.setAttributeNS(null, 'href', `#vjs-icon-${iconName}`);\n iconContainer.appendChild(svgEl);\n\n // Replace a pre-existing icon if one exists.\n if (this.iconIsSet_) {\n el.replaceChild(iconContainer, el.querySelector('.vjs-icon-placeholder'));\n } else {\n el.appendChild(iconContainer);\n }\n this.iconIsSet_ = true;\n return iconContainer;\n }\n\n /**\n * Add a child `Component` inside the current `Component`.\n *\n * @param {string|Component} child\n * The name or instance of a child to add.\n *\n * @param {Object} [options={}]\n * The key/value store of options that will get passed to children of\n * the child.\n *\n * @param {number} [index=this.children_.length]\n * The index to attempt to add a child into.\n *\n *\n * @return {Component}\n * The `Component` that gets added as a child. When using a string the\n * `Component` will get created by this process.\n */\n addChild(child, options = {}, index = this.children_.length) {\n let component;\n let componentName;\n\n // If child is a string, create component with options\n if (typeof child === 'string') {\n componentName = toTitleCase$1(child);\n const componentClassName = options.componentClass || componentName;\n\n // Set name through options\n options.name = componentName;\n\n // Create a new object & element for this controls set\n // If there's no .player_, this is a player\n const ComponentClass = Component$1.getComponent(componentClassName);\n if (!ComponentClass) {\n throw new Error(`Component ${componentClassName} does not exist`);\n }\n\n // data stored directly on the videojs object may be\n // misidentified as a component to retain\n // backwards-compatibility with 4.x. 
check to make sure the\n // component class can be instantiated.\n if (typeof ComponentClass !== 'function') {\n return null;\n }\n component = new ComponentClass(this.player_ || this, options);\n\n // child is a component instance\n } else {\n component = child;\n }\n if (component.parentComponent_) {\n component.parentComponent_.removeChild(component);\n }\n this.children_.splice(index, 0, component);\n component.parentComponent_ = this;\n if (typeof component.id === 'function') {\n this.childIndex_[component.id()] = component;\n }\n\n // If a name wasn't used to create the component, check if we can use the\n // name function of the component\n componentName = componentName || component.name && toTitleCase$1(component.name());\n if (componentName) {\n this.childNameIndex_[componentName] = component;\n this.childNameIndex_[toLowerCase(componentName)] = component;\n }\n\n // Add the UI object's element to the container div (box)\n // Having an element is not required\n if (typeof component.el === 'function' && component.el()) {\n // If inserting before a component, insert before that component's element\n let refNode = null;\n if (this.children_[index + 1]) {\n // Most children are components, but the video tech is an HTML element\n if (this.children_[index + 1].el_) {\n refNode = this.children_[index + 1].el_;\n } else if (isEl(this.children_[index + 1])) {\n refNode = this.children_[index + 1];\n }\n }\n this.contentEl().insertBefore(component.el(), refNode);\n }\n\n // Return so it can stored on parent object if desired.\n return component;\n }\n\n /**\n * Remove a child `Component` from this `Component`s list of children. Also removes\n * the child `Component`s element from this `Component`s element.\n *\n * @param {Component} component\n * The child `Component` to remove.\n */\n removeChild(component) {\n if (typeof component === 'string') {\n component = this.getChild(component);\n }\n if (!component || !this.children_) {\n return;\n }\n let childFound = false;\n for (let i = this.children_.length - 1; i >= 0; i--) {\n if (this.children_[i] === component) {\n childFound = true;\n this.children_.splice(i, 1);\n break;\n }\n }\n if (!childFound) {\n return;\n }\n component.parentComponent_ = null;\n this.childIndex_[component.id()] = null;\n this.childNameIndex_[toTitleCase$1(component.name())] = null;\n this.childNameIndex_[toLowerCase(component.name())] = null;\n const compEl = component.el();\n if (compEl && compEl.parentNode === this.contentEl()) {\n this.contentEl().removeChild(component.el());\n }\n }\n\n /**\n * Add and initialize default child `Component`s based upon options.\n */\n initChildren() {\n const children = this.options_.children;\n if (children) {\n // `this` is `parent`\n const parentOptions = this.options_;\n const handleAdd = child => {\n const name = child.name;\n let opts = child.opts;\n\n // Allow options for children to be set at the parent options\n // e.g. videojs(id, { controlBar: false });\n // instead of videojs(id, { children: { controlBar: false });\n if (parentOptions[name] !== undefined) {\n opts = parentOptions[name];\n }\n\n // Allow for disabling default components\n // e.g. 
options['children']['posterImage'] = false\n if (opts === false) {\n return;\n }\n\n // Allow options to be passed as a simple boolean if no configuration\n // is necessary.\n if (opts === true) {\n opts = {};\n }\n\n // We also want to pass the original player options\n // to each component as well so they don't need to\n // reach back into the player for options later.\n opts.playerOptions = this.options_.playerOptions;\n\n // Create and add the child component.\n // Add a direct reference to the child by name on the parent instance.\n // If two of the same component are used, different names should be supplied\n // for each\n const newChild = this.addChild(name, opts);\n if (newChild) {\n this[name] = newChild;\n }\n };\n\n // Allow for an array of children details to be passed in the options\n let workingChildren;\n const Tech = Component$1.getComponent('Tech');\n if (Array.isArray(children)) {\n workingChildren = children;\n } else {\n workingChildren = Object.keys(children);\n }\n workingChildren\n // children that are in this.options_ but also in workingChildren would\n // give us extra children we do not want. So, we want to filter them out.\n .concat(Object.keys(this.options_).filter(function (child) {\n return !workingChildren.some(function (wchild) {\n if (typeof wchild === 'string') {\n return child === wchild;\n }\n return child === wchild.name;\n });\n })).map(child => {\n let name;\n let opts;\n if (typeof child === 'string') {\n name = child;\n opts = children[name] || this.options_[name] || {};\n } else {\n name = child.name;\n opts = child;\n }\n return {\n name,\n opts\n };\n }).filter(child => {\n // we have to make sure that child.name isn't in the techOrder since\n // techs are registered as Components but aren't compatible\n // See https://github.com/videojs/video.js/issues/2772\n const c = Component$1.getComponent(child.opts.componentClass || toTitleCase$1(child.name));\n return c && !Tech.isTech(c);\n }).forEach(handleAdd);\n }\n }\n\n /**\n * Builds the default DOM class name. 
Should be overridden by sub-components.\n *\n * @return {string}\n * The DOM class name for this object.\n *\n * @abstract\n */\n buildCSSClass() {\n // Child classes can include a function that does:\n // return 'CLASS NAME' + this._super();\n return '';\n }\n\n /**\n * Bind a listener to the component's ready state.\n * Different from event listeners in that if the ready event has already happened\n * it will trigger the function immediately.\n *\n * @param {ReadyCallback} fn\n * Function that gets called when the `Component` is ready.\n *\n * @return {Component}\n * Returns itself; method can be chained.\n */\n ready(fn, sync = false) {\n if (!fn) {\n return;\n }\n if (!this.isReady_) {\n this.readyQueue_ = this.readyQueue_ || [];\n this.readyQueue_.push(fn);\n return;\n }\n if (sync) {\n fn.call(this);\n } else {\n // Call the function asynchronously by default for consistency\n this.setTimeout(fn, 1);\n }\n }\n\n /**\n * Trigger all the ready listeners for this `Component`.\n *\n * @fires Component#ready\n */\n triggerReady() {\n this.isReady_ = true;\n\n // Ensure ready is triggered asynchronously\n this.setTimeout(function () {\n const readyQueue = this.readyQueue_;\n\n // Reset Ready Queue\n this.readyQueue_ = [];\n if (readyQueue && readyQueue.length > 0) {\n readyQueue.forEach(function (fn) {\n fn.call(this);\n }, this);\n }\n\n // Allow for using event listeners also\n /**\n * Triggered when a `Component` is ready.\n *\n * @event Component#ready\n * @type {Event}\n */\n this.trigger('ready');\n }, 1);\n }\n\n /**\n * Find a single DOM element matching a `selector`. This can be within the `Component`s\n * `contentEl()` or another custom context.\n *\n * @param {string} selector\n * A valid CSS selector, which will be passed to `querySelector`.\n *\n * @param {Element|string} [context=this.contentEl()]\n * A DOM element within which to query. Can also be a selector string in\n * which case the first matching element will get used as context. If\n * missing `this.contentEl()` gets used. If `this.contentEl()` returns\n * nothing it falls back to `document`.\n *\n * @return {Element|null}\n * the dom element that was found, or null\n *\n * @see [Information on CSS Selectors](https://developer.mozilla.org/en-US/docs/Web/Guide/CSS/Getting_Started/Selectors)\n */\n $(selector, context) {\n return $(selector, context || this.contentEl());\n }\n\n /**\n * Finds all DOM element matching a `selector`. This can be within the `Component`s\n * `contentEl()` or another custom context.\n *\n * @param {string} selector\n * A valid CSS selector, which will be passed to `querySelectorAll`.\n *\n * @param {Element|string} [context=this.contentEl()]\n * A DOM element within which to query. Can also be a selector string in\n * which case the first matching element will get used as context. If\n * missing `this.contentEl()` gets used. 
If `this.contentEl()` returns\n * nothing it falls back to `document`.\n *\n * @return {NodeList}\n * a list of dom elements that were found\n *\n * @see [Information on CSS Selectors](https://developer.mozilla.org/en-US/docs/Web/Guide/CSS/Getting_Started/Selectors)\n */\n $$(selector, context) {\n return $$(selector, context || this.contentEl());\n }\n\n /**\n * Check if a component's element has a CSS class name.\n *\n * @param {string} classToCheck\n * CSS class name to check.\n *\n * @return {boolean}\n * - True if the `Component` has the class.\n * - False if the `Component` does not have the class`\n */\n hasClass(classToCheck) {\n return hasClass(this.el_, classToCheck);\n }\n\n /**\n * Add a CSS class name to the `Component`s element.\n *\n * @param {...string} classesToAdd\n * One or more CSS class name to add.\n */\n addClass(...classesToAdd) {\n addClass(this.el_, ...classesToAdd);\n }\n\n /**\n * Remove a CSS class name from the `Component`s element.\n *\n * @param {...string} classesToRemove\n * One or more CSS class name to remove.\n */\n removeClass(...classesToRemove) {\n removeClass(this.el_, ...classesToRemove);\n }\n\n /**\n * Add or remove a CSS class name from the component's element.\n * - `classToToggle` gets added when {@link Component#hasClass} would return false.\n * - `classToToggle` gets removed when {@link Component#hasClass} would return true.\n *\n * @param {string} classToToggle\n * The class to add or remove based on (@link Component#hasClass}\n *\n * @param {boolean|Dom~predicate} [predicate]\n * An {@link Dom~predicate} function or a boolean\n */\n toggleClass(classToToggle, predicate) {\n toggleClass(this.el_, classToToggle, predicate);\n }\n\n /**\n * Show the `Component`s element if it is hidden by removing the\n * 'vjs-hidden' class name from it.\n */\n show() {\n this.removeClass('vjs-hidden');\n }\n\n /**\n * Hide the `Component`s element if it is currently showing by adding the\n * 'vjs-hidden` class name to it.\n */\n hide() {\n this.addClass('vjs-hidden');\n }\n\n /**\n * Lock a `Component`s element in its visible state by adding the 'vjs-lock-showing'\n * class name to it. Used during fadeIn/fadeOut.\n *\n * @private\n */\n lockShowing() {\n this.addClass('vjs-lock-showing');\n }\n\n /**\n * Unlock a `Component`s element from its visible state by removing the 'vjs-lock-showing'\n * class name from it. 
Used during fadeIn/fadeOut.\n *\n * @private\n */\n unlockShowing() {\n this.removeClass('vjs-lock-showing');\n }\n\n /**\n * Get the value of an attribute on the `Component`s element.\n *\n * @param {string} attribute\n * Name of the attribute to get the value from.\n *\n * @return {string|null}\n * - The value of the attribute that was asked for.\n * - Can be an empty string on some browsers if the attribute does not exist\n * or has no value\n * - Most browsers will return null if the attribute does not exist or has\n * no value.\n *\n * @see [DOM API]{@link https://developer.mozilla.org/en-US/docs/Web/API/Element/getAttribute}\n */\n getAttribute(attribute) {\n return getAttribute(this.el_, attribute);\n }\n\n /**\n * Set the value of an attribute on the `Component`'s element\n *\n * @param {string} attribute\n * Name of the attribute to set.\n *\n * @param {string} value\n * Value to set the attribute to.\n *\n * @see [DOM API]{@link https://developer.mozilla.org/en-US/docs/Web/API/Element/setAttribute}\n */\n setAttribute(attribute, value) {\n setAttribute(this.el_, attribute, value);\n }\n\n /**\n * Remove an attribute from the `Component`s element.\n *\n * @param {string} attribute\n * Name of the attribute to remove.\n *\n * @see [DOM API]{@link https://developer.mozilla.org/en-US/docs/Web/API/Element/removeAttribute}\n */\n removeAttribute(attribute) {\n removeAttribute(this.el_, attribute);\n }\n\n /**\n * Get or set the width of the component based upon the CSS styles.\n * See {@link Component#dimension} for more detailed information.\n *\n * @param {number|string} [num]\n * The width that you want to set postfixed with '%', 'px' or nothing.\n *\n * @param {boolean} [skipListeners]\n * Skip the componentresize event trigger\n *\n * @return {number|undefined}\n * The width when getting, zero if there is no width\n */\n width(num, skipListeners) {\n return this.dimension('width', num, skipListeners);\n }\n\n /**\n * Get or set the height of the component based upon the CSS styles.\n * See {@link Component#dimension} for more detailed information.\n *\n * @param {number|string} [num]\n * The height that you want to set postfixed with '%', 'px' or nothing.\n *\n * @param {boolean} [skipListeners]\n * Skip the componentresize event trigger\n *\n * @return {number|undefined}\n * The height when getting, zero if there is no height\n */\n height(num, skipListeners) {\n return this.dimension('height', num, skipListeners);\n }\n\n /**\n * Set both the width and height of the `Component` element at the same time.\n *\n * @param {number|string} width\n * Width to set the `Component`s element to.\n *\n * @param {number|string} height\n * Height to set the `Component`s element to.\n */\n dimensions(width, height) {\n // Skip componentresize listeners on width for optimization\n this.width(width, true);\n this.height(height);\n }\n\n /**\n * Get or set width or height of the `Component` element. This is the shared code\n * for the {@link Component#width} and {@link Component#height}.\n *\n * Things to know:\n * - If the width or height in an number this will return the number postfixed with 'px'.\n * - If the width/height is a percent this will return the percent postfixed with '%'\n * - Hidden elements have a width of 0 with `window.getComputedStyle`. 
This function\n * defaults to the `Component`s `style.width` and falls back to `window.getComputedStyle`.\n * See [this]{@link http://www.foliotek.com/devblog/getting-the-width-of-a-hidden-element-with-jquery-using-width/}\n * for more information\n * - If you want the computed style of the component, use {@link Component#currentWidth}\n * and {@link Component#currentHeight}\n *\n * @fires Component#componentresize\n *\n * @param {string} widthOrHeight\n * 'width' or 'height'\n *\n * @param {number|string} [num]\n * New dimension\n *\n * @param {boolean} [skipListeners]\n * Skip componentresize event trigger\n *\n * @return {number|undefined}\n * The dimension when getting or 0 if unset\n */\n dimension(widthOrHeight, num, skipListeners) {\n if (num !== undefined) {\n // Set to zero if null or literally NaN (NaN !== NaN)\n if (num === null || num !== num) {\n num = 0;\n }\n\n // Check if using css width/height (% or px) and adjust\n if (('' + num).indexOf('%') !== -1 || ('' + num).indexOf('px') !== -1) {\n this.el_.style[widthOrHeight] = num;\n } else if (num === 'auto') {\n this.el_.style[widthOrHeight] = '';\n } else {\n this.el_.style[widthOrHeight] = num + 'px';\n }\n\n // skipListeners allows us to avoid triggering the resize event when setting both width and height\n if (!skipListeners) {\n /**\n * Triggered when a component is resized.\n *\n * @event Component#componentresize\n * @type {Event}\n */\n this.trigger('componentresize');\n }\n return;\n }\n\n // Not setting a value, so getting it\n // Make sure element exists\n if (!this.el_) {\n return 0;\n }\n\n // Get dimension value from style\n const val = this.el_.style[widthOrHeight];\n const pxIndex = val.indexOf('px');\n if (pxIndex !== -1) {\n // Return the pixel value with no 'px'\n return parseInt(val.slice(0, pxIndex), 10);\n }\n\n // No px so using % or no style was set, so falling back to offsetWidth/height\n // If component has display:none, offset will return 0\n // TODO: handle display:none and no dimension style using px\n return parseInt(this.el_['offset' + toTitleCase$1(widthOrHeight)], 10);\n }\n\n /**\n * Get the computed width or the height of the component's element.\n *\n * Uses `window.getComputedStyle`.\n *\n * @param {string} widthOrHeight\n * A string containing 'width' or 'height'. Whichever one you want to get.\n *\n * @return {number}\n * The dimension that gets asked for or 0 if nothing was set\n * for that dimension.\n */\n currentDimension(widthOrHeight) {\n let computedWidthOrHeight = 0;\n if (widthOrHeight !== 'width' && widthOrHeight !== 'height') {\n throw new Error('currentDimension only accepts width or height value');\n }\n computedWidthOrHeight = computedStyle(this.el_, widthOrHeight);\n\n // remove 'px' from variable and parse as integer\n computedWidthOrHeight = parseFloat(computedWidthOrHeight);\n\n // if the computed value is still 0, it's possible that the browser is lying\n // and we want to check the offset values.\n // This code also runs wherever getComputedStyle doesn't exist.\n if (computedWidthOrHeight === 0 || isNaN(computedWidthOrHeight)) {\n const rule = `offset${toTitleCase$1(widthOrHeight)}`;\n computedWidthOrHeight = this.el_[rule];\n }\n return computedWidthOrHeight;\n }\n\n /**\n * An object that contains width and height values of the `Component`s\n * computed style. 
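For example (a hedged sketch with made-up numbers), {@link Component#currentDimensions} could return `{width: 640, height: 360}` for a 640x360 element. 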
Uses `window.getComputedStyle`.\n *\n * @typedef {Object} Component~DimensionObject\n *\n * @property {number} width\n * The width of the `Component`s computed style.\n *\n * @property {number} height\n * The height of the `Component`s computed style.\n */\n\n /**\n * Get an object that contains computed width and height values of the\n * component's element.\n *\n * Uses `window.getComputedStyle`.\n *\n * @return {Component~DimensionObject}\n * The computed dimensions of the component's element.\n */\n currentDimensions() {\n return {\n width: this.currentDimension('width'),\n height: this.currentDimension('height')\n };\n }\n\n /**\n * Get the computed width of the component's element.\n *\n * Uses `window.getComputedStyle`.\n *\n * @return {number}\n * The computed width of the component's element.\n */\n currentWidth() {\n return this.currentDimension('width');\n }\n\n /**\n * Get the computed height of the component's element.\n *\n * Uses `window.getComputedStyle`.\n *\n * @return {number}\n * The computed height of the component's element.\n */\n currentHeight() {\n return this.currentDimension('height');\n }\n\n /**\n * Set the focus to this component\n */\n focus() {\n this.el_.focus();\n }\n\n /**\n * Remove the focus from this component\n */\n blur() {\n this.el_.blur();\n }\n\n /**\n * When this Component receives a `keydown` event which it does not process,\n * it passes the event to the Player for handling.\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n */\n handleKeyDown(event) {\n if (this.player_) {\n // We only stop propagation here because we want unhandled events to fall\n // back to the browser. Exclude Tab for focus trapping.\n if (!keycode.isEventKey(event, 'Tab')) {\n event.stopPropagation();\n }\n this.player_.handleKeyDown(event);\n }\n }\n\n /**\n * Many components used to have a `handleKeyPress` method, which was poorly\n * named because it listened to a `keydown` event. This method name now\n * delegates to `handleKeyDown`. This means anyone calling `handleKeyPress`\n * will not see their method calls stop working.\n *\n * @param {KeyboardEvent} event\n * The event that caused this function to be called.\n */\n handleKeyPress(event) {\n this.handleKeyDown(event);\n }\n\n /**\n * Emit a 'tap' events when touch event support gets detected. This gets used to\n * support toggling the controls through a tap on the video. 
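A hedged usage sketch (the listener body is illustrative only; `reportUserActivity` is the player method referenced by {@link Component#enableTouchActivity}):\n *\n * @example\n * // in a Component subclass constructor\n * this.emitTapEvents();\n * this.on('tap', () => this.player().reportUserActivity());\n *\n * 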
They get enabled\n * because every sub-component would have extra overhead otherwise.\n *\n * @protected\n * @fires Component#tap\n * @listens Component#touchstart\n * @listens Component#touchmove\n * @listens Component#touchleave\n * @listens Component#touchcancel\n * @listens Component#touchend\n */\n emitTapEvents() {\n // Track the start time so we can determine how long the touch lasted\n let touchStart = 0;\n let firstTouch = null;\n\n // Maximum movement allowed during a touch event to still be considered a tap\n // Other popular libs use anywhere from 2 (hammer.js) to 15,\n // so 10 seems like a nice, round number.\n const tapMovementThreshold = 10;\n\n // The maximum length a touch can be while still being considered a tap\n const touchTimeThreshold = 200;\n let couldBeTap;\n this.on('touchstart', function (event) {\n // If more than one finger, don't consider treating this as a click\n if (event.touches.length === 1) {\n // Copy pageX/pageY from the object\n firstTouch = {\n pageX: event.touches[0].pageX,\n pageY: event.touches[0].pageY\n };\n // Record start time so we can detect a tap vs. \"touch and hold\"\n touchStart = window$1.performance.now();\n // Reset couldBeTap tracking\n couldBeTap = true;\n }\n });\n this.on('touchmove', function (event) {\n // If more than one finger, don't consider treating this as a click\n if (event.touches.length > 1) {\n couldBeTap = false;\n } else if (firstTouch) {\n // Some devices will throw touchmoves for all but the slightest of taps.\n // So, if we moved only a small distance, this could still be a tap\n const xdiff = event.touches[0].pageX - firstTouch.pageX;\n const ydiff = event.touches[0].pageY - firstTouch.pageY;\n const touchDistance = Math.sqrt(xdiff * xdiff + ydiff * ydiff);\n if (touchDistance > tapMovementThreshold) {\n couldBeTap = false;\n }\n }\n });\n const noTap = function () {\n couldBeTap = false;\n };\n\n // TODO: Listen to the original target. http://youtu.be/DujfpXOKUp8?t=13m8s\n this.on('touchleave', noTap);\n this.on('touchcancel', noTap);\n\n // When the touch ends, measure how long it took and trigger the appropriate\n // event\n this.on('touchend', function (event) {\n firstTouch = null;\n // Proceed only if the touchmove/leave/cancel event didn't happen\n if (couldBeTap === true) {\n // Measure how long the touch lasted\n const touchTime = window$1.performance.now() - touchStart;\n\n // Make sure the touch was less than the threshold to be considered a tap\n if (touchTime < touchTimeThreshold) {\n // Don't let browser turn this into a click\n event.preventDefault();\n /**\n * Triggered when a `Component` is tapped.\n *\n * @event Component#tap\n * @type {MouseEvent}\n */\n this.trigger('tap');\n // It may be good to copy the touchend event object and change the\n // type to tap, if the other event properties aren't exact after\n // Events.fixEvent runs (e.g. event.target)\n }\n }\n });\n }\n\n /**\n * This function reports user activity whenever touch events happen. This can get\n * turned off by any sub-components that wants touch events to act another way.\n *\n * Report user touch activity when touch events occur. User activity gets used to\n * determine when controls should show/hide. It is simple when it comes to mouse\n * events, because any mouse event should show the controls. So we capture mouse\n * events that bubble up to the player and report activity when that happens.\n * With touch events it isn't as easy as `touchstart` and `touchend` toggle player\n * controls. 
So touch events can't help us at the player level either.\n *\n * User activity gets checked asynchronously. So what could happen is a tap event\n * on the video turns the controls off. Then the `touchend` event bubbles up to\n * the player. Which, if it reported user activity, would turn the controls right\n * back on. We also don't want to completely block touch events from bubbling up.\n * Furthermore a `touchmove` event and anything other than a tap, should not turn\n * controls back on.\n *\n * @listens Component#touchstart\n * @listens Component#touchmove\n * @listens Component#touchend\n * @listens Component#touchcancel\n */\n enableTouchActivity() {\n // Don't continue if the root player doesn't support reporting user activity\n if (!this.player() || !this.player().reportUserActivity) {\n return;\n }\n\n // listener for reporting that the user is active\n const report = bind_(this.player(), this.player().reportUserActivity);\n let touchHolding;\n this.on('touchstart', function () {\n report();\n // For as long as the they are touching the device or have their mouse down,\n // we consider them active even if they're not moving their finger or mouse.\n // So we want to continue to update that they are active\n this.clearInterval(touchHolding);\n // report at the same interval as activityCheck\n touchHolding = this.setInterval(report, 250);\n });\n const touchEnd = function (event) {\n report();\n // stop the interval that maintains activity if the touch is holding\n this.clearInterval(touchHolding);\n };\n this.on('touchmove', report);\n this.on('touchend', touchEnd);\n this.on('touchcancel', touchEnd);\n }\n\n /**\n * A callback that has no parameters and is bound into `Component`s context.\n *\n * @callback Component~GenericCallback\n * @this Component\n */\n\n /**\n * Creates a function that runs after an `x` millisecond timeout. This function is a\n * wrapper around `window.setTimeout`. There are a few reasons to use this one\n * instead though:\n * 1. It gets cleared via {@link Component#clearTimeout} when\n * {@link Component#dispose} gets called.\n * 2. The function callback will gets turned into a {@link Component~GenericCallback}\n *\n * > Note: You can't use `window.clearTimeout` on the id returned by this function. This\n * will cause its dispose listener not to get cleaned up! Please use\n * {@link Component#clearTimeout} or {@link Component#dispose} instead.\n *\n * @param {Component~GenericCallback} fn\n * The function that will be run after `timeout`.\n *\n * @param {number} timeout\n * Timeout in milliseconds to delay before executing the specified function.\n *\n * @return {number}\n * Returns a timeout ID that gets used to identify the timeout. It can also\n * get used in {@link Component#clearTimeout} to clear the timeout that\n * was set.\n *\n * @listens Component#dispose\n * @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setTimeout}\n */\n setTimeout(fn, timeout) {\n // declare as variables so they are properly available in timeout function\n // eslint-disable-next-line\n var timeoutId;\n fn = bind_(this, fn);\n this.clearTimersOnDispose_();\n timeoutId = window$1.setTimeout(() => {\n if (this.setTimeoutIds_.has(timeoutId)) {\n this.setTimeoutIds_.delete(timeoutId);\n }\n fn();\n }, timeout);\n this.setTimeoutIds_.add(timeoutId);\n return timeoutId;\n }\n\n /**\n * Clears a timeout that gets created via `window.setTimeout` or\n * {@link Component#setTimeout}. 
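For example (a hedged sketch; `doSomething` is a hypothetical callback): `const id = this.setTimeout(doSomething, 500); this.clearTimeout(id);`. 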
If you set a timeout via {@link Component#setTimeout}\n * use this function instead of `window.clearTimeout`. If you don't, your dispose\n * listener will not get cleaned up until {@link Component#dispose}!\n *\n * @param {number} timeoutId\n * The id of the timeout to clear. The return value of\n * {@link Component#setTimeout} or `window.setTimeout`.\n *\n * @return {number}\n * Returns the timeout id that was cleared.\n *\n * @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/clearTimeout}\n */\n clearTimeout(timeoutId) {\n if (this.setTimeoutIds_.has(timeoutId)) {\n this.setTimeoutIds_.delete(timeoutId);\n window$1.clearTimeout(timeoutId);\n }\n return timeoutId;\n }\n\n /**\n * Creates a function that gets run every `x` milliseconds. This function is a wrapper\n * around `window.setInterval`. There are a few reasons to use this one instead though.\n * 1. It gets cleared via {@link Component#clearInterval} when\n * {@link Component#dispose} gets called.\n * 2. The function callback will be a {@link Component~GenericCallback}\n *\n * @param {Component~GenericCallback} fn\n * The function to run every `x` milliseconds.\n *\n * @param {number} interval\n * Execute the specified function every `x` milliseconds.\n *\n * @return {number}\n * Returns an id that can be used to identify the interval. It can also be used in\n * {@link Component#clearInterval} to clear the interval.\n *\n * @listens Component#dispose\n * @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setInterval}\n */\n setInterval(fn, interval) {\n fn = bind_(this, fn);\n this.clearTimersOnDispose_();\n const intervalId = window$1.setInterval(fn, interval);\n this.setIntervalIds_.add(intervalId);\n return intervalId;\n }\n\n /**\n * Clears an interval that gets created via `window.setInterval` or\n * {@link Component#setInterval}. If you set an interval via {@link Component#setInterval}\n * use this function instead of `window.clearInterval`. If you don't, your dispose\n * listener will not get cleaned up until {@link Component#dispose}!\n *\n * @param {number} intervalId\n * The id of the interval to clear. The return value of\n * {@link Component#setInterval} or `window.setInterval`.\n *\n * @return {number}\n * Returns the interval id that was cleared.\n *\n * @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/clearInterval}\n */\n clearInterval(intervalId) {\n if (this.setIntervalIds_.has(intervalId)) {\n this.setIntervalIds_.delete(intervalId);\n window$1.clearInterval(intervalId);\n }\n return intervalId;\n }\n\n /**\n * Queues up a callback to be passed to requestAnimationFrame (rAF), but\n * with a few extra bonuses:\n *\n * - Supports browsers that do not support rAF by falling back to\n * {@link Component#setTimeout}.\n *\n * - The callback is turned into a {@link Component~GenericCallback} (i.e.\n * bound to the component).\n *\n * - Automatic cancellation of the rAF callback is handled if the component\n * is disposed before it is called.\n *\n * @param {Component~GenericCallback} fn\n * A function that will be bound to this component and executed just\n * before the browser's next repaint.\n *\n * @return {number}\n * Returns an rAF ID that gets used to identify the timeout. 
It can\n * also be used in {@link Component#cancelAnimationFrame} to cancel\n * the animation frame callback.\n *\n * @listens Component#dispose\n * @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/window/requestAnimationFrame}\n */\n requestAnimationFrame(fn) {\n this.clearTimersOnDispose_();\n\n // declare as variables so they are properly available in rAF function\n // eslint-disable-next-line\n var id;\n fn = bind_(this, fn);\n id = window$1.requestAnimationFrame(() => {\n if (this.rafIds_.has(id)) {\n this.rafIds_.delete(id);\n }\n fn();\n });\n this.rafIds_.add(id);\n return id;\n }\n\n /**\n * Request an animation frame, but only one named animation\n * frame will be queued. Another will never be added until\n * the previous one finishes.\n *\n * @param {string} name\n * The name to give this requestAnimationFrame\n *\n * @param {Component~GenericCallback} fn\n * A function that will be bound to this component and executed just\n * before the browser's next repaint.\n */\n requestNamedAnimationFrame(name, fn) {\n if (this.namedRafs_.has(name)) {\n return;\n }\n this.clearTimersOnDispose_();\n fn = bind_(this, fn);\n const id = this.requestAnimationFrame(() => {\n fn();\n if (this.namedRafs_.has(name)) {\n this.namedRafs_.delete(name);\n }\n });\n this.namedRafs_.set(name, id);\n return name;\n }\n\n /**\n * Cancels a current named animation frame if it exists.\n *\n * @param {string} name\n * The name of the requestAnimationFrame to cancel.\n */\n cancelNamedAnimationFrame(name) {\n if (!this.namedRafs_.has(name)) {\n return;\n }\n this.cancelAnimationFrame(this.namedRafs_.get(name));\n this.namedRafs_.delete(name);\n }\n\n /**\n * Cancels a queued callback passed to {@link Component#requestAnimationFrame}\n * (rAF).\n *\n * If you queue an rAF callback via {@link Component#requestAnimationFrame},\n * use this function instead of `window.cancelAnimationFrame`. If you don't,\n * your dispose listener will not get cleaned up until {@link Component#dispose}!\n *\n * @param {number} id\n * The rAF ID to clear. The return value of {@link Component#requestAnimationFrame}.\n *\n * @return {number}\n * Returns the rAF ID that was cleared.\n *\n * @see [Similar to]{@link https://developer.mozilla.org/en-US/docs/Web/API/window/cancelAnimationFrame}\n */\n cancelAnimationFrame(id) {\n if (this.rafIds_.has(id)) {\n this.rafIds_.delete(id);\n window$1.cancelAnimationFrame(id);\n }\n return id;\n }\n\n /**\n * A function to setup `requestAnimationFrame`, `setTimeout`,\n * and `setInterval`, clearing on dispose.\n *\n * > Previously each timer added and removed dispose listeners on it's own.\n * For better performance it was decided to batch them all, and use `Set`s\n * to track outstanding timer ids.\n *\n * @private\n */\n clearTimersOnDispose_() {\n if (this.clearingTimersOnDispose_) {\n return;\n }\n this.clearingTimersOnDispose_ = true;\n this.one('dispose', () => {\n [['namedRafs_', 'cancelNamedAnimationFrame'], ['rafIds_', 'cancelAnimationFrame'], ['setTimeoutIds_', 'clearTimeout'], ['setIntervalIds_', 'clearInterval']].forEach(([idName, cancelName]) => {\n // for a `Set` key will actually be the value again\n // so forEach((val, val) =>` but for maps we want to use\n // the key.\n this[idName].forEach((val, key) => this[cancelName](key));\n });\n this.clearingTimersOnDispose_ = false;\n });\n }\n\n /**\n * Register a `Component` with `videojs` given the name and the component.\n *\n * > NOTE: {@link Tech}s should not be registered as a `Component`. 
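(By contrast, an ordinary UI component would be registered along the lines of `Component.registerComponent('MyTitleBar', MyTitleBar)`; `MyTitleBar` is a hypothetical Component subclass used only for illustration.) 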
{@link Tech}s\n * should be registered using {@link Tech.registerTech} or\n * {@link videojs:videojs.registerTech}.\n *\n * > NOTE: This function can also be seen on videojs as\n * {@link videojs:videojs.registerComponent}.\n *\n * @param {string} name\n * The name of the `Component` to register.\n *\n * @param {Component} ComponentToRegister\n * The `Component` class to register.\n *\n * @return {Component}\n * The `Component` that was registered.\n */\n static registerComponent(name, ComponentToRegister) {\n if (typeof name !== 'string' || !name) {\n throw new Error(`Illegal component name, \"${name}\"; must be a non-empty string.`);\n }\n const Tech = Component$1.getComponent('Tech');\n\n // We need to make sure this check is only done if Tech has been registered.\n const isTech = Tech && Tech.isTech(ComponentToRegister);\n const isComp = Component$1 === ComponentToRegister || Component$1.prototype.isPrototypeOf(ComponentToRegister.prototype);\n if (isTech || !isComp) {\n let reason;\n if (isTech) {\n reason = 'techs must be registered using Tech.registerTech()';\n } else {\n reason = 'must be a Component subclass';\n }\n throw new Error(`Illegal component, \"${name}\"; ${reason}.`);\n }\n name = toTitleCase$1(name);\n if (!Component$1.components_) {\n Component$1.components_ = {};\n }\n const Player = Component$1.getComponent('Player');\n if (name === 'Player' && Player && Player.players) {\n const players = Player.players;\n const playerNames = Object.keys(players);\n\n // If we have players that were disposed, then their name will still be\n // in Players.players. So, we must loop through and verify that the value\n // for each item is not null. This allows registration of the Player component\n // after all players have been disposed or before any were created.\n if (players && playerNames.length > 0 && playerNames.map(pname => players[pname]).every(Boolean)) {\n throw new Error('Can not register Player component after player has been created.');\n }\n }\n Component$1.components_[name] = ComponentToRegister;\n Component$1.components_[toLowerCase(name)] = ComponentToRegister;\n return ComponentToRegister;\n }\n\n /**\n * Get a `Component` based on the name it was registered with.\n *\n * @param {string} name\n * The Name of the component to get.\n *\n * @return {typeof Component}\n * The `Component` that got registered under the given name.\n */\n static getComponent(name) {\n if (!name || !Component$1.components_) {\n return;\n }\n return Component$1.components_[name];\n }\n}\nComponent$1.registerComponent('Component', Component$1);\n\n/**\n * @file time.js\n * @module time\n */\n\n/**\n * Returns the time for the specified index at the start or end\n * of a TimeRange object.\n *\n * @typedef {Function} TimeRangeIndex\n *\n * @param {number} [index=0]\n * The range number to return the time for.\n *\n * @return {number}\n * The time offset at the specified index.\n *\n * @deprecated The index argument must be provided.\n * In the future, leaving it out will throw an error.\n */\n\n/**\n * An object that contains ranges of time, which mimics {@link TimeRanges}.\n *\n * @typedef {Object} TimeRange\n *\n * @property {number} length\n * The number of time ranges represented by this object.\n *\n * @property {module:time~TimeRangeIndex} start\n * Returns the time offset at which a specified time range begins.\n *\n * @property {module:time~TimeRangeIndex} end\n * Returns the time offset at which a specified time range ends.\n *\n * @see 
https://developer.mozilla.org/en-US/docs/Web/API/TimeRanges\n */\n\n/**\n * Check if any of the time ranges are over the maximum index.\n *\n * @private\n * @param {string} fnName\n * The function name to use for logging\n *\n * @param {number} index\n * The index to check\n *\n * @param {number} maxIndex\n * The maximum possible index\n *\n * @throws {Error} if the timeRanges provided are over the maxIndex\n */\nfunction rangeCheck(fnName, index, maxIndex) {\n if (typeof index !== 'number' || index < 0 || index > maxIndex) {\n throw new Error(`Failed to execute '${fnName}' on 'TimeRanges': The index provided (${index}) is non-numeric or out of bounds (0-${maxIndex}).`);\n }\n}\n\n/**\n * Get the time for the specified index at the start or end\n * of a TimeRange object.\n *\n * @private\n * @param {string} fnName\n * The function name to use for logging\n *\n * @param {string} valueIndex\n * The property that should be used to get the time. should be\n * 'start' or 'end'\n *\n * @param {Array} ranges\n * An array of time ranges\n *\n * @param {Array} [rangeIndex=0]\n * The index to start the search at\n *\n * @return {number}\n * The time that offset at the specified index.\n *\n * @deprecated rangeIndex must be set to a value, in the future this will throw an error.\n * @throws {Error} if rangeIndex is more than the length of ranges\n */\nfunction getRange(fnName, valueIndex, ranges, rangeIndex) {\n rangeCheck(fnName, rangeIndex, ranges.length - 1);\n return ranges[rangeIndex][valueIndex];\n}\n\n/**\n * Create a time range object given ranges of time.\n *\n * @private\n * @param {Array} [ranges]\n * An array of time ranges.\n *\n * @return {TimeRange}\n */\nfunction createTimeRangesObj(ranges) {\n let timeRangesObj;\n if (ranges === undefined || ranges.length === 0) {\n timeRangesObj = {\n length: 0,\n start() {\n throw new Error('This TimeRanges object is empty');\n },\n end() {\n throw new Error('This TimeRanges object is empty');\n }\n };\n } else {\n timeRangesObj = {\n length: ranges.length,\n start: getRange.bind(null, 'start', 0, ranges),\n end: getRange.bind(null, 'end', 1, ranges)\n };\n }\n if (window$1.Symbol && window$1.Symbol.iterator) {\n timeRangesObj[window$1.Symbol.iterator] = () => (ranges || []).values();\n }\n return timeRangesObj;\n}\n\n/**\n * Create a `TimeRange` object which mimics an\n * {@link https://developer.mozilla.org/en-US/docs/Web/API/TimeRanges|HTML5 TimeRanges instance}.\n *\n * @param {number|Array[]} start\n * The start of a single range (a number) or an array of ranges (an\n * array of arrays of two numbers each).\n *\n * @param {number} end\n * The end of a single range. Cannot be used with the array form of\n * the `start` argument.\n *\n * @return {TimeRange}\n */\nfunction createTimeRanges$1(start, end) {\n if (Array.isArray(start)) {\n return createTimeRangesObj(start);\n } else if (start === undefined || end === undefined) {\n return createTimeRangesObj();\n }\n return createTimeRangesObj([[start, end]]);\n}\n\n/**\n * Format seconds as a time string, H:MM:SS or M:SS. Supplying a guide (in\n * seconds) will force a number of leading zeros to cover the length of the\n * guide.\n *\n * @private\n * @param {number} seconds\n * Number of seconds to be turned into a string\n *\n * @param {number} guide\n * Number (in seconds) to model the string after\n *\n * @return {string}\n * Time formatted as H:MM:SS or M:SS\n */\nconst defaultImplementation = function (seconds, guide) {\n seconds = seconds < 0 ? 
0 : seconds;\n let s = Math.floor(seconds % 60);\n let m = Math.floor(seconds / 60 % 60);\n let h = Math.floor(seconds / 3600);\n const gm = Math.floor(guide / 60 % 60);\n const gh = Math.floor(guide / 3600);\n\n // handle invalid times\n if (isNaN(seconds) || seconds === Infinity) {\n // '-' is false for all relational operators (e.g. <, >=) so this setting\n // will add the minimum number of fields specified by the guide\n h = m = s = '-';\n }\n\n // Check if we need to show hours\n h = h > 0 || gh > 0 ? h + ':' : '';\n\n // If hours are showing, we may need to add a leading zero.\n // Always show at least one digit of minutes.\n m = ((h || gm >= 10) && m < 10 ? '0' + m : m) + ':';\n\n // Check if leading zero is need for seconds\n s = s < 10 ? '0' + s : s;\n return h + m + s;\n};\n\n// Internal pointer to the current implementation.\nlet implementation = defaultImplementation;\n\n/**\n * Replaces the default formatTime implementation with a custom implementation.\n *\n * @param {Function} customImplementation\n * A function which will be used in place of the default formatTime\n * implementation. Will receive the current time in seconds and the\n * guide (in seconds) as arguments.\n */\nfunction setFormatTime(customImplementation) {\n implementation = customImplementation;\n}\n\n/**\n * Resets formatTime to the default implementation.\n */\nfunction resetFormatTime() {\n implementation = defaultImplementation;\n}\n\n/**\n * Delegates to either the default time formatting function or a custom\n * function supplied via `setFormatTime`.\n *\n * Formats seconds as a time string (H:MM:SS or M:SS). Supplying a\n * guide (in seconds) will force a number of leading zeros to cover the\n * length of the guide.\n *\n * @example formatTime(125, 600) === \"02:05\"\n * @param {number} seconds\n * Number of seconds to be turned into a string\n *\n * @param {number} guide\n * Number (in seconds) to model the string after\n *\n * @return {string}\n * Time formatted as H:MM:SS or M:SS\n */\nfunction formatTime(seconds, guide = seconds) {\n return implementation(seconds, guide);\n}\n\nvar Time = /*#__PURE__*/Object.freeze({\n __proto__: null,\n createTimeRanges: createTimeRanges$1,\n createTimeRange: createTimeRanges$1,\n setFormatTime: setFormatTime,\n resetFormatTime: resetFormatTime,\n formatTime: formatTime\n});\n\n/**\n * @file buffer.js\n * @module buffer\n */\n\n/**\n * Compute the percentage of the media that has been buffered.\n *\n * @param { import('./time').TimeRange } buffered\n * The current `TimeRanges` object representing buffered time ranges\n *\n * @param {number} duration\n * Total duration of the media\n *\n * @return {number}\n * Percent buffered of the total duration in decimal form.\n */\nfunction bufferedPercent(buffered, duration) {\n let bufferedDuration = 0;\n let start;\n let end;\n if (!duration) {\n return 0;\n }\n if (!buffered || !buffered.length) {\n buffered = createTimeRanges$1(0, 0);\n }\n for (let i = 0; i < buffered.length; i++) {\n start = buffered.start(i);\n end = buffered.end(i);\n\n // buffered end can be bigger than duration by a very small fraction\n if (end > duration) {\n end = duration;\n }\n bufferedDuration += end - start;\n }\n return bufferedDuration / duration;\n}\n\n/**\n * @file media-error.js\n */\n\n/**\n * A Custom `MediaError` class which mimics the standard HTML5 `MediaError` class.\n *\n * @param {number|string|Object|MediaError} value\n * This can be of multiple types:\n * - number: should be a standard error code\n * - string: an error message 
(the code will be 0)\n * - Object: arbitrary properties\n * - `MediaError` (native): used to populate a video.js `MediaError` object\n * - `MediaError` (video.js): will return itself if it's already a\n * video.js `MediaError` object.\n *\n * @see [MediaError Spec]{@link https://dev.w3.org/html5/spec-author-view/video.html#mediaerror}\n * @see [Encrypted MediaError Spec]{@link https://www.w3.org/TR/2013/WD-encrypted-media-20130510/#error-codes}\n *\n * @class MediaError\n */\nfunction MediaError(value) {\n // Allow redundant calls to this constructor to avoid having `instanceof`\n // checks peppered around the code.\n if (value instanceof MediaError) {\n return value;\n }\n if (typeof value === 'number') {\n this.code = value;\n } else if (typeof value === 'string') {\n // default code is zero, so this is a custom error\n this.message = value;\n } else if (isObject(value)) {\n // We assign the `code` property manually because native `MediaError` objects\n // do not expose it as an own/enumerable property of the object.\n if (typeof value.code === 'number') {\n this.code = value.code;\n }\n Object.assign(this, value);\n }\n if (!this.message) {\n this.message = MediaError.defaultMessages[this.code] || '';\n }\n}\n\n/**\n * The error code that refers to one of the defined `MediaError` types\n *\n * @type {Number}\n */\nMediaError.prototype.code = 0;\n\n/**\n * An optional message to show with the error. Message is not part of the HTML5\n * video spec but allows for more informative custom errors.\n *\n * @type {String}\n */\nMediaError.prototype.message = '';\n\n/**\n * An optional status code that can be set by plugins to allow even more detail about\n * the error. For example a plugin might provide a specific HTTP status code and an\n * error message for that code. Then when the plugin gets that error this class will\n * know how to display an error message for it. This allows a custom message to show\n * up on the `Player` error overlay.\n *\n * @type {Array}\n */\nMediaError.prototype.status = null;\n\n/**\n * An object containing an error type, as well as other information regarding the error.\n *\n * @typedef {{errorType: string, [key: string]: any}} ErrorMetadata\n */\n\n/**\n * An optional object to give more detail about the error. This can be used to give\n * a higher level of specificity to an error versus the more generic MediaError codes.\n * `metadata` expects an `errorType` string that should align with the values from videojs.Error.\n *\n * @type {ErrorMetadata}\n */\nMediaError.prototype.metadata = null;\n\n/**\n * Errors indexed by the W3C standard. The order **CANNOT CHANGE**! 
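For example, `MediaError.errorTypes[4]` is `'MEDIA_ERR_SRC_NOT_SUPPORTED'`, matching the W3C code `4`. 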
See the\n * specification listed under {@link MediaError} for more information.\n *\n * @enum {array}\n * @readonly\n * @property {string} 0 - MEDIA_ERR_CUSTOM\n * @property {string} 1 - MEDIA_ERR_ABORTED\n * @property {string} 2 - MEDIA_ERR_NETWORK\n * @property {string} 3 - MEDIA_ERR_DECODE\n * @property {string} 4 - MEDIA_ERR_SRC_NOT_SUPPORTED\n * @property {string} 5 - MEDIA_ERR_ENCRYPTED\n */\nMediaError.errorTypes = ['MEDIA_ERR_CUSTOM', 'MEDIA_ERR_ABORTED', 'MEDIA_ERR_NETWORK', 'MEDIA_ERR_DECODE', 'MEDIA_ERR_SRC_NOT_SUPPORTED', 'MEDIA_ERR_ENCRYPTED'];\n\n/**\n * The default `MediaError` messages based on the {@link MediaError.errorTypes}.\n *\n * @type {Array}\n * @constant\n */\nMediaError.defaultMessages = {\n 1: 'You aborted the media playback',\n 2: 'A network error caused the media download to fail part-way.',\n 3: 'The media playback was aborted due to a corruption problem or because the media used features your browser did not support.',\n 4: 'The media could not be loaded, either because the server or network failed or because the format is not supported.',\n 5: 'The media is encrypted and we do not have the keys to decrypt it.'\n};\n\n/**\n * W3C error code for any custom error.\n *\n * @member MediaError#MEDIA_ERR_CUSTOM\n * @constant {number}\n * @default 0\n */\nMediaError.MEDIA_ERR_CUSTOM = 0;\n\n/**\n * W3C error code for any custom error.\n *\n * @member MediaError.MEDIA_ERR_CUSTOM\n * @constant {number}\n * @default 0\n */\nMediaError.prototype.MEDIA_ERR_CUSTOM = 0;\n\n/**\n * W3C error code for media error aborted.\n *\n * @member MediaError#MEDIA_ERR_ABORTED\n * @constant {number}\n * @default 1\n */\nMediaError.MEDIA_ERR_ABORTED = 1;\n\n/**\n * W3C error code for media error aborted.\n *\n * @member MediaError.MEDIA_ERR_ABORTED\n * @constant {number}\n * @default 1\n */\nMediaError.prototype.MEDIA_ERR_ABORTED = 1;\n\n/**\n * W3C error code for any network error.\n *\n * @member MediaError#MEDIA_ERR_NETWORK\n * @constant {number}\n * @default 2\n */\nMediaError.MEDIA_ERR_NETWORK = 2;\n\n/**\n * W3C error code for any network error.\n *\n * @member MediaError.MEDIA_ERR_NETWORK\n * @constant {number}\n * @default 2\n */\nMediaError.prototype.MEDIA_ERR_NETWORK = 2;\n\n/**\n * W3C error code for any decoding error.\n *\n * @member MediaError#MEDIA_ERR_DECODE\n * @constant {number}\n * @default 3\n */\nMediaError.MEDIA_ERR_DECODE = 3;\n\n/**\n * W3C error code for any decoding error.\n *\n * @member MediaError.MEDIA_ERR_DECODE\n * @constant {number}\n * @default 3\n */\nMediaError.prototype.MEDIA_ERR_DECODE = 3;\n\n/**\n * W3C error code for any time that a source is not supported.\n *\n * @member MediaError#MEDIA_ERR_SRC_NOT_SUPPORTED\n * @constant {number}\n * @default 4\n */\nMediaError.MEDIA_ERR_SRC_NOT_SUPPORTED = 4;\n\n/**\n * W3C error code for any time that a source is not supported.\n *\n * @member MediaError.MEDIA_ERR_SRC_NOT_SUPPORTED\n * @constant {number}\n * @default 4\n */\nMediaError.prototype.MEDIA_ERR_SRC_NOT_SUPPORTED = 4;\n\n/**\n * W3C error code for any time that a source is encrypted.\n *\n * @member MediaError#MEDIA_ERR_ENCRYPTED\n * @constant {number}\n * @default 5\n */\nMediaError.MEDIA_ERR_ENCRYPTED = 5;\n\n/**\n * W3C error code for any time that a source is encrypted.\n *\n * @member MediaError.MEDIA_ERR_ENCRYPTED\n * @constant {number}\n * @default 5\n */\nMediaError.prototype.MEDIA_ERR_ENCRYPTED = 5;\n\n/**\n * Returns whether an object is `Promise`-like (i.e. 
has a `then` method).\n *\n * @param {Object} value\n * An object that may or may not be `Promise`-like.\n *\n * @return {boolean}\n * Whether or not the object is `Promise`-like.\n */\nfunction isPromise(value) {\n return value !== undefined && value !== null && typeof value.then === 'function';\n}\n\n/**\n * Silence a Promise-like object.\n *\n * This is useful for avoiding non-harmful, but potentially confusing \"uncaught\n * play promise\" rejection error messages.\n *\n * @param {Object} value\n * An object that may or may not be `Promise`-like.\n */\nfunction silencePromise(value) {\n if (isPromise(value)) {\n value.then(null, e => {});\n }\n}\n\n/**\n * @file text-track-list-converter.js Utilities for capturing text track state and\n * re-creating tracks based on a capture.\n *\n * @module text-track-list-converter\n */\n\n/**\n * Examine a single {@link TextTrack} and return a JSON-compatible javascript object that\n * represents the {@link TextTrack}'s state.\n *\n * @param {TextTrack} track\n * The text track to query.\n *\n * @return {Object}\n * A serializable javascript representation of the TextTrack.\n * @private\n */\nconst trackToJson_ = function (track) {\n const ret = ['kind', 'label', 'language', 'id', 'inBandMetadataTrackDispatchType', 'mode', 'src'].reduce((acc, prop, i) => {\n if (track[prop]) {\n acc[prop] = track[prop];\n }\n return acc;\n }, {\n cues: track.cues && Array.prototype.map.call(track.cues, function (cue) {\n return {\n startTime: cue.startTime,\n endTime: cue.endTime,\n text: cue.text,\n id: cue.id\n };\n })\n });\n return ret;\n};\n\n/**\n * Examine a {@link Tech} and return a JSON-compatible javascript array that represents the\n * state of all {@link TextTrack}s currently configured. The return array is compatible with\n * {@link text-track-list-converter:jsonToTextTracks}.\n *\n * @param { import('../tech/tech').default } tech\n * The tech object to query\n *\n * @return {Array}\n * A serializable javascript representation of the {@link Tech}s\n * {@link TextTrackList}.\n */\nconst textTracksToJson = function (tech) {\n const trackEls = tech.$$('track');\n const trackObjs = Array.prototype.map.call(trackEls, t => t.track);\n const tracks = Array.prototype.map.call(trackEls, function (trackEl) {\n const json = trackToJson_(trackEl.track);\n if (trackEl.src) {\n json.src = trackEl.src;\n }\n return json;\n });\n return tracks.concat(Array.prototype.filter.call(tech.textTracks(), function (track) {\n return trackObjs.indexOf(track) === -1;\n }).map(trackToJson_));\n};\n\n/**\n * Create a set of remote {@link TextTrack}s on a {@link Tech} based on an array of javascript\n * object {@link TextTrack} representations.\n *\n * @param {Array} json\n * An array of `TextTrack` representation objects, like those that would be\n * produced by `textTracksToJson`.\n *\n * @param {Tech} tech\n * The `Tech` to create the `TextTrack`s on.\n */\nconst jsonToTextTracks = function (json, tech) {\n json.forEach(function (track) {\n const addedTrack = tech.addRemoteTextTrack(track).track;\n if (!track.src && track.cues) {\n track.cues.forEach(cue => addedTrack.addCue(cue));\n }\n });\n return tech.textTracks();\n};\nvar textTrackConverter = {\n textTracksToJson,\n jsonToTextTracks,\n trackToJson_\n};\n\n/**\n * @file modal-dialog.js\n */\nconst MODAL_CLASS_NAME = 'vjs-modal-dialog';\n\n/**\n * The `ModalDialog` displays over the video and its controls, which blocks\n * interaction with the player until it is closed.\n *\n * Modal dialogs include a \"Close\" button and 
will close when that button\n * is activated - or when ESC is pressed anywhere.\n *\n * @extends Component\n */\nclass ModalDialog extends Component$1 {\n /**\n * Create an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param { import('./utils/dom').ContentDescriptor} [options.content=undefined]\n * Provide customized content for this modal.\n *\n * @param {string} [options.description]\n * A text description for the modal, primarily for accessibility.\n *\n * @param {boolean} [options.fillAlways=false]\n * Normally, modals are automatically filled only the first time\n * they open. This tells the modal to refresh its content\n * every time it opens.\n *\n * @param {string} [options.label]\n * A text label for the modal, primarily for accessibility.\n *\n * @param {boolean} [options.pauseOnOpen=true]\n * If `true`, playback will will be paused if playing when\n * the modal opens, and resumed when it closes.\n *\n * @param {boolean} [options.temporary=true]\n * If `true`, the modal can only be opened once; it will be\n * disposed as soon as it's closed.\n *\n * @param {boolean} [options.uncloseable=false]\n * If `true`, the user will not be able to close the modal\n * through the UI in the normal ways. Programmatic closing is\n * still possible.\n */\n constructor(player, options) {\n super(player, options);\n this.handleKeyDown_ = e => this.handleKeyDown(e);\n this.close_ = e => this.close(e);\n this.opened_ = this.hasBeenOpened_ = this.hasBeenFilled_ = false;\n this.closeable(!this.options_.uncloseable);\n this.content(this.options_.content);\n\n // Make sure the contentEl is defined AFTER any children are initialized\n // because we only want the contents of the modal in the contentEl\n // (not the UI elements like the close button).\n this.contentEl_ = createEl('div', {\n className: `${MODAL_CLASS_NAME}-content`\n }, {\n role: 'document'\n });\n this.descEl_ = createEl('p', {\n className: `${MODAL_CLASS_NAME}-description vjs-control-text`,\n id: this.el().getAttribute('aria-describedby')\n });\n textContent(this.descEl_, this.description());\n this.el_.appendChild(this.descEl_);\n this.el_.appendChild(this.contentEl_);\n }\n\n /**\n * Create the `ModalDialog`'s DOM element\n *\n * @return {Element}\n * The DOM element that gets created.\n */\n createEl() {\n return super.createEl('div', {\n className: this.buildCSSClass(),\n tabIndex: -1\n }, {\n 'aria-describedby': `${this.id()}_description`,\n 'aria-hidden': 'true',\n 'aria-label': this.label(),\n 'role': 'dialog',\n 'aria-live': 'polite'\n });\n }\n dispose() {\n this.contentEl_ = null;\n this.descEl_ = null;\n this.previouslyActiveEl_ = null;\n super.dispose();\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `${MODAL_CLASS_NAME} vjs-hidden ${super.buildCSSClass()}`;\n }\n\n /**\n * Returns the label string for this modal. Primarily used for accessibility.\n *\n * @return {string}\n * the localized or raw label of this modal.\n */\n label() {\n return this.localize(this.options_.label || 'Modal Window');\n }\n\n /**\n * Returns the description string for this modal. 
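For example (a hedged sketch), a closeable modal created with the hypothetical option `description: 'Playback failed.'` would return that text with the localized Escape-key hint appended. 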
Primarily used for\n * accessibility.\n *\n * @return {string}\n * The localized or raw description of this modal.\n */\n description() {\n let desc = this.options_.description || this.localize('This is a modal window.');\n\n // Append a universal closeability message if the modal is closeable.\n if (this.closeable()) {\n desc += ' ' + this.localize('This modal can be closed by pressing the Escape key or activating the close button.');\n }\n return desc;\n }\n\n /**\n * Opens the modal.\n *\n * @fires ModalDialog#beforemodalopen\n * @fires ModalDialog#modalopen\n */\n open() {\n if (this.opened_) {\n if (this.options_.fillAlways) {\n this.fill();\n }\n return;\n }\n const player = this.player();\n\n /**\n * Fired just before a `ModalDialog` is opened.\n *\n * @event ModalDialog#beforemodalopen\n * @type {Event}\n */\n this.trigger('beforemodalopen');\n this.opened_ = true;\n\n // Fill content if the modal has never opened before and\n // never been filled.\n if (this.options_.fillAlways || !this.hasBeenOpened_ && !this.hasBeenFilled_) {\n this.fill();\n }\n\n // If the player was playing, pause it and take note of its previously\n // playing state.\n this.wasPlaying_ = !player.paused();\n if (this.options_.pauseOnOpen && this.wasPlaying_) {\n player.pause();\n }\n this.on('keydown', this.handleKeyDown_);\n\n // Hide controls and note if they were enabled.\n this.hadControls_ = player.controls();\n player.controls(false);\n this.show();\n this.conditionalFocus_();\n this.el().setAttribute('aria-hidden', 'false');\n\n /**\n * Fired just after a `ModalDialog` is opened.\n *\n * @event ModalDialog#modalopen\n * @type {Event}\n */\n this.trigger('modalopen');\n this.hasBeenOpened_ = true;\n }\n\n /**\n * If the `ModalDialog` is currently open or closed.\n *\n * @param {boolean} [value]\n * If given, it will open (`true`) or close (`false`) the modal.\n *\n * @return {boolean}\n * the current open state of the modaldialog\n */\n opened(value) {\n if (typeof value === 'boolean') {\n this[value ? 
'open' : 'close']();\n }\n return this.opened_;\n }\n\n /**\n * Closes the modal, does nothing if the `ModalDialog` is\n * not open.\n *\n * @fires ModalDialog#beforemodalclose\n * @fires ModalDialog#modalclose\n */\n close() {\n if (!this.opened_) {\n return;\n }\n const player = this.player();\n\n /**\n * Fired just before a `ModalDialog` is closed.\n *\n * @event ModalDialog#beforemodalclose\n * @type {Event}\n */\n this.trigger('beforemodalclose');\n this.opened_ = false;\n if (this.wasPlaying_ && this.options_.pauseOnOpen) {\n player.play();\n }\n this.off('keydown', this.handleKeyDown_);\n if (this.hadControls_) {\n player.controls(true);\n }\n this.hide();\n this.el().setAttribute('aria-hidden', 'true');\n\n /**\n * Fired just after a `ModalDialog` is closed.\n *\n * @event ModalDialog#modalclose\n * @type {Event}\n */\n this.trigger('modalclose');\n this.conditionalBlur_();\n if (this.options_.temporary) {\n this.dispose();\n }\n }\n\n /**\n * Check to see if the `ModalDialog` is closeable via the UI.\n *\n * @param {boolean} [value]\n * If given as a boolean, it will set the `closeable` option.\n *\n * @return {boolean}\n * Returns the final value of the closable option.\n */\n closeable(value) {\n if (typeof value === 'boolean') {\n const closeable = this.closeable_ = !!value;\n let close = this.getChild('closeButton');\n\n // If this is being made closeable and has no close button, add one.\n if (closeable && !close) {\n // The close button should be a child of the modal - not its\n // content element, so temporarily change the content element.\n const temp = this.contentEl_;\n this.contentEl_ = this.el_;\n close = this.addChild('closeButton', {\n controlText: 'Close Modal Dialog'\n });\n this.contentEl_ = temp;\n this.on(close, 'close', this.close_);\n }\n\n // If this is being made uncloseable and has a close button, remove it.\n if (!closeable && close) {\n this.off(close, 'close', this.close_);\n this.removeChild(close);\n close.dispose();\n }\n }\n return this.closeable_;\n }\n\n /**\n * Fill the modal's content element with the modal's \"content\" option.\n * The content element will be emptied before this change takes place.\n */\n fill() {\n this.fillWith(this.content());\n }\n\n /**\n * Fill the modal's content element with arbitrary content.\n * The content element will be emptied before this change takes place.\n *\n * @fires ModalDialog#beforemodalfill\n * @fires ModalDialog#modalfill\n *\n * @param { import('./utils/dom').ContentDescriptor} [content]\n * The same rules apply to this as apply to the `content` option.\n */\n fillWith(content) {\n const contentEl = this.contentEl();\n const parentEl = contentEl.parentNode;\n const nextSiblingEl = contentEl.nextSibling;\n\n /**\n * Fired just before a `ModalDialog` is filled with content.\n *\n * @event ModalDialog#beforemodalfill\n * @type {Event}\n */\n this.trigger('beforemodalfill');\n this.hasBeenFilled_ = true;\n\n // Detach the content element from the DOM before performing\n // manipulation to avoid modifying the live DOM multiple times.\n parentEl.removeChild(contentEl);\n this.empty();\n insertContent(contentEl, content);\n /**\n * Fired just after a `ModalDialog` is filled with content.\n *\n * @event ModalDialog#modalfill\n * @type {Event}\n */\n this.trigger('modalfill');\n\n // Re-inject the re-filled content element.\n if (nextSiblingEl) {\n parentEl.insertBefore(contentEl, nextSiblingEl);\n } else {\n parentEl.appendChild(contentEl);\n }\n\n // make sure that the close button is last in the dialog DOM\n 
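// (getChild may return undefined here if the modal was made uncloseable, hence the guard below)\n 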
const closeButton = this.getChild('closeButton');\n if (closeButton) {\n parentEl.appendChild(closeButton.el_);\n }\n }\n\n /**\n * Empties the content element. This happens anytime the modal is filled.\n *\n * @fires ModalDialog#beforemodalempty\n * @fires ModalDialog#modalempty\n */\n empty() {\n /**\n * Fired just before a `ModalDialog` is emptied.\n *\n * @event ModalDialog#beforemodalempty\n * @type {Event}\n */\n this.trigger('beforemodalempty');\n emptyEl(this.contentEl());\n\n /**\n * Fired just after a `ModalDialog` is emptied.\n *\n * @event ModalDialog#modalempty\n * @type {Event}\n */\n this.trigger('modalempty');\n }\n\n /**\n * Gets or sets the modal content, which gets normalized before being\n * rendered into the DOM.\n *\n * This does not update the DOM or fill the modal, but it is called during\n * that process.\n *\n * @param { import('./utils/dom').ContentDescriptor} [value]\n * If defined, sets the internal content value to be used on the\n * next call(s) to `fill`. This value is normalized before being\n * inserted. To \"clear\" the internal content value, pass `null`.\n *\n * @return { import('./utils/dom').ContentDescriptor}\n * The current content of the modal dialog\n */\n content(value) {\n if (typeof value !== 'undefined') {\n this.content_ = value;\n }\n return this.content_;\n }\n\n /**\n * conditionally focus the modal dialog if focus was previously on the player.\n *\n * @private\n */\n conditionalFocus_() {\n const activeEl = document.activeElement;\n const playerEl = this.player_.el_;\n this.previouslyActiveEl_ = null;\n if (playerEl.contains(activeEl) || playerEl === activeEl) {\n this.previouslyActiveEl_ = activeEl;\n this.focus();\n }\n }\n\n /**\n * conditionally blur the element and refocus the last focused element\n *\n * @private\n */\n conditionalBlur_() {\n if (this.previouslyActiveEl_) {\n this.previouslyActiveEl_.focus();\n this.previouslyActiveEl_ = null;\n }\n }\n\n /**\n * Keydown handler. 
Attached when modal is focused.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Do not allow keydowns to reach out of the modal dialog.\n event.stopPropagation();\n if (keycode.isEventKey(event, 'Escape') && this.closeable()) {\n event.preventDefault();\n this.close();\n return;\n }\n\n // exit early if it isn't a tab key\n if (!keycode.isEventKey(event, 'Tab')) {\n return;\n }\n const focusableEls = this.focusableEls_();\n const activeEl = this.el_.querySelector(':focus');\n let focusIndex;\n for (let i = 0; i < focusableEls.length; i++) {\n if (activeEl === focusableEls[i]) {\n focusIndex = i;\n break;\n }\n }\n if (document.activeElement === this.el_) {\n focusIndex = 0;\n }\n if (event.shiftKey && focusIndex === 0) {\n focusableEls[focusableEls.length - 1].focus();\n event.preventDefault();\n } else if (!event.shiftKey && focusIndex === focusableEls.length - 1) {\n focusableEls[0].focus();\n event.preventDefault();\n }\n }\n\n /**\n * get all focusable elements\n *\n * @private\n */\n focusableEls_() {\n const allChildren = this.el_.querySelectorAll('*');\n return Array.prototype.filter.call(allChildren, child => {\n return (child instanceof window$1.HTMLAnchorElement || child instanceof window$1.HTMLAreaElement) && child.hasAttribute('href') || (child instanceof window$1.HTMLInputElement || child instanceof window$1.HTMLSelectElement || child instanceof window$1.HTMLTextAreaElement || child instanceof window$1.HTMLButtonElement) && !child.hasAttribute('disabled') || child instanceof window$1.HTMLIFrameElement || child instanceof window$1.HTMLObjectElement || child instanceof window$1.HTMLEmbedElement || child.hasAttribute('tabindex') && child.getAttribute('tabindex') !== '-1' || child.hasAttribute('contenteditable');\n });\n }\n}\n\n/**\n * Default options for `ModalDialog`.\n *\n * @type {Object}\n * @private\n */\nModalDialog.prototype.options_ = {\n pauseOnOpen: true,\n temporary: true\n};\nComponent$1.registerComponent('ModalDialog', ModalDialog);\n\n/**\n * @file track-list.js\n */\n\n/**\n * Common functionality between {@link TextTrackList}, {@link AudioTrackList}, and\n * {@link VideoTrackList}\n *\n * @extends EventTarget\n */\nclass TrackList extends EventTarget$2 {\n /**\n * Create an instance of this class\n *\n * @param { import('./track').default[] } tracks\n * A list of tracks to initialize the list with.\n *\n * @abstract\n */\n constructor(tracks = []) {\n super();\n this.tracks_ = [];\n\n /**\n * @memberof TrackList\n * @member {number} length\n * The current number of `Track`s in this `TrackList`.\n * @instance\n */\n Object.defineProperty(this, 'length', {\n get() {\n return this.tracks_.length;\n }\n });\n for (let i = 0; i < tracks.length; i++) {\n this.addTrack(tracks[i]);\n }\n }\n\n /**\n * Add a {@link Track} to the `TrackList`\n *\n * @param { import('./track').default } track\n * The audio, video, or text track to add to the list.\n *\n * @fires TrackList#addtrack\n */\n addTrack(track) {\n const index = this.tracks_.length;\n if (!('' + index in this)) {\n Object.defineProperty(this, index, {\n get() {\n return this.tracks_[index];\n }\n });\n }\n\n // Do not add duplicate tracks\n if (this.tracks_.indexOf(track) === -1) {\n this.tracks_.push(track);\n /**\n * Triggered when a track is added to a track list.\n *\n * @event TrackList#addtrack\n * @type {Event}\n * @property {Track} track\n * A reference to the track that was added.\n */\n this.trigger({\n track,\n type: 'addtrack',\n target: this\n });\n }\n\n /**\n * Triggered when a 
track label is changed.\n *\n * @event TrackList#addtrack\n * @type {Event}\n * @property {Track} track\n * A reference to track that was added.\n */\n track.labelchange_ = () => {\n this.trigger({\n track,\n type: 'labelchange',\n target: this\n });\n };\n if (isEvented(track)) {\n track.addEventListener('labelchange', track.labelchange_);\n }\n }\n\n /**\n * Remove a {@link Track} from the `TrackList`\n *\n * @param { import('./track').default } rtrack\n * The audio, video, or text track to remove from the list.\n *\n * @fires TrackList#removetrack\n */\n removeTrack(rtrack) {\n let track;\n for (let i = 0, l = this.length; i < l; i++) {\n if (this[i] === rtrack) {\n track = this[i];\n if (track.off) {\n track.off();\n }\n this.tracks_.splice(i, 1);\n break;\n }\n }\n if (!track) {\n return;\n }\n\n /**\n * Triggered when a track is removed from track list.\n *\n * @event TrackList#removetrack\n * @type {Event}\n * @property {Track} track\n * A reference to track that was removed.\n */\n this.trigger({\n track,\n type: 'removetrack',\n target: this\n });\n }\n\n /**\n * Get a Track from the TrackList by a tracks id\n *\n * @param {string} id - the id of the track to get\n * @method getTrackById\n * @return { import('./track').default }\n * @private\n */\n getTrackById(id) {\n let result = null;\n for (let i = 0, l = this.length; i < l; i++) {\n const track = this[i];\n if (track.id === id) {\n result = track;\n break;\n }\n }\n return result;\n }\n}\n\n/**\n * Triggered when a different track is selected/enabled.\n *\n * @event TrackList#change\n * @type {Event}\n */\n\n/**\n * Events that can be called with on + eventName. See {@link EventHandler}.\n *\n * @property {Object} TrackList#allowedEvents_\n * @protected\n */\nTrackList.prototype.allowedEvents_ = {\n change: 'change',\n addtrack: 'addtrack',\n removetrack: 'removetrack',\n labelchange: 'labelchange'\n};\n\n// emulate attribute EventHandler support to allow for feature detection\nfor (const event in TrackList.prototype.allowedEvents_) {\n TrackList.prototype['on' + event] = null;\n}\n\n/**\n * @file audio-track-list.js\n */\n\n/**\n * Anywhere we call this function we diverge from the spec\n * as we only support one enabled audiotrack at a time\n *\n * @param {AudioTrackList} list\n * list to work on\n *\n * @param { import('./audio-track').default } track\n * The track to skip\n *\n * @private\n */\nconst disableOthers$1 = function (list, track) {\n for (let i = 0; i < list.length; i++) {\n if (!Object.keys(list[i]).length || track.id === list[i].id) {\n continue;\n }\n // another audio track is enabled, disable it\n list[i].enabled = false;\n }\n};\n\n/**\n * The current list of {@link AudioTrack} for a media file.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#audiotracklist}\n * @extends TrackList\n */\nclass AudioTrackList extends TrackList {\n /**\n * Create an instance of this class.\n *\n * @param { import('./audio-track').default[] } [tracks=[]]\n * A list of `AudioTrack` to instantiate the list with.\n */\n constructor(tracks = []) {\n // make sure only 1 track is enabled\n // sorted from last index to first index\n for (let i = tracks.length - 1; i >= 0; i--) {\n if (tracks[i].enabled) {\n disableOthers$1(tracks, tracks[i]);\n break;\n }\n }\n super(tracks);\n this.changing_ = false;\n }\n\n /**\n * Add an {@link AudioTrack} to the `AudioTrackList`.\n *\n * @param { import('./audio-track').default } track\n * The AudioTrack to add to the list\n *\n * @fires TrackList#addtrack\n 
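*\n * @example\n * // Hedged usage sketch added by the editor (not part of the original source); it only\n * // uses the AudioTrackList and AudioTrack constructors documented in this file.\n * const list = new AudioTrackList();\n * list.addTrack(new AudioTrack({id: 'a1', kind: 'main', label: 'English', language: 'en', enabled: true}));\n * list.addTrack(new AudioTrack({id: 'a2', kind: 'translation', label: 'Deutsch', language: 'de', enabled: true}));\n * // Only one AudioTrack may be enabled at a time, so adding the second enabled track\n * // disables 'a1' and fires a 'change' event on the list.\n 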
*/\n addTrack(track) {\n if (track.enabled) {\n disableOthers$1(this, track);\n }\n super.addTrack(track);\n // native tracks don't have this\n if (!track.addEventListener) {\n return;\n }\n track.enabledChange_ = () => {\n // when we are disabling other tracks (since we don't support\n // more than one track at a time) we will set changing_\n // to true so that we don't trigger additional change events\n if (this.changing_) {\n return;\n }\n this.changing_ = true;\n disableOthers$1(this, track);\n this.changing_ = false;\n this.trigger('change');\n };\n\n /**\n * @listens AudioTrack#enabledchange\n * @fires TrackList#change\n */\n track.addEventListener('enabledchange', track.enabledChange_);\n }\n removeTrack(rtrack) {\n super.removeTrack(rtrack);\n if (rtrack.removeEventListener && rtrack.enabledChange_) {\n rtrack.removeEventListener('enabledchange', rtrack.enabledChange_);\n rtrack.enabledChange_ = null;\n }\n }\n}\n\n/**\n * @file video-track-list.js\n */\n\n/**\n * Un-select all other {@link VideoTrack}s that are selected.\n *\n * @param {VideoTrackList} list\n * list to work on\n *\n * @param { import('./video-track').default } track\n * The track to skip\n *\n * @private\n */\nconst disableOthers = function (list, track) {\n for (let i = 0; i < list.length; i++) {\n if (!Object.keys(list[i]).length || track.id === list[i].id) {\n continue;\n }\n // another video track is enabled, disable it\n list[i].selected = false;\n }\n};\n\n/**\n * The current list of {@link VideoTrack} for a video.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#videotracklist}\n * @extends TrackList\n */\nclass VideoTrackList extends TrackList {\n /**\n * Create an instance of this class.\n *\n * @param {VideoTrack[]} [tracks=[]]\n * A list of `VideoTrack` to instantiate the list with.\n */\n constructor(tracks = []) {\n // make sure only 1 track is enabled\n // sorted from last index to first index\n for (let i = tracks.length - 1; i >= 0; i--) {\n if (tracks[i].selected) {\n disableOthers(tracks, tracks[i]);\n break;\n }\n }\n super(tracks);\n this.changing_ = false;\n\n /**\n * @member {number} VideoTrackList#selectedIndex\n * The current index of the selected {@link VideoTrack`}.\n */\n Object.defineProperty(this, 'selectedIndex', {\n get() {\n for (let i = 0; i < this.length; i++) {\n if (this[i].selected) {\n return i;\n }\n }\n return -1;\n },\n set() {}\n });\n }\n\n /**\n * Add a {@link VideoTrack} to the `VideoTrackList`.\n *\n * @param { import('./video-track').default } track\n * The VideoTrack to add to the list\n *\n * @fires TrackList#addtrack\n */\n addTrack(track) {\n if (track.selected) {\n disableOthers(this, track);\n }\n super.addTrack(track);\n // native tracks don't have this\n if (!track.addEventListener) {\n return;\n }\n track.selectedChange_ = () => {\n if (this.changing_) {\n return;\n }\n this.changing_ = true;\n disableOthers(this, track);\n this.changing_ = false;\n this.trigger('change');\n };\n\n /**\n * @listens VideoTrack#selectedchange\n * @fires TrackList#change\n */\n track.addEventListener('selectedchange', track.selectedChange_);\n }\n removeTrack(rtrack) {\n super.removeTrack(rtrack);\n if (rtrack.removeEventListener && rtrack.selectedChange_) {\n rtrack.removeEventListener('selectedchange', rtrack.selectedChange_);\n rtrack.selectedChange_ = null;\n }\n }\n}\n\n/**\n * @file text-track-list.js\n */\n\n/**\n * The current list of {@link TextTrack} for a media file.\n *\n * @see [Spec]{@link 
https://html.spec.whatwg.org/multipage/embedded-content.html#texttracklist}\n * @extends TrackList\n */\nclass TextTrackList extends TrackList {\n /**\n * Add a {@link TextTrack} to the `TextTrackList`\n *\n * @param { import('./text-track').default } track\n * The text track to add to the list.\n *\n * @fires TrackList#addtrack\n */\n addTrack(track) {\n super.addTrack(track);\n if (!this.queueChange_) {\n this.queueChange_ = () => this.queueTrigger('change');\n }\n if (!this.triggerSelectedlanguagechange) {\n this.triggerSelectedlanguagechange_ = () => this.trigger('selectedlanguagechange');\n }\n\n /**\n * @listens TextTrack#modechange\n * @fires TrackList#change\n */\n track.addEventListener('modechange', this.queueChange_);\n const nonLanguageTextTrackKind = ['metadata', 'chapters'];\n if (nonLanguageTextTrackKind.indexOf(track.kind) === -1) {\n track.addEventListener('modechange', this.triggerSelectedlanguagechange_);\n }\n }\n removeTrack(rtrack) {\n super.removeTrack(rtrack);\n\n // manually remove the event handlers we added\n if (rtrack.removeEventListener) {\n if (this.queueChange_) {\n rtrack.removeEventListener('modechange', this.queueChange_);\n }\n if (this.selectedlanguagechange_) {\n rtrack.removeEventListener('modechange', this.triggerSelectedlanguagechange_);\n }\n }\n }\n}\n\n/**\n * @file html-track-element-list.js\n */\n\n/**\n * The current list of {@link HtmlTrackElement}s.\n */\nclass HtmlTrackElementList {\n /**\n * Create an instance of this class.\n *\n * @param {HtmlTrackElement[]} [tracks=[]]\n * A list of `HtmlTrackElement` to instantiate the list with.\n */\n constructor(trackElements = []) {\n this.trackElements_ = [];\n\n /**\n * @memberof HtmlTrackElementList\n * @member {number} length\n * The current number of `Track`s in the this Trackist.\n * @instance\n */\n Object.defineProperty(this, 'length', {\n get() {\n return this.trackElements_.length;\n }\n });\n for (let i = 0, length = trackElements.length; i < length; i++) {\n this.addTrackElement_(trackElements[i]);\n }\n }\n\n /**\n * Add an {@link HtmlTrackElement} to the `HtmlTrackElementList`\n *\n * @param {HtmlTrackElement} trackElement\n * The track element to add to the list.\n *\n * @private\n */\n addTrackElement_(trackElement) {\n const index = this.trackElements_.length;\n if (!('' + index in this)) {\n Object.defineProperty(this, index, {\n get() {\n return this.trackElements_[index];\n }\n });\n }\n\n // Do not add duplicate elements\n if (this.trackElements_.indexOf(trackElement) === -1) {\n this.trackElements_.push(trackElement);\n }\n }\n\n /**\n * Get an {@link HtmlTrackElement} from the `HtmlTrackElementList` given an\n * {@link TextTrack}.\n *\n * @param {TextTrack} track\n * The track associated with a track element.\n *\n * @return {HtmlTrackElement|undefined}\n * The track element that was found or undefined.\n *\n * @private\n */\n getTrackElementByTrack_(track) {\n let trackElement_;\n for (let i = 0, length = this.trackElements_.length; i < length; i++) {\n if (track === this.trackElements_[i].track) {\n trackElement_ = this.trackElements_[i];\n break;\n }\n }\n return trackElement_;\n }\n\n /**\n * Remove a {@link HtmlTrackElement} from the `HtmlTrackElementList`\n *\n * @param {HtmlTrackElement} trackElement\n * The track element to remove from the list.\n *\n * @private\n */\n removeTrackElement_(trackElement) {\n for (let i = 0, length = this.trackElements_.length; i < length; i++) {\n if (trackElement === this.trackElements_[i]) {\n if (this.trackElements_[i].track && 
typeof this.trackElements_[i].track.off === 'function') {\n this.trackElements_[i].track.off();\n }\n if (typeof this.trackElements_[i].off === 'function') {\n this.trackElements_[i].off();\n }\n this.trackElements_.splice(i, 1);\n break;\n }\n }\n }\n}\n\n/**\n * @file text-track-cue-list.js\n */\n\n/**\n * @typedef {Object} TextTrackCueList~TextTrackCue\n *\n * @property {string} id\n * The unique id for this text track cue\n *\n * @property {number} startTime\n * The start time for this text track cue\n *\n * @property {number} endTime\n * The end time for this text track cue\n *\n * @property {boolean} pauseOnExit\n * Pause when the end time is reached if true.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttrackcue}\n */\n\n/**\n * A List of TextTrackCues.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttrackcuelist}\n */\nclass TextTrackCueList {\n /**\n * Create an instance of this class..\n *\n * @param {Array} cues\n * A list of cues to be initialized with\n */\n constructor(cues) {\n TextTrackCueList.prototype.setCues_.call(this, cues);\n\n /**\n * @memberof TextTrackCueList\n * @member {number} length\n * The current number of `TextTrackCue`s in the TextTrackCueList.\n * @instance\n */\n Object.defineProperty(this, 'length', {\n get() {\n return this.length_;\n }\n });\n }\n\n /**\n * A setter for cues in this list. Creates getters\n * an an index for the cues.\n *\n * @param {Array} cues\n * An array of cues to set\n *\n * @private\n */\n setCues_(cues) {\n const oldLength = this.length || 0;\n let i = 0;\n const l = cues.length;\n this.cues_ = cues;\n this.length_ = cues.length;\n const defineProp = function (index) {\n if (!('' + index in this)) {\n Object.defineProperty(this, '' + index, {\n get() {\n return this.cues_[index];\n }\n });\n }\n };\n if (oldLength < l) {\n i = oldLength;\n for (; i < l; i++) {\n defineProp.call(this, i);\n }\n }\n }\n\n /**\n * Get a `TextTrackCue` that is currently in the `TextTrackCueList` by id.\n *\n * @param {string} id\n * The id of the cue that should be searched for.\n *\n * @return {TextTrackCueList~TextTrackCue|null}\n * A single cue or null if none was found.\n */\n getCueById(id) {\n let result = null;\n for (let i = 0, l = this.length; i < l; i++) {\n const cue = this[i];\n if (cue.id === id) {\n result = cue;\n break;\n }\n }\n return result;\n }\n}\n\n/**\n * @file track-kinds.js\n */\n\n/**\n * All possible `VideoTrackKind`s\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-videotrack-kind\n * @typedef VideoTrack~Kind\n * @enum\n */\nconst VideoTrackKind = {\n alternative: 'alternative',\n captions: 'captions',\n main: 'main',\n sign: 'sign',\n subtitles: 'subtitles',\n commentary: 'commentary'\n};\n\n/**\n * All possible `AudioTrackKind`s\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-audiotrack-kind\n * @typedef AudioTrack~Kind\n * @enum\n */\nconst AudioTrackKind = {\n 'alternative': 'alternative',\n 'descriptions': 'descriptions',\n 'main': 'main',\n 'main-desc': 'main-desc',\n 'translation': 'translation',\n 'commentary': 'commentary'\n};\n\n/**\n * All possible `TextTrackKind`s\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-texttrack-kind\n * @typedef TextTrack~Kind\n * @enum\n */\nconst TextTrackKind = {\n subtitles: 'subtitles',\n captions: 'captions',\n descriptions: 'descriptions',\n chapters: 'chapters',\n metadata: 'metadata'\n};\n\n/**\n * 
All possible `TextTrackMode`s\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#texttrackmode\n * @typedef TextTrack~Mode\n * @enum\n */\nconst TextTrackMode = {\n disabled: 'disabled',\n hidden: 'hidden',\n showing: 'showing'\n};\n\n/**\n * @file track.js\n */\n\n/**\n * A Track class that contains all of the common functionality for {@link AudioTrack},\n * {@link VideoTrack}, and {@link TextTrack}.\n *\n * > Note: This class should not be used directly\n *\n * @see {@link https://html.spec.whatwg.org/multipage/embedded-content.html}\n * @extends EventTarget\n * @abstract\n */\nclass Track extends EventTarget$2 {\n /**\n * Create an instance of this class.\n *\n * @param {Object} [options={}]\n * Object of option names and values\n *\n * @param {string} [options.kind='']\n * A valid kind for the track type you are creating.\n *\n * @param {string} [options.id='vjs_track_' + Guid.newGUID()]\n * A unique id for this AudioTrack.\n *\n * @param {string} [options.label='']\n * The menu label for this track.\n *\n * @param {string} [options.language='']\n * A valid two character language code.\n *\n * @abstract\n */\n constructor(options = {}) {\n super();\n const trackProps = {\n id: options.id || 'vjs_track_' + newGUID(),\n kind: options.kind || '',\n language: options.language || ''\n };\n let label = options.label || '';\n\n /**\n * @memberof Track\n * @member {string} id\n * The id of this track. Cannot be changed after creation.\n * @instance\n *\n * @readonly\n */\n\n /**\n * @memberof Track\n * @member {string} kind\n * The kind of track that this is. Cannot be changed after creation.\n * @instance\n *\n * @readonly\n */\n\n /**\n * @memberof Track\n * @member {string} language\n * The two letter language code for this track. Cannot be changed after\n * creation.\n * @instance\n *\n * @readonly\n */\n\n for (const key in trackProps) {\n Object.defineProperty(this, key, {\n get() {\n return trackProps[key];\n },\n set() {}\n });\n }\n\n /**\n * @memberof Track\n * @member {string} label\n * The label of this track. 
Cannot be changed after creation.\n * @instance\n *\n * @fires Track#labelchange\n */\n Object.defineProperty(this, 'label', {\n get() {\n return label;\n },\n set(newLabel) {\n if (newLabel !== label) {\n label = newLabel;\n\n /**\n * An event that fires when label changes on this track.\n *\n * > Note: This is not part of the spec!\n *\n * @event Track#labelchange\n * @type {Event}\n */\n this.trigger('labelchange');\n }\n }\n });\n }\n}\n\n/**\n * @file url.js\n * @module url\n */\n\n/**\n * @typedef {Object} url:URLObject\n *\n * @property {string} protocol\n * The protocol of the url that was parsed.\n *\n * @property {string} hostname\n * The hostname of the url that was parsed.\n *\n * @property {string} port\n * The port of the url that was parsed.\n *\n * @property {string} pathname\n * The pathname of the url that was parsed.\n *\n * @property {string} search\n * The search query of the url that was parsed.\n *\n * @property {string} hash\n * The hash of the url that was parsed.\n *\n * @property {string} host\n * The host of the url that was parsed.\n */\n\n/**\n * Resolve and parse the elements of a URL.\n *\n * @function\n * @param {String} url\n * The url to parse\n *\n * @return {url:URLObject}\n * An object of url details\n */\nconst parseUrl = function (url) {\n // This entire method can be replace with URL once we are able to drop IE11\n\n const props = ['protocol', 'hostname', 'port', 'pathname', 'search', 'hash', 'host'];\n\n // add the url to an anchor and let the browser parse the URL\n const a = document.createElement('a');\n a.href = url;\n\n // Copy the specific URL properties to a new object\n // This is also needed for IE because the anchor loses its\n // properties when it's removed from the dom\n const details = {};\n for (let i = 0; i < props.length; i++) {\n details[props[i]] = a[props[i]];\n }\n\n // IE adds the port to the host property unlike everyone else. If\n // a port identifier is added for standard ports, strip it.\n if (details.protocol === 'http:') {\n details.host = details.host.replace(/:80$/, '');\n }\n if (details.protocol === 'https:') {\n details.host = details.host.replace(/:443$/, '');\n }\n if (!details.protocol) {\n details.protocol = window$1.location.protocol;\n }\n\n /* istanbul ignore if */\n if (!details.host) {\n details.host = window$1.location.host;\n }\n return details;\n};\n\n/**\n * Get absolute version of relative URL.\n *\n * @function\n * @param {string} url\n * URL to make absolute\n *\n * @return {string}\n * Absolute URL\n *\n * @see http://stackoverflow.com/questions/470832/getting-an-absolute-url-from-a-relative-one-ie6-issue\n */\nconst getAbsoluteURL = function (url) {\n // Check if absolute URL\n if (!url.match(/^https?:\\/\\//)) {\n // Add the url to an anchor and let the browser parse it to convert to an absolute url\n const a = document.createElement('a');\n a.href = url;\n url = a.href;\n }\n return url;\n};\n\n/**\n * Returns the extension of the passed file name. 
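(For example, passing '/path/to/file.mp4' would yield 'mp4'.)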
It will return an empty string\n * if passed an invalid path.\n *\n * @function\n * @param {string} path\n * The fileName path like '/path/to/file.mp4'\n *\n * @return {string}\n * The extension in lower case or an empty string if no\n * extension could be found.\n */\nconst getFileExtension = function (path) {\n if (typeof path === 'string') {\n const splitPathRe = /^(\\/?)([\\s\\S]*?)((?:\\.{1,2}|[^\\/]+?)(\\.([^\\.\\/\\?]+)))(?:[\\/]*|[\\?].*)$/;\n const pathParts = splitPathRe.exec(path);\n if (pathParts) {\n return pathParts.pop().toLowerCase();\n }\n }\n return '';\n};\n\n/**\n * Returns whether the url passed is a cross domain request or not.\n *\n * @function\n * @param {string} url\n * The url to check.\n *\n * @param {Object} [winLoc]\n * the domain to check the url against, defaults to window.location\n *\n * @param {string} [winLoc.protocol]\n * The window location protocol defaults to window.location.protocol\n *\n * @param {string} [winLoc.host]\n * The window location host defaults to window.location.host\n *\n * @return {boolean}\n * Whether it is a cross domain request or not.\n */\nconst isCrossOrigin = function (url, winLoc = window$1.location) {\n const urlInfo = parseUrl(url);\n\n // IE8 protocol relative urls will return ':' for protocol\n const srcProtocol = urlInfo.protocol === ':' ? winLoc.protocol : urlInfo.protocol;\n\n // Check if url is for another domain/origin\n // IE8 doesn't know location.origin, so we won't rely on it here\n const crossOrigin = srcProtocol + urlInfo.host !== winLoc.protocol + winLoc.host;\n return crossOrigin;\n};\n\nvar Url = /*#__PURE__*/Object.freeze({\n __proto__: null,\n parseUrl: parseUrl,\n getAbsoluteURL: getAbsoluteURL,\n getFileExtension: getFileExtension,\n isCrossOrigin: isCrossOrigin\n});\n\n/**\n * @file text-track.js\n */\n\n/**\n * Takes a webvtt file contents and parses it into cues\n *\n * @param {string} srcContent\n * webVTT file contents\n *\n * @param {TextTrack} track\n * TextTrack to add cues to. Cues come from the srcContent.\n *\n * @private\n */\nconst parseCues = function (srcContent, track) {\n const parser = new window$1.WebVTT.Parser(window$1, window$1.vttjs, window$1.WebVTT.StringDecoder());\n const errors = [];\n parser.oncue = function (cue) {\n track.addCue(cue);\n };\n parser.onparsingerror = function (error) {\n errors.push(error);\n };\n parser.onflush = function () {\n track.trigger({\n type: 'loadeddata',\n target: track\n });\n };\n parser.parse(srcContent);\n if (errors.length > 0) {\n if (window$1.console && window$1.console.groupCollapsed) {\n window$1.console.groupCollapsed(`Text Track parsing errors for ${track.src}`);\n }\n errors.forEach(error => log$1.error(error));\n if (window$1.console && window$1.console.groupEnd) {\n window$1.console.groupEnd();\n }\n }\n parser.flush();\n};\n\n/**\n * Load a `TextTrack` from a specified url.\n *\n * @param {string} src\n * Url to load track from.\n *\n * @param {TextTrack} track\n * Track to add cues to. 
Comes from the content at the end of `url`.\n *\n * @private\n */\nconst loadTrack = function (src, track) {\n const opts = {\n uri: src\n };\n const crossOrigin = isCrossOrigin(src);\n if (crossOrigin) {\n opts.cors = crossOrigin;\n }\n const withCredentials = track.tech_.crossOrigin() === 'use-credentials';\n if (withCredentials) {\n opts.withCredentials = withCredentials;\n }\n XHR(opts, bind_(this, function (err, response, responseBody) {\n if (err) {\n return log$1.error(err, response);\n }\n track.loaded_ = true;\n\n // Make sure that vttjs has loaded, otherwise, wait till it finished loading\n // NOTE: this is only used for the alt/video.novtt.js build\n if (typeof window$1.WebVTT !== 'function') {\n if (track.tech_) {\n // to prevent use before define eslint error, we define loadHandler\n // as a let here\n track.tech_.any(['vttjsloaded', 'vttjserror'], event => {\n if (event.type === 'vttjserror') {\n log$1.error(`vttjs failed to load, stopping trying to process ${track.src}`);\n return;\n }\n return parseCues(responseBody, track);\n });\n }\n } else {\n parseCues(responseBody, track);\n }\n }));\n};\n\n/**\n * A representation of a single `TextTrack`.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#texttrack}\n * @extends Track\n */\nclass TextTrack extends Track {\n /**\n * Create an instance of this class.\n *\n * @param {Object} options={}\n * Object of option names and values\n *\n * @param { import('../tech/tech').default } options.tech\n * A reference to the tech that owns this TextTrack.\n *\n * @param {TextTrack~Kind} [options.kind='subtitles']\n * A valid text track kind.\n *\n * @param {TextTrack~Mode} [options.mode='disabled']\n * A valid text track mode.\n *\n * @param {string} [options.id='vjs_track_' + Guid.newGUID()]\n * A unique id for this TextTrack.\n *\n * @param {string} [options.label='']\n * The menu label for this track.\n *\n * @param {string} [options.language='']\n * A valid two character language code.\n *\n * @param {string} [options.srclang='']\n * A valid two character language code. An alternative, but deprioritized\n * version of `options.language`\n *\n * @param {string} [options.src]\n * A url to TextTrack cues.\n *\n * @param {boolean} [options.default]\n * If this track should default to on or off.\n */\n constructor(options = {}) {\n if (!options.tech) {\n throw new Error('A tech was not provided.');\n }\n const settings = merge$1(options, {\n kind: TextTrackKind[options.kind] || 'subtitles',\n language: options.language || options.srclang || ''\n });\n let mode = TextTrackMode[settings.mode] || 'disabled';\n const default_ = settings.default;\n if (settings.kind === 'metadata' || settings.kind === 'chapters') {\n mode = 'hidden';\n }\n super(settings);\n this.tech_ = settings.tech;\n this.cues_ = [];\n this.activeCues_ = [];\n this.preload_ = this.tech_.preloadTextTracks !== false;\n const cues = new TextTrackCueList(this.cues_);\n const activeCues = new TextTrackCueList(this.activeCues_);\n let changed = false;\n this.timeupdateHandler = bind_(this, function (event = {}) {\n if (this.tech_.isDisposed()) {\n return;\n }\n if (!this.tech_.isReady_) {\n if (event.type !== 'timeupdate') {\n this.rvf_ = this.tech_.requestVideoFrameCallback(this.timeupdateHandler);\n }\n return;\n }\n\n // Accessing this.activeCues for the side-effects of updating itself\n // due to its nature as a getter function. 
Do not remove or cues will\n // stop updating!\n // Use the setter to prevent deletion from uglify (pure_getters rule)\n this.activeCues = this.activeCues;\n if (changed) {\n this.trigger('cuechange');\n changed = false;\n }\n if (event.type !== 'timeupdate') {\n this.rvf_ = this.tech_.requestVideoFrameCallback(this.timeupdateHandler);\n }\n });\n const disposeHandler = () => {\n this.stopTracking();\n };\n this.tech_.one('dispose', disposeHandler);\n if (mode !== 'disabled') {\n this.startTracking();\n }\n Object.defineProperties(this, {\n /**\n * @memberof TextTrack\n * @member {boolean} default\n * If this track was set to be on or off by default. Cannot be changed after\n * creation.\n * @instance\n *\n * @readonly\n */\n default: {\n get() {\n return default_;\n },\n set() {}\n },\n /**\n * @memberof TextTrack\n * @member {string} mode\n * Set the mode of this TextTrack to a valid {@link TextTrack~Mode}. Will\n * not be set if setting to an invalid mode.\n * @instance\n *\n * @fires TextTrack#modechange\n */\n mode: {\n get() {\n return mode;\n },\n set(newMode) {\n if (!TextTrackMode[newMode]) {\n return;\n }\n if (mode === newMode) {\n return;\n }\n mode = newMode;\n if (!this.preload_ && mode !== 'disabled' && this.cues.length === 0) {\n // On-demand load.\n loadTrack(this.src, this);\n }\n this.stopTracking();\n if (mode !== 'disabled') {\n this.startTracking();\n }\n /**\n * An event that fires when mode changes on this track. This allows\n * the TextTrackList that holds this track to act accordingly.\n *\n * > Note: This is not part of the spec!\n *\n * @event TextTrack#modechange\n * @type {Event}\n */\n this.trigger('modechange');\n }\n },\n /**\n * @memberof TextTrack\n * @member {TextTrackCueList} cues\n * The text track cue list for this TextTrack.\n * @instance\n */\n cues: {\n get() {\n if (!this.loaded_) {\n return null;\n }\n return cues;\n },\n set() {}\n },\n /**\n * @memberof TextTrack\n * @member {TextTrackCueList} activeCues\n * The list text track cues that are currently active for this TextTrack.\n * @instance\n */\n activeCues: {\n get() {\n if (!this.loaded_) {\n return null;\n }\n\n // nothing to do\n if (this.cues.length === 0) {\n return activeCues;\n }\n const ct = this.tech_.currentTime();\n const active = [];\n for (let i = 0, l = this.cues.length; i < l; i++) {\n const cue = this.cues[i];\n if (cue.startTime <= ct && cue.endTime >= ct) {\n active.push(cue);\n }\n }\n changed = false;\n if (active.length !== this.activeCues_.length) {\n changed = true;\n } else {\n for (let i = 0; i < active.length; i++) {\n if (this.activeCues_.indexOf(active[i]) === -1) {\n changed = true;\n }\n }\n }\n this.activeCues_ = active;\n activeCues.setCues_(this.activeCues_);\n return activeCues;\n },\n // /!\\ Keep this setter empty (see the timeupdate handler above)\n set() {}\n }\n });\n if (settings.src) {\n this.src = settings.src;\n if (!this.preload_) {\n // Tracks will load on-demand.\n // Act like we're loaded for other purposes.\n this.loaded_ = true;\n }\n if (this.preload_ || settings.kind !== 'subtitles' && settings.kind !== 'captions') {\n loadTrack(this.src, this);\n }\n } else {\n this.loaded_ = true;\n }\n }\n startTracking() {\n // More precise cues based on requestVideoFrameCallback with a requestAnimationFram fallback\n this.rvf_ = this.tech_.requestVideoFrameCallback(this.timeupdateHandler);\n // Also listen to timeupdate in case rVFC/rAF stops (window in background, audio in video el)\n this.tech_.on('timeupdate', this.timeupdateHandler);\n }\n 
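/*\n * Hedged usage sketch added by the editor (not part of the original video.js source).\n * It assumes a tech instance is reachable as player.tech_ and that a VTTCue\n * implementation (native or from vtt.js) is available.\n *\n * const track = new TextTrack({tech: player.tech_, kind: 'subtitles', label: 'English', language: 'en'});\n * track.addCue(new window.VTTCue(0, 5, 'Hello'));\n * track.addEventListener('cuechange', () => console.log(track.activeCues.length));\n * // Setting the mode to 'showing' calls startTracking(), which polls the active cues via\n * // requestVideoFrameCallback (with a timeupdate fallback) and fires 'cuechange' whenever\n * // the set of active cues changes; setting it back to 'disabled' calls stopTracking().\n * track.mode = 'showing';\n */\n 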
stopTracking() {\n if (this.rvf_) {\n this.tech_.cancelVideoFrameCallback(this.rvf_);\n this.rvf_ = undefined;\n }\n this.tech_.off('timeupdate', this.timeupdateHandler);\n }\n\n /**\n * Add a cue to the internal list of cues.\n *\n * @param {TextTrack~Cue} cue\n * The cue to add to our internal list\n */\n addCue(originalCue) {\n let cue = originalCue;\n\n // Testing if the cue is a VTTCue in a way that survives minification\n if (!('getCueAsHTML' in cue)) {\n cue = new window$1.vttjs.VTTCue(originalCue.startTime, originalCue.endTime, originalCue.text);\n for (const prop in originalCue) {\n if (!(prop in cue)) {\n cue[prop] = originalCue[prop];\n }\n }\n\n // make sure that `id` is copied over\n cue.id = originalCue.id;\n cue.originalCue_ = originalCue;\n }\n const tracks = this.tech_.textTracks();\n for (let i = 0; i < tracks.length; i++) {\n if (tracks[i] !== this) {\n tracks[i].removeCue(cue);\n }\n }\n this.cues_.push(cue);\n this.cues.setCues_(this.cues_);\n }\n\n /**\n * Remove a cue from our internal list\n *\n * @param {TextTrack~Cue} removeCue\n * The cue to remove from our internal list\n */\n removeCue(removeCue) {\n let i = this.cues_.length;\n while (i--) {\n const cue = this.cues_[i];\n if (cue === removeCue || cue.originalCue_ && cue.originalCue_ === removeCue) {\n this.cues_.splice(i, 1);\n this.cues.setCues_(this.cues_);\n break;\n }\n }\n }\n}\n\n/**\n * cuechange - One or more cues in the track have become active or stopped being active.\n * @protected\n */\nTextTrack.prototype.allowedEvents_ = {\n cuechange: 'cuechange'\n};\n\n/**\n * A representation of a single `AudioTrack`. If it is part of an {@link AudioTrackList}\n * only one `AudioTrack` in the list will be enabled at a time.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#audiotrack}\n * @extends Track\n */\nclass AudioTrack extends Track {\n /**\n * Create an instance of this class.\n *\n * @param {Object} [options={}]\n * Object of option names and values\n *\n * @param {AudioTrack~Kind} [options.kind='']\n * A valid audio track kind\n *\n * @param {string} [options.id='vjs_track_' + Guid.newGUID()]\n * A unique id for this AudioTrack.\n *\n * @param {string} [options.label='']\n * The menu label for this track.\n *\n * @param {string} [options.language='']\n * A valid two character language code.\n *\n * @param {boolean} [options.enabled]\n * If this track is the one that is currently playing. If this track is part of\n * an {@link AudioTrackList}, only one {@link AudioTrack} will be enabled.\n */\n constructor(options = {}) {\n const settings = merge$1(options, {\n kind: AudioTrackKind[options.kind] || ''\n });\n super(settings);\n let enabled = false;\n\n /**\n * @memberof AudioTrack\n * @member {boolean} enabled\n * If this `AudioTrack` is enabled or not. When setting this will\n * fire {@link AudioTrack#enabledchange} if the state of enabled is changed.\n * @instance\n *\n * @fires VideoTrack#selectedchange\n */\n Object.defineProperty(this, 'enabled', {\n get() {\n return enabled;\n },\n set(newEnabled) {\n // an invalid or unchanged value\n if (typeof newEnabled !== 'boolean' || newEnabled === enabled) {\n return;\n }\n enabled = newEnabled;\n\n /**\n * An event that fires when enabled changes on this track. This allows\n * the AudioTrackList that holds this track to act accordingly.\n *\n * > Note: This is not part of the spec! 
Native tracks will do\n * this internally without an event.\n *\n * @event AudioTrack#enabledchange\n * @type {Event}\n */\n this.trigger('enabledchange');\n }\n });\n\n // if the user sets this track to selected then\n // set selected to that true value otherwise\n // we keep it false\n if (settings.enabled) {\n this.enabled = settings.enabled;\n }\n this.loaded_ = true;\n }\n}\n\n/**\n * A representation of a single `VideoTrack`.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#videotrack}\n * @extends Track\n */\nclass VideoTrack extends Track {\n /**\n * Create an instance of this class.\n *\n * @param {Object} [options={}]\n * Object of option names and values\n *\n * @param {string} [options.kind='']\n * A valid {@link VideoTrack~Kind}\n *\n * @param {string} [options.id='vjs_track_' + Guid.newGUID()]\n * A unique id for this AudioTrack.\n *\n * @param {string} [options.label='']\n * The menu label for this track.\n *\n * @param {string} [options.language='']\n * A valid two character language code.\n *\n * @param {boolean} [options.selected]\n * If this track is the one that is currently playing.\n */\n constructor(options = {}) {\n const settings = merge$1(options, {\n kind: VideoTrackKind[options.kind] || ''\n });\n super(settings);\n let selected = false;\n\n /**\n * @memberof VideoTrack\n * @member {boolean} selected\n * If this `VideoTrack` is selected or not. When setting this will\n * fire {@link VideoTrack#selectedchange} if the state of selected changed.\n * @instance\n *\n * @fires VideoTrack#selectedchange\n */\n Object.defineProperty(this, 'selected', {\n get() {\n return selected;\n },\n set(newSelected) {\n // an invalid or unchanged value\n if (typeof newSelected !== 'boolean' || newSelected === selected) {\n return;\n }\n selected = newSelected;\n\n /**\n * An event that fires when selected changes on this track. This allows\n * the VideoTrackList that holds this track to act accordingly.\n *\n * > Note: This is not part of the spec! Native tracks will do\n * this internally without an event.\n *\n * @event VideoTrack#selectedchange\n * @type {Event}\n */\n this.trigger('selectedchange');\n }\n });\n\n // if the user sets this track to selected then\n // set selected to that true value otherwise\n // we keep it false\n if (settings.selected) {\n this.selected = settings.selected;\n }\n }\n}\n\n/**\n * @file html-track-element.js\n */\n\n/**\n * A single track represented in the DOM.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#htmltrackelement}\n * @extends EventTarget\n */\nclass HTMLTrackElement extends EventTarget$2 {\n /**\n * Create an instance of this class.\n *\n * @param {Object} options={}\n * Object of option names and values\n *\n * @param { import('../tech/tech').default } options.tech\n * A reference to the tech that owns this HTMLTrackElement.\n *\n * @param {TextTrack~Kind} [options.kind='subtitles']\n * A valid text track kind.\n *\n * @param {TextTrack~Mode} [options.mode='disabled']\n * A valid text track mode.\n *\n * @param {string} [options.id='vjs_track_' + Guid.newGUID()]\n * A unique id for this TextTrack.\n *\n * @param {string} [options.label='']\n * The menu label for this track.\n *\n * @param {string} [options.language='']\n * A valid two character language code.\n *\n * @param {string} [options.srclang='']\n * A valid two character language code. 
An alternative, but deprioritized\n * version of `options.language`\n *\n * @param {string} [options.src]\n * A url to TextTrack cues.\n *\n * @param {boolean} [options.default]\n * If this track should default to on or off.\n */\n constructor(options = {}) {\n super();\n let readyState;\n const track = new TextTrack(options);\n this.kind = track.kind;\n this.src = track.src;\n this.srclang = track.language;\n this.label = track.label;\n this.default = track.default;\n Object.defineProperties(this, {\n /**\n * @memberof HTMLTrackElement\n * @member {HTMLTrackElement~ReadyState} readyState\n * The current ready state of the track element.\n * @instance\n */\n readyState: {\n get() {\n return readyState;\n }\n },\n /**\n * @memberof HTMLTrackElement\n * @member {TextTrack} track\n * The underlying TextTrack object.\n * @instance\n *\n */\n track: {\n get() {\n return track;\n }\n }\n });\n readyState = HTMLTrackElement.NONE;\n\n /**\n * @listens TextTrack#loadeddata\n * @fires HTMLTrackElement#load\n */\n track.addEventListener('loadeddata', () => {\n readyState = HTMLTrackElement.LOADED;\n this.trigger({\n type: 'load',\n target: this\n });\n });\n }\n}\n\n/**\n * @protected\n */\nHTMLTrackElement.prototype.allowedEvents_ = {\n load: 'load'\n};\n\n/**\n * The text track not loaded state.\n *\n * @type {number}\n * @static\n */\nHTMLTrackElement.NONE = 0;\n\n/**\n * The text track loading state.\n *\n * @type {number}\n * @static\n */\nHTMLTrackElement.LOADING = 1;\n\n/**\n * The text track loaded state.\n *\n * @type {number}\n * @static\n */\nHTMLTrackElement.LOADED = 2;\n\n/**\n * The text track failed to load state.\n *\n * @type {number}\n * @static\n */\nHTMLTrackElement.ERROR = 3;\n\n/*\n * This file contains all track properties that are used in\n * player.js, tech.js, html5.js and possibly other techs in the future.\n */\n\nconst NORMAL = {\n audio: {\n ListClass: AudioTrackList,\n TrackClass: AudioTrack,\n capitalName: 'Audio'\n },\n video: {\n ListClass: VideoTrackList,\n TrackClass: VideoTrack,\n capitalName: 'Video'\n },\n text: {\n ListClass: TextTrackList,\n TrackClass: TextTrack,\n capitalName: 'Text'\n }\n};\nObject.keys(NORMAL).forEach(function (type) {\n NORMAL[type].getterName = `${type}Tracks`;\n NORMAL[type].privateName = `${type}Tracks_`;\n});\nconst REMOTE = {\n remoteText: {\n ListClass: TextTrackList,\n TrackClass: TextTrack,\n capitalName: 'RemoteText',\n getterName: 'remoteTextTracks',\n privateName: 'remoteTextTracks_'\n },\n remoteTextEl: {\n ListClass: HtmlTrackElementList,\n TrackClass: HTMLTrackElement,\n capitalName: 'RemoteTextTrackEls',\n getterName: 'remoteTextTrackEls',\n privateName: 'remoteTextTrackEls_'\n }\n};\nconst ALL = Object.assign({}, NORMAL, REMOTE);\nREMOTE.names = Object.keys(REMOTE);\nNORMAL.names = Object.keys(NORMAL);\nALL.names = [].concat(REMOTE.names).concat(NORMAL.names);\n\n/**\n * @file tech.js\n */\n\n/**\n * An Object containing a structure like: `{src: 'url', type: 'mimetype'}` or string\n * that just contains the src url alone.\n * * `var SourceObject = {src: 'http://ex.com/video.mp4', type: 'video/mp4'};`\n * `var SourceString = 'http://example.com/some-video.mp4';`\n *\n * @typedef {Object|string} SourceObject\n *\n * @property {string} src\n * The url to the source\n *\n * @property {string} type\n * The mime type of the source\n */\n\n/**\n * A function used by {@link Tech} to create a new {@link TextTrack}.\n *\n * @private\n *\n * @param {Tech} self\n * An instance of the Tech class.\n *\n * @param {string} kind\n * 
`TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata)\n *\n * @param {string} [label]\n * Label to identify the text track\n *\n * @param {string} [language]\n * Two letter language abbreviation\n *\n * @param {Object} [options={}]\n * An object with additional text track options\n *\n * @return {TextTrack}\n * The text track that was created.\n */\nfunction createTrackHelper(self, kind, label, language, options = {}) {\n const tracks = self.textTracks();\n options.kind = kind;\n if (label) {\n options.label = label;\n }\n if (language) {\n options.language = language;\n }\n options.tech = self;\n const track = new ALL.text.TrackClass(options);\n tracks.addTrack(track);\n return track;\n}\n\n/**\n * This is the base class for media playback technology controllers, such as\n * {@link HTML5}\n *\n * @extends Component\n */\nclass Tech extends Component$1 {\n /**\n * Create an instance of this Tech.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * Callback function to call when the `HTML5` Tech is ready.\n */\n constructor(options = {}, ready = function () {}) {\n // we don't want the tech to report user activity automatically.\n // This is done manually in addControlsListeners\n options.reportTouchActivity = false;\n super(null, options, ready);\n this.onDurationChange_ = e => this.onDurationChange(e);\n this.trackProgress_ = e => this.trackProgress(e);\n this.trackCurrentTime_ = e => this.trackCurrentTime(e);\n this.stopTrackingCurrentTime_ = e => this.stopTrackingCurrentTime(e);\n this.disposeSourceHandler_ = e => this.disposeSourceHandler(e);\n this.queuedHanders_ = new Set();\n\n // keep track of whether the current source has played at all to\n // implement a very limited played()\n this.hasStarted_ = false;\n this.on('playing', function () {\n this.hasStarted_ = true;\n });\n this.on('loadstart', function () {\n this.hasStarted_ = false;\n });\n ALL.names.forEach(name => {\n const props = ALL[name];\n if (options && options[props.getterName]) {\n this[props.privateName] = options[props.getterName];\n }\n });\n\n // Manually track progress in cases where the browser/tech doesn't report it.\n if (!this.featuresProgressEvents) {\n this.manualProgressOn();\n }\n\n // Manually track timeupdates in cases where the browser/tech doesn't report it.\n if (!this.featuresTimeupdateEvents) {\n this.manualTimeUpdatesOn();\n }\n ['Text', 'Audio', 'Video'].forEach(track => {\n if (options[`native${track}Tracks`] === false) {\n this[`featuresNative${track}Tracks`] = false;\n }\n });\n if (options.nativeCaptions === false || options.nativeTextTracks === false) {\n this.featuresNativeTextTracks = false;\n } else if (options.nativeCaptions === true || options.nativeTextTracks === true) {\n this.featuresNativeTextTracks = true;\n }\n if (!this.featuresNativeTextTracks) {\n this.emulateTextTracks();\n }\n this.preloadTextTracks = options.preloadTextTracks !== false;\n this.autoRemoteTextTracks_ = new ALL.text.ListClass();\n this.initTrackListeners();\n\n // Turn on component tap events only if not using native controls\n if (!options.nativeControlsForTouch) {\n this.emitTapEvents();\n }\n if (this.constructor) {\n this.name_ = this.constructor.name || 'Unknown Tech';\n }\n }\n\n /**\n * A special function to trigger source set in a way that will allow player\n * to re-trigger if the player or tech are not ready yet.\n *\n * @fires Tech#sourceset\n * @param {string} src The source string at the time of the source 
changing.\n */\n triggerSourceset(src) {\n if (!this.isReady_) {\n // on initial ready we have to trigger source set\n // 1ms after ready so that player can watch for it.\n this.one('ready', () => this.setTimeout(() => this.triggerSourceset(src), 1));\n }\n\n /**\n * Fired when the source is set on the tech causing the media element\n * to reload.\n *\n * @see {@link Player#event:sourceset}\n * @event Tech#sourceset\n * @type {Event}\n */\n this.trigger({\n src,\n type: 'sourceset'\n });\n }\n\n /* Fallbacks for unsupported event types\n ================================================================================ */\n\n /**\n * Polyfill the `progress` event for browsers that don't support it natively.\n *\n * @see {@link Tech#trackProgress}\n */\n manualProgressOn() {\n this.on('durationchange', this.onDurationChange_);\n this.manualProgress = true;\n\n // Trigger progress watching when a source begins loading\n this.one('ready', this.trackProgress_);\n }\n\n /**\n * Turn off the polyfill for `progress` events that was created in\n * {@link Tech#manualProgressOn}\n */\n manualProgressOff() {\n this.manualProgress = false;\n this.stopTrackingProgress();\n this.off('durationchange', this.onDurationChange_);\n }\n\n /**\n * This is used to trigger a `progress` event when the buffered percent changes. It\n * sets an interval function that will be called every 500 milliseconds to check if the\n * buffer end percent has changed.\n *\n * > This function is called by {@link Tech#manualProgressOn}\n *\n * @param {Event} event\n * The `ready` event that caused this to run.\n *\n * @listens Tech#ready\n * @fires Tech#progress\n */\n trackProgress(event) {\n this.stopTrackingProgress();\n this.progressInterval = this.setInterval(bind_(this, function () {\n // Don't trigger unless buffered amount is greater than last time\n\n const numBufferedPercent = this.bufferedPercent();\n if (this.bufferedPercent_ !== numBufferedPercent) {\n /**\n * See {@link Player#progress}\n *\n * @event Tech#progress\n * @type {Event}\n */\n this.trigger('progress');\n }\n this.bufferedPercent_ = numBufferedPercent;\n if (numBufferedPercent === 1) {\n this.stopTrackingProgress();\n }\n }), 500);\n }\n\n /**\n * Update our internal duration on a `durationchange` event by calling\n * {@link Tech#duration}.\n *\n * @param {Event} event\n * The `durationchange` event that caused this to run.\n *\n * @listens Tech#durationchange\n */\n onDurationChange(event) {\n this.duration_ = this.duration();\n }\n\n /**\n * Get and create a `TimeRange` object for buffering.\n *\n * @return { import('../utils/time').TimeRange }\n * The time range object that was created.\n */\n buffered() {\n return createTimeRanges$1(0, 0);\n }\n\n /**\n * Get the percentage of the current video that is currently buffered.\n *\n * @return {number}\n * A number from 0 to 1 that represents the decimal percentage of the\n * video that is buffered.\n *\n */\n bufferedPercent() {\n return bufferedPercent(this.buffered(), this.duration_);\n }\n\n /**\n * Turn off the polyfill for `progress` events that was created in\n * {@link Tech#manualProgressOn}\n * Stop manually tracking progress events by clearing the interval that was set in\n * {@link Tech#trackProgress}.\n */\n stopTrackingProgress() {\n this.clearInterval(this.progressInterval);\n }\n\n /**\n * Polyfill the `timeupdate` event for browsers that don't support it.\n *\n * @see {@link Tech#trackCurrentTime}\n */\n manualTimeUpdatesOn() {\n this.manualTimeUpdates = true;\n this.on('play', 
this.trackCurrentTime_);\n this.on('pause', this.stopTrackingCurrentTime_);\n }\n\n /**\n * Turn off the polyfill for `timeupdate` events that was created in\n * {@link Tech#manualTimeUpdatesOn}\n */\n manualTimeUpdatesOff() {\n this.manualTimeUpdates = false;\n this.stopTrackingCurrentTime();\n this.off('play', this.trackCurrentTime_);\n this.off('pause', this.stopTrackingCurrentTime_);\n }\n\n /**\n * Sets up an interval function to track current time and trigger `timeupdate` every\n * 250 milliseconds.\n *\n * @listens Tech#play\n * @triggers Tech#timeupdate\n */\n trackCurrentTime() {\n if (this.currentTimeInterval) {\n this.stopTrackingCurrentTime();\n }\n this.currentTimeInterval = this.setInterval(function () {\n /**\n * Triggered at an interval of 250ms to indicated that time is passing in the video.\n *\n * @event Tech#timeupdate\n * @type {Event}\n */\n this.trigger({\n type: 'timeupdate',\n target: this,\n manuallyTriggered: true\n });\n\n // 42 = 24 fps // 250 is what Webkit uses // FF uses 15\n }, 250);\n }\n\n /**\n * Stop the interval function created in {@link Tech#trackCurrentTime} so that the\n * `timeupdate` event is no longer triggered.\n *\n * @listens {Tech#pause}\n */\n stopTrackingCurrentTime() {\n this.clearInterval(this.currentTimeInterval);\n\n // #1002 - if the video ends right before the next timeupdate would happen,\n // the progress bar won't make it all the way to the end\n this.trigger({\n type: 'timeupdate',\n target: this,\n manuallyTriggered: true\n });\n }\n\n /**\n * Turn off all event polyfills, clear the `Tech`s {@link AudioTrackList},\n * {@link VideoTrackList}, and {@link TextTrackList}, and dispose of this Tech.\n *\n * @fires Component#dispose\n */\n dispose() {\n // clear out all tracks because we can't reuse them between techs\n this.clearTracks(NORMAL.names);\n\n // Turn off any manual progress or timeupdate tracking\n if (this.manualProgress) {\n this.manualProgressOff();\n }\n if (this.manualTimeUpdates) {\n this.manualTimeUpdatesOff();\n }\n super.dispose();\n }\n\n /**\n * Clear out a single `TrackList` or an array of `TrackLists` given their names.\n *\n * > Note: Techs without source handlers should call this between sources for `video`\n * & `audio` tracks. 
You don't want to use them between tracks!\n *\n * @param {string[]|string} types\n * TrackList names to clear, valid names are `video`, `audio`, and\n * `text`.\n */\n clearTracks(types) {\n types = [].concat(types);\n // clear out all tracks because we can't reuse them between techs\n types.forEach(type => {\n const list = this[`${type}Tracks`]() || [];\n let i = list.length;\n while (i--) {\n const track = list[i];\n if (type === 'text') {\n this.removeRemoteTextTrack(track);\n }\n list.removeTrack(track);\n }\n });\n }\n\n /**\n * Remove any TextTracks added via addRemoteTextTrack that are\n * flagged for automatic garbage collection\n */\n cleanupAutoTextTracks() {\n const list = this.autoRemoteTextTracks_ || [];\n let i = list.length;\n while (i--) {\n const track = list[i];\n this.removeRemoteTextTrack(track);\n }\n }\n\n /**\n * Reset the tech, which will removes all sources and reset the internal readyState.\n *\n * @abstract\n */\n reset() {}\n\n /**\n * Get the value of `crossOrigin` from the tech.\n *\n * @abstract\n *\n * @see {Html5#crossOrigin}\n */\n crossOrigin() {}\n\n /**\n * Set the value of `crossOrigin` on the tech.\n *\n * @abstract\n *\n * @param {string} crossOrigin the crossOrigin value\n * @see {Html5#setCrossOrigin}\n */\n setCrossOrigin() {}\n\n /**\n * Get or set an error on the Tech.\n *\n * @param {MediaError} [err]\n * Error to set on the Tech\n *\n * @return {MediaError|null}\n * The current error object on the tech, or null if there isn't one.\n */\n error(err) {\n if (err !== undefined) {\n this.error_ = new MediaError(err);\n this.trigger('error');\n }\n return this.error_;\n }\n\n /**\n * Returns the `TimeRange`s that have been played through for the current source.\n *\n * > NOTE: This implementation is incomplete. 
It does not track the played `TimeRange`.\n * It only checks whether the source has played at all or not.\n *\n * @return { import('../utils/time').TimeRange }\n * - A single time range if this video has played\n * - An empty set of ranges if not.\n */\n played() {\n if (this.hasStarted_) {\n return createTimeRanges$1(0, 0);\n }\n return createTimeRanges$1();\n }\n\n /**\n * Start playback\n *\n * @abstract\n *\n * @see {Html5#play}\n */\n play() {}\n\n /**\n * Set whether we are scrubbing or not\n *\n * @abstract\n * @param {boolean} _isScrubbing\n * - true for we are currently scrubbing\n * - false for we are no longer scrubbing\n *\n * @see {Html5#setScrubbing}\n */\n setScrubbing(_isScrubbing) {}\n\n /**\n * Get whether we are scrubbing or not\n *\n * @abstract\n *\n * @see {Html5#scrubbing}\n */\n scrubbing() {}\n\n /**\n * Causes a manual time update to occur if {@link Tech#manualTimeUpdatesOn} was\n * previously called.\n *\n * @param {number} _seconds\n * Set the current time of the media to this.\n * @fires Tech#timeupdate\n */\n setCurrentTime(_seconds) {\n // improve the accuracy of manual timeupdates\n if (this.manualTimeUpdates) {\n /**\n * A manual `timeupdate` event.\n *\n * @event Tech#timeupdate\n * @type {Event}\n */\n this.trigger({\n type: 'timeupdate',\n target: this,\n manuallyTriggered: true\n });\n }\n }\n\n /**\n * Turn on listeners for {@link VideoTrackList}, {@link {AudioTrackList}, and\n * {@link TextTrackList} events.\n *\n * This adds {@link EventTarget~EventListeners} for `addtrack`, and `removetrack`.\n *\n * @fires Tech#audiotrackchange\n * @fires Tech#videotrackchange\n * @fires Tech#texttrackchange\n */\n initTrackListeners() {\n /**\n * Triggered when tracks are added or removed on the Tech {@link AudioTrackList}\n *\n * @event Tech#audiotrackchange\n * @type {Event}\n */\n\n /**\n * Triggered when tracks are added or removed on the Tech {@link VideoTrackList}\n *\n * @event Tech#videotrackchange\n * @type {Event}\n */\n\n /**\n * Triggered when tracks are added or removed on the Tech {@link TextTrackList}\n *\n * @event Tech#texttrackchange\n * @type {Event}\n */\n NORMAL.names.forEach(name => {\n const props = NORMAL[name];\n const trackListChanges = () => {\n this.trigger(`${name}trackchange`);\n };\n const tracks = this[props.getterName]();\n tracks.addEventListener('removetrack', trackListChanges);\n tracks.addEventListener('addtrack', trackListChanges);\n this.on('dispose', () => {\n tracks.removeEventListener('removetrack', trackListChanges);\n tracks.removeEventListener('addtrack', trackListChanges);\n });\n });\n }\n\n /**\n * Emulate TextTracks using vtt.js if necessary\n *\n * @fires Tech#vttjsloaded\n * @fires Tech#vttjserror\n */\n addWebVttScript_() {\n if (window$1.WebVTT) {\n return;\n }\n\n // Initially, Tech.el_ is a child of a dummy-div wait until the Component system\n // signals that the Tech is ready at which point Tech.el_ is part of the DOM\n // before inserting the WebVTT script\n if (document.body.contains(this.el())) {\n // load via require if available and vtt.js script location was not passed in\n // as an option. 
novtt builds will turn the above require call into an empty object\n // which will cause this if check to always fail.\n if (!this.options_['vtt.js'] && isPlain(vtt) && Object.keys(vtt).length > 0) {\n this.trigger('vttjsloaded');\n return;\n }\n\n // load vtt.js via the script location option or the cdn of no location was\n // passed in\n const script = document.createElement('script');\n script.src = this.options_['vtt.js'] || 'https://vjs.zencdn.net/vttjs/0.14.1/vtt.min.js';\n script.onload = () => {\n /**\n * Fired when vtt.js is loaded.\n *\n * @event Tech#vttjsloaded\n * @type {Event}\n */\n this.trigger('vttjsloaded');\n };\n script.onerror = () => {\n /**\n * Fired when vtt.js was not loaded due to an error\n *\n * @event Tech#vttjsloaded\n * @type {Event}\n */\n this.trigger('vttjserror');\n };\n this.on('dispose', () => {\n script.onload = null;\n script.onerror = null;\n });\n // but have not loaded yet and we set it to true before the inject so that\n // we don't overwrite the injected window.WebVTT if it loads right away\n window$1.WebVTT = true;\n this.el().parentNode.appendChild(script);\n } else {\n this.ready(this.addWebVttScript_);\n }\n }\n\n /**\n * Emulate texttracks\n *\n */\n emulateTextTracks() {\n const tracks = this.textTracks();\n const remoteTracks = this.remoteTextTracks();\n const handleAddTrack = e => tracks.addTrack(e.track);\n const handleRemoveTrack = e => tracks.removeTrack(e.track);\n remoteTracks.on('addtrack', handleAddTrack);\n remoteTracks.on('removetrack', handleRemoveTrack);\n this.addWebVttScript_();\n const updateDisplay = () => this.trigger('texttrackchange');\n const textTracksChanges = () => {\n updateDisplay();\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n track.removeEventListener('cuechange', updateDisplay);\n if (track.mode === 'showing') {\n track.addEventListener('cuechange', updateDisplay);\n }\n }\n };\n textTracksChanges();\n tracks.addEventListener('change', textTracksChanges);\n tracks.addEventListener('addtrack', textTracksChanges);\n tracks.addEventListener('removetrack', textTracksChanges);\n this.on('dispose', function () {\n remoteTracks.off('addtrack', handleAddTrack);\n remoteTracks.off('removetrack', handleRemoveTrack);\n tracks.removeEventListener('change', textTracksChanges);\n tracks.removeEventListener('addtrack', textTracksChanges);\n tracks.removeEventListener('removetrack', textTracksChanges);\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n track.removeEventListener('cuechange', updateDisplay);\n }\n });\n }\n\n /**\n * Create and returns a remote {@link TextTrack} object.\n *\n * @param {string} kind\n * `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata)\n *\n * @param {string} [label]\n * Label to identify the text track\n *\n * @param {string} [language]\n * Two letter language abbreviation\n *\n * @return {TextTrack}\n * The TextTrack that gets created.\n */\n addTextTrack(kind, label, language) {\n if (!kind) {\n throw new Error('TextTrack kind is required but was not provided');\n }\n return createTrackHelper(this, kind, label, language);\n }\n\n /**\n * Create an emulated TextTrack for use by addRemoteTextTrack\n *\n * This is intended to be overridden by classes that inherit from\n * Tech in order to create native or custom TextTracks.\n *\n * @param {Object} options\n * The object should contain the options to initialize the TextTrack with.\n *\n * @param {string} [options.kind]\n * `TextTrack` kind (subtitles, captions, 
descriptions, chapters, or metadata).\n *\n * @param {string} [options.label]\n * Label to identify the text track\n *\n * @param {string} [options.language]\n * Two letter language abbreviation.\n *\n * @return {HTMLTrackElement}\n * The track element that gets created.\n */\n createRemoteTextTrack(options) {\n const track = merge$1(options, {\n tech: this\n });\n return new REMOTE.remoteTextEl.TrackClass(track);\n }\n\n /**\n * Creates a remote text track object and returns an HTML track element.\n *\n * > Note: This can be an emulated {@link HTMLTrackElement} or a native one.\n *\n * @param {Object} options\n * See {@link Tech#createRemoteTextTrack} for more detailed properties.\n *\n * @param {boolean} [manualCleanup=false]\n * - When false: the TextTrack will be automatically removed from the video\n * element whenever the source changes\n * - When true: the TextTrack will have to be cleaned up manually\n *\n * @return {HTMLTrackElement}\n * An HTML track element.\n *\n */\n addRemoteTextTrack(options = {}, manualCleanup) {\n const htmlTrackElement = this.createRemoteTextTrack(options);\n if (typeof manualCleanup !== 'boolean') {\n manualCleanup = false;\n }\n\n // store HTMLTrackElement and TextTrack to remote list\n this.remoteTextTrackEls().addTrackElement_(htmlTrackElement);\n this.remoteTextTracks().addTrack(htmlTrackElement.track);\n if (manualCleanup === false) {\n // when the tech is ready, add the track to the auto-cleanup list so it is\n // removed automatically whenever the source changes\n this.ready(() => this.autoRemoteTextTracks_.addTrack(htmlTrackElement.track));\n }\n return htmlTrackElement;\n }\n\n /**\n * Remove a remote text track from the remote `TextTrackList`.\n *\n * @param {TextTrack} track\n * `TextTrack` to remove from the `TextTrackList`\n */\n removeRemoteTextTrack(track) {\n const trackElement = this.remoteTextTrackEls().getTrackElementByTrack_(track);\n\n // remove HTMLTrackElement and TextTrack from remote list\n this.remoteTextTrackEls().removeTrackElement_(trackElement);\n this.remoteTextTracks().removeTrack(track);\n this.autoRemoteTextTracks_.removeTrack(track);\n }\n\n /**\n * Gets available media playback quality metrics as specified by the W3C's Media\n * Playback Quality API.\n *\n * @see [Spec]{@link https://wicg.github.io/media-playback-quality}\n *\n * @return {Object}\n * An object with supported media playback quality metrics\n *\n * @abstract\n */\n getVideoPlaybackQuality() {\n return {};\n }\n\n /**\n * Attempt to create a floating video window always on top of other windows\n * so that users may continue consuming media while they interact with other\n * content sites, or applications on their device.\n *\n * @see [Spec]{@link https://wicg.github.io/picture-in-picture}\n *\n * @return {Promise|undefined}\n * A promise with a Picture-in-Picture window if the browser supports\n * Promises (or one was passed in as an option). 
It returns undefined\n * otherwise.\n *\n * @abstract\n */\n requestPictureInPicture() {\n return Promise.reject();\n }\n\n /**\n * A method to check for the value of the 'disablePictureInPicture' property.\n * Defaults to true, as it should be considered disabled if the tech does not support pip\n *\n * @abstract\n */\n disablePictureInPicture() {\n return true;\n }\n\n /**\n * A method to set or unset the 'disablePictureInPicture' property.\n *\n * @abstract\n */\n setDisablePictureInPicture() {}\n\n /**\n * A fallback implementation of requestVideoFrameCallback using requestAnimationFrame\n *\n * @param {function} cb\n * @return {number} request id\n */\n requestVideoFrameCallback(cb) {\n const id = newGUID();\n if (!this.isReady_ || this.paused()) {\n this.queuedHanders_.add(id);\n this.one('playing', () => {\n if (this.queuedHanders_.has(id)) {\n this.queuedHanders_.delete(id);\n cb();\n }\n });\n } else {\n this.requestNamedAnimationFrame(id, cb);\n }\n return id;\n }\n\n /**\n * A fallback implementation of cancelVideoFrameCallback\n *\n * @param {number} id id of callback to be cancelled\n */\n cancelVideoFrameCallback(id) {\n if (this.queuedHanders_.has(id)) {\n this.queuedHanders_.delete(id);\n } else {\n this.cancelNamedAnimationFrame(id);\n }\n }\n\n /**\n * A method to set a poster from a `Tech`.\n *\n * @abstract\n */\n setPoster() {}\n\n /**\n * A method to check for the presence of the 'playsinline' attribute.\n *\n * @abstract\n */\n playsinline() {}\n\n /**\n * A method to set or unset the 'playsinline' attribute.\n *\n * @abstract\n */\n setPlaysinline() {}\n\n /**\n * Attempt to force override of native audio tracks.\n *\n * @param {boolean} override - If set to true native audio will be overridden,\n * otherwise native audio will potentially be used.\n *\n * @abstract\n */\n overrideNativeAudioTracks(override) {}\n\n /**\n * Attempt to force override of native video tracks.\n *\n * @param {boolean} override - If set to true native video will be overridden,\n * otherwise native video will potentially be used.\n *\n * @abstract\n */\n overrideNativeVideoTracks(override) {}\n\n /**\n * Check if the tech can support the given mime-type.\n *\n * The base tech does not support any type, but source handlers might\n * overwrite this.\n *\n * @param {string} _type\n * The mimetype to check for support\n *\n * @return {string}\n * 'probably', 'maybe', or empty string\n *\n * @see [Spec]{@link https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/canPlayType}\n *\n * @abstract\n */\n canPlayType(_type) {\n return '';\n }\n\n /**\n * Check if the type is supported by this tech.\n *\n * The base tech does not support any type, but source handlers might\n * overwrite this.\n *\n * @param {string} _type\n * The media type to check\n * @return {string} Returns the native video element's response\n */\n static canPlayType(_type) {\n return '';\n }\n\n /**\n * Check if the tech can support the given source\n *\n * @param {Object} srcObj\n * The source object\n * @param {Object} options\n * The options passed to the tech\n * @return {string} 'probably', 'maybe', or '' (empty string)\n */\n static canPlaySource(srcObj, options) {\n return Tech.canPlayType(srcObj.type);\n }\n\n /*\n * Return whether the argument is a Tech or not.\n * Can be passed either a Class like `Html5` or a instance like `player.tech_`\n *\n * @param {Object} component\n * The item to check\n *\n * @return {boolean}\n * Whether it is a tech or not\n * - True if it is a tech\n * - False if it is not\n */\n 
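// A minimal usage sketch (the `MyTech` name below is hypothetical, not part of this file):\n // a subclass of Tech passes the isTech check, can be registered under a name, and can then\n // be looked up again with getTech.\n //\n // class MyTech extends Tech {}\n // Tech.isTech(MyTech); // true\n // Tech.registerTech('MyTech', MyTech); // stores both 'MyTech' and 'mytech' keys\n // Tech.getTech('MyTech') === MyTech; // true ('mytech' would also resolve)\n 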
static isTech(component) {\n return component.prototype instanceof Tech || component instanceof Tech || component === Tech;\n }\n\n /**\n * Registers a `Tech` into a shared list for videojs.\n *\n * @param {string} name\n * Name of the `Tech` to register.\n *\n * @param {Object} tech\n * The `Tech` class to register.\n */\n static registerTech(name, tech) {\n if (!Tech.techs_) {\n Tech.techs_ = {};\n }\n if (!Tech.isTech(tech)) {\n throw new Error(`Tech ${name} must be a Tech`);\n }\n if (!Tech.canPlayType) {\n throw new Error('Techs must have a static canPlayType method on them');\n }\n if (!Tech.canPlaySource) {\n throw new Error('Techs must have a static canPlaySource method on them');\n }\n name = toTitleCase$1(name);\n Tech.techs_[name] = tech;\n Tech.techs_[toLowerCase(name)] = tech;\n if (name !== 'Tech') {\n // camel case the techName for use in techOrder\n Tech.defaultTechOrder_.push(name);\n }\n return tech;\n }\n\n /**\n * Get a `Tech` from the shared list by name.\n *\n * @param {string} name\n * `camelCase` or `TitleCase` name of the Tech to get\n *\n * @return {Tech|undefined}\n * The `Tech` or undefined if there was no tech with the name requested.\n */\n static getTech(name) {\n if (!name) {\n return;\n }\n if (Tech.techs_ && Tech.techs_[name]) {\n return Tech.techs_[name];\n }\n name = toTitleCase$1(name);\n if (window$1 && window$1.videojs && window$1.videojs[name]) {\n log$1.warn(`The ${name} tech was added to the videojs object when it should be registered using videojs.registerTech(name, tech)`);\n return window$1.videojs[name];\n }\n }\n}\n\n/**\n * Get the {@link VideoTrackList}\n *\n * @returns {VideoTrackList}\n * @method Tech.prototype.videoTracks\n */\n\n/**\n * Get the {@link AudioTrackList}\n *\n * @returns {AudioTrackList}\n * @method Tech.prototype.audioTracks\n */\n\n/**\n * Get the {@link TextTrackList}\n *\n * @returns {TextTrackList}\n * @method Tech.prototype.textTracks\n */\n\n/**\n * Get the remote element {@link TextTrackList}\n *\n * @returns {TextTrackList}\n * @method Tech.prototype.remoteTextTracks\n */\n\n/**\n * Get the remote element {@link HtmlTrackElementList}\n *\n * @returns {HtmlTrackElementList}\n * @method Tech.prototype.remoteTextTrackEls\n */\n\nALL.names.forEach(function (name) {\n const props = ALL[name];\n Tech.prototype[props.getterName] = function () {\n this[props.privateName] = this[props.privateName] || new props.ListClass();\n return this[props.privateName];\n };\n});\n\n/**\n * List of associated text tracks\n *\n * @type {TextTrackList}\n * @private\n * @property Tech#textTracks_\n */\n\n/**\n * List of associated audio tracks.\n *\n * @type {AudioTrackList}\n * @private\n * @property Tech#audioTracks_\n */\n\n/**\n * List of associated video tracks.\n *\n * @type {VideoTrackList}\n * @private\n * @property Tech#videoTracks_\n */\n\n/**\n * Boolean indicating whether the `Tech` supports volume control.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresVolumeControl = true;\n\n/**\n * Boolean indicating whether the `Tech` supports muting volume.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresMuteControl = true;\n\n/**\n * Boolean indicating whether the `Tech` supports fullscreen resize control.\n * Resizing plugins using request fullscreen reloads the plugin\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresFullscreenResize = false;\n\n/**\n * Boolean indicating whether the `Tech` supports changing the speed at which the video\n * plays. 
Examples:\n * - Set player to play 2x (twice) as fast\n * - Set player to play 0.5x (half) as fast\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresPlaybackRate = false;\n\n/**\n * Boolean indicating whether the `Tech` supports the `progress` event.\n * This will be used to determine if {@link Tech#manualProgressOn} should be called.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresProgressEvents = false;\n\n/**\n * Boolean indicating whether the `Tech` supports the `sourceset` event.\n *\n * A tech should set this to `true` and then use {@link Tech#triggerSourceset}\n * to trigger a {@link Tech#event:sourceset} at the earliest time after getting\n * a new source.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresSourceset = false;\n\n/**\n * Boolean indicating whether the `Tech` supports the `timeupdate` event.\n * This will be used to determine if {@link Tech#manualTimeUpdates} should be called.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresTimeupdateEvents = false;\n\n/**\n * Boolean indicating whether the `Tech` supports the native `TextTrack`s.\n * This will help us integrate with native `TextTrack`s if the browser supports them.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresNativeTextTracks = false;\n\n/**\n * Boolean indicating whether the `Tech` supports `requestVideoFrameCallback`.\n *\n * @type {boolean}\n * @default\n */\nTech.prototype.featuresVideoFrameCallback = false;\n\n/**\n * A functional mixin for techs that want to use the Source Handler pattern.\n * Source handlers are scripts for handling specific formats.\n * The source handler pattern is used for adaptive formats (HLS, DASH) that\n * manually load video data and feed it into a Source Buffer (Media Source Extensions)\n * Example: `Tech.withSourceHandlers.call(MyTech);`\n *\n * @param {Tech} _Tech\n * The tech to add source handler functions to.\n *\n * @mixes Tech~SourceHandlerAdditions\n */\nTech.withSourceHandlers = function (_Tech) {\n /**\n * Register a source handler\n *\n * @param {Function} handler\n * The source handler class\n *\n * @param {number} [index]\n * Register it at the following index\n */\n _Tech.registerSourceHandler = function (handler, index) {\n let handlers = _Tech.sourceHandlers;\n if (!handlers) {\n handlers = _Tech.sourceHandlers = [];\n }\n if (index === undefined) {\n // add to the end of the list\n index = handlers.length;\n }\n handlers.splice(index, 0, handler);\n };\n\n /**\n * Check if the tech can support the given type. 
Also checks the\n * Techs sourceHandlers.\n *\n * @param {string} type\n * The mimetype to check.\n *\n * @return {string}\n * 'probably', 'maybe', or '' (empty string)\n */\n _Tech.canPlayType = function (type) {\n const handlers = _Tech.sourceHandlers || [];\n let can;\n for (let i = 0; i < handlers.length; i++) {\n can = handlers[i].canPlayType(type);\n if (can) {\n return can;\n }\n }\n return '';\n };\n\n /**\n * Returns the first source handler that supports the source.\n *\n * TODO: Answer question: should 'probably' be prioritized over 'maybe'\n *\n * @param {SourceObject} source\n * The source object\n *\n * @param {Object} options\n * The options passed to the tech\n *\n * @return {SourceHandler|null}\n * The first source handler that supports the source or null if\n * no SourceHandler supports the source\n */\n _Tech.selectSourceHandler = function (source, options) {\n const handlers = _Tech.sourceHandlers || [];\n let can;\n for (let i = 0; i < handlers.length; i++) {\n can = handlers[i].canHandleSource(source, options);\n if (can) {\n return handlers[i];\n }\n }\n return null;\n };\n\n /**\n * Check if the tech can support the given source.\n *\n * @param {SourceObject} srcObj\n * The source object\n *\n * @param {Object} options\n * The options passed to the tech\n *\n * @return {string}\n * 'probably', 'maybe', or '' (empty string)\n */\n _Tech.canPlaySource = function (srcObj, options) {\n const sh = _Tech.selectSourceHandler(srcObj, options);\n if (sh) {\n return sh.canHandleSource(srcObj, options);\n }\n return '';\n };\n\n /**\n * When using a source handler, prefer its implementation of\n * any function normally provided by the tech.\n */\n const deferrable = ['seekable', 'seeking', 'duration'];\n\n /**\n * A wrapper around {@link Tech#seekable} that will call a `SourceHandler`s seekable\n * function if it exists, with a fallback to the Techs seekable function.\n *\n * @method _Tech.seekable\n */\n\n /**\n * A wrapper around {@link Tech#duration} that will call a `SourceHandler`s duration\n * function if it exists, otherwise it will fallback to the techs duration function.\n *\n * @method _Tech.duration\n */\n\n deferrable.forEach(function (fnName) {\n const originalFn = this[fnName];\n if (typeof originalFn !== 'function') {\n return;\n }\n this[fnName] = function () {\n if (this.sourceHandler_ && this.sourceHandler_[fnName]) {\n return this.sourceHandler_[fnName].apply(this.sourceHandler_, arguments);\n }\n return originalFn.apply(this, arguments);\n };\n }, _Tech.prototype);\n\n /**\n * Create a function for setting the source using a source object\n * and source handlers.\n * Should never be called unless a source handler was found.\n *\n * @param {SourceObject} source\n * A source object with src and type keys\n */\n _Tech.prototype.setSource = function (source) {\n let sh = _Tech.selectSourceHandler(source, this.options_);\n if (!sh) {\n // Fall back to a native source handler when unsupported sources are\n // deliberately set\n if (_Tech.nativeSourceHandler) {\n sh = _Tech.nativeSourceHandler;\n } else {\n log$1.error('No source handler found for the current source.');\n }\n }\n\n // Dispose any existing source handler\n this.disposeSourceHandler();\n this.off('dispose', this.disposeSourceHandler_);\n if (sh !== _Tech.nativeSourceHandler) {\n this.currentSource_ = source;\n }\n this.sourceHandler_ = sh.handleSource(source, this, this.options_);\n this.one('dispose', this.disposeSourceHandler_);\n };\n\n /**\n * Clean up any existing SourceHandlers and listeners 
when the Tech is disposed.\n *\n * @listens Tech#dispose\n */\n _Tech.prototype.disposeSourceHandler = function () {\n // if we have a source and get another one\n // then we are loading something new\n // than clear all of our current tracks\n if (this.currentSource_) {\n this.clearTracks(['audio', 'video']);\n this.currentSource_ = null;\n }\n\n // always clean up auto-text tracks\n this.cleanupAutoTextTracks();\n if (this.sourceHandler_) {\n if (this.sourceHandler_.dispose) {\n this.sourceHandler_.dispose();\n }\n this.sourceHandler_ = null;\n }\n };\n};\n\n// The base Tech class needs to be registered as a Component. It is the only\n// Tech that can be registered as a Component.\nComponent$1.registerComponent('Tech', Tech);\nTech.registerTech('Tech', Tech);\n\n/**\n * A list of techs that should be added to techOrder on Players\n *\n * @private\n */\nTech.defaultTechOrder_ = [];\n\n/**\n * @file middleware.js\n * @module middleware\n */\nconst middlewares = {};\nconst middlewareInstances = {};\nconst TERMINATOR = {};\n\n/**\n * A middleware object is a plain JavaScript object that has methods that\n * match the {@link Tech} methods found in the lists of allowed\n * {@link module:middleware.allowedGetters|getters},\n * {@link module:middleware.allowedSetters|setters}, and\n * {@link module:middleware.allowedMediators|mediators}.\n *\n * @typedef {Object} MiddlewareObject\n */\n\n/**\n * A middleware factory function that should return a\n * {@link module:middleware~MiddlewareObject|MiddlewareObject}.\n *\n * This factory will be called for each player when needed, with the player\n * passed in as an argument.\n *\n * @callback MiddlewareFactory\n * @param { import('../player').default } player\n * A Video.js player.\n */\n\n/**\n * Define a middleware that the player should use by way of a factory function\n * that returns a middleware object.\n *\n * @param {string} type\n * The MIME type to match or `\"*\"` for all MIME types.\n *\n * @param {MiddlewareFactory} middleware\n * A middleware factory function that will be executed for\n * matching types.\n */\nfunction use(type, middleware) {\n middlewares[type] = middlewares[type] || [];\n middlewares[type].push(middleware);\n}\n\n/**\n * Asynchronously sets a source using middleware by recursing through any\n * matching middlewares and calling `setSource` on each, passing along the\n * previous returned value each time.\n *\n * @param { import('../player').default } player\n * A {@link Player} instance.\n *\n * @param {Tech~SourceObject} src\n * A source object.\n *\n * @param {Function}\n * The next middleware to run.\n */\nfunction setSource(player, src, next) {\n player.setTimeout(() => setSourceHelper(src, middlewares[src.type], next, player), 1);\n}\n\n/**\n * When the tech is set, passes the tech to each middleware's `setTech` method.\n *\n * @param {Object[]} middleware\n * An array of middleware instances.\n *\n * @param { import('../tech/tech').default } tech\n * A Video.js tech.\n */\nfunction setTech(middleware, tech) {\n middleware.forEach(mw => mw.setTech && mw.setTech(tech));\n}\n\n/**\n * Calls a getter on the tech first, through each middleware\n * from right to left to the player.\n *\n * @param {Object[]} middleware\n * An array of middleware instances.\n *\n * @param { import('../tech/tech').default } tech\n * The current tech.\n *\n * @param {string} method\n * A method name.\n *\n * @return {*}\n * The final value from the tech after middleware has intercepted it.\n */\nfunction get(middleware, tech, method) {\n 
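// Middleware getter flow: the tech produces the raw value first (tech[method]()), then\n // reduceRight hands it to each middleware's getter from right (tech side) to left (player\n // side), so the player receives the left-most middleware's view of the value.\n // Hypothetical sketch: use('video/mp4', () => ({ duration: d => d / 2 })); would make the\n // player report half of the tech's duration for mp4 sources.\n 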
return middleware.reduceRight(middlewareIterator(method), tech[method]());\n}\n\n/**\n * Takes the argument given to the player and calls the setter method on each\n * middleware from left to right to the tech.\n *\n * @param {Object[]} middleware\n * An array of middleware instances.\n *\n * @param { import('../tech/tech').default } tech\n * The current tech.\n *\n * @param {string} method\n * A method name.\n *\n * @param {*} arg\n * The value to set on the tech.\n *\n * @return {*}\n * The return value of the `method` of the `tech`.\n */\nfunction set(middleware, tech, method, arg) {\n return tech[method](middleware.reduce(middlewareIterator(method), arg));\n}\n\n/**\n * Takes the argument given to the player and calls the `call` version of the\n * method on each middleware from left to right.\n *\n * Then, call the passed in method on the tech and return the result unchanged\n * back to the player, through middleware, this time from right to left.\n *\n * @param {Object[]} middleware\n * An array of middleware instances.\n *\n * @param { import('../tech/tech').default } tech\n * The current tech.\n *\n * @param {string} method\n * A method name.\n *\n * @param {*} arg\n * The value to set on the tech.\n *\n * @return {*}\n * The return value of the `method` of the `tech`, regardless of the\n * return values of middlewares.\n */\nfunction mediate(middleware, tech, method, arg = null) {\n const callMethod = 'call' + toTitleCase$1(method);\n const middlewareValue = middleware.reduce(middlewareIterator(callMethod), arg);\n const terminated = middlewareValue === TERMINATOR;\n // deprecated. The `null` return value should instead return TERMINATOR to\n // prevent confusion if a techs method actually returns null.\n const returnValue = terminated ? null : tech[method](middlewareValue);\n executeRight(middleware, method, returnValue, terminated);\n return returnValue;\n}\n\n/**\n * Enumeration of allowed getters where the keys are method names.\n *\n * @type {Object}\n */\nconst allowedGetters = {\n buffered: 1,\n currentTime: 1,\n duration: 1,\n muted: 1,\n played: 1,\n paused: 1,\n seekable: 1,\n volume: 1,\n ended: 1\n};\n\n/**\n * Enumeration of allowed setters where the keys are method names.\n *\n * @type {Object}\n */\nconst allowedSetters = {\n setCurrentTime: 1,\n setMuted: 1,\n setVolume: 1\n};\n\n/**\n * Enumeration of allowed mediators where the keys are method names.\n *\n * @type {Object}\n */\nconst allowedMediators = {\n play: 1,\n pause: 1\n};\nfunction middlewareIterator(method) {\n return (value, mw) => {\n // if the previous middleware terminated, pass along the termination\n if (value === TERMINATOR) {\n return TERMINATOR;\n }\n if (mw[method]) {\n return mw[method](value);\n }\n return value;\n };\n}\nfunction executeRight(mws, method, value, terminated) {\n for (let i = mws.length - 1; i >= 0; i--) {\n const mw = mws[i];\n if (mw[method]) {\n mw[method](terminated, value);\n }\n }\n}\n\n/**\n * Clear the middleware cache for a player.\n *\n * @param { import('../player').default } player\n * A {@link Player} instance.\n */\nfunction clearCacheForPlayer(player) {\n middlewareInstances[player.id()] = null;\n}\n\n/**\n * {\n * [playerId]: [[mwFactory, mwInstance], ...]\n * }\n *\n * @private\n */\nfunction getOrCreateFactory(player, mwFactory) {\n const mws = middlewareInstances[player.id()];\n let mw = null;\n if (mws === undefined || mws === null) {\n mw = mwFactory(player);\n middlewareInstances[player.id()] = [[mwFactory, mw]];\n return mw;\n }\n for (let i = 0; i < 
mws.length; i++) {\n const [mwf, mwi] = mws[i];\n if (mwf !== mwFactory) {\n continue;\n }\n mw = mwi;\n }\n if (mw === null) {\n mw = mwFactory(player);\n mws.push([mwFactory, mw]);\n }\n return mw;\n}\nfunction setSourceHelper(src = {}, middleware = [], next, player, acc = [], lastRun = false) {\n const [mwFactory, ...mwrest] = middleware;\n\n // if mwFactory is a string, then we're at a fork in the road\n if (typeof mwFactory === 'string') {\n setSourceHelper(src, middlewares[mwFactory], next, player, acc, lastRun);\n\n // if we have an mwFactory, call it with the player to get the mw,\n // then call the mw's setSource method\n } else if (mwFactory) {\n const mw = getOrCreateFactory(player, mwFactory);\n\n // if setSource isn't present, implicitly select this middleware\n if (!mw.setSource) {\n acc.push(mw);\n return setSourceHelper(src, mwrest, next, player, acc, lastRun);\n }\n mw.setSource(Object.assign({}, src), function (err, _src) {\n // something happened, try the next middleware on the current level\n // make sure to use the old src\n if (err) {\n return setSourceHelper(src, mwrest, next, player, acc, lastRun);\n }\n\n // we've succeeded, now we need to go deeper\n acc.push(mw);\n\n // if it's the same type, continue down the current chain\n // otherwise, we want to go down the new chain\n setSourceHelper(_src, src.type === _src.type ? mwrest : middlewares[_src.type], next, player, acc, lastRun);\n });\n } else if (mwrest.length) {\n setSourceHelper(src, mwrest, next, player, acc, lastRun);\n } else if (lastRun) {\n next(src, acc);\n } else {\n setSourceHelper(src, middlewares['*'], next, player, acc, true);\n }\n}\n\n/**\n * Mimetypes\n *\n * @see https://www.iana.org/assignments/media-types/media-types.xhtml\n * @typedef Mimetypes~Kind\n * @enum\n */\nconst MimetypesKind = {\n opus: 'video/ogg',\n ogv: 'video/ogg',\n mp4: 'video/mp4',\n mov: 'video/mp4',\n m4v: 'video/mp4',\n mkv: 'video/x-matroska',\n m4a: 'audio/mp4',\n mp3: 'audio/mpeg',\n aac: 'audio/aac',\n caf: 'audio/x-caf',\n flac: 'audio/flac',\n oga: 'audio/ogg',\n wav: 'audio/wav',\n m3u8: 'application/x-mpegURL',\n mpd: 'application/dash+xml',\n jpg: 'image/jpeg',\n jpeg: 'image/jpeg',\n gif: 'image/gif',\n png: 'image/png',\n svg: 'image/svg+xml',\n webp: 'image/webp'\n};\n\n/**\n * Get the mimetype of a given src url if possible\n *\n * @param {string} src\n * The url to the src\n *\n * @return {string}\n * return the mimetype if it was known or empty string otherwise\n */\nconst getMimetype = function (src = '') {\n const ext = getFileExtension(src);\n const mimetype = MimetypesKind[ext.toLowerCase()];\n return mimetype || '';\n};\n\n/**\n * Find the mime type of a given source string if possible. Uses the player\n * source cache.\n *\n * @param { import('../player').default } player\n * The player object\n *\n * @param {string} src\n * The source string\n *\n * @return {string}\n * The type that was found\n */\nconst findMimetype = (player, src) => {\n if (!src) {\n return '';\n }\n\n // 1. check for the type in the `source` cache\n if (player.cache_.source.src === src && player.cache_.source.type) {\n return player.cache_.source.type;\n }\n\n // 2. see if we have this source in our `currentSources` cache\n const matchingSources = player.cache_.sources.filter(s => s.src === src);\n if (matchingSources.length) {\n return matchingSources[0].type;\n }\n\n // 3. 
look for the src url in source elements and use the type there\n const sources = player.$$('source');\n for (let i = 0; i < sources.length; i++) {\n const s = sources[i];\n if (s.type && s.src && s.src === src) {\n return s.type;\n }\n }\n\n // 4. finally fallback to our list of mime types based on src url extension\n return getMimetype(src);\n};\n\n/**\n * @module filter-source\n */\n\n/**\n * Filter out single bad source objects or multiple source objects in an\n * array. Also flattens nested source object arrays into a 1 dimensional\n * array of source objects.\n *\n * @param {Tech~SourceObject|Tech~SourceObject[]} src\n * The src object to filter\n *\n * @return {Tech~SourceObject[]}\n * An array of sourceobjects containing only valid sources\n *\n * @private\n */\nconst filterSource = function (src) {\n // traverse array\n if (Array.isArray(src)) {\n let newsrc = [];\n src.forEach(function (srcobj) {\n srcobj = filterSource(srcobj);\n if (Array.isArray(srcobj)) {\n newsrc = newsrc.concat(srcobj);\n } else if (isObject(srcobj)) {\n newsrc.push(srcobj);\n }\n });\n src = newsrc;\n } else if (typeof src === 'string' && src.trim()) {\n // convert string into object\n src = [fixSource({\n src\n })];\n } else if (isObject(src) && typeof src.src === 'string' && src.src && src.src.trim()) {\n // src is already valid\n src = [fixSource(src)];\n } else {\n // invalid source, turn it into an empty array\n src = [];\n }\n return src;\n};\n\n/**\n * Checks src mimetype, adding it when possible\n *\n * @param {Tech~SourceObject} src\n * The src object to check\n * @return {Tech~SourceObject}\n * src Object with known type\n */\nfunction fixSource(src) {\n if (!src.type) {\n const mimetype = getMimetype(src.src);\n if (mimetype) {\n src.type = mimetype;\n }\n }\n return src;\n}\n\nvar icons = \"\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \";\n\n/**\n * @file loader.js\n */\n\n/**\n * The `MediaLoader` is the `Component` that decides which playback technology to load\n * when a player is initialized.\n *\n * @extends Component\n */\nclass MediaLoader extends Component$1 {\n /**\n * Create an instance of this class.\n *\n * @param { import('../player').default } player\n * The `Player` that this class should attach to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function that is run when this component is ready.\n */\n constructor(player, options, ready) {\n // MediaLoader has no element\n const options_ = merge$1({\n createEl: false\n }, options);\n super(player, options_, ready);\n\n // If there are no sources when the player is initialized,\n // load the first supported playback technology.\n\n if (!options.playerOptions.sources || options.playerOptions.sources.length === 0) {\n for (let i = 0, j = options.playerOptions.techOrder; i < j.length; i++) {\n const techName = toTitleCase$1(j[i]);\n let tech = Tech.getTech(techName);\n\n // Support old behavior of techs being registered as components.\n // Remove once that 
deprecated behavior is removed.\n if (!techName) {\n tech = Component$1.getComponent(techName);\n }\n\n // Check if the browser supports this technology\n if (tech && tech.isSupported()) {\n player.loadTech_(techName);\n break;\n }\n }\n } else {\n // Loop through playback technologies (e.g. HTML5) and check for support.\n // Then load the best source.\n // A few assumptions here:\n // All playback technologies respect preload false.\n player.src(options.playerOptions.sources);\n }\n }\n}\nComponent$1.registerComponent('MediaLoader', MediaLoader);\n\n/**\n * @file clickable-component.js\n */\n\n/**\n * Component which is clickable or keyboard actionable, but is not a\n * native HTML button.\n *\n * @extends Component\n */\nclass ClickableComponent extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of component options.\n *\n * @param {function} [options.clickHandler]\n * The function to call when the button is clicked / activated\n *\n * @param {string} [options.controlText]\n * The text to set on the button\n *\n * @param {string} [options.className]\n * A class or space separated list of classes to add the component\n *\n */\n constructor(player, options) {\n super(player, options);\n if (this.options_.controlText) {\n this.controlText(this.options_.controlText);\n }\n this.handleMouseOver_ = e => this.handleMouseOver(e);\n this.handleMouseOut_ = e => this.handleMouseOut(e);\n this.handleClick_ = e => this.handleClick(e);\n this.handleKeyDown_ = e => this.handleKeyDown(e);\n this.emitTapEvents();\n this.enable();\n }\n\n /**\n * Create the `ClickableComponent`s DOM element.\n *\n * @param {string} [tag=div]\n * The element's node type.\n *\n * @param {Object} [props={}]\n * An object of properties that should be set on the element.\n *\n * @param {Object} [attributes={}]\n * An object of attributes that should be set on the element.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl(tag = 'div', props = {}, attributes = {}) {\n props = Object.assign({\n className: this.buildCSSClass(),\n tabIndex: 0\n }, props);\n if (tag === 'button') {\n log$1.error(`Creating a ClickableComponent with an HTML element of ${tag} is not supported; use a Button instead.`);\n }\n\n // Add ARIA attributes for clickable element which is not a native HTML button\n attributes = Object.assign({\n role: 'button'\n }, attributes);\n this.tabIndex_ = props.tabIndex;\n const el = createEl(tag, props, attributes);\n if (!this.player_.options_.experimentalSvgIcons) {\n el.appendChild(createEl('span', {\n className: 'vjs-icon-placeholder'\n }, {\n 'aria-hidden': true\n }));\n }\n this.createControlTextEl(el);\n return el;\n }\n dispose() {\n // remove controlTextEl_ on dispose\n this.controlTextEl_ = null;\n super.dispose();\n }\n\n /**\n * Create a control text element on this `ClickableComponent`\n *\n * @param {Element} [el]\n * Parent element for the control text.\n *\n * @return {Element}\n * The control text element that gets created.\n */\n createControlTextEl(el) {\n this.controlTextEl_ = createEl('span', {\n className: 'vjs-control-text'\n }, {\n // let the screen reader user know that the text of the element may change\n 'aria-live': 'polite'\n });\n if (el) {\n el.appendChild(this.controlTextEl_);\n }\n this.controlText(this.controlText_, el);\n return this.controlTextEl_;\n }\n\n /**\n * Get or set the 
localize text to use for the controls on the `ClickableComponent`.\n *\n * @param {string} [text]\n * Control text for element.\n *\n * @param {Element} [el=this.el()]\n * Element to set the title on.\n *\n * @return {string}\n * - The control text when getting\n */\n controlText(text, el = this.el()) {\n if (text === undefined) {\n return this.controlText_ || 'Need Text';\n }\n const localizedText = this.localize(text);\n\n /** @protected */\n this.controlText_ = text;\n textContent(this.controlTextEl_, localizedText);\n if (!this.nonIconControl && !this.player_.options_.noUITitleAttributes) {\n // Set title attribute if only an icon is shown\n el.setAttribute('title', localizedText);\n }\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-control vjs-button ${super.buildCSSClass()}`;\n }\n\n /**\n * Enable this `ClickableComponent`\n */\n enable() {\n if (!this.enabled_) {\n this.enabled_ = true;\n this.removeClass('vjs-disabled');\n this.el_.setAttribute('aria-disabled', 'false');\n if (typeof this.tabIndex_ !== 'undefined') {\n this.el_.setAttribute('tabIndex', this.tabIndex_);\n }\n this.on(['tap', 'click'], this.handleClick_);\n this.on('keydown', this.handleKeyDown_);\n }\n }\n\n /**\n * Disable this `ClickableComponent`\n */\n disable() {\n this.enabled_ = false;\n this.addClass('vjs-disabled');\n this.el_.setAttribute('aria-disabled', 'true');\n if (typeof this.tabIndex_ !== 'undefined') {\n this.el_.removeAttribute('tabIndex');\n }\n this.off('mouseover', this.handleMouseOver_);\n this.off('mouseout', this.handleMouseOut_);\n this.off(['tap', 'click'], this.handleClick_);\n this.off('keydown', this.handleKeyDown_);\n }\n\n /**\n * Handles language change in ClickableComponent for the player in components\n *\n *\n */\n handleLanguagechange() {\n this.controlText(this.controlText_);\n }\n\n /**\n * Event handler that is called when a `ClickableComponent` receives a\n * `click` or `tap` event.\n *\n * @param {Event} event\n * The `tap` or `click` event that caused this function to be called.\n *\n * @listens tap\n * @listens click\n * @abstract\n */\n handleClick(event) {\n if (this.options_.clickHandler) {\n this.options_.clickHandler.call(this, arguments);\n }\n }\n\n /**\n * Event handler that is called when a `ClickableComponent` receives a\n * `keydown` event.\n *\n * By default, if the key is Space or Enter, it will trigger a `click` event.\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Support Space or Enter key operation to fire a click event. 
Also,\n // prevent the event from propagating through the DOM and triggering\n // Player hotkeys.\n if (keycode.isEventKey(event, 'Space') || keycode.isEventKey(event, 'Enter')) {\n event.preventDefault();\n event.stopPropagation();\n this.trigger('click');\n } else {\n // Pass keypress handling up for unsupported keys\n super.handleKeyDown(event);\n }\n }\n}\nComponent$1.registerComponent('ClickableComponent', ClickableComponent);\n\n/**\n * @file poster-image.js\n */\n\n/**\n * A `ClickableComponent` that handles showing the poster image for the player.\n *\n * @extends ClickableComponent\n */\nclass PosterImage extends ClickableComponent {\n /**\n * Create an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should attach to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.update();\n this.update_ = e => this.update(e);\n player.on('posterchange', this.update_);\n }\n\n /**\n * Clean up and dispose of the `PosterImage`.\n */\n dispose() {\n this.player().off('posterchange', this.update_);\n super.dispose();\n }\n\n /**\n * Create the `PosterImage`s DOM element.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl() {\n // The el is an empty div to keep position in the DOM\n // A picture and img el will be inserted when a source is set\n return createEl('div', {\n className: 'vjs-poster'\n });\n }\n\n /**\n * Get or set the `PosterImage`'s crossOrigin option.\n *\n * @param {string|null} [value]\n * The value to set the crossOrigin to. If an argument is\n * given, must be one of `'anonymous'` or `'use-credentials'`, or 'null'.\n *\n * @return {string|null}\n * - The current crossOrigin value of the `Player` when getting.\n * - undefined when setting\n */\n crossOrigin(value) {\n // `null` can be set to unset a value\n if (typeof value === 'undefined') {\n if (this.$('img')) {\n // If the poster's element exists, give its value\n return this.$('img').crossOrigin;\n } else if (this.player_.tech_ && this.player_.tech_.isReady_) {\n // If not but the tech is ready, query the tech\n return this.player_.crossOrigin();\n }\n // Otherwise check options as the poster is usually set before the state of crossorigin\n // can be retrieved by the getter\n return this.player_.options_.crossOrigin || this.player_.options_.crossorigin || null;\n }\n if (value !== null && value !== 'anonymous' && value !== 'use-credentials') {\n this.player_.log.warn(`crossOrigin must be null, \"anonymous\" or \"use-credentials\", given \"${value}\"`);\n return;\n }\n if (this.$('img')) {\n this.$('img').crossOrigin = value;\n }\n return;\n }\n\n /**\n * An {@link EventTarget~EventListener} for {@link Player#posterchange} events.\n *\n * @listens Player#posterchange\n *\n * @param {Event} [event]\n * The `Player#posterchange` event that triggered this function.\n */\n update(event) {\n const url = this.player().poster();\n this.setSrc(url);\n\n // If there's no poster source we should display:none on this component\n // so it's not still clickable or right-clickable\n if (url) {\n this.show();\n } else {\n this.hide();\n }\n }\n\n /**\n * Set the source of the `PosterImage` depending on the display method. (Re)creates\n * the inner picture and img elementss when needed.\n *\n * @param {string} [url]\n * The URL to the source for the `PosterImage`. 
If not specified or falsy,\n * any source and any inner picture/img are removed.\n */\n setSrc(url) {\n if (!url) {\n this.el_.textContent = '';\n return;\n }\n if (!this.$('img')) {\n this.el_.appendChild(createEl('picture', {\n className: 'vjs-poster',\n // Don't want poster to be tabbable.\n tabIndex: -1\n }, {}, createEl('img', {\n loading: 'lazy',\n crossOrigin: this.crossOrigin()\n }, {\n alt: ''\n })));\n }\n this.$('img').src = url;\n }\n\n /**\n * An {@link EventTarget~EventListener} for clicks on the `PosterImage`. See\n * {@link ClickableComponent#handleClick} for instances where this will be triggered.\n *\n * @listens tap\n * @listens click\n * @listens keydown\n *\n * @param {Event} event\n * The `click`, `tap` or `keydown` event that caused this function to be called.\n */\n handleClick(event) {\n // We don't want a click to trigger playback when controls are disabled\n if (!this.player_.controls()) {\n return;\n }\n if (this.player_.tech(true)) {\n this.player_.tech(true).focus();\n }\n if (this.player_.paused()) {\n silencePromise(this.player_.play());\n } else {\n this.player_.pause();\n }\n }\n}\n\n/**\n * Get or set the `PosterImage`'s crossorigin option. For the HTML5 player, this\n * sets the `crossOrigin` property on the `<img>` tag to control the CORS\n * behavior.\n *\n * @param {string|null} [value]\n * The value to set the `PosterImage`'s crossorigin to. If an argument is\n * given, must be one of `anonymous` or `use-credentials`.\n *\n * @return {string|null|undefined}\n * - The current crossorigin value of the `Player` when getting.\n * - undefined when setting\n */\nPosterImage.prototype.crossorigin = PosterImage.prototype.crossOrigin;\nComponent$1.registerComponent('PosterImage', PosterImage);\n\n/**\n * @file text-track-display.js\n */\nconst darkGray = '#222';\nconst lightGray = '#ccc';\nconst fontMap = {\n monospace: 'monospace',\n sansSerif: 'sans-serif',\n serif: 'serif',\n monospaceSansSerif: '\"Andale Mono\", \"Lucida Console\", monospace',\n monospaceSerif: '\"Courier New\", monospace',\n proportionalSansSerif: 'sans-serif',\n proportionalSerif: 'serif',\n casual: '\"Comic Sans MS\", Impact, fantasy',\n script: '\"Monotype Corsiva\", cursive',\n smallcaps: '\"Andale Mono\", \"Lucida Console\", monospace, sans-serif'\n};\n\n/**\n * Construct an rgba color from a given hex color code.\n *\n * @param {string} color\n * Hex color code, like #f0e or #f604e2.\n *\n * @param {number} opacity\n * Value for opacity, 0.0 - 1.0.\n *\n * @return {string}\n * The rgba color that was created, like 'rgba(255, 0, 0, 0.3)'.\n */\nfunction constructColor(color, opacity) {\n let hex;\n if (color.length === 4) {\n // color looks like \"#f0e\"\n hex = color[1] + color[1] + color[2] + color[2] + color[3] + color[3];\n } else if (color.length === 7) {\n // color looks like \"#f604e2\"\n hex = color.slice(1);\n } else {\n throw new Error('Invalid color code provided, ' + color + '; must be formatted as e.g. #f0e or #f604e2.');\n }\n return 'rgba(' + parseInt(hex.slice(0, 2), 16) + ',' + parseInt(hex.slice(2, 4), 16) + ',' + parseInt(hex.slice(4, 6), 16) + ',' + opacity + ')';\n}\n\n/**\n * Try to update the style of a DOM element. Some style changes will throw an error,\n * particularly in IE8. 
Those should be noops.\n *\n * @param {Element} el\n * The DOM element to be styled.\n *\n * @param {string} style\n * The CSS property on the element that should be styled.\n *\n * @param {string} rule\n * The style rule that should be applied to the property.\n *\n * @private\n */\nfunction tryUpdateStyle(el, style, rule) {\n try {\n el.style[style] = rule;\n } catch (e) {\n // Satisfies linter.\n return;\n }\n}\n\n/**\n * Converts the CSS top/right/bottom/left property numeric value to string in pixels.\n *\n * @param {number} position\n * The CSS top/right/bottom/left property value.\n *\n * @return {string}\n * The CSS property value that was created, like '10px'.\n *\n * @private\n */\nfunction getCSSPositionValue(position) {\n return position ? `${position}px` : '';\n}\n\n/**\n * The component for displaying text track cues.\n *\n * @extends Component\n */\nclass TextTrackDisplay extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function to call when `TextTrackDisplay` is ready.\n */\n constructor(player, options, ready) {\n super(player, options, ready);\n const updateDisplayTextHandler = e => this.updateDisplay(e);\n const updateDisplayHandler = e => {\n this.updateDisplayOverlay();\n this.updateDisplay(e);\n };\n player.on('loadstart', e => this.toggleDisplay(e));\n player.on('texttrackchange', updateDisplayTextHandler);\n player.on('loadedmetadata', e => {\n this.updateDisplayOverlay();\n this.preselectTrack(e);\n });\n\n // This used to be called during player init, but was causing an error\n // if a track should show by default and the display hadn't loaded yet.\n // Should probably be moved to an external track loader when we support\n // tracks that don't need a display.\n player.ready(bind_(this, function () {\n if (player.tech_ && player.tech_.featuresNativeTextTracks) {\n this.hide();\n return;\n }\n player.on('fullscreenchange', updateDisplayHandler);\n player.on('playerresize', updateDisplayHandler);\n const screenOrientation = window$1.screen.orientation || window$1;\n const changeOrientationEvent = window$1.screen.orientation ? 
'change' : 'orientationchange';\n screenOrientation.addEventListener(changeOrientationEvent, updateDisplayHandler);\n player.on('dispose', () => screenOrientation.removeEventListener(changeOrientationEvent, updateDisplayHandler));\n const tracks = this.options_.playerOptions.tracks || [];\n for (let i = 0; i < tracks.length; i++) {\n this.player_.addRemoteTextTrack(tracks[i], true);\n }\n this.preselectTrack();\n }));\n }\n\n /**\n * Preselect a track following this precedence:\n * - matches the previously selected {@link TextTrack}'s language and kind\n * - matches the previously selected {@link TextTrack}'s language only\n * - is the first default captions track\n * - is the first default descriptions track\n *\n * @listens Player#loadstart\n */\n preselectTrack() {\n const modes = {\n captions: 1,\n subtitles: 1\n };\n const trackList = this.player_.textTracks();\n const userPref = this.player_.cache_.selectedLanguage;\n let firstDesc;\n let firstCaptions;\n let preferredTrack;\n for (let i = 0; i < trackList.length; i++) {\n const track = trackList[i];\n if (userPref && userPref.enabled && userPref.language && userPref.language === track.language && track.kind in modes) {\n // Always choose the track that matches both language and kind\n if (track.kind === userPref.kind) {\n preferredTrack = track;\n // or choose the first track that matches language\n } else if (!preferredTrack) {\n preferredTrack = track;\n }\n\n // clear everything if offTextTrackMenuItem was clicked\n } else if (userPref && !userPref.enabled) {\n preferredTrack = null;\n firstDesc = null;\n firstCaptions = null;\n } else if (track.default) {\n if (track.kind === 'descriptions' && !firstDesc) {\n firstDesc = track;\n } else if (track.kind in modes && !firstCaptions) {\n firstCaptions = track;\n }\n }\n }\n\n // The preferredTrack matches the user preference and takes\n // precedence over all the other tracks.\n // So, display the preferredTrack before the first default track\n // and the subtitles/captions track before the descriptions track\n if (preferredTrack) {\n preferredTrack.mode = 'showing';\n } else if (firstCaptions) {\n firstCaptions.mode = 'showing';\n } else if (firstDesc) {\n firstDesc.mode = 'showing';\n }\n }\n\n /**\n * Turn display of {@link TextTrack}'s from the current state into the other state.\n * There are only two states:\n * - 'shown'\n * - 'hidden'\n *\n * @listens Player#loadstart\n */\n toggleDisplay() {\n if (this.player_.tech_ && this.player_.tech_.featuresNativeTextTracks) {\n this.hide();\n } else {\n this.show();\n }\n }\n\n /**\n * Create the {@link Component}'s DOM element.\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-text-track-display'\n }, {\n 'translate': 'yes',\n 'aria-live': 'off',\n 'aria-atomic': 'true'\n });\n }\n\n /**\n * Clear all displayed {@link TextTrack}s.\n */\n clearDisplay() {\n if (typeof window$1.WebVTT === 'function') {\n window$1.WebVTT.processCues(window$1, [], this.el_);\n }\n }\n\n /**\n * Update the displayed TextTrack when a either a {@link Player#texttrackchange} or\n * a {@link Player#fullscreenchange} is fired.\n *\n * @listens Player#texttrackchange\n * @listens Player#fullscreenchange\n */\n updateDisplay() {\n const tracks = this.player_.textTracks();\n const allowMultipleShowingTracks = this.options_.allowMultipleShowingTracks;\n this.clearDisplay();\n if (allowMultipleShowingTracks) {\n const showingTracks = [];\n for (let i = 0; i < tracks.length; ++i) {\n 
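// Gather every track that is currently 'showing' so that all of them can be passed to\n // updateForTrack() and rendered together.\n 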
const track = tracks[i];\n if (track.mode !== 'showing') {\n continue;\n }\n showingTracks.push(track);\n }\n this.updateForTrack(showingTracks);\n return;\n }\n\n // Track display prioritization model: if multiple tracks are 'showing',\n // display the first 'subtitles' or 'captions' track which is 'showing',\n // otherwise display the first 'descriptions' track which is 'showing'\n\n let descriptionsTrack = null;\n let captionsSubtitlesTrack = null;\n let i = tracks.length;\n while (i--) {\n const track = tracks[i];\n if (track.mode === 'showing') {\n if (track.kind === 'descriptions') {\n descriptionsTrack = track;\n } else {\n captionsSubtitlesTrack = track;\n }\n }\n }\n if (captionsSubtitlesTrack) {\n if (this.getAttribute('aria-live') !== 'off') {\n this.setAttribute('aria-live', 'off');\n }\n this.updateForTrack(captionsSubtitlesTrack);\n } else if (descriptionsTrack) {\n if (this.getAttribute('aria-live') !== 'assertive') {\n this.setAttribute('aria-live', 'assertive');\n }\n this.updateForTrack(descriptionsTrack);\n }\n }\n\n /**\n * Updates the displayed TextTrack to be sure it overlays the video when a either\n * a {@link Player#texttrackchange} or a {@link Player#fullscreenchange} is fired.\n */\n updateDisplayOverlay() {\n // inset-inline and inset-block are not supprted on old chrome, but these are\n // only likely to be used on TV devices\n if (!this.player_.videoHeight() || !window$1.CSS.supports('inset-inline: 10px')) {\n return;\n }\n const playerWidth = this.player_.currentWidth();\n const playerHeight = this.player_.currentHeight();\n const playerAspectRatio = playerWidth / playerHeight;\n const videoAspectRatio = this.player_.videoWidth() / this.player_.videoHeight();\n let insetInlineMatch = 0;\n let insetBlockMatch = 0;\n if (Math.abs(playerAspectRatio - videoAspectRatio) > 0.1) {\n if (playerAspectRatio > videoAspectRatio) {\n insetInlineMatch = Math.round((playerWidth - playerHeight * videoAspectRatio) / 2);\n } else {\n insetBlockMatch = Math.round((playerHeight - playerWidth / videoAspectRatio) / 2);\n }\n }\n tryUpdateStyle(this.el_, 'insetInline', getCSSPositionValue(insetInlineMatch));\n tryUpdateStyle(this.el_, 'insetBlock', getCSSPositionValue(insetBlockMatch));\n }\n\n /**\n * Style {@Link TextTrack} activeCues according to {@Link TextTrackSettings}.\n *\n * @param {TextTrack} track\n * Text track object containing active cues to style.\n */\n updateDisplayState(track) {\n const overrides = this.player_.textTrackSettings.getValues();\n const cues = track.activeCues;\n let i = cues.length;\n while (i--) {\n const cue = cues[i];\n if (!cue) {\n continue;\n }\n const cueDiv = cue.displayState;\n if (overrides.color) {\n cueDiv.firstChild.style.color = overrides.color;\n }\n if (overrides.textOpacity) {\n tryUpdateStyle(cueDiv.firstChild, 'color', constructColor(overrides.color || '#fff', overrides.textOpacity));\n }\n if (overrides.backgroundColor) {\n cueDiv.firstChild.style.backgroundColor = overrides.backgroundColor;\n }\n if (overrides.backgroundOpacity) {\n tryUpdateStyle(cueDiv.firstChild, 'backgroundColor', constructColor(overrides.backgroundColor || '#000', overrides.backgroundOpacity));\n }\n if (overrides.windowColor) {\n if (overrides.windowOpacity) {\n tryUpdateStyle(cueDiv, 'backgroundColor', constructColor(overrides.windowColor, overrides.windowOpacity));\n } else {\n cueDiv.style.backgroundColor = overrides.windowColor;\n }\n }\n if (overrides.edgeStyle) {\n if (overrides.edgeStyle === 'dropshadow') {\n cueDiv.firstChild.style.textShadow = `2px 
2px 3px ${darkGray}, 2px 2px 4px ${darkGray}, 2px 2px 5px ${darkGray}`;\n } else if (overrides.edgeStyle === 'raised') {\n cueDiv.firstChild.style.textShadow = `1px 1px ${darkGray}, 2px 2px ${darkGray}, 3px 3px ${darkGray}`;\n } else if (overrides.edgeStyle === 'depressed') {\n cueDiv.firstChild.style.textShadow = `1px 1px ${lightGray}, 0 1px ${lightGray}, -1px -1px ${darkGray}, 0 -1px ${darkGray}`;\n } else if (overrides.edgeStyle === 'uniform') {\n cueDiv.firstChild.style.textShadow = `0 0 4px ${darkGray}, 0 0 4px ${darkGray}, 0 0 4px ${darkGray}, 0 0 4px ${darkGray}`;\n }\n }\n if (overrides.fontPercent && overrides.fontPercent !== 1) {\n const fontSize = window$1.parseFloat(cueDiv.style.fontSize);\n cueDiv.style.fontSize = fontSize * overrides.fontPercent + 'px';\n cueDiv.style.height = 'auto';\n cueDiv.style.top = 'auto';\n }\n if (overrides.fontFamily && overrides.fontFamily !== 'default') {\n if (overrides.fontFamily === 'small-caps') {\n cueDiv.firstChild.style.fontVariant = 'small-caps';\n } else {\n cueDiv.firstChild.style.fontFamily = fontMap[overrides.fontFamily];\n }\n }\n }\n }\n\n /**\n * Add an {@link TextTrack} to to the {@link Tech}s {@link TextTrackList}.\n *\n * @param {TextTrack|TextTrack[]} tracks\n * Text track object or text track array to be added to the list.\n */\n updateForTrack(tracks) {\n if (!Array.isArray(tracks)) {\n tracks = [tracks];\n }\n if (typeof window$1.WebVTT !== 'function' || tracks.every(track => {\n return !track.activeCues;\n })) {\n return;\n }\n const cues = [];\n\n // push all active track cues\n for (let i = 0; i < tracks.length; ++i) {\n const track = tracks[i];\n for (let j = 0; j < track.activeCues.length; ++j) {\n cues.push(track.activeCues[j]);\n }\n }\n\n // removes all cues before it processes new ones\n window$1.WebVTT.processCues(window$1, cues, this.el_);\n\n // add unique class to each language text track & add settings styling if necessary\n for (let i = 0; i < tracks.length; ++i) {\n const track = tracks[i];\n for (let j = 0; j < track.activeCues.length; ++j) {\n const cueEl = track.activeCues[j].displayState;\n addClass(cueEl, 'vjs-text-track-cue', 'vjs-text-track-cue-' + (track.language ? track.language : i));\n if (track.language) {\n setAttribute(cueEl, 'lang', track.language);\n }\n }\n if (this.player_.textTrackSettings) {\n this.updateDisplayState(track);\n }\n }\n }\n}\nComponent$1.registerComponent('TextTrackDisplay', TextTrackDisplay);\n\n/**\n * @file loading-spinner.js\n */\n\n/**\n * A loading spinner for use during waiting/loading events.\n *\n * @extends Component\n */\nclass LoadingSpinner extends Component$1 {\n /**\n * Create the `LoadingSpinner`s DOM element.\n *\n * @return {Element}\n * The dom element that gets created.\n */\n createEl() {\n const isAudio = this.player_.isAudio();\n const playerType = this.localize(isAudio ? 'Audio Player' : 'Video Player');\n const controlText = createEl('span', {\n className: 'vjs-control-text',\n textContent: this.localize('{1} is loading.', [playerType])\n });\n const el = super.createEl('div', {\n className: 'vjs-loading-spinner',\n dir: 'ltr'\n });\n el.appendChild(controlText);\n return el;\n }\n\n /**\n * Update control text on languagechange\n */\n handleLanguagechange() {\n this.$('.vjs-control-text').textContent = this.localize('{1} is loading.', [this.player_.isAudio() ? 
'Audio Player' : 'Video Player']);\n }\n}\nComponent$1.registerComponent('LoadingSpinner', LoadingSpinner);\n\n/**\n * @file button.js\n */\n\n/**\n * Base class for all buttons.\n *\n * @extends ClickableComponent\n */\nclass Button extends ClickableComponent {\n /**\n * Create the `Button`s DOM element.\n *\n * @param {string} [tag=\"button\"]\n * The element's node type. This argument is IGNORED: no matter what\n * is passed, it will always create a `button` element.\n *\n * @param {Object} [props={}]\n * An object of properties that should be set on the element.\n *\n * @param {Object} [attributes={}]\n * An object of attributes that should be set on the element.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl(tag, props = {}, attributes = {}) {\n tag = 'button';\n props = Object.assign({\n className: this.buildCSSClass()\n }, props);\n\n // Add attributes for button element\n attributes = Object.assign({\n // Necessary since the default button type is \"submit\"\n type: 'button'\n }, attributes);\n const el = createEl(tag, props, attributes);\n if (!this.player_.options_.experimentalSvgIcons) {\n el.appendChild(createEl('span', {\n className: 'vjs-icon-placeholder'\n }, {\n 'aria-hidden': true\n }));\n }\n this.createControlTextEl(el);\n return el;\n }\n\n /**\n * Add a child `Component` inside of this `Button`.\n *\n * @param {string|Component} child\n * The name or instance of a child to add.\n *\n * @param {Object} [options={}]\n * The key/value store of options that will get passed to children of\n * the child.\n *\n * @return {Component}\n * The `Component` that gets added as a child. When using a string the\n * `Component` will get created by this process.\n *\n * @deprecated since version 5\n */\n addChild(child, options = {}) {\n const className = this.constructor.name;\n log$1.warn(`Adding an actionable (user controllable) child to a Button (${className}) is not supported; use a ClickableComponent instead.`);\n\n // Avoid the error message generated by ClickableComponent's addChild method\n return Component$1.prototype.addChild.call(this, child, options);\n }\n\n /**\n * Enable the `Button` element so that it can be activated or clicked. Use this with\n * {@link Button#disable}.\n */\n enable() {\n super.enable();\n this.el_.removeAttribute('disabled');\n }\n\n /**\n * Disable the `Button` element so that it cannot be activated or clicked. Use this with\n * {@link Button#enable}.\n */\n disable() {\n super.disable();\n this.el_.setAttribute('disabled', 'disabled');\n }\n\n /**\n * This gets called when a `Button` has focus and `keydown` is triggered via a key\n * press.\n *\n * @param {KeyboardEvent} event\n * The event that caused this function to get called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Ignore Space or Enter key operation, which is handled by the browser for\n // a button - though not for its super class, ClickableComponent. Also,\n // prevent the event from propagating through the DOM and triggering Player\n // hotkeys. We do not preventDefault here because we _want_ the browser to\n // handle it.\n if (keycode.isEventKey(event, 'Space') || keycode.isEventKey(event, 'Enter')) {\n event.stopPropagation();\n return;\n }\n\n // Pass keypress handling up for unsupported keys\n super.handleKeyDown(event);\n }\n}\nComponent$1.registerComponent('Button', Button);\n\n/**\n * @file big-play-button.js\n */\n\n/**\n * The initial play button that shows before the video has played. 
The hiding of the\n * `BigPlayButton` get done via CSS and `Player` states.\n *\n * @extends Button\n */\nclass BigPlayButton extends Button {\n constructor(player, options) {\n super(player, options);\n this.mouseused_ = false;\n this.setIcon('play');\n this.on('mousedown', e => this.handleMouseDown(e));\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object. Always returns 'vjs-big-play-button'.\n */\n buildCSSClass() {\n return 'vjs-big-play-button';\n }\n\n /**\n * This gets called when a `BigPlayButton` \"clicked\". See {@link ClickableComponent}\n * for more detailed information on what a click can be.\n *\n * @param {KeyboardEvent|MouseEvent|TouchEvent} event\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n const playPromise = this.player_.play();\n\n // exit early if clicked via the mouse\n if (this.mouseused_ && 'clientX' in event && 'clientY' in event) {\n silencePromise(playPromise);\n if (this.player_.tech(true)) {\n this.player_.tech(true).focus();\n }\n return;\n }\n const cb = this.player_.getChild('controlBar');\n const playToggle = cb && cb.getChild('playToggle');\n if (!playToggle) {\n this.player_.tech(true).focus();\n return;\n }\n const playFocus = () => playToggle.focus();\n if (isPromise(playPromise)) {\n playPromise.then(playFocus, () => {});\n } else {\n this.setTimeout(playFocus, 1);\n }\n }\n\n /**\n * Event handler that is called when a `BigPlayButton` receives a\n * `keydown` event.\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n this.mouseused_ = false;\n super.handleKeyDown(event);\n }\n\n /**\n * Handle `mousedown` events on the `BigPlayButton`.\n *\n * @param {MouseEvent} event\n * `mousedown` or `touchstart` event that triggered this function\n *\n * @listens mousedown\n */\n handleMouseDown(event) {\n this.mouseused_ = true;\n }\n}\n\n/**\n * The text that should display over the `BigPlayButton`s controls. Added to for localization.\n *\n * @type {string}\n * @protected\n */\nBigPlayButton.prototype.controlText_ = 'Play Video';\nComponent$1.registerComponent('BigPlayButton', BigPlayButton);\n\n/**\n * @file close-button.js\n */\n\n/**\n * The `CloseButton` is a `{@link Button}` that fires a `close` event when\n * it gets clicked.\n *\n * @extends Button\n */\nclass CloseButton extends Button {\n /**\n * Creates an instance of the this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.setIcon('cancel');\n this.controlText(options && options.controlText || this.localize('Close'));\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-close-button ${super.buildCSSClass()}`;\n }\n\n /**\n * This gets called when a `CloseButton` gets clicked. 
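A rough listener sketch, assuming the\n   * standard `videojs` global (the element id and control text below are hypothetical):\n   *\n   * @example\n   * const player = videojs('my-player');\n   * const CloseButton = videojs.getComponent('CloseButton');\n   * const closeBtn = player.addChild(new CloseButton(player, {controlText: 'Dismiss'}));\n   * // 'close' does not bubble, so listen on the button itself.\n   * closeBtn.on('close', () => closeBtn.hide());\n   *\n   * 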
See\n * {@link ClickableComponent#handleClick} for more information on when\n * this will be triggered\n *\n * @param {Event} event\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n * @fires CloseButton#close\n */\n handleClick(event) {\n /**\n * Triggered when the a `CloseButton` is clicked.\n *\n * @event CloseButton#close\n * @type {Event}\n *\n * @property {boolean} [bubbles=false]\n * set to false so that the close event does not\n * bubble up to parents if there is no listener\n */\n this.trigger({\n type: 'close',\n bubbles: false\n });\n }\n /**\n * Event handler that is called when a `CloseButton` receives a\n * `keydown` event.\n *\n * By default, if the key is Esc, it will trigger a `click` event.\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Esc button will trigger `click` event\n if (keycode.isEventKey(event, 'Esc')) {\n event.preventDefault();\n event.stopPropagation();\n this.trigger('click');\n } else {\n // Pass keypress handling up for unsupported keys\n super.handleKeyDown(event);\n }\n }\n}\nComponent$1.registerComponent('CloseButton', CloseButton);\n\n/**\n * @file play-toggle.js\n */\n\n/**\n * Button to toggle between play and pause.\n *\n * @extends Button\n */\nclass PlayToggle extends Button {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n */\n constructor(player, options = {}) {\n super(player, options);\n\n // show or hide replay icon\n options.replay = options.replay === undefined || options.replay;\n this.setIcon('play');\n this.on(player, 'play', e => this.handlePlay(e));\n this.on(player, 'pause', e => this.handlePause(e));\n if (options.replay) {\n this.on(player, 'ended', e => this.handleEnded(e));\n }\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-play-control ${super.buildCSSClass()}`;\n }\n\n /**\n * This gets called when an `PlayToggle` is \"clicked\". 
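The same toggle can be driven\n   * programmatically; a minimal sketch assuming the standard `videojs` global and a hypothetical\n   * element id 'my-player':\n   *\n   * @example\n   * const player = videojs('my-player');\n   * if (player.paused()) {\n   *   player.play();\n   * } else {\n   *   player.pause();\n   * }\n   *\n   * 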
See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n if (this.player_.paused()) {\n silencePromise(this.player_.play());\n } else {\n this.player_.pause();\n }\n }\n\n /**\n * This gets called once after the video has ended and the user seeks so that\n * we can change the replay button back to a play button.\n *\n * @param {Event} [event]\n * The event that caused this function to run.\n *\n * @listens Player#seeked\n */\n handleSeeked(event) {\n this.removeClass('vjs-ended');\n if (this.player_.paused()) {\n this.handlePause(event);\n } else {\n this.handlePlay(event);\n }\n }\n\n /**\n * Add the vjs-playing class to the element so it can change appearance.\n *\n * @param {Event} [event]\n * The event that caused this function to run.\n *\n * @listens Player#play\n */\n handlePlay(event) {\n this.removeClass('vjs-ended', 'vjs-paused');\n this.addClass('vjs-playing');\n // change the button text to \"Pause\"\n this.setIcon('pause');\n this.controlText('Pause');\n }\n\n /**\n * Add the vjs-paused class to the element so it can change appearance.\n *\n * @param {Event} [event]\n * The event that caused this function to run.\n *\n * @listens Player#pause\n */\n handlePause(event) {\n this.removeClass('vjs-playing');\n this.addClass('vjs-paused');\n // change the button text to \"Play\"\n this.setIcon('play');\n this.controlText('Play');\n }\n\n /**\n * Add the vjs-ended class to the element so it can change appearance\n *\n * @param {Event} [event]\n * The event that caused this function to run.\n *\n * @listens Player#ended\n */\n handleEnded(event) {\n this.removeClass('vjs-playing');\n this.addClass('vjs-ended');\n // change the button text to \"Replay\"\n this.setIcon('replay');\n this.controlText('Replay');\n\n // on the next seek remove the replay button\n this.one(this.player_, 'seeked', e => this.handleSeeked(e));\n }\n}\n\n/**\n * The text that should display over the `PlayToggle`s controls. 
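The strings 'Play', 'Pause' and 'Replay' are run\n * through the player's localization; a rough sketch using the standard `videojs.addLanguage`\n * helper (the translations below are only illustrative):\n *\n * @example\n * videojs.addLanguage('es', {'Play': 'Reproducir', 'Pause': 'Pausa', 'Replay': 'Repetir'});\n *\n * 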
Added for localization.\n *\n * @type {string}\n * @protected\n */\nPlayToggle.prototype.controlText_ = 'Play';\nComponent$1.registerComponent('PlayToggle', PlayToggle);\n\n/**\n * @file time-display.js\n */\n\n/**\n * Displays time information about the video\n *\n * @extends Component\n */\nclass TimeDisplay extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.on(player, ['timeupdate', 'ended', 'seeking'], e => this.update(e));\n this.updateTextNode_();\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const className = this.buildCSSClass();\n const el = super.createEl('div', {\n className: `${className} vjs-time-control vjs-control`\n });\n const span = createEl('span', {\n className: 'vjs-control-text',\n textContent: `${this.localize(this.labelText_)}\\u00a0`\n }, {\n role: 'presentation'\n });\n el.appendChild(span);\n this.contentEl_ = createEl('span', {\n className: `${className}-display`\n }, {\n // span elements have no implicit role, but some screen readers (notably VoiceOver)\n // treat them as a break between items in the DOM when using arrow keys\n // (or left-to-right swipes on iOS) to read contents of a page. Using\n // role='presentation' causes VoiceOver to NOT treat this span as a break.\n role: 'presentation'\n });\n el.appendChild(this.contentEl_);\n return el;\n }\n dispose() {\n this.contentEl_ = null;\n this.textNode_ = null;\n super.dispose();\n }\n\n /**\n * Updates the displayed time according to the `updateContent` function which is defined in the child class.\n *\n * @param {Event} [event]\n * The `timeupdate`, `ended` or `seeking` (if enableSmoothSeeking is true) event that caused this function to be called.\n */\n update(event) {\n if (!this.player_.options_.enableSmoothSeeking && event.type === 'seeking') {\n return;\n }\n this.updateContent(event);\n }\n\n /**\n * Updates the time display text node with a new time\n *\n * @param {number} [time=0] the time to update to\n *\n * @private\n */\n updateTextNode_(time = 0) {\n time = formatTime(time);\n if (this.formattedTime_ === time) {\n return;\n }\n this.formattedTime_ = time;\n this.requestNamedAnimationFrame('TimeDisplay#updateTextNode_', () => {\n if (!this.contentEl_) {\n return;\n }\n let oldNode = this.textNode_;\n if (oldNode && this.contentEl_.firstChild !== oldNode) {\n oldNode = null;\n log$1.warn('TimeDisplay#updateTextnode_: Prevented replacement of text node element since it was no longer a child of this node. 
Appending a new node instead.');\n }\n this.textNode_ = document.createTextNode(this.formattedTime_);\n if (!this.textNode_) {\n return;\n }\n if (oldNode) {\n this.contentEl_.replaceChild(this.textNode_, oldNode);\n } else {\n this.contentEl_.appendChild(this.textNode_);\n }\n });\n }\n\n /**\n * To be filled out in the child class, should update the displayed time\n * in accordance with the fact that the current time has changed.\n *\n * @param {Event} [event]\n * The `timeupdate` event that caused this to run.\n *\n * @listens Player#timeupdate\n */\n updateContent(event) {}\n}\n\n/**\n * The text that is added to the `TimeDisplay` for screen reader users.\n *\n * @type {string}\n * @private\n */\nTimeDisplay.prototype.labelText_ = 'Time';\n\n/**\n * The text that should display over the `TimeDisplay`s controls. Added to for localization.\n *\n * @type {string}\n * @protected\n *\n * @deprecated in v7; controlText_ is not used in non-active display Components\n */\nTimeDisplay.prototype.controlText_ = 'Time';\nComponent$1.registerComponent('TimeDisplay', TimeDisplay);\n\n/**\n * @file current-time-display.js\n */\n\n/**\n * Displays the current time\n *\n * @extends Component\n */\nclass CurrentTimeDisplay extends TimeDisplay {\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return 'vjs-current-time';\n }\n\n /**\n * Update current time display\n *\n * @param {Event} [event]\n * The `timeupdate` event that caused this function to run.\n *\n * @listens Player#timeupdate\n */\n updateContent(event) {\n // Allows for smooth scrubbing, when player can't keep up.\n let time;\n if (this.player_.ended()) {\n time = this.player_.duration();\n } else {\n time = this.player_.scrubbing() ? this.player_.getCache().currentTime : this.player_.currentTime();\n }\n this.updateTextNode_(time);\n }\n}\n\n/**\n * The text that is added to the `CurrentTimeDisplay` for screen reader users.\n *\n * @type {string}\n * @private\n */\nCurrentTimeDisplay.prototype.labelText_ = 'Current Time';\n\n/**\n * The text that should display over the `CurrentTimeDisplay`s controls. 
Added to for localization.\n *\n * @type {string}\n * @protected\n *\n * @deprecated in v7; controlText_ is not used in non-active display Components\n */\nCurrentTimeDisplay.prototype.controlText_ = 'Current Time';\nComponent$1.registerComponent('CurrentTimeDisplay', CurrentTimeDisplay);\n\n/**\n * @file duration-display.js\n */\n\n/**\n * Displays the duration\n *\n * @extends Component\n */\nclass DurationDisplay extends TimeDisplay {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n const updateContent = e => this.updateContent(e);\n\n // we do not want to/need to throttle duration changes,\n // as they should always display the changed duration as\n // it has changed\n this.on(player, 'durationchange', updateContent);\n\n // Listen to loadstart because the player duration is reset when a new media element is loaded,\n // but the durationchange on the user agent will not fire.\n // @see [Spec]{@link https://www.w3.org/TR/2011/WD-html5-20110113/video.html#media-element-load-algorithm}\n this.on(player, 'loadstart', updateContent);\n\n // Also listen for timeupdate (in the parent) and loadedmetadata because removing those\n // listeners could have broken dependent applications/libraries. These\n // can likely be removed for 7.0.\n this.on(player, 'loadedmetadata', updateContent);\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return 'vjs-duration';\n }\n\n /**\n * Update duration time display.\n *\n * @param {Event} [event]\n * The `durationchange`, `timeupdate`, or `loadedmetadata` event that caused\n * this function to be called.\n *\n * @listens Player#durationchange\n * @listens Player#timeupdate\n * @listens Player#loadedmetadata\n */\n updateContent(event) {\n const duration = this.player_.duration();\n this.updateTextNode_(duration);\n }\n}\n\n/**\n * The text that is added to the `DurationDisplay` for screen reader users.\n *\n * @type {string}\n * @private\n */\nDurationDisplay.prototype.labelText_ = 'Duration';\n\n/**\n * The text that should display over the `DurationDisplay`s controls. 
Added to for localization.\n *\n * @type {string}\n * @protected\n *\n * @deprecated in v7; controlText_ is not used in non-active display Components\n */\nDurationDisplay.prototype.controlText_ = 'Duration';\nComponent$1.registerComponent('DurationDisplay', DurationDisplay);\n\n/**\n * @file time-divider.js\n */\n\n/**\n * The separator between the current time and duration.\n * Can be hidden if it's not needed in the design.\n *\n * @extends Component\n */\nclass TimeDivider extends Component$1 {\n /**\n * Create the component's DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl('div', {\n className: 'vjs-time-control vjs-time-divider'\n }, {\n // this element and its contents can be hidden from assistive techs since\n // it is made extraneous by the announcement of the control text\n // for the current time and duration displays\n 'aria-hidden': true\n });\n const div = super.createEl('div');\n const span = super.createEl('span', {\n textContent: '/'\n });\n div.appendChild(span);\n el.appendChild(div);\n return el;\n }\n}\nComponent$1.registerComponent('TimeDivider', TimeDivider);\n\n/**\n * @file remaining-time-display.js\n */\n\n/**\n * Displays the time left in the video\n *\n * @extends Component\n */\nclass RemainingTimeDisplay extends TimeDisplay {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.on(player, 'durationchange', e => this.updateContent(e));\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return 'vjs-remaining-time';\n }\n\n /**\n * Create the `Component`'s DOM element with the \"minus\" character prepend to the time\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl();\n if (this.options_.displayNegative !== false) {\n el.insertBefore(createEl('span', {}, {\n 'aria-hidden': true\n }, '-'), this.contentEl_);\n }\n return el;\n }\n\n /**\n * Update remaining time display.\n *\n * @param {Event} [event]\n * The `timeupdate` or `durationchange` event that caused this to run.\n *\n * @listens Player#timeupdate\n * @listens Player#durationchange\n */\n updateContent(event) {\n if (typeof this.player_.duration() !== 'number') {\n return;\n }\n let time;\n\n // @deprecated We should only use remainingTimeDisplay\n // as of video.js 7\n if (this.player_.ended()) {\n time = 0;\n } else if (this.player_.remainingTimeDisplay) {\n time = this.player_.remainingTimeDisplay();\n } else {\n time = this.player_.remainingTime();\n }\n this.updateTextNode_(time);\n }\n}\n\n/**\n * The text that is added to the `RemainingTimeDisplay` for screen reader users.\n *\n * @type {string}\n * @private\n */\nRemainingTimeDisplay.prototype.labelText_ = 'Remaining Time';\n\n/**\n * The text that should display over the `RemainingTimeDisplay`s controls. 
Added to for localization.\n *\n * @type {string}\n * @protected\n *\n * @deprecated in v7; controlText_ is not used in non-active display Components\n */\nRemainingTimeDisplay.prototype.controlText_ = 'Remaining Time';\nComponent$1.registerComponent('RemainingTimeDisplay', RemainingTimeDisplay);\n\n/**\n * @file live-display.js\n */\n\n// TODO - Future make it click to snap to live\n\n/**\n * Displays the live indicator when duration is Infinity.\n *\n * @extends Component\n */\nclass LiveDisplay extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.updateShowing();\n this.on(this.player(), 'durationchange', e => this.updateShowing(e));\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl('div', {\n className: 'vjs-live-control vjs-control'\n });\n this.contentEl_ = createEl('div', {\n className: 'vjs-live-display'\n }, {\n 'aria-live': 'off'\n });\n this.contentEl_.appendChild(createEl('span', {\n className: 'vjs-control-text',\n textContent: `${this.localize('Stream Type')}\\u00a0`\n }));\n this.contentEl_.appendChild(document.createTextNode(this.localize('LIVE')));\n el.appendChild(this.contentEl_);\n return el;\n }\n dispose() {\n this.contentEl_ = null;\n super.dispose();\n }\n\n /**\n * Check the duration to see if the LiveDisplay should be showing or not. Then show/hide\n * it accordingly\n *\n * @param {Event} [event]\n * The {@link Player#durationchange} event that caused this function to run.\n *\n * @listens Player#durationchange\n */\n updateShowing(event) {\n if (this.player().duration() === Infinity) {\n this.show();\n } else {\n this.hide();\n }\n }\n}\nComponent$1.registerComponent('LiveDisplay', LiveDisplay);\n\n/**\n * @file seek-to-live.js\n */\n\n/**\n * Displays the live indicator when duration is Infinity.\n *\n * @extends Component\n */\nclass SeekToLive extends Button {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.updateLiveEdgeStatus();\n if (this.player_.liveTracker) {\n this.updateLiveEdgeStatusHandler_ = e => this.updateLiveEdgeStatus(e);\n this.on(this.player_.liveTracker, 'liveedgechange', this.updateLiveEdgeStatusHandler_);\n }\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl('button', {\n className: 'vjs-seek-to-live-control vjs-control'\n });\n this.setIcon('circle', el);\n this.textEl_ = createEl('span', {\n className: 'vjs-seek-to-live-text',\n textContent: this.localize('LIVE')\n }, {\n 'aria-hidden': 'true'\n });\n el.appendChild(this.textEl_);\n return el;\n }\n\n /**\n * Update the state of this button if we are at the live edge\n * or not\n */\n updateLiveEdgeStatus() {\n // default to live edge\n if (!this.player_.liveTracker || this.player_.liveTracker.atLiveEdge()) {\n this.setAttribute('aria-disabled', true);\n this.addClass('vjs-at-live-edge');\n this.controlText('Seek to live, currently playing live');\n } else 
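/* not at the live edge: make the control actionable so a click can jump back to live */ 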
{\n this.setAttribute('aria-disabled', false);\n this.removeClass('vjs-at-live-edge');\n this.controlText('Seek to live, currently behind live');\n }\n }\n\n /**\n * On click bring us as near to the live point as possible.\n * This requires that we wait for the next `live-seekable-change`\n * event which will happen every segment length seconds.\n */\n handleClick() {\n this.player_.liveTracker.seekToLiveEdge();\n }\n\n /**\n * Dispose of the element and stop tracking\n */\n dispose() {\n if (this.player_.liveTracker) {\n this.off(this.player_.liveTracker, 'liveedgechange', this.updateLiveEdgeStatusHandler_);\n }\n this.textEl_ = null;\n super.dispose();\n }\n}\n/**\n * The text that should display over the `SeekToLive`s control. Added for localization.\n *\n * @type {string}\n * @protected\n */\nSeekToLive.prototype.controlText_ = 'Seek to live, currently playing live';\nComponent$1.registerComponent('SeekToLive', SeekToLive);\n\n/**\n * @file num.js\n * @module num\n */\n\n/**\n * Keep a number between a min and a max value\n *\n * @param {number} number\n * The number to clamp\n *\n * @param {number} min\n * The minimum value\n * @param {number} max\n * The maximum value\n *\n * @return {number}\n * the clamped number\n */\nfunction clamp(number, min, max) {\n number = Number(number);\n return Math.min(max, Math.max(min, isNaN(number) ? min : number));\n}\n\nvar Num = /*#__PURE__*/Object.freeze({\n __proto__: null,\n clamp: clamp\n});\n\n/**\n * @file slider.js\n */\n\n/**\n * The base functionality for a slider. Can be vertical or horizontal.\n * For instance the volume bar or the seek bar on a video is a slider.\n *\n * @extends Component\n */\nclass Slider extends Component$1 {\n /**\n * Create an instance of this class\n *\n * @param { import('../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.handleMouseDown_ = e => this.handleMouseDown(e);\n this.handleMouseUp_ = e => this.handleMouseUp(e);\n this.handleKeyDown_ = e => this.handleKeyDown(e);\n this.handleClick_ = e => this.handleClick(e);\n this.handleMouseMove_ = e => this.handleMouseMove(e);\n this.update_ = e => this.update(e);\n\n // Set property names to bar to match with the child Slider class is looking for\n this.bar = this.getChild(this.options_.barName);\n\n // Set a horizontal or vertical class on the slider depending on the slider type\n this.vertical(!!this.options_.vertical);\n this.enable();\n }\n\n /**\n * Are controls are currently enabled for this slider or not.\n *\n * @return {boolean}\n * true if controls are enabled, false otherwise\n */\n enabled() {\n return this.enabled_;\n }\n\n /**\n * Enable controls for this slider if they are disabled\n */\n enable() {\n if (this.enabled()) {\n return;\n }\n this.on('mousedown', this.handleMouseDown_);\n this.on('touchstart', this.handleMouseDown_);\n this.on('keydown', this.handleKeyDown_);\n this.on('click', this.handleClick_);\n\n // TODO: deprecated, controlsvisible does not seem to be fired\n this.on(this.player_, 'controlsvisible', this.update);\n if (this.playerEvent) {\n this.on(this.player_, this.playerEvent, this.update);\n }\n this.removeClass('disabled');\n this.setAttribute('tabindex', 0);\n this.enabled_ = true;\n }\n\n /**\n * Disable controls for this slider if they are enabled\n */\n disable() {\n if (!this.enabled()) {\n return;\n }\n const doc = 
this.bar.el_.ownerDocument;\n this.off('mousedown', this.handleMouseDown_);\n this.off('touchstart', this.handleMouseDown_);\n this.off('keydown', this.handleKeyDown_);\n this.off('click', this.handleClick_);\n this.off(this.player_, 'controlsvisible', this.update_);\n this.off(doc, 'mousemove', this.handleMouseMove_);\n this.off(doc, 'mouseup', this.handleMouseUp_);\n this.off(doc, 'touchmove', this.handleMouseMove_);\n this.off(doc, 'touchend', this.handleMouseUp_);\n this.removeAttribute('tabindex');\n this.addClass('disabled');\n if (this.playerEvent) {\n this.off(this.player_, this.playerEvent, this.update);\n }\n this.enabled_ = false;\n }\n\n /**\n * Create the `Slider`s DOM element.\n *\n * @param {string} type\n * Type of element to create.\n *\n * @param {Object} [props={}]\n * List of properties in Object form.\n *\n * @param {Object} [attributes={}]\n * list of attributes in Object form.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl(type, props = {}, attributes = {}) {\n // Add the slider element class to all sub classes\n props.className = props.className + ' vjs-slider';\n props = Object.assign({\n tabIndex: 0\n }, props);\n attributes = Object.assign({\n 'role': 'slider',\n 'aria-valuenow': 0,\n 'aria-valuemin': 0,\n 'aria-valuemax': 100\n }, attributes);\n return super.createEl(type, props, attributes);\n }\n\n /**\n * Handle `mousedown` or `touchstart` events on the `Slider`.\n *\n * @param {MouseEvent} event\n * `mousedown` or `touchstart` event that triggered this function\n *\n * @listens mousedown\n * @listens touchstart\n * @fires Slider#slideractive\n */\n handleMouseDown(event) {\n const doc = this.bar.el_.ownerDocument;\n if (event.type === 'mousedown') {\n event.preventDefault();\n }\n // Do not call preventDefault() on touchstart in Chrome\n // to avoid console warnings. Use a 'touch-action: none' style\n // instead to prevent unintended scrolling.\n // https://developers.google.com/web/updates/2017/01/scrolling-intervention\n if (event.type === 'touchstart' && !IS_CHROME) {\n event.preventDefault();\n }\n blockTextSelection();\n this.addClass('vjs-sliding');\n /**\n * Triggered when the slider is in an active state\n *\n * @event Slider#slideractive\n * @type {MouseEvent}\n */\n this.trigger('slideractive');\n this.on(doc, 'mousemove', this.handleMouseMove_);\n this.on(doc, 'mouseup', this.handleMouseUp_);\n this.on(doc, 'touchmove', this.handleMouseMove_);\n this.on(doc, 'touchend', this.handleMouseUp_);\n this.handleMouseMove(event, true);\n }\n\n /**\n * Handle the `mousemove`, `touchmove`, and `mousedown` events on this `Slider`.\n * The `mousemove` and `touchmove` events will only only trigger this function during\n * `mousedown` and `touchstart`. This is due to {@link Slider#handleMouseDown} and\n * {@link Slider#handleMouseUp}.\n *\n * @param {MouseEvent} event\n * `mousedown`, `mousemove`, `touchstart`, or `touchmove` event that triggered\n * this function\n * @param {boolean} mouseDown this is a flag that should be set to true if `handleMouseMove` is called directly. It allows us to skip things that should not happen if coming from mouse down but should happen on regular mouse move handler. 
Defaults to false.\n *\n * @listens mousemove\n * @listens touchmove\n */\n handleMouseMove(event) {}\n\n /**\n * Handle `mouseup` or `touchend` events on the `Slider`.\n *\n * @param {MouseEvent} event\n * `mouseup` or `touchend` event that triggered this function.\n *\n * @listens touchend\n * @listens mouseup\n * @fires Slider#sliderinactive\n */\n handleMouseUp(event) {\n const doc = this.bar.el_.ownerDocument;\n unblockTextSelection();\n this.removeClass('vjs-sliding');\n /**\n * Triggered when the slider is no longer in an active state.\n *\n * @event Slider#sliderinactive\n * @type {Event}\n */\n this.trigger('sliderinactive');\n this.off(doc, 'mousemove', this.handleMouseMove_);\n this.off(doc, 'mouseup', this.handleMouseUp_);\n this.off(doc, 'touchmove', this.handleMouseMove_);\n this.off(doc, 'touchend', this.handleMouseUp_);\n this.update();\n }\n\n /**\n * Update the progress bar of the `Slider`.\n *\n * @return {number}\n * The percentage of progress the progress bar represents as a\n * number from 0 to 1.\n */\n update() {\n // In VolumeBar init we have a setTimeout for update that pops and update\n // to the end of the execution stack. The player is destroyed before then\n // update will cause an error\n // If there's no bar...\n if (!this.el_ || !this.bar) {\n return;\n }\n\n // clamp progress between 0 and 1\n // and only round to four decimal places, as we round to two below\n const progress = this.getProgress();\n if (progress === this.progress_) {\n return progress;\n }\n this.progress_ = progress;\n this.requestNamedAnimationFrame('Slider#update', () => {\n // Set the new bar width or height\n const sizeKey = this.vertical() ? 'height' : 'width';\n\n // Convert to a percentage for css value\n this.bar.el().style[sizeKey] = (progress * 100).toFixed(2) + '%';\n });\n return progress;\n }\n\n /**\n * Get the percentage of the bar that should be filled\n * but clamped and rounded.\n *\n * @return {number}\n * percentage filled that the slider is\n */\n getProgress() {\n return Number(clamp(this.getPercent(), 0, 1).toFixed(4));\n }\n\n /**\n * Calculate distance for slider\n *\n * @param {Event} event\n * The event that caused this function to run.\n *\n * @return {number}\n * The current position of the Slider.\n * - position.x for vertical `Slider`s\n * - position.y for horizontal `Slider`s\n */\n calculateDistance(event) {\n const position = getPointerPosition(this.el_, event);\n if (this.vertical()) {\n return position.y;\n }\n return position.x;\n }\n\n /**\n * Handle a `keydown` event on the `Slider`. Watches for left, right, up, and down\n * arrow keys. This function will only be called when the slider has focus. 
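Subclasses supply the actual\n   * stepping behaviour; a rough subclass sketch (all names below are hypothetical) showing the two\n   * methods this handler delegates to:\n   *\n   * @example\n   * const Slider = videojs.getComponent('Slider');\n   * class TenStepSlider extends Slider {\n   *   getPercent() { return this.value_ || 0; }\n   *   // Right/Up arrows end up here via handleKeyDown.\n   *   stepForward() { this.value_ = Math.min(1, this.getPercent() + 0.1); this.update(); }\n   *   // Left/Down arrows end up here via handleKeyDown.\n   *   stepBack() { this.value_ = Math.max(0, this.getPercent() - 0.1); this.update(); }\n   * }\n   * videojs.registerComponent('TenStepSlider', TenStepSlider);\n   *\n   * 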
See\n * {@link Slider#handleFocus} and {@link Slider#handleBlur}.\n *\n * @param {KeyboardEvent} event\n * the `keydown` event that caused this function to run.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Left and Down Arrows\n if (keycode.isEventKey(event, 'Left') || keycode.isEventKey(event, 'Down')) {\n event.preventDefault();\n event.stopPropagation();\n this.stepBack();\n\n // Up and Right Arrows\n } else if (keycode.isEventKey(event, 'Right') || keycode.isEventKey(event, 'Up')) {\n event.preventDefault();\n event.stopPropagation();\n this.stepForward();\n } else {\n // Pass keydown handling up for unsupported keys\n super.handleKeyDown(event);\n }\n }\n\n /**\n * Listener for click events on slider, used to prevent clicks\n * from bubbling up to parent elements like button menus.\n *\n * @param {Object} event\n * Event that caused this object to run\n */\n handleClick(event) {\n event.stopPropagation();\n event.preventDefault();\n }\n\n /**\n * Get/set if slider is horizontal for vertical\n *\n * @param {boolean} [bool]\n * - true if slider is vertical,\n * - false is horizontal\n *\n * @return {boolean}\n * - true if slider is vertical, and getting\n * - false if the slider is horizontal, and getting\n */\n vertical(bool) {\n if (bool === undefined) {\n return this.vertical_ || false;\n }\n this.vertical_ = !!bool;\n if (this.vertical_) {\n this.addClass('vjs-slider-vertical');\n } else {\n this.addClass('vjs-slider-horizontal');\n }\n }\n}\nComponent$1.registerComponent('Slider', Slider);\n\n/**\n * @file load-progress-bar.js\n */\n\n// get the percent width of a time compared to the total end\nconst percentify = (time, end) => clamp(time / end * 100, 0, 100).toFixed(2) + '%';\n\n/**\n * Shows loading progress\n *\n * @extends Component\n */\nclass LoadProgressBar extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.partEls_ = [];\n this.on(player, 'progress', e => this.update(e));\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl('div', {\n className: 'vjs-load-progress'\n });\n const wrapper = createEl('span', {\n className: 'vjs-control-text'\n });\n const loadedText = createEl('span', {\n textContent: this.localize('Loaded')\n });\n const separator = document.createTextNode(': ');\n this.percentageEl_ = createEl('span', {\n className: 'vjs-control-text-loaded-percentage',\n textContent: '0%'\n });\n el.appendChild(wrapper);\n wrapper.appendChild(loadedText);\n wrapper.appendChild(separator);\n wrapper.appendChild(this.percentageEl_);\n return el;\n }\n dispose() {\n this.partEls_ = null;\n this.percentageEl_ = null;\n super.dispose();\n }\n\n /**\n * Update progress bar\n *\n * @param {Event} [event]\n * The `progress` event that caused this function to run.\n *\n * @listens Player#progress\n */\n update(event) {\n this.requestNamedAnimationFrame('LoadProgressBar#update', () => {\n const liveTracker = this.player_.liveTracker;\n const buffered = this.player_.buffered();\n const duration = liveTracker && liveTracker.isLive() ? 
liveTracker.seekableEnd() : this.player_.duration();\n const bufferedEnd = this.player_.bufferedEnd();\n const children = this.partEls_;\n const percent = percentify(bufferedEnd, duration);\n if (this.percent_ !== percent) {\n // update the width of the progress bar\n this.el_.style.width = percent;\n // update the control-text\n textContent(this.percentageEl_, percent);\n this.percent_ = percent;\n }\n\n // add child elements to represent the individual buffered time ranges\n for (let i = 0; i < buffered.length; i++) {\n const start = buffered.start(i);\n const end = buffered.end(i);\n let part = children[i];\n if (!part) {\n part = this.el_.appendChild(createEl());\n children[i] = part;\n }\n\n // only update if changed\n if (part.dataset.start === start && part.dataset.end === end) {\n continue;\n }\n part.dataset.start = start;\n part.dataset.end = end;\n\n // set the percent based on the width of the progress bar (bufferedEnd)\n part.style.left = percentify(start, bufferedEnd);\n part.style.width = percentify(end - start, bufferedEnd);\n }\n\n // remove unused buffered range elements\n for (let i = children.length; i > buffered.length; i--) {\n this.el_.removeChild(children[i - 1]);\n }\n children.length = buffered.length;\n });\n }\n}\nComponent$1.registerComponent('LoadProgressBar', LoadProgressBar);\n\n/**\n * @file time-tooltip.js\n */\n\n/**\n * Time tooltips display a time above the progress bar.\n *\n * @extends Component\n */\nclass TimeTooltip extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The {@link Player} that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.update = throttle(bind_(this, this.update), UPDATE_REFRESH_INTERVAL);\n }\n\n /**\n * Create the time tooltip DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-time-tooltip'\n }, {\n 'aria-hidden': 'true'\n });\n }\n\n /**\n * Updates the position of the time tooltip relative to the `SeekBar`.\n *\n * @param {Object} seekBarRect\n * The `ClientRect` for the {@link SeekBar} element.\n *\n * @param {number} seekBarPoint\n * A number from 0 to 1, representing a horizontal reference point\n * from the left edge of the {@link SeekBar}\n */\n update(seekBarRect, seekBarPoint, content) {\n const tooltipRect = findPosition(this.el_);\n const playerRect = getBoundingClientRect(this.player_.el());\n const seekBarPointPx = seekBarRect.width * seekBarPoint;\n\n // do nothing if either rect isn't available\n // for example, if the player isn't in the DOM for testing\n if (!playerRect || !tooltipRect) {\n return;\n }\n\n // This is the space left of the `seekBarPoint` available within the bounds\n // of the player. We calculate any gap between the left edge of the player\n // and the left edge of the `SeekBar` and add the number of pixels in the\n // `SeekBar` before hitting the `seekBarPoint`\n let spaceLeftOfPoint = seekBarRect.left - playerRect.left + seekBarPointPx;\n\n // This is the space right of the `seekBarPoint` available within the bounds\n // of the player. 
We calculate the number of pixels from the `seekBarPoint`\n // to the right edge of the `SeekBar` and add to that any gap between the\n // right edge of the `SeekBar` and the player.\n let spaceRightOfPoint = seekBarRect.width - seekBarPointPx + (playerRect.right - seekBarRect.right);\n\n // spaceRightOfPoint is always NaN for mouse time display\n // because the seekbarRect does not have a right property. This causes\n // the mouse tool tip to be truncated when it's close to the right edge of the player.\n // In such cases, we ignore the `playerRect.right - seekBarRect.right` value when calculating.\n // For the sake of consistency, we ignore seekBarRect.left - playerRect.left for the left edge.\n if (!spaceRightOfPoint) {\n spaceRightOfPoint = seekBarRect.width - seekBarPointPx;\n spaceLeftOfPoint = seekBarPointPx;\n }\n // This is the number of pixels by which the tooltip will need to be pulled\n // further to the right to center it over the `seekBarPoint`.\n let pullTooltipBy = tooltipRect.width / 2;\n\n // Adjust the `pullTooltipBy` distance to the left or right depending on\n // the results of the space calculations above.\n if (spaceLeftOfPoint < pullTooltipBy) {\n pullTooltipBy += pullTooltipBy - spaceLeftOfPoint;\n } else if (spaceRightOfPoint < pullTooltipBy) {\n pullTooltipBy = spaceRightOfPoint;\n }\n\n // Due to the imprecision of decimal/ratio based calculations and varying\n // rounding behaviors, there are cases where the spacing adjustment is off\n // by a pixel or two. This adds insurance to these calculations.\n if (pullTooltipBy < 0) {\n pullTooltipBy = 0;\n } else if (pullTooltipBy > tooltipRect.width) {\n pullTooltipBy = tooltipRect.width;\n }\n\n // prevent small width fluctuations within 0.4px from\n // changing the value below.\n // This really helps for live to prevent the play\n // progress time tooltip from jittering\n pullTooltipBy = Math.round(pullTooltipBy);\n this.el_.style.right = `-${pullTooltipBy}px`;\n this.write(content);\n }\n\n /**\n * Write the time to the tooltip DOM element.\n *\n * @param {string} content\n * The formatted time for the tooltip.\n */\n write(content) {\n textContent(this.el_, content);\n }\n\n /**\n * Updates the position of the time tooltip relative to the `SeekBar`.\n *\n * @param {Object} seekBarRect\n * The `ClientRect` for the {@link SeekBar} element.\n *\n * @param {number} seekBarPoint\n * A number from 0 to 1, representing a horizontal reference point\n * from the left edge of the {@link SeekBar}\n *\n * @param {number} time\n * The time to update the tooltip to, not used during live playback\n *\n * @param {Function} cb\n * A function that will be called during the request animation frame\n * for tooltips that need to do additional animations from the default\n */\n updateTime(seekBarRect, seekBarPoint, time, cb) {\n this.requestNamedAnimationFrame('TimeTooltip#updateTime', () => {\n let content;\n const duration = this.player_.duration();\n if (this.player_.liveTracker && this.player_.liveTracker.isLive()) {\n const liveWindow = this.player_.liveTracker.liveWindow();\n const secondsBehind = liveWindow - seekBarPoint * liveWindow;\n content = (secondsBehind < 1 ? 
'' : '-') + formatTime(secondsBehind, liveWindow);\n } else {\n content = formatTime(time, duration);\n }\n this.update(seekBarRect, seekBarPoint, content);\n if (cb) {\n cb();\n }\n });\n }\n}\nComponent$1.registerComponent('TimeTooltip', TimeTooltip);\n\n/**\n * @file play-progress-bar.js\n */\n\n/**\n * Used by {@link SeekBar} to display media playback progress as part of the\n * {@link ProgressControl}.\n *\n * @extends Component\n */\nclass PlayProgressBar extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The {@link Player} that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.setIcon('circle');\n this.update = throttle(bind_(this, this.update), UPDATE_REFRESH_INTERVAL);\n }\n\n /**\n * Create the the DOM element for this class.\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-play-progress vjs-slider-bar'\n }, {\n 'aria-hidden': 'true'\n });\n }\n\n /**\n * Enqueues updates to its own DOM as well as the DOM of its\n * {@link TimeTooltip} child.\n *\n * @param {Object} seekBarRect\n * The `ClientRect` for the {@link SeekBar} element.\n *\n * @param {number} seekBarPoint\n * A number from 0 to 1, representing a horizontal reference point\n * from the left edge of the {@link SeekBar}\n */\n update(seekBarRect, seekBarPoint) {\n const timeTooltip = this.getChild('timeTooltip');\n if (!timeTooltip) {\n return;\n }\n const time = this.player_.scrubbing() ? this.player_.getCache().currentTime : this.player_.currentTime();\n timeTooltip.updateTime(seekBarRect, seekBarPoint, time);\n }\n}\n\n/**\n * Default options for {@link PlayProgressBar}.\n *\n * @type {Object}\n * @private\n */\nPlayProgressBar.prototype.options_ = {\n children: []\n};\n\n// Time tooltips should not be added to a player on mobile devices\nif (!IS_IOS && !IS_ANDROID) {\n PlayProgressBar.prototype.options_.children.push('timeTooltip');\n}\nComponent$1.registerComponent('PlayProgressBar', PlayProgressBar);\n\n/**\n * @file mouse-time-display.js\n */\n\n/**\n * The {@link MouseTimeDisplay} component tracks mouse movement over the\n * {@link ProgressControl}. 
It displays an indicator and a {@link TimeTooltip}\n * indicating the time which is represented by a given point in the\n * {@link ProgressControl}.\n *\n * @extends Component\n */\nclass MouseTimeDisplay extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The {@link Player} that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.update = throttle(bind_(this, this.update), UPDATE_REFRESH_INTERVAL);\n }\n\n /**\n * Create the DOM element for this class.\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-mouse-display'\n });\n }\n\n /**\n * Enqueues updates to its own DOM as well as the DOM of its\n * {@link TimeTooltip} child.\n *\n * @param {Object} seekBarRect\n * The `ClientRect` for the {@link SeekBar} element.\n *\n * @param {number} seekBarPoint\n * A number from 0 to 1, representing a horizontal reference point\n * from the left edge of the {@link SeekBar}\n */\n update(seekBarRect, seekBarPoint) {\n const time = seekBarPoint * this.player_.duration();\n this.getChild('timeTooltip').updateTime(seekBarRect, seekBarPoint, time, () => {\n this.el_.style.left = `${seekBarRect.width * seekBarPoint}px`;\n });\n }\n}\n\n/**\n * Default options for `MouseTimeDisplay`\n *\n * @type {Object}\n * @private\n */\nMouseTimeDisplay.prototype.options_ = {\n children: ['timeTooltip']\n};\nComponent$1.registerComponent('MouseTimeDisplay', MouseTimeDisplay);\n\n/**\n * @file seek-bar.js\n */\n\n// The number of seconds the `step*` functions move the timeline.\nconst STEP_SECONDS = 5;\n\n// The multiplier of STEP_SECONDS that PgUp/PgDown move the timeline.\nconst PAGE_KEY_MULTIPLIER = 12;\n\n/**\n * Seek bar and container for the progress bars. 
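A rough programmatic-seek sketch,\n * assuming the standard `videojs` global and a hypothetical element id 'my-player':\n *\n * @example\n * const player = videojs('my-player');\n * const seekBar = player.getChild('controlBar').getChild('progressControl').getChild('seekBar');\n * seekBar.stepForward(); // seek ahead by STEP_SECONDS (5 seconds)\n * seekBar.stepBack();    // seek back by STEP_SECONDS\n *\n * 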
Uses {@link PlayProgressBar}\n * as its `bar`.\n *\n * @extends Slider\n */\nclass SeekBar extends Slider {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.setEventHandlers_();\n }\n\n /**\n * Sets the event handlers\n *\n * @private\n */\n setEventHandlers_() {\n this.update_ = bind_(this, this.update);\n this.update = throttle(this.update_, UPDATE_REFRESH_INTERVAL);\n this.on(this.player_, ['ended', 'durationchange', 'timeupdate'], this.update);\n if (this.player_.liveTracker) {\n this.on(this.player_.liveTracker, 'liveedgechange', this.update);\n }\n\n // when playing, let's ensure we smoothly update the play progress bar\n // via an interval\n this.updateInterval = null;\n this.enableIntervalHandler_ = e => this.enableInterval_(e);\n this.disableIntervalHandler_ = e => this.disableInterval_(e);\n this.on(this.player_, ['playing'], this.enableIntervalHandler_);\n this.on(this.player_, ['ended', 'pause', 'waiting'], this.disableIntervalHandler_);\n\n // we don't need to update the play progress if the document is hidden,\n // also, this causes the CPU to spike and eventually crash the page on IE11.\n if ('hidden' in document && 'visibilityState' in document) {\n this.on(document, 'visibilitychange', this.toggleVisibility_);\n }\n }\n toggleVisibility_(e) {\n if (document.visibilityState === 'hidden') {\n this.cancelNamedAnimationFrame('SeekBar#update');\n this.cancelNamedAnimationFrame('Slider#update');\n this.disableInterval_(e);\n } else {\n if (!this.player_.ended() && !this.player_.paused()) {\n this.enableInterval_();\n }\n\n // we just switched back to the page and someone may be looking, so, update ASAP\n this.update();\n }\n }\n enableInterval_() {\n if (this.updateInterval) {\n return;\n }\n this.updateInterval = this.setInterval(this.update, UPDATE_REFRESH_INTERVAL);\n }\n disableInterval_(e) {\n if (this.player_.liveTracker && this.player_.liveTracker.isLive() && e && e.type !== 'ended') {\n return;\n }\n if (!this.updateInterval) {\n return;\n }\n this.clearInterval(this.updateInterval);\n this.updateInterval = null;\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-progress-holder'\n }, {\n 'aria-label': this.localize('Progress Bar')\n });\n }\n\n /**\n * This function updates the play progress bar and accessibility\n * attributes to whatever is passed in.\n *\n * @param {Event} [event]\n * The `timeupdate` or `ended` event that caused this to run.\n *\n * @listens Player#timeupdate\n *\n * @return {number}\n * The current percent at a number from 0-1\n */\n update(event) {\n // ignore updates while the tab is hidden\n if (document.visibilityState === 'hidden') {\n return;\n }\n const percent = super.update();\n this.requestNamedAnimationFrame('SeekBar#update', () => {\n const currentTime = this.player_.ended() ? 
this.player_.duration() : this.getCurrentTime_();\n const liveTracker = this.player_.liveTracker;\n let duration = this.player_.duration();\n if (liveTracker && liveTracker.isLive()) {\n duration = this.player_.liveTracker.liveCurrentTime();\n }\n if (this.percent_ !== percent) {\n // machine readable value of progress bar (percentage complete)\n this.el_.setAttribute('aria-valuenow', (percent * 100).toFixed(2));\n this.percent_ = percent;\n }\n if (this.currentTime_ !== currentTime || this.duration_ !== duration) {\n // human readable value of progress bar (time complete)\n this.el_.setAttribute('aria-valuetext', this.localize('progress bar timing: currentTime={1} duration={2}', [formatTime(currentTime, duration), formatTime(duration, duration)], '{1} of {2}'));\n this.currentTime_ = currentTime;\n this.duration_ = duration;\n }\n\n // update the progress bar time tooltip with the current time\n if (this.bar) {\n this.bar.update(getBoundingClientRect(this.el()), this.getProgress());\n }\n });\n return percent;\n }\n\n /**\n * Prevent liveThreshold from causing seeks to seem like they\n * are not happening from a user perspective.\n *\n * @param {number} ct\n * current time to seek to\n */\n userSeek_(ct) {\n if (this.player_.liveTracker && this.player_.liveTracker.isLive()) {\n this.player_.liveTracker.nextSeekedFromUser();\n }\n this.player_.currentTime(ct);\n }\n\n /**\n * Get the value of current time but allows for smooth scrubbing,\n * when player can't keep up.\n *\n * @return {number}\n * The current time value to display\n *\n * @private\n */\n getCurrentTime_() {\n return this.player_.scrubbing() ? this.player_.getCache().currentTime : this.player_.currentTime();\n }\n\n /**\n * Get the percentage of media played so far.\n *\n * @return {number}\n * The percentage of media played so far (0 to 1).\n */\n getPercent() {\n const currentTime = this.getCurrentTime_();\n let percent;\n const liveTracker = this.player_.liveTracker;\n if (liveTracker && liveTracker.isLive()) {\n percent = (currentTime - liveTracker.seekableStart()) / liveTracker.liveWindow();\n\n // prevent the percent from changing at the live edge\n if (liveTracker.atLiveEdge()) {\n percent = 1;\n }\n } else {\n percent = currentTime / this.player_.duration();\n }\n return percent;\n }\n\n /**\n * Handle mouse down on seek bar\n *\n * @param {MouseEvent} event\n * The `mousedown` event that caused this to run.\n *\n * @listens mousedown\n */\n handleMouseDown(event) {\n if (!isSingleLeftClick(event)) {\n return;\n }\n\n // Stop event propagation to prevent double fire in progress-control.js\n event.stopPropagation();\n this.videoWasPlaying = !this.player_.paused();\n this.player_.pause();\n super.handleMouseDown(event);\n }\n\n /**\n * Handle mouse move on seek bar\n *\n * @param {MouseEvent} event\n * The `mousemove` event that caused this to run.\n * @param {boolean} mouseDown this is a flag that should be set to true if `handleMouseMove` is called directly. It allows us to skip things that should not happen if coming from mouse down but should happen on regular mouse move handler. 
Defaults to false\n *\n * @listens mousemove\n */\n handleMouseMove(event, mouseDown = false) {\n if (!isSingleLeftClick(event) || isNaN(this.player_.duration())) {\n return;\n }\n if (!mouseDown && !this.player_.scrubbing()) {\n this.player_.scrubbing(true);\n }\n let newTime;\n const distance = this.calculateDistance(event);\n const liveTracker = this.player_.liveTracker;\n if (!liveTracker || !liveTracker.isLive()) {\n newTime = distance * this.player_.duration();\n\n // Don't let video end while scrubbing.\n if (newTime === this.player_.duration()) {\n newTime = newTime - 0.1;\n }\n } else {\n if (distance >= 0.99) {\n liveTracker.seekToLiveEdge();\n return;\n }\n const seekableStart = liveTracker.seekableStart();\n const seekableEnd = liveTracker.liveCurrentTime();\n newTime = seekableStart + distance * liveTracker.liveWindow();\n\n // Don't let video end while scrubbing.\n if (newTime >= seekableEnd) {\n newTime = seekableEnd;\n }\n\n // Compensate for precision differences so that currentTime is not less\n // than seekable start\n if (newTime <= seekableStart) {\n newTime = seekableStart + 0.1;\n }\n\n // On android seekableEnd can be Infinity sometimes,\n // this will cause newTime to be Infinity, which is\n // not a valid currentTime.\n if (newTime === Infinity) {\n return;\n }\n }\n\n // Set new time (tell player to seek to new time)\n this.userSeek_(newTime);\n if (this.player_.options_.enableSmoothSeeking) {\n this.update();\n }\n }\n enable() {\n super.enable();\n const mouseTimeDisplay = this.getChild('mouseTimeDisplay');\n if (!mouseTimeDisplay) {\n return;\n }\n mouseTimeDisplay.show();\n }\n disable() {\n super.disable();\n const mouseTimeDisplay = this.getChild('mouseTimeDisplay');\n if (!mouseTimeDisplay) {\n return;\n }\n mouseTimeDisplay.hide();\n }\n\n /**\n * Handle mouse up on seek bar\n *\n * @param {MouseEvent} event\n * The `mouseup` event that caused this to run.\n *\n * @listens mouseup\n */\n handleMouseUp(event) {\n super.handleMouseUp(event);\n\n // Stop event propagation to prevent double fire in progress-control.js\n if (event) {\n event.stopPropagation();\n }\n this.player_.scrubbing(false);\n\n /**\n * Trigger timeupdate because we're done seeking and the time has changed.\n * This is particularly useful for if the player is paused to time the time displays.\n *\n * @event Tech#timeupdate\n * @type {Event}\n */\n this.player_.trigger({\n type: 'timeupdate',\n target: this,\n manuallyTriggered: true\n });\n if (this.videoWasPlaying) {\n silencePromise(this.player_.play());\n } else {\n // We're done seeking and the time has changed.\n // If the player is paused, make sure we display the correct time on the seek bar.\n this.update_();\n }\n }\n\n /**\n * Move more quickly fast forward for keyboard-only users\n */\n stepForward() {\n this.userSeek_(this.player_.currentTime() + STEP_SECONDS);\n }\n\n /**\n * Move more quickly rewind for keyboard-only users\n */\n stepBack() {\n this.userSeek_(this.player_.currentTime() - STEP_SECONDS);\n }\n\n /**\n * Toggles the playback state of the player\n * This gets called when enter or space is used on the seekbar\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called\n *\n */\n handleAction(event) {\n if (this.player_.paused()) {\n this.player_.play();\n } else {\n this.player_.pause();\n }\n }\n\n /**\n * Called when this SeekBar has focus and a key gets pressed down.\n * Supports the following keys:\n *\n * Space or Enter key fire a click event\n * Home key moves to 
start of the timeline\n * End key moves to end of the timeline\n * Digit \"0\" through \"9\" keys move to 0%, 10% ... 80%, 90% of the timeline\n * PageDown key moves back a larger step than ArrowDown\n * PageUp key moves forward a large step\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n const liveTracker = this.player_.liveTracker;\n if (keycode.isEventKey(event, 'Space') || keycode.isEventKey(event, 'Enter')) {\n event.preventDefault();\n event.stopPropagation();\n this.handleAction(event);\n } else if (keycode.isEventKey(event, 'Home')) {\n event.preventDefault();\n event.stopPropagation();\n this.userSeek_(0);\n } else if (keycode.isEventKey(event, 'End')) {\n event.preventDefault();\n event.stopPropagation();\n if (liveTracker && liveTracker.isLive()) {\n this.userSeek_(liveTracker.liveCurrentTime());\n } else {\n this.userSeek_(this.player_.duration());\n }\n } else if (/^[0-9]$/.test(keycode(event))) {\n event.preventDefault();\n event.stopPropagation();\n const gotoFraction = (keycode.codes[keycode(event)] - keycode.codes['0']) * 10.0 / 100.0;\n if (liveTracker && liveTracker.isLive()) {\n this.userSeek_(liveTracker.seekableStart() + liveTracker.liveWindow() * gotoFraction);\n } else {\n this.userSeek_(this.player_.duration() * gotoFraction);\n }\n } else if (keycode.isEventKey(event, 'PgDn')) {\n event.preventDefault();\n event.stopPropagation();\n this.userSeek_(this.player_.currentTime() - STEP_SECONDS * PAGE_KEY_MULTIPLIER);\n } else if (keycode.isEventKey(event, 'PgUp')) {\n event.preventDefault();\n event.stopPropagation();\n this.userSeek_(this.player_.currentTime() + STEP_SECONDS * PAGE_KEY_MULTIPLIER);\n } else {\n // Pass keydown handling up for unsupported keys\n super.handleKeyDown(event);\n }\n }\n dispose() {\n this.disableInterval_();\n this.off(this.player_, ['ended', 'durationchange', 'timeupdate'], this.update);\n if (this.player_.liveTracker) {\n this.off(this.player_.liveTracker, 'liveedgechange', this.update);\n }\n this.off(this.player_, ['playing'], this.enableIntervalHandler_);\n this.off(this.player_, ['ended', 'pause', 'waiting'], this.disableIntervalHandler_);\n\n // we don't need to update the play progress if the document is hidden,\n // also, this causes the CPU to spike and eventually crash the page on IE11.\n if ('hidden' in document && 'visibilityState' in document) {\n this.off(document, 'visibilitychange', this.toggleVisibility_);\n }\n super.dispose();\n }\n}\n\n/**\n * Default options for the `SeekBar`\n *\n * @type {Object}\n * @private\n */\nSeekBar.prototype.options_ = {\n children: ['loadProgressBar', 'playProgressBar'],\n barName: 'playProgressBar'\n};\n\n// MouseTimeDisplay tooltips should not be added to a player on mobile devices\nif (!IS_IOS && !IS_ANDROID) {\n SeekBar.prototype.options_.children.splice(1, 0, 'mouseTimeDisplay');\n}\nComponent$1.registerComponent('SeekBar', SeekBar);\n\n/**\n * @file progress-control.js\n */\n\n/**\n * The Progress Control component contains the seek bar, load progress,\n * and play progress.\n *\n * @extends Component\n */\nclass ProgressControl extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.handleMouseMove = 
throttle(bind_(this, this.handleMouseMove), UPDATE_REFRESH_INTERVAL);\n this.throttledHandleMouseSeek = throttle(bind_(this, this.handleMouseSeek), UPDATE_REFRESH_INTERVAL);\n this.handleMouseUpHandler_ = e => this.handleMouseUp(e);\n this.handleMouseDownHandler_ = e => this.handleMouseDown(e);\n this.enable();\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-progress-control vjs-control'\n });\n }\n\n /**\n * When the mouse moves over the `ProgressControl`, the pointer position\n * gets passed down to the `MouseTimeDisplay` component.\n *\n * @param {Event} event\n * The `mousemove` event that caused this function to run.\n *\n * @listen mousemove\n */\n handleMouseMove(event) {\n const seekBar = this.getChild('seekBar');\n if (!seekBar) {\n return;\n }\n const playProgressBar = seekBar.getChild('playProgressBar');\n const mouseTimeDisplay = seekBar.getChild('mouseTimeDisplay');\n if (!playProgressBar && !mouseTimeDisplay) {\n return;\n }\n const seekBarEl = seekBar.el();\n const seekBarRect = findPosition(seekBarEl);\n let seekBarPoint = getPointerPosition(seekBarEl, event).x;\n\n // The default skin has a gap on either side of the `SeekBar`. This means\n // that it's possible to trigger this behavior outside the boundaries of\n // the `SeekBar`. This ensures we stay within it at all times.\n seekBarPoint = clamp(seekBarPoint, 0, 1);\n if (mouseTimeDisplay) {\n mouseTimeDisplay.update(seekBarRect, seekBarPoint);\n }\n if (playProgressBar) {\n playProgressBar.update(seekBarRect, seekBar.getProgress());\n }\n }\n\n /**\n * A throttled version of the {@link ProgressControl#handleMouseSeek} listener.\n *\n * @method ProgressControl#throttledHandleMouseSeek\n * @param {Event} event\n * The `mousemove` event that caused this function to run.\n *\n * @listen mousemove\n * @listen touchmove\n */\n\n /**\n * Handle `mousemove` or `touchmove` events on the `ProgressControl`.\n *\n * @param {Event} event\n * `mousedown` or `touchstart` event that triggered this function\n *\n * @listens mousemove\n * @listens touchmove\n */\n handleMouseSeek(event) {\n const seekBar = this.getChild('seekBar');\n if (seekBar) {\n seekBar.handleMouseMove(event);\n }\n }\n\n /**\n * Are controls are currently enabled for this progress control.\n *\n * @return {boolean}\n * true if controls are enabled, false otherwise\n */\n enabled() {\n return this.enabled_;\n }\n\n /**\n * Disable all controls on the progress control and its children\n */\n disable() {\n this.children().forEach(child => child.disable && child.disable());\n if (!this.enabled()) {\n return;\n }\n this.off(['mousedown', 'touchstart'], this.handleMouseDownHandler_);\n this.off(this.el_, 'mousemove', this.handleMouseMove);\n this.removeListenersAddedOnMousedownAndTouchstart();\n this.addClass('disabled');\n this.enabled_ = false;\n\n // Restore normal playback state if controls are disabled while scrubbing\n if (this.player_.scrubbing()) {\n const seekBar = this.getChild('seekBar');\n this.player_.scrubbing(false);\n if (seekBar.videoWasPlaying) {\n silencePromise(this.player_.play());\n }\n }\n }\n\n /**\n * Enable all controls on the progress control and its children\n */\n enable() {\n this.children().forEach(child => child.enable && child.enable());\n if (this.enabled()) {\n return;\n }\n this.on(['mousedown', 'touchstart'], this.handleMouseDownHandler_);\n this.on(this.el_, 'mousemove', this.handleMouseMove);\n 
this.removeClass('disabled');\n this.enabled_ = true;\n }\n\n /**\n * Cleanup listeners after the user finishes interacting with the progress controls\n */\n removeListenersAddedOnMousedownAndTouchstart() {\n const doc = this.el_.ownerDocument;\n this.off(doc, 'mousemove', this.throttledHandleMouseSeek);\n this.off(doc, 'touchmove', this.throttledHandleMouseSeek);\n this.off(doc, 'mouseup', this.handleMouseUpHandler_);\n this.off(doc, 'touchend', this.handleMouseUpHandler_);\n }\n\n /**\n * Handle `mousedown` or `touchstart` events on the `ProgressControl`.\n *\n * @param {Event} event\n * `mousedown` or `touchstart` event that triggered this function\n *\n * @listens mousedown\n * @listens touchstart\n */\n handleMouseDown(event) {\n const doc = this.el_.ownerDocument;\n const seekBar = this.getChild('seekBar');\n if (seekBar) {\n seekBar.handleMouseDown(event);\n }\n this.on(doc, 'mousemove', this.throttledHandleMouseSeek);\n this.on(doc, 'touchmove', this.throttledHandleMouseSeek);\n this.on(doc, 'mouseup', this.handleMouseUpHandler_);\n this.on(doc, 'touchend', this.handleMouseUpHandler_);\n }\n\n /**\n * Handle `mouseup` or `touchend` events on the `ProgressControl`.\n *\n * @param {Event} event\n * `mouseup` or `touchend` event that triggered this function.\n *\n * @listens touchend\n * @listens mouseup\n */\n handleMouseUp(event) {\n const seekBar = this.getChild('seekBar');\n if (seekBar) {\n seekBar.handleMouseUp(event);\n }\n this.removeListenersAddedOnMousedownAndTouchstart();\n }\n}\n\n/**\n * Default options for `ProgressControl`\n *\n * @type {Object}\n * @private\n */\nProgressControl.prototype.options_ = {\n children: ['seekBar']\n};\nComponent$1.registerComponent('ProgressControl', ProgressControl);\n\n/**\n * @file picture-in-picture-toggle.js\n */\n\n/**\n * Toggle Picture-in-Picture mode\n *\n * @extends Button\n */\nclass PictureInPictureToggle extends Button {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @listens Player#enterpictureinpicture\n * @listens Player#leavepictureinpicture\n */\n constructor(player, options) {\n super(player, options);\n this.setIcon('picture-in-picture-enter');\n this.on(player, ['enterpictureinpicture', 'leavepictureinpicture'], e => this.handlePictureInPictureChange(e));\n this.on(player, ['disablepictureinpicturechanged', 'loadedmetadata'], e => this.handlePictureInPictureEnabledChange(e));\n this.on(player, ['loadedmetadata', 'audioonlymodechange', 'audiopostermodechange'], () => this.handlePictureInPictureAudioModeChange());\n\n // TODO: Deactivate button on player emptied event.\n this.disable();\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-picture-in-picture-control vjs-hidden ${super.buildCSSClass()}`;\n }\n\n /**\n * Displays or hides the button depending on the audio mode detection.\n * Exits picture-in-picture if it is enabled when switching to audio mode.\n */\n handlePictureInPictureAudioModeChange() {\n // This audio detection will not detect HLS or DASH audio-only streams because there was no reliable way to detect them at the time\n const isSourceAudio = this.player_.currentType().substring(0, 5) === 'audio';\n const isAudioMode = isSourceAudio || this.player_.audioPosterMode() || this.player_.audioOnlyMode();\n if 
(!isAudioMode) {\n this.show();\n return;\n }\n if (this.player_.isInPictureInPicture()) {\n this.player_.exitPictureInPicture();\n }\n this.hide();\n }\n\n /**\n * Enables or disables button based on availability of a Picture-In-Picture mode.\n *\n * Enabled if\n * - `player.options().enableDocumentPictureInPicture` is true and\n * window.documentPictureInPicture is available; or\n * - `player.disablePictureInPicture()` is false and\n * element.requestPictureInPicture is available\n */\n handlePictureInPictureEnabledChange() {\n if (document.pictureInPictureEnabled && this.player_.disablePictureInPicture() === false || this.player_.options_.enableDocumentPictureInPicture && 'documentPictureInPicture' in window$1) {\n this.enable();\n } else {\n this.disable();\n }\n }\n\n /**\n * Handles enterpictureinpicture and leavepictureinpicture on the player and change control text accordingly.\n *\n * @param {Event} [event]\n * The {@link Player#enterpictureinpicture} or {@link Player#leavepictureinpicture} event that caused this function to be\n * called.\n *\n * @listens Player#enterpictureinpicture\n * @listens Player#leavepictureinpicture\n */\n handlePictureInPictureChange(event) {\n if (this.player_.isInPictureInPicture()) {\n this.setIcon('picture-in-picture-exit');\n this.controlText('Exit Picture-in-Picture');\n } else {\n this.setIcon('picture-in-picture-enter');\n this.controlText('Picture-in-Picture');\n }\n this.handlePictureInPictureEnabledChange();\n }\n\n /**\n * This gets called when an `PictureInPictureToggle` is \"clicked\". See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n if (!this.player_.isInPictureInPicture()) {\n this.player_.requestPictureInPicture();\n } else {\n this.player_.exitPictureInPicture();\n }\n }\n\n /**\n * Show the `Component`s element if it is hidden by removing the\n * 'vjs-hidden' class name from it only in browsers that support the Picture-in-Picture API.\n */\n show() {\n // Does not allow to display the pictureInPictureToggle in browsers that do not support the Picture-in-Picture API, e.g. Firefox.\n if (typeof document.exitPictureInPicture !== 'function') {\n return;\n }\n super.show();\n }\n}\n\n/**\n * The text that should display over the `PictureInPictureToggle`s controls. 
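It can be\n * overridden per player through the component's `controlText()` method. A hedged sketch,\n * assuming an existing player instance named `player`:\n *\n * @example\n * const pipToggle = player.controlBar.getChild('pictureInPictureToggle');\n * if (pipToggle) {\n *   pipToggle.controlText('Mini player');\n * }\n *\n * 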
Added for localization.\n *\n * @type {string}\n * @protected\n */\nPictureInPictureToggle.prototype.controlText_ = 'Picture-in-Picture';\nComponent$1.registerComponent('PictureInPictureToggle', PictureInPictureToggle);\n\n/**\n * @file fullscreen-toggle.js\n */\n\n/**\n * Toggle fullscreen video\n *\n * @extends Button\n */\nclass FullscreenToggle extends Button {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.setIcon('fullscreen-enter');\n this.on(player, 'fullscreenchange', e => this.handleFullscreenChange(e));\n if (document[player.fsApi_.fullscreenEnabled] === false) {\n this.disable();\n }\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-fullscreen-control ${super.buildCSSClass()}`;\n }\n\n /**\n * Handles fullscreenchange on the player and change control text accordingly.\n *\n * @param {Event} [event]\n * The {@link Player#fullscreenchange} event that caused this function to be\n * called.\n *\n * @listens Player#fullscreenchange\n */\n handleFullscreenChange(event) {\n if (this.player_.isFullscreen()) {\n this.controlText('Exit Fullscreen');\n this.setIcon('fullscreen-exit');\n } else {\n this.controlText('Fullscreen');\n this.setIcon('fullscreen-enter');\n }\n }\n\n /**\n * This gets called when an `FullscreenToggle` is \"clicked\". See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n if (!this.player_.isFullscreen()) {\n this.player_.requestFullscreen();\n } else {\n this.player_.exitFullscreen();\n }\n }\n}\n\n/**\n * The text that should display over the `FullscreenToggle`s controls. 
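The 'Fullscreen' and 'Exit Fullscreen' strings set in handleFullscreenChange() run through\n * the player's localization, so they can be translated. A hedged sketch, assuming the usual\n * `videojs` global exported by this bundle (the translations shown are only illustrative):\n *\n * @example\n * videojs.addLanguage('de', {\n *   'Fullscreen': 'Vollbild',\n *   'Exit Fullscreen': 'Vollbild beenden'\n * });\n *\n * 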
Added for localization.\n *\n * @type {string}\n * @protected\n */\nFullscreenToggle.prototype.controlText_ = 'Fullscreen';\nComponent$1.registerComponent('FullscreenToggle', FullscreenToggle);\n\n/**\n * Check if volume control is supported and if it isn't hide the\n * `Component` that was passed using the `vjs-hidden` class.\n *\n * @param { import('../../component').default } self\n * The component that should be hidden if volume is unsupported\n *\n * @param { import('../../player').default } player\n * A reference to the player\n *\n * @private\n */\nconst checkVolumeSupport = function (self, player) {\n // hide volume controls when they're not supported by the current tech\n if (player.tech_ && !player.tech_.featuresVolumeControl) {\n self.addClass('vjs-hidden');\n }\n self.on(player, 'loadstart', function () {\n if (!player.tech_.featuresVolumeControl) {\n self.addClass('vjs-hidden');\n } else {\n self.removeClass('vjs-hidden');\n }\n });\n};\n\n/**\n * @file volume-level.js\n */\n\n/**\n * Shows volume level\n *\n * @extends Component\n */\nclass VolumeLevel extends Component$1 {\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl('div', {\n className: 'vjs-volume-level'\n });\n this.setIcon('circle', el);\n el.appendChild(super.createEl('span', {\n className: 'vjs-control-text'\n }));\n return el;\n }\n}\nComponent$1.registerComponent('VolumeLevel', VolumeLevel);\n\n/**\n * @file volume-level-tooltip.js\n */\n\n/**\n * Volume level tooltips display a volume above or side by side the volume bar.\n *\n * @extends Component\n */\nclass VolumeLevelTooltip extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The {@link Player} that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.update = throttle(bind_(this, this.update), UPDATE_REFRESH_INTERVAL);\n }\n\n /**\n * Create the volume tooltip DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-volume-tooltip'\n }, {\n 'aria-hidden': 'true'\n });\n }\n\n /**\n * Updates the position of the tooltip relative to the `VolumeBar` and\n * its content text.\n *\n * @param {Object} rangeBarRect\n * The `ClientRect` for the {@link VolumeBar} element.\n *\n * @param {number} rangeBarPoint\n * A number from 0 to 1, representing a horizontal/vertical reference point\n * from the left edge of the {@link VolumeBar}\n *\n * @param {boolean} vertical\n * Referees to the Volume control position\n * in the control bar{@link VolumeControl}\n *\n */\n update(rangeBarRect, rangeBarPoint, vertical, content) {\n if (!vertical) {\n const tooltipRect = getBoundingClientRect(this.el_);\n const playerRect = getBoundingClientRect(this.player_.el());\n const volumeBarPointPx = rangeBarRect.width * rangeBarPoint;\n if (!playerRect || !tooltipRect) {\n return;\n }\n const spaceLeftOfPoint = rangeBarRect.left - playerRect.left + volumeBarPointPx;\n const spaceRightOfPoint = rangeBarRect.width - volumeBarPointPx + (playerRect.right - rangeBarRect.right);\n let pullTooltipBy = tooltipRect.width / 2;\n if (spaceLeftOfPoint < pullTooltipBy) {\n pullTooltipBy += pullTooltipBy - spaceLeftOfPoint;\n } else if (spaceRightOfPoint < pullTooltipBy) {\n pullTooltipBy = 
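/* not enough room to the right of the pointer; clamp the tooltip to the space that remains */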
spaceRightOfPoint;\n }\n if (pullTooltipBy < 0) {\n pullTooltipBy = 0;\n } else if (pullTooltipBy > tooltipRect.width) {\n pullTooltipBy = tooltipRect.width;\n }\n this.el_.style.right = `-${pullTooltipBy}px`;\n }\n this.write(`${content}%`);\n }\n\n /**\n * Write the volume to the tooltip DOM element.\n *\n * @param {string} content\n * The formatted volume for the tooltip.\n */\n write(content) {\n textContent(this.el_, content);\n }\n\n /**\n * Updates the position of the volume tooltip relative to the `VolumeBar`.\n *\n * @param {Object} rangeBarRect\n * The `ClientRect` for the {@link VolumeBar} element.\n *\n * @param {number} rangeBarPoint\n * A number from 0 to 1, representing a horizontal/vertical reference point\n * from the left edge of the {@link VolumeBar}\n *\n * @param {boolean} vertical\n * Referees to the Volume control position\n * in the control bar{@link VolumeControl}\n *\n * @param {number} volume\n * The volume level to update the tooltip to\n *\n * @param {Function} cb\n * A function that will be called during the request animation frame\n * for tooltips that need to do additional animations from the default\n */\n updateVolume(rangeBarRect, rangeBarPoint, vertical, volume, cb) {\n this.requestNamedAnimationFrame('VolumeLevelTooltip#updateVolume', () => {\n this.update(rangeBarRect, rangeBarPoint, vertical, volume.toFixed(0));\n if (cb) {\n cb();\n }\n });\n }\n}\nComponent$1.registerComponent('VolumeLevelTooltip', VolumeLevelTooltip);\n\n/**\n * @file mouse-volume-level-display.js\n */\n\n/**\n * The {@link MouseVolumeLevelDisplay} component tracks mouse movement over the\n * {@link VolumeControl}. It displays an indicator and a {@link VolumeLevelTooltip}\n * indicating the volume level which is represented by a given point in the\n * {@link VolumeBar}.\n *\n * @extends Component\n */\nclass MouseVolumeLevelDisplay extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The {@link Player} that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.update = throttle(bind_(this, this.update), UPDATE_REFRESH_INTERVAL);\n }\n\n /**\n * Create the DOM element for this class.\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-mouse-display'\n });\n }\n\n /**\n * Enquires updates to its own DOM as well as the DOM of its\n * {@link VolumeLevelTooltip} child.\n *\n * @param {Object} rangeBarRect\n * The `ClientRect` for the {@link VolumeBar} element.\n *\n * @param {number} rangeBarPoint\n * A number from 0 to 1, representing a horizontal/vertical reference point\n * from the left edge of the {@link VolumeBar}\n *\n * @param {boolean} vertical\n * Referees to the Volume control position\n * in the control bar{@link VolumeControl}\n *\n */\n update(rangeBarRect, rangeBarPoint, vertical) {\n const volume = 100 * rangeBarPoint;\n this.getChild('volumeLevelTooltip').updateVolume(rangeBarRect, rangeBarPoint, vertical, volume, () => {\n if (vertical) {\n this.el_.style.bottom = `${rangeBarRect.height * rangeBarPoint}px`;\n } else {\n this.el_.style.left = `${rangeBarRect.width * rangeBarPoint}px`;\n }\n });\n }\n}\n\n/**\n * Default options for `MouseVolumeLevelDisplay`\n *\n * @type {Object}\n * @private\n */\nMouseVolumeLevelDisplay.prototype.options_ = {\n children: 
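/* single child: the VolumeLevelTooltip that renders the percentage for the hovered point */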
['volumeLevelTooltip']\n};\nComponent$1.registerComponent('MouseVolumeLevelDisplay', MouseVolumeLevelDisplay);\n\n/**\n * @file volume-bar.js\n */\n\n/**\n * The bar that contains the volume level and can be clicked on to adjust the level\n *\n * @extends Slider\n */\nclass VolumeBar extends Slider {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.on('slideractive', e => this.updateLastVolume_(e));\n this.on(player, 'volumechange', e => this.updateARIAAttributes(e));\n player.ready(() => this.updateARIAAttributes());\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-volume-bar vjs-slider-bar'\n }, {\n 'aria-label': this.localize('Volume Level'),\n 'aria-live': 'polite'\n });\n }\n\n /**\n * Handle mouse down on volume bar\n *\n * @param {Event} event\n * The `mousedown` event that caused this to run.\n *\n * @listens mousedown\n */\n handleMouseDown(event) {\n if (!isSingleLeftClick(event)) {\n return;\n }\n super.handleMouseDown(event);\n }\n\n /**\n * Handle movement events on the {@link VolumeMenuButton}.\n *\n * @param {Event} event\n * The event that caused this function to run.\n *\n * @listens mousemove\n */\n handleMouseMove(event) {\n const mouseVolumeLevelDisplay = this.getChild('mouseVolumeLevelDisplay');\n if (mouseVolumeLevelDisplay) {\n const volumeBarEl = this.el();\n const volumeBarRect = getBoundingClientRect(volumeBarEl);\n const vertical = this.vertical();\n let volumeBarPoint = getPointerPosition(volumeBarEl, event);\n volumeBarPoint = vertical ? volumeBarPoint.y : volumeBarPoint.x;\n // The default skin has a gap on either side of the `VolumeBar`. This means\n // that it's possible to trigger this behavior outside the boundaries of\n // the `VolumeBar`. This ensures we stay within it at all times.\n volumeBarPoint = clamp(volumeBarPoint, 0, 1);\n mouseVolumeLevelDisplay.update(volumeBarRect, volumeBarPoint, vertical);\n }\n if (!isSingleLeftClick(event)) {\n return;\n }\n this.checkMuted();\n this.player_.volume(this.calculateDistance(event));\n }\n\n /**\n * If the player is muted unmute it.\n */\n checkMuted() {\n if (this.player_.muted()) {\n this.player_.muted(false);\n }\n }\n\n /**\n * Get percent of volume level\n *\n * @return {number}\n * Volume level percent as a decimal number.\n */\n getPercent() {\n if (this.player_.muted()) {\n return 0;\n }\n return this.player_.volume();\n }\n\n /**\n * Increase volume level for keyboard users\n */\n stepForward() {\n this.checkMuted();\n this.player_.volume(this.player_.volume() + 0.1);\n }\n\n /**\n * Decrease volume level for keyboard users\n */\n stepBack() {\n this.checkMuted();\n this.player_.volume(this.player_.volume() - 0.1);\n }\n\n /**\n * Update ARIA accessibility attributes\n *\n * @param {Event} [event]\n * The `volumechange` event that caused this function to run.\n *\n * @listens Player#volumechange\n */\n updateARIAAttributes(event) {\n const ariaValue = this.player_.muted() ? 
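/* while muted, report 0 to assistive technology regardless of the underlying volume */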
0 : this.volumeAsPercentage_();\n this.el_.setAttribute('aria-valuenow', ariaValue);\n this.el_.setAttribute('aria-valuetext', ariaValue + '%');\n }\n\n /**\n * Returns the current value of the player volume as a percentage\n *\n * @private\n */\n volumeAsPercentage_() {\n return Math.round(this.player_.volume() * 100);\n }\n\n /**\n * When user starts dragging the VolumeBar, store the volume and listen for\n * the end of the drag. When the drag ends, if the volume was set to zero,\n * set lastVolume to the stored volume.\n *\n * @listens slideractive\n * @private\n */\n updateLastVolume_() {\n const volumeBeforeDrag = this.player_.volume();\n this.one('sliderinactive', () => {\n if (this.player_.volume() === 0) {\n this.player_.lastVolume_(volumeBeforeDrag);\n }\n });\n }\n}\n\n/**\n * Default options for the `VolumeBar`\n *\n * @type {Object}\n * @private\n */\nVolumeBar.prototype.options_ = {\n children: ['volumeLevel'],\n barName: 'volumeLevel'\n};\n\n// MouseVolumeLevelDisplay tooltip should not be added to a player on mobile devices\nif (!IS_IOS && !IS_ANDROID) {\n VolumeBar.prototype.options_.children.splice(0, 0, 'mouseVolumeLevelDisplay');\n}\n\n/**\n * Call the update event for this Slider when this event happens on the player.\n *\n * @type {string}\n */\nVolumeBar.prototype.playerEvent = 'volumechange';\nComponent$1.registerComponent('VolumeBar', VolumeBar);\n\n/**\n * @file volume-control.js\n */\n\n/**\n * The component for controlling the volume level\n *\n * @extends Component\n */\nclass VolumeControl extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n */\n constructor(player, options = {}) {\n options.vertical = options.vertical || false;\n\n // Pass the vertical option down to the VolumeBar if\n // the VolumeBar is turned on.\n if (typeof options.volumeBar === 'undefined' || isPlain(options.volumeBar)) {\n options.volumeBar = options.volumeBar || {};\n options.volumeBar.vertical = options.vertical;\n }\n super(player, options);\n\n // hide this control if volume support is missing\n checkVolumeSupport(this, player);\n this.throttledHandleMouseMove = throttle(bind_(this, this.handleMouseMove), UPDATE_REFRESH_INTERVAL);\n this.handleMouseUpHandler_ = e => this.handleMouseUp(e);\n this.on('mousedown', e => this.handleMouseDown(e));\n this.on('touchstart', e => this.handleMouseDown(e));\n this.on('mousemove', e => this.handleMouseMove(e));\n\n // while the slider is active (the mouse has been pressed down and\n // is dragging) or in focus we do not want to hide the VolumeBar\n this.on(this.volumeBar, ['focus', 'slideractive'], () => {\n this.volumeBar.addClass('vjs-slider-active');\n this.addClass('vjs-slider-active');\n this.trigger('slideractive');\n });\n this.on(this.volumeBar, ['blur', 'sliderinactive'], () => {\n this.volumeBar.removeClass('vjs-slider-active');\n this.removeClass('vjs-slider-active');\n this.trigger('sliderinactive');\n });\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n let orientationClass = 'vjs-volume-horizontal';\n if (this.options_.vertical) {\n orientationClass = 'vjs-volume-vertical';\n }\n return super.createEl('div', {\n className: `vjs-volume-control vjs-control ${orientationClass}`\n });\n }\n\n /**\n * Handle `mousedown` or `touchstart` events on 
the `VolumeControl`.\n *\n * @param {Event} event\n * `mousedown` or `touchstart` event that triggered this function\n *\n * @listens mousedown\n * @listens touchstart\n */\n handleMouseDown(event) {\n const doc = this.el_.ownerDocument;\n this.on(doc, 'mousemove', this.throttledHandleMouseMove);\n this.on(doc, 'touchmove', this.throttledHandleMouseMove);\n this.on(doc, 'mouseup', this.handleMouseUpHandler_);\n this.on(doc, 'touchend', this.handleMouseUpHandler_);\n }\n\n /**\n * Handle `mouseup` or `touchend` events on the `VolumeControl`.\n *\n * @param {Event} event\n * `mouseup` or `touchend` event that triggered this function.\n *\n * @listens touchend\n * @listens mouseup\n */\n handleMouseUp(event) {\n const doc = this.el_.ownerDocument;\n this.off(doc, 'mousemove', this.throttledHandleMouseMove);\n this.off(doc, 'touchmove', this.throttledHandleMouseMove);\n this.off(doc, 'mouseup', this.handleMouseUpHandler_);\n this.off(doc, 'touchend', this.handleMouseUpHandler_);\n }\n\n /**\n * Handle `mousedown` or `touchstart` events on the `VolumeControl`.\n *\n * @param {Event} event\n * `mousedown` or `touchstart` event that triggered this function\n *\n * @listens mousedown\n * @listens touchstart\n */\n handleMouseMove(event) {\n this.volumeBar.handleMouseMove(event);\n }\n}\n\n/**\n * Default options for the `VolumeControl`\n *\n * @type {Object}\n * @private\n */\nVolumeControl.prototype.options_ = {\n children: ['volumeBar']\n};\nComponent$1.registerComponent('VolumeControl', VolumeControl);\n\n/**\n * Check if muting volume is supported and if it isn't hide the mute toggle\n * button.\n *\n * @param { import('../../component').default } self\n * A reference to the mute toggle button\n *\n * @param { import('../../player').default } player\n * A reference to the player\n *\n * @private\n */\nconst checkMuteSupport = function (self, player) {\n // hide mute toggle button if it's not supported by the current tech\n if (player.tech_ && !player.tech_.featuresMuteControl) {\n self.addClass('vjs-hidden');\n }\n self.on(player, 'loadstart', function () {\n if (!player.tech_.featuresMuteControl) {\n self.addClass('vjs-hidden');\n } else {\n self.removeClass('vjs-hidden');\n }\n });\n};\n\n/**\n * @file mute-toggle.js\n */\n\n/**\n * A button component for muting the audio.\n *\n * @extends Button\n */\nclass MuteToggle extends Button {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n\n // hide this control if volume support is missing\n checkMuteSupport(this, player);\n this.on(player, ['loadstart', 'volumechange'], e => this.update(e));\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-mute-control ${super.buildCSSClass()}`;\n }\n\n /**\n * This gets called when an `MuteToggle` is \"clicked\". See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n const vol = this.player_.volume();\n const lastVolume = this.player_.lastVolume_();\n if (vol === 0) {\n const volumeToSet = lastVolume < 0.1 ? 
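/* avoid restoring a near-silent level when unmuting from a volume of 0 */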
0.1 : lastVolume;\n this.player_.volume(volumeToSet);\n this.player_.muted(false);\n } else {\n this.player_.muted(this.player_.muted() ? false : true);\n }\n }\n\n /**\n * Update the `MuteToggle` button based on the state of `volume` and `muted`\n * on the player.\n *\n * @param {Event} [event]\n * The {@link Player#loadstart} event if this function was called\n * through an event.\n *\n * @listens Player#loadstart\n * @listens Player#volumechange\n */\n update(event) {\n this.updateIcon_();\n this.updateControlText_();\n }\n\n /**\n * Update the appearance of the `MuteToggle` icon.\n *\n * Possible states (given `level` variable below):\n * - 0: crossed out\n * - 1: zero bars of volume\n * - 2: one bar of volume\n * - 3: two bars of volume\n *\n * @private\n */\n updateIcon_() {\n const vol = this.player_.volume();\n let level = 3;\n this.setIcon('volume-high');\n\n // in iOS when a player is loaded with muted attribute\n // and volume is changed with a native mute button\n // we want to make sure muted state is updated\n if (IS_IOS && this.player_.tech_ && this.player_.tech_.el_) {\n this.player_.muted(this.player_.tech_.el_.muted);\n }\n if (vol === 0 || this.player_.muted()) {\n this.setIcon('volume-mute');\n level = 0;\n } else if (vol < 0.33) {\n this.setIcon('volume-low');\n level = 1;\n } else if (vol < 0.67) {\n this.setIcon('volume-medium');\n level = 2;\n }\n removeClass(this.el_, [0, 1, 2, 3].reduce((str, i) => str + `${i ? ' ' : ''}vjs-vol-${i}`, ''));\n addClass(this.el_, `vjs-vol-${level}`);\n }\n\n /**\n * If `muted` has changed on the player, update the control text\n * (`title` attribute on `vjs-mute-control` element and content of\n * `vjs-control-text` element).\n *\n * @private\n */\n updateControlText_() {\n const soundOff = this.player_.muted() || this.player_.volume() === 0;\n const text = soundOff ? 'Unmute' : 'Mute';\n if (this.controlText() !== text) {\n this.controlText(text);\n }\n }\n}\n\n/**\n * The text that should display over the `MuteToggle`s controls. 
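A click roughly corresponds to toggling `player.muted()`, except that unmuting from a\n * volume of 0 also restores the last non-zero volume (see handleClick() above). A hedged\n * sketch using only public player methods, with `player` an assumed existing instance:\n *\n * @example\n * player.muted(!player.muted());\n *\n * 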
Added for localization.\n *\n * @type {string}\n * @protected\n */\nMuteToggle.prototype.controlText_ = 'Mute';\nComponent$1.registerComponent('MuteToggle', MuteToggle);\n\n/**\n * @file volume-control.js\n */\n\n/**\n * A Component to contain the MuteToggle and VolumeControl so that\n * they can work together.\n *\n * @extends Component\n */\nclass VolumePanel extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n */\n constructor(player, options = {}) {\n if (typeof options.inline !== 'undefined') {\n options.inline = options.inline;\n } else {\n options.inline = true;\n }\n\n // pass the inline option down to the VolumeControl as vertical if\n // the VolumeControl is on.\n if (typeof options.volumeControl === 'undefined' || isPlain(options.volumeControl)) {\n options.volumeControl = options.volumeControl || {};\n options.volumeControl.vertical = !options.inline;\n }\n super(player, options);\n\n // this handler is used by mouse handler methods below\n this.handleKeyPressHandler_ = e => this.handleKeyPress(e);\n this.on(player, ['loadstart'], e => this.volumePanelState_(e));\n this.on(this.muteToggle, 'keyup', e => this.handleKeyPress(e));\n this.on(this.volumeControl, 'keyup', e => this.handleVolumeControlKeyUp(e));\n this.on('keydown', e => this.handleKeyPress(e));\n this.on('mouseover', e => this.handleMouseOver(e));\n this.on('mouseout', e => this.handleMouseOut(e));\n\n // while the slider is active (the mouse has been pressed down and\n // is dragging) we do not want to hide the VolumeBar\n this.on(this.volumeControl, ['slideractive'], this.sliderActive_);\n this.on(this.volumeControl, ['sliderinactive'], this.sliderInactive_);\n }\n\n /**\n * Add vjs-slider-active class to the VolumePanel\n *\n * @listens VolumeControl#slideractive\n * @private\n */\n sliderActive_() {\n this.addClass('vjs-slider-active');\n }\n\n /**\n * Removes vjs-slider-active class to the VolumePanel\n *\n * @listens VolumeControl#sliderinactive\n * @private\n */\n sliderInactive_() {\n this.removeClass('vjs-slider-active');\n }\n\n /**\n * Adds vjs-hidden or vjs-mute-toggle-only to the VolumePanel\n * depending on MuteToggle and VolumeControl state\n *\n * @listens Player#loadstart\n * @private\n */\n volumePanelState_() {\n // hide volume panel if neither volume control or mute toggle\n // are displayed\n if (this.volumeControl.hasClass('vjs-hidden') && this.muteToggle.hasClass('vjs-hidden')) {\n this.addClass('vjs-hidden');\n }\n\n // if only mute toggle is visible we don't want\n // volume panel expanding when hovered or active\n if (this.volumeControl.hasClass('vjs-hidden') && !this.muteToggle.hasClass('vjs-hidden')) {\n this.addClass('vjs-mute-toggle-only');\n }\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n let orientationClass = 'vjs-volume-panel-horizontal';\n if (!this.options_.inline) {\n orientationClass = 'vjs-volume-panel-vertical';\n }\n return super.createEl('div', {\n className: `vjs-volume-panel vjs-control ${orientationClass}`\n });\n }\n\n /**\n * Dispose of the `volume-panel` and all child components.\n */\n dispose() {\n this.handleMouseOut();\n super.dispose();\n }\n\n /**\n * Handles `keyup` events on the `VolumeControl`, looking for ESC, which closes\n * the volume panel and sets focus on 
`MuteToggle`.\n *\n * @param {Event} event\n * The `keyup` event that caused this function to be called.\n *\n * @listens keyup\n */\n handleVolumeControlKeyUp(event) {\n if (keycode.isEventKey(event, 'Esc')) {\n this.muteToggle.focus();\n }\n }\n\n /**\n * This gets called when a `VolumePanel` gains hover via a `mouseover` event.\n * Turns on listening for `mouseover` event. When they happen it\n * calls `this.handleMouseOver`.\n *\n * @param {Event} event\n * The `mouseover` event that caused this function to be called.\n *\n * @listens mouseover\n */\n handleMouseOver(event) {\n this.addClass('vjs-hover');\n on(document, 'keyup', this.handleKeyPressHandler_);\n }\n\n /**\n * This gets called when a `VolumePanel` gains hover via a `mouseout` event.\n * Turns on listening for `mouseout` event. When they happen it\n * calls `this.handleMouseOut`.\n *\n * @param {Event} event\n * The `mouseout` event that caused this function to be called.\n *\n * @listens mouseout\n */\n handleMouseOut(event) {\n this.removeClass('vjs-hover');\n off(document, 'keyup', this.handleKeyPressHandler_);\n }\n\n /**\n * Handles `keyup` event on the document or `keydown` event on the `VolumePanel`,\n * looking for ESC, which hides the `VolumeControl`.\n *\n * @param {Event} event\n * The keypress that triggered this event.\n *\n * @listens keydown | keyup\n */\n handleKeyPress(event) {\n if (keycode.isEventKey(event, 'Esc')) {\n this.handleMouseOut();\n }\n }\n}\n\n/**\n * Default options for the `VolumeControl`\n *\n * @type {Object}\n * @private\n */\nVolumePanel.prototype.options_ = {\n children: ['muteToggle', 'volumeControl']\n};\nComponent$1.registerComponent('VolumePanel', VolumePanel);\n\n/**\n * Button to skip forward a configurable amount of time\n * through a video. Renders in the control bar.\n *\n * e.g. options: {controlBar: {skipButtons: forward: 5}}\n *\n * @extends Button\n */\nclass SkipForward extends Button {\n constructor(player, options) {\n super(player, options);\n this.validOptions = [5, 10, 30];\n this.skipTime = this.getSkipForwardTime();\n if (this.skipTime && this.validOptions.includes(this.skipTime)) {\n this.setIcon(`forward-${this.skipTime}`);\n this.controlText(this.localize('Skip forward {1} seconds', [this.skipTime.toLocaleString(player.language())]));\n this.show();\n } else {\n this.hide();\n }\n }\n getSkipForwardTime() {\n const playerOptions = this.options_.playerOptions;\n return playerOptions.controlBar && playerOptions.controlBar.skipButtons && playerOptions.controlBar.skipButtons.forward;\n }\n buildCSSClass() {\n return `vjs-skip-forward-${this.getSkipForwardTime()} ${super.buildCSSClass()}`;\n }\n\n /**\n * On click, skips forward in the duration/seekable range by a configurable amount of seconds.\n * If the time left in the duration/seekable range is less than the configured 'skip forward' time,\n * skips to end of duration/seekable range.\n *\n * Handle a click on a `SkipForward` button\n *\n * @param {EventTarget~Event} event\n * The `click` event that caused this function\n * to be called\n */\n handleClick(event) {\n if (isNaN(this.player_.duration())) {\n return;\n }\n const currentVideoTime = this.player_.currentTime();\n const liveTracker = this.player_.liveTracker;\n const duration = liveTracker && liveTracker.isLive() ? 
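/* for live streams the end of the seekable range stands in for the duration */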
liveTracker.seekableEnd() : this.player_.duration();\n let newTime;\n if (currentVideoTime + this.skipTime <= duration) {\n newTime = currentVideoTime + this.skipTime;\n } else {\n newTime = duration;\n }\n this.player_.currentTime(newTime);\n }\n\n /**\n * Update control text on languagechange\n */\n handleLanguagechange() {\n this.controlText(this.localize('Skip forward {1} seconds', [this.skipTime]));\n }\n}\nSkipForward.prototype.controlText_ = 'Skip Forward';\nComponent$1.registerComponent('SkipForward', SkipForward);\n\n/**\n * Button to skip backward a configurable amount of time\n * through a video. Renders in the control bar.\n *\n * * e.g. options: {controlBar: {skipButtons: backward: 5}}\n *\n * @extends Button\n */\nclass SkipBackward extends Button {\n constructor(player, options) {\n super(player, options);\n this.validOptions = [5, 10, 30];\n this.skipTime = this.getSkipBackwardTime();\n if (this.skipTime && this.validOptions.includes(this.skipTime)) {\n this.setIcon(`replay-${this.skipTime}`);\n this.controlText(this.localize('Skip backward {1} seconds', [this.skipTime.toLocaleString(player.language())]));\n this.show();\n } else {\n this.hide();\n }\n }\n getSkipBackwardTime() {\n const playerOptions = this.options_.playerOptions;\n return playerOptions.controlBar && playerOptions.controlBar.skipButtons && playerOptions.controlBar.skipButtons.backward;\n }\n buildCSSClass() {\n return `vjs-skip-backward-${this.getSkipBackwardTime()} ${super.buildCSSClass()}`;\n }\n\n /**\n * On click, skips backward in the video by a configurable amount of seconds.\n * If the current time in the video is less than the configured 'skip backward' time,\n * skips to beginning of video or seekable range.\n *\n * Handle a click on a `SkipBackward` button\n *\n * @param {EventTarget~Event} event\n * The `click` event that caused this function\n * to be called\n */\n handleClick(event) {\n const currentVideoTime = this.player_.currentTime();\n const liveTracker = this.player_.liveTracker;\n const seekableStart = liveTracker && liveTracker.isLive() && liveTracker.seekableStart();\n let newTime;\n if (seekableStart && currentVideoTime - this.skipTime <= seekableStart) {\n newTime = seekableStart;\n } else if (currentVideoTime >= this.skipTime) {\n newTime = currentVideoTime - this.skipTime;\n } else {\n newTime = 0;\n }\n this.player_.currentTime(newTime);\n }\n\n /**\n * Update control text on languagechange\n */\n handleLanguagechange() {\n this.controlText(this.localize('Skip backward {1} seconds', [this.skipTime]));\n }\n}\nSkipBackward.prototype.controlText_ = 'Skip Backward';\nComponent$1.registerComponent('SkipBackward', SkipBackward);\n\n/**\n * @file menu.js\n */\n\n/**\n * The Menu component is used to build popup menus, including subtitle and\n * captions selection menus.\n *\n * @extends Component\n */\nclass Menu extends Component$1 {\n /**\n * Create an instance of this class.\n *\n * @param { import('../player').default } player\n * the player that this component should attach to\n *\n * @param {Object} [options]\n * Object of option names and values\n *\n */\n constructor(player, options) {\n super(player, options);\n if (options) {\n this.menuButton_ = options.menuButton;\n }\n this.focusedChild_ = -1;\n this.on('keydown', e => this.handleKeyDown(e));\n\n // All the menu item instances share the same blur handler provided by the menu container.\n this.boundHandleBlur_ = e => this.handleBlur(e);\n this.boundHandleTapClick_ = e => this.handleTapClick(e);\n }\n\n /**\n * Add event 
listeners to the {@link MenuItem}.\n *\n * @param {Object} component\n * The instance of the `MenuItem` to add listeners to.\n *\n */\n addEventListenerForItem(component) {\n if (!(component instanceof Component$1)) {\n return;\n }\n this.on(component, 'blur', this.boundHandleBlur_);\n this.on(component, ['tap', 'click'], this.boundHandleTapClick_);\n }\n\n /**\n * Remove event listeners from the {@link MenuItem}.\n *\n * @param {Object} component\n * The instance of the `MenuItem` to remove listeners.\n *\n */\n removeEventListenerForItem(component) {\n if (!(component instanceof Component$1)) {\n return;\n }\n this.off(component, 'blur', this.boundHandleBlur_);\n this.off(component, ['tap', 'click'], this.boundHandleTapClick_);\n }\n\n /**\n * This method will be called indirectly when the component has been added\n * before the component adds to the new menu instance by `addItem`.\n * In this case, the original menu instance will remove the component\n * by calling `removeChild`.\n *\n * @param {Object} component\n * The instance of the `MenuItem`\n */\n removeChild(component) {\n if (typeof component === 'string') {\n component = this.getChild(component);\n }\n this.removeEventListenerForItem(component);\n super.removeChild(component);\n }\n\n /**\n * Add a {@link MenuItem} to the menu.\n *\n * @param {Object|string} component\n * The name or instance of the `MenuItem` to add.\n *\n */\n addItem(component) {\n const childComponent = this.addChild(component);\n if (childComponent) {\n this.addEventListenerForItem(childComponent);\n }\n }\n\n /**\n * Create the `Menu`s DOM element.\n *\n * @return {Element}\n * the element that was created\n */\n createEl() {\n const contentElType = this.options_.contentElType || 'ul';\n this.contentEl_ = createEl(contentElType, {\n className: 'vjs-menu-content'\n });\n this.contentEl_.setAttribute('role', 'menu');\n const el = super.createEl('div', {\n append: this.contentEl_,\n className: 'vjs-menu'\n });\n el.appendChild(this.contentEl_);\n\n // Prevent clicks from bubbling up. 
Needed for Menu Buttons,\n // where a click on the parent is significant\n on(el, 'click', function (event) {\n event.preventDefault();\n event.stopImmediatePropagation();\n });\n return el;\n }\n dispose() {\n this.contentEl_ = null;\n this.boundHandleBlur_ = null;\n this.boundHandleTapClick_ = null;\n super.dispose();\n }\n\n /**\n * Called when a `MenuItem` loses focus.\n *\n * @param {Event} event\n * The `blur` event that caused this function to be called.\n *\n * @listens blur\n */\n handleBlur(event) {\n const relatedTarget = event.relatedTarget || document.activeElement;\n\n // Close menu popup when a user clicks outside the menu\n if (!this.children().some(element => {\n return element.el() === relatedTarget;\n })) {\n const btn = this.menuButton_;\n if (btn && btn.buttonPressed_ && relatedTarget !== btn.el().firstChild) {\n btn.unpressButton();\n }\n }\n }\n\n /**\n * Called when a `MenuItem` gets clicked or tapped.\n *\n * @param {Event} event\n * The `click` or `tap` event that caused this function to be called.\n *\n * @listens click,tap\n */\n handleTapClick(event) {\n // Unpress the associated MenuButton, and move focus back to it\n if (this.menuButton_) {\n this.menuButton_.unpressButton();\n const childComponents = this.children();\n if (!Array.isArray(childComponents)) {\n return;\n }\n const foundComponent = childComponents.filter(component => component.el() === event.target)[0];\n if (!foundComponent) {\n return;\n }\n\n // don't focus menu button if item is a caption settings item\n // because focus will move elsewhere\n if (foundComponent.name() !== 'CaptionSettingsMenuItem') {\n this.menuButton_.focus();\n }\n }\n }\n\n /**\n * Handle a `keydown` event on this menu. This listener is added in the constructor.\n *\n * @param {KeyboardEvent} event\n * A `keydown` event that happened on the menu.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Left and Down Arrows\n if (keycode.isEventKey(event, 'Left') || keycode.isEventKey(event, 'Down')) {\n event.preventDefault();\n event.stopPropagation();\n this.stepForward();\n\n // Up and Right Arrows\n } else if (keycode.isEventKey(event, 'Right') || keycode.isEventKey(event, 'Up')) {\n event.preventDefault();\n event.stopPropagation();\n this.stepBack();\n }\n }\n\n /**\n * Move to next (lower) menu item for keyboard users.\n */\n stepForward() {\n let stepChild = 0;\n if (this.focusedChild_ !== undefined) {\n stepChild = this.focusedChild_ + 1;\n }\n this.focus(stepChild);\n }\n\n /**\n * Move to previous (higher) menu item for keyboard users.\n */\n stepBack() {\n let stepChild = 0;\n if (this.focusedChild_ !== undefined) {\n stepChild = this.focusedChild_ - 1;\n }\n this.focus(stepChild);\n }\n\n /**\n * Set focus on a {@link MenuItem} in the `Menu`.\n *\n * @param {Object|string} [item=0]\n * Index of child item set focus on.\n */\n focus(item = 0) {\n const children = this.children().slice();\n const haveTitle = children.length && children[0].hasClass('vjs-menu-title');\n if (haveTitle) {\n children.shift();\n }\n if (children.length > 0) {\n if (item < 0) {\n item = 0;\n } else if (item >= children.length) {\n item = children.length - 1;\n }\n this.focusedChild_ = item;\n children[item].el_.focus();\n }\n }\n}\nComponent$1.registerComponent('Menu', Menu);\n\n/**\n * @file menu-button.js\n */\n\n/**\n * A `MenuButton` class for any popup {@link Menu}.\n *\n * @extends Component\n */\nclass MenuButton extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../player').default 
} player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n */\n constructor(player, options = {}) {\n super(player, options);\n this.menuButton_ = new Button(player, options);\n this.menuButton_.controlText(this.controlText_);\n this.menuButton_.el_.setAttribute('aria-haspopup', 'true');\n\n // Add buildCSSClass values to the button, not the wrapper\n const buttonClass = Button.prototype.buildCSSClass();\n this.menuButton_.el_.className = this.buildCSSClass() + ' ' + buttonClass;\n this.menuButton_.removeClass('vjs-control');\n this.addChild(this.menuButton_);\n this.update();\n this.enabled_ = true;\n const handleClick = e => this.handleClick(e);\n this.handleMenuKeyUp_ = e => this.handleMenuKeyUp(e);\n this.on(this.menuButton_, 'tap', handleClick);\n this.on(this.menuButton_, 'click', handleClick);\n this.on(this.menuButton_, 'keydown', e => this.handleKeyDown(e));\n this.on(this.menuButton_, 'mouseenter', () => {\n this.addClass('vjs-hover');\n this.menu.show();\n on(document, 'keyup', this.handleMenuKeyUp_);\n });\n this.on('mouseleave', e => this.handleMouseLeave(e));\n this.on('keydown', e => this.handleSubmenuKeyDown(e));\n }\n\n /**\n * Update the menu based on the current state of its items.\n */\n update() {\n const menu = this.createMenu();\n if (this.menu) {\n this.menu.dispose();\n this.removeChild(this.menu);\n }\n this.menu = menu;\n this.addChild(menu);\n\n /**\n * Track the state of the menu button\n *\n * @type {Boolean}\n * @private\n */\n this.buttonPressed_ = false;\n this.menuButton_.el_.setAttribute('aria-expanded', 'false');\n if (this.items && this.items.length <= this.hideThreshold_) {\n this.hide();\n this.menu.contentEl_.removeAttribute('role');\n } else {\n this.show();\n this.menu.contentEl_.setAttribute('role', 'menu');\n }\n }\n\n /**\n * Create the menu and add all items to it.\n *\n * @return {Menu}\n * The constructed menu\n */\n createMenu() {\n const menu = new Menu(this.player_, {\n menuButton: this\n });\n\n /**\n * Hide the menu if the number of items is less than or equal to this threshold. This defaults\n * to 0 and whenever we add items which can be hidden to the menu we'll increment it. We list\n * it here because every time we run `createMenu` we need to reset the value.\n *\n * @protected\n * @type {Number}\n */\n this.hideThreshold_ = 0;\n\n // Add a title list item to the top\n if (this.options_.title) {\n const titleEl = createEl('li', {\n className: 'vjs-menu-title',\n textContent: toTitleCase$1(this.options_.title),\n tabIndex: -1\n });\n const titleComponent = new Component$1(this.player_, {\n el: titleEl\n });\n menu.addItem(titleComponent);\n }\n this.items = this.createItems();\n if (this.items) {\n // Add menu items to the menu\n for (let i = 0; i < this.items.length; i++) {\n menu.addItem(this.items[i]);\n }\n }\n return menu;\n }\n\n /**\n * Create the list of menu items. 
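Subclasses return an array of {@link MenuItem}s from this method. A hedged sketch (the\n   * subclass name is hypothetical; `MenuButton` and `MenuItem` are the classes defined in\n   * this file):\n   *\n   * @example\n   * class MyMenuButton extends MenuButton {\n   *   createItems() {\n   *     return ['A', 'B'].map(label => new MenuItem(this.player_, {label, selectable: true}));\n   *   }\n   * }\n   *\n   * 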
Specific to each subclass.\n *\n * @abstract\n */\n createItems() {}\n\n /**\n * Create the `MenuButtons`s DOM element.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl() {\n return super.createEl('div', {\n className: this.buildWrapperCSSClass()\n }, {});\n }\n\n /**\n * Overwrites the `setIcon` method from `Component`.\n * In this case, we want the icon to be appended to the menuButton.\n *\n * @param {string} name\n * The icon name to be added.\n */\n setIcon(name) {\n super.setIcon(name, this.menuButton_.el_);\n }\n\n /**\n * Allow sub components to stack CSS class names for the wrapper element\n *\n * @return {string}\n * The constructed wrapper DOM `className`\n */\n buildWrapperCSSClass() {\n let menuButtonClass = 'vjs-menu-button';\n\n // If the inline option is passed, we want to use different styles altogether.\n if (this.options_.inline === true) {\n menuButtonClass += '-inline';\n } else {\n menuButtonClass += '-popup';\n }\n\n // TODO: Fix the CSS so that this isn't necessary\n const buttonClass = Button.prototype.buildCSSClass();\n return `vjs-menu-button ${menuButtonClass} ${buttonClass} ${super.buildCSSClass()}`;\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n let menuButtonClass = 'vjs-menu-button';\n\n // If the inline option is passed, we want to use different styles altogether.\n if (this.options_.inline === true) {\n menuButtonClass += '-inline';\n } else {\n menuButtonClass += '-popup';\n }\n return `vjs-menu-button ${menuButtonClass} ${super.buildCSSClass()}`;\n }\n\n /**\n * Get or set the localized control text that will be used for accessibility.\n *\n * > NOTE: This will come from the internal `menuButton_` element.\n *\n * @param {string} [text]\n * Control text for element.\n *\n * @param {Element} [el=this.menuButton_.el()]\n * Element to set the title on.\n *\n * @return {string}\n * - The control text when getting\n */\n controlText(text, el = this.menuButton_.el()) {\n return this.menuButton_.controlText(text, el);\n }\n\n /**\n * Dispose of the `menu-button` and all child components.\n */\n dispose() {\n this.handleMouseLeave();\n super.dispose();\n }\n\n /**\n * Handle a click on a `MenuButton`.\n * See {@link ClickableComponent#handleClick} for instances where this is called.\n *\n * @param {Event} event\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n if (this.buttonPressed_) {\n this.unpressButton();\n } else {\n this.pressButton();\n }\n }\n\n /**\n * Handle `mouseleave` for `MenuButton`.\n *\n * @param {Event} event\n * The `mouseleave` event that caused this function to be called.\n *\n * @listens mouseleave\n */\n handleMouseLeave(event) {\n this.removeClass('vjs-hover');\n off(document, 'keyup', this.handleMenuKeyUp_);\n }\n\n /**\n * Set the focus to the actual button, not to this element\n */\n focus() {\n this.menuButton_.focus();\n }\n\n /**\n * Remove the focus from the actual button, not this element\n */\n blur() {\n this.menuButton_.blur();\n }\n\n /**\n * Handle tab, escape, down arrow, and up arrow keys for `MenuButton`. 
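The same open and close behaviour is available programmatically through pressButton() and\n   * unpressButton(). A hedged sketch, with `myMenuButton` an assumed MenuButton instance:\n   *\n   * @example\n   * myMenuButton.pressButton();   // shows the menu and, in most cases, moves focus into it\n   * myMenuButton.unpressButton(); // hides the menu again\n   *\n   * 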
See\n * {@link ClickableComponent#handleKeyDown} for instances where this is called.\n *\n * @param {Event} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n // Escape or Tab unpress the 'button'\n if (keycode.isEventKey(event, 'Esc') || keycode.isEventKey(event, 'Tab')) {\n if (this.buttonPressed_) {\n this.unpressButton();\n }\n\n // Don't preventDefault for Tab key - we still want to lose focus\n if (!keycode.isEventKey(event, 'Tab')) {\n event.preventDefault();\n // Set focus back to the menu button's button\n this.menuButton_.focus();\n }\n // Up Arrow or Down Arrow also 'press' the button to open the menu\n } else if (keycode.isEventKey(event, 'Up') || keycode.isEventKey(event, 'Down')) {\n if (!this.buttonPressed_) {\n event.preventDefault();\n this.pressButton();\n }\n }\n }\n\n /**\n * Handle a `keyup` event on a `MenuButton`. The listener for this is added in\n * the constructor.\n *\n * @param {Event} event\n * Key press event\n *\n * @listens keyup\n */\n handleMenuKeyUp(event) {\n // Escape hides popup menu\n if (keycode.isEventKey(event, 'Esc') || keycode.isEventKey(event, 'Tab')) {\n this.removeClass('vjs-hover');\n }\n }\n\n /**\n * This method name now delegates to `handleSubmenuKeyDown`. This means\n * anyone calling `handleSubmenuKeyPress` will not see their method calls\n * stop working.\n *\n * @param {Event} event\n * The event that caused this function to be called.\n */\n handleSubmenuKeyPress(event) {\n this.handleSubmenuKeyDown(event);\n }\n\n /**\n * Handle a `keydown` event on a sub-menu. The listener for this is added in\n * the constructor.\n *\n * @param {Event} event\n * Key press event\n *\n * @listens keydown\n */\n handleSubmenuKeyDown(event) {\n // Escape or Tab unpress the 'button'\n if (keycode.isEventKey(event, 'Esc') || keycode.isEventKey(event, 'Tab')) {\n if (this.buttonPressed_) {\n this.unpressButton();\n }\n // Don't preventDefault for Tab key - we still want to lose focus\n if (!keycode.isEventKey(event, 'Tab')) {\n event.preventDefault();\n // Set focus back to the menu button's button\n this.menuButton_.focus();\n }\n }\n }\n\n /**\n * Put the current `MenuButton` into a pressed state.\n */\n pressButton() {\n if (this.enabled_) {\n this.buttonPressed_ = true;\n this.menu.show();\n this.menu.lockShowing();\n this.menuButton_.el_.setAttribute('aria-expanded', 'true');\n\n // set the focus into the submenu, except on iOS where it is resulting in\n // undesired scrolling behavior when the player is in an iframe\n if (IS_IOS && isInFrame()) {\n // Return early so that the menu isn't focused\n return;\n }\n this.menu.focus();\n }\n }\n\n /**\n * Take the current `MenuButton` out of a pressed state.\n */\n unpressButton() {\n if (this.enabled_) {\n this.buttonPressed_ = false;\n this.menu.unlockShowing();\n this.menu.hide();\n this.menuButton_.el_.setAttribute('aria-expanded', 'false');\n }\n }\n\n /**\n * Disable the `MenuButton`. Don't allow it to be clicked.\n */\n disable() {\n this.unpressButton();\n this.enabled_ = false;\n this.addClass('vjs-disabled');\n this.menuButton_.disable();\n }\n\n /**\n * Enable the `MenuButton`. Allow it to be clicked.\n */\n enable() {\n this.enabled_ = true;\n this.removeClass('vjs-disabled');\n this.menuButton_.enable();\n }\n}\nComponent$1.registerComponent('MenuButton', MenuButton);\n\n/**\n * @file track-button.js\n */\n\n/**\n * The base class for buttons that toggle specific track types (e.g. 
subtitles).\n *\n * @extends MenuButton\n */\nclass TrackButton extends MenuButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n const tracks = options.tracks;\n super(player, options);\n if (this.items.length <= 1) {\n this.hide();\n }\n if (!tracks) {\n return;\n }\n const updateHandler = bind_(this, this.update);\n tracks.addEventListener('removetrack', updateHandler);\n tracks.addEventListener('addtrack', updateHandler);\n tracks.addEventListener('labelchange', updateHandler);\n this.player_.on('ready', updateHandler);\n this.player_.on('dispose', function () {\n tracks.removeEventListener('removetrack', updateHandler);\n tracks.removeEventListener('addtrack', updateHandler);\n tracks.removeEventListener('labelchange', updateHandler);\n });\n }\n}\nComponent$1.registerComponent('TrackButton', TrackButton);\n\n/**\n * @file menu-keys.js\n */\n\n/**\n * All keys used for operation of a menu (`MenuButton`, `Menu`, and `MenuItem`)\n * Note that 'Enter' and 'Space' are not included here (otherwise they would\n * prevent the `MenuButton` and `MenuItem` from being keyboard-clickable)\n *\n * @typedef MenuKeys\n * @array\n */\nconst MenuKeys = ['Tab', 'Esc', 'Up', 'Down', 'Right', 'Left'];\n\n/**\n * @file menu-item.js\n */\n\n/**\n * The component for a menu item. ``\n *\n * @extends ClickableComponent\n */\nclass MenuItem extends ClickableComponent {\n /**\n * Creates an instance of the this class.\n *\n * @param { import('../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n *\n */\n constructor(player, options) {\n super(player, options);\n this.selectable = options.selectable;\n this.isSelected_ = options.selected || false;\n this.multiSelectable = options.multiSelectable;\n this.selected(this.isSelected_);\n if (this.selectable) {\n if (this.multiSelectable) {\n this.el_.setAttribute('role', 'menuitemcheckbox');\n } else {\n this.el_.setAttribute('role', 'menuitemradio');\n }\n } else {\n this.el_.setAttribute('role', 'menuitem');\n }\n }\n\n /**\n * Create the `MenuItem's DOM element\n *\n * @param {string} [type=li]\n * Element's node type, not actually used, always set to `li`.\n *\n * @param {Object} [props={}]\n * An object of properties that should be set on the element\n *\n * @param {Object} [attrs={}]\n * An object of attributes that should be set on the element\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl(type, props, attrs) {\n // The control is textual, not just an icon\n this.nonIconControl = true;\n const el = super.createEl('li', Object.assign({\n className: 'vjs-menu-item',\n tabIndex: -1\n }, props), attrs);\n\n // swap icon with menu item text.\n const menuItemEl = createEl('span', {\n className: 'vjs-menu-item-text',\n textContent: this.localize(this.options_.label)\n });\n\n // If using SVG icons, the element with vjs-icon-placeholder will be added separately.\n if (this.player_.options_.experimentalSvgIcons) {\n el.appendChild(menuItemEl);\n } else {\n el.replaceChild(menuItemEl, el.querySelector('.vjs-icon-placeholder'));\n }\n return el;\n }\n\n /**\n * Ignore keys which are used by the menu, but pass any other ones up. 
See\n * {@link ClickableComponent#handleKeyDown} for instances where this is called.\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n if (!MenuKeys.some(key => keycode.isEventKey(event, key))) {\n // Pass keydown handling up for unused keys\n super.handleKeyDown(event);\n }\n }\n\n /**\n * Any click on a `MenuItem` puts it into the selected state.\n * See {@link ClickableComponent#handleClick} for instances where this is called.\n *\n * @param {Event} event\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n this.selected(true);\n }\n\n /**\n * Set the state for this menu item as selected or not.\n *\n * @param {boolean} selected\n * if the menu item is selected or not\n */\n selected(selected) {\n if (this.selectable) {\n if (selected) {\n this.addClass('vjs-selected');\n this.el_.setAttribute('aria-checked', 'true');\n // aria-checked isn't fully supported by browsers/screen readers,\n // so indicate selected state to screen reader in the control text.\n this.controlText(', selected');\n this.isSelected_ = true;\n } else {\n this.removeClass('vjs-selected');\n this.el_.setAttribute('aria-checked', 'false');\n // Indicate un-selected state to screen reader\n this.controlText('');\n this.isSelected_ = false;\n }\n }\n }\n}\nComponent$1.registerComponent('MenuItem', MenuItem);\n\n/**\n * @file text-track-menu-item.js\n */\n\n/**\n * The specific menu item type for selecting a language within a text track kind\n *\n * @extends MenuItem\n */\nclass TextTrackMenuItem extends MenuItem {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n const track = options.track;\n const tracks = player.textTracks();\n\n // Modify options for parent MenuItem class's init.\n options.label = track.label || track.language || 'Unknown';\n options.selected = track.mode === 'showing';\n super(player, options);\n this.track = track;\n // Determine the relevant kind(s) of tracks for this component and filter\n // out empty kinds.\n this.kinds = (options.kinds || [options.kind || this.track.kind]).filter(Boolean);\n const changeHandler = (...args) => {\n this.handleTracksChange.apply(this, args);\n };\n const selectedLanguageChangeHandler = (...args) => {\n this.handleSelectedLanguageChange.apply(this, args);\n };\n player.on(['loadstart', 'texttrackchange'], changeHandler);\n tracks.addEventListener('change', changeHandler);\n tracks.addEventListener('selectedlanguagechange', selectedLanguageChangeHandler);\n this.on('dispose', function () {\n player.off(['loadstart', 'texttrackchange'], changeHandler);\n tracks.removeEventListener('change', changeHandler);\n tracks.removeEventListener('selectedlanguagechange', selectedLanguageChangeHandler);\n });\n\n // iOS7 doesn't dispatch change events to TextTrackLists when an\n // associated track's mode changes. Without something like\n // Object.observe() (also not present on iOS7), it's not\n // possible to detect changes to the mode attribute and polyfill\n // the change event. 
As a poor substitute, we manually dispatch\n // change events whenever the controls modify the mode.\n if (tracks.onchange === undefined) {\n let event;\n this.on(['tap', 'click'], function () {\n if (typeof window$1.Event !== 'object') {\n // Android 2.3 throws an Illegal Constructor error for window.Event\n try {\n event = new window$1.Event('change');\n } catch (err) {\n // continue regardless of error\n }\n }\n if (!event) {\n event = document.createEvent('Event');\n event.initEvent('change', true, true);\n }\n tracks.dispatchEvent(event);\n });\n }\n\n // set the default state based on current tracks\n this.handleTracksChange();\n }\n\n /**\n * This gets called when an `TextTrackMenuItem` is \"clicked\". See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} event\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n const referenceTrack = this.track;\n const tracks = this.player_.textTracks();\n super.handleClick(event);\n if (!tracks) {\n return;\n }\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n\n // If the track from the text tracks list is not of the right kind,\n // skip it. We do not want to affect tracks of incompatible kind(s).\n if (this.kinds.indexOf(track.kind) === -1) {\n continue;\n }\n\n // If this text track is the component's track and it is not showing,\n // set it to showing.\n if (track === referenceTrack) {\n if (track.mode !== 'showing') {\n track.mode = 'showing';\n }\n\n // If this text track is not the component's track and it is not\n // disabled, set it to disabled.\n } else if (track.mode !== 'disabled') {\n track.mode = 'disabled';\n }\n }\n }\n\n /**\n * Handle text track list change\n *\n * @param {Event} event\n * The `change` event that caused this function to be called.\n *\n * @listens TextTrackList#change\n */\n handleTracksChange(event) {\n const shouldBeSelected = this.track.mode === 'showing';\n\n // Prevent redundant selected() calls because they may cause\n // screen readers to read the appended control text unnecessarily\n if (shouldBeSelected !== this.isSelected_) {\n this.selected(shouldBeSelected);\n }\n }\n handleSelectedLanguageChange(event) {\n if (this.track.mode === 'showing') {\n const selectedLanguage = this.player_.cache_.selectedLanguage;\n\n // Don't replace the kind of track across the same language\n if (selectedLanguage && selectedLanguage.enabled && selectedLanguage.language === this.track.language && selectedLanguage.kind !== this.track.kind) {\n return;\n }\n this.player_.cache_.selectedLanguage = {\n enabled: true,\n language: this.track.language,\n kind: this.track.kind\n };\n }\n }\n dispose() {\n // remove reference to track object on dispose\n this.track = null;\n super.dispose();\n }\n}\nComponent$1.registerComponent('TextTrackMenuItem', TextTrackMenuItem);\n\n/**\n * @file off-text-track-menu-item.js\n */\n\n/**\n * A special menu item for turning off a specific type of text track\n *\n * @extends TextTrackMenuItem\n */\nclass OffTextTrackMenuItem extends TextTrackMenuItem {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n // Create pseudo track info\n // Requires options['kind']\n options.track = {\n player,\n // 
it is no longer necessary to store `kind` or `kinds` on the track itself\n // since they are now stored in the `kinds` property of all instances of\n // TextTrackMenuItem, but this will remain for backwards compatibility\n kind: options.kind,\n kinds: options.kinds,\n default: false,\n mode: 'disabled'\n };\n if (!options.kinds) {\n options.kinds = [options.kind];\n }\n if (options.label) {\n options.track.label = options.label;\n } else {\n options.track.label = options.kinds.join(' and ') + ' off';\n }\n\n // MenuItem is selectable\n options.selectable = true;\n // MenuItem is NOT multiSelectable (i.e. only one can be marked \"selected\" at a time)\n options.multiSelectable = false;\n super(player, options);\n }\n\n /**\n * Handle text track change\n *\n * @param {Event} event\n * The event that caused this function to run\n */\n handleTracksChange(event) {\n const tracks = this.player().textTracks();\n let shouldBeSelected = true;\n for (let i = 0, l = tracks.length; i < l; i++) {\n const track = tracks[i];\n if (this.options_.kinds.indexOf(track.kind) > -1 && track.mode === 'showing') {\n shouldBeSelected = false;\n break;\n }\n }\n\n // Prevent redundant selected() calls because they may cause\n // screen readers to read the appended control text unnecessarily\n if (shouldBeSelected !== this.isSelected_) {\n this.selected(shouldBeSelected);\n }\n }\n handleSelectedLanguageChange(event) {\n const tracks = this.player().textTracks();\n let allHidden = true;\n for (let i = 0, l = tracks.length; i < l; i++) {\n const track = tracks[i];\n if (['captions', 'descriptions', 'subtitles'].indexOf(track.kind) > -1 && track.mode === 'showing') {\n allHidden = false;\n break;\n }\n }\n if (allHidden) {\n this.player_.cache_.selectedLanguage = {\n enabled: false\n };\n }\n }\n\n /**\n * Update control text and label on languagechange\n */\n handleLanguagechange() {\n this.$('.vjs-menu-item-text').textContent = this.player_.localize(this.options_.label);\n super.handleLanguagechange();\n }\n}\nComponent$1.registerComponent('OffTextTrackMenuItem', OffTextTrackMenuItem);\n\n/**\n * @file text-track-button.js\n */\n\n/**\n * The base class for buttons that toggle specific text track types (e.g. 
subtitles)\n *\n * @extends MenuButton\n */\nclass TextTrackButton extends TrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n */\n constructor(player, options = {}) {\n options.tracks = player.textTracks();\n super(player, options);\n }\n\n /**\n * Create a menu item for each text track\n *\n * @param {TextTrackMenuItem[]} [items=[]]\n * Existing array of items to use during creation\n *\n * @return {TextTrackMenuItem[]}\n * Array of menu items that were created\n */\n createItems(items = [], TrackMenuItem = TextTrackMenuItem) {\n // Label is an override for the [track] off label\n // USed to localise captions/subtitles\n let label;\n if (this.label_) {\n label = `${this.label_} off`;\n }\n // Add an OFF menu item to turn all tracks off\n items.push(new OffTextTrackMenuItem(this.player_, {\n kinds: this.kinds_,\n kind: this.kind_,\n label\n }));\n this.hideThreshold_ += 1;\n const tracks = this.player_.textTracks();\n if (!Array.isArray(this.kinds_)) {\n this.kinds_ = [this.kind_];\n }\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n\n // only add tracks that are of an appropriate kind and have a label\n if (this.kinds_.indexOf(track.kind) > -1) {\n const item = new TrackMenuItem(this.player_, {\n track,\n kinds: this.kinds_,\n kind: this.kind_,\n // MenuItem is selectable\n selectable: true,\n // MenuItem is NOT multiSelectable (i.e. only one can be marked \"selected\" at a time)\n multiSelectable: false\n });\n item.addClass(`vjs-${track.kind}-menu-item`);\n items.push(item);\n }\n }\n return items;\n }\n}\nComponent$1.registerComponent('TextTrackButton', TextTrackButton);\n\n/**\n * @file chapters-track-menu-item.js\n */\n\n/**\n * The chapter track menu item\n *\n * @extends MenuItem\n */\nclass ChaptersTrackMenuItem extends MenuItem {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n const track = options.track;\n const cue = options.cue;\n const currentTime = player.currentTime();\n\n // Modify options for parent MenuItem class's init.\n options.selectable = true;\n options.multiSelectable = false;\n options.label = cue.text;\n options.selected = cue.startTime <= currentTime && currentTime < cue.endTime;\n super(player, options);\n this.track = track;\n this.cue = cue;\n }\n\n /**\n * This gets called when an `ChaptersTrackMenuItem` is \"clicked\". See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n super.handleClick();\n this.player_.currentTime(this.cue.startTime);\n }\n}\nComponent$1.registerComponent('ChaptersTrackMenuItem', ChaptersTrackMenuItem);\n\n/**\n * @file chapters-button.js\n */\n\n/**\n * The button component for toggling and selecting chapters\n * Chapters act much differently than other text tracks\n * Cues are navigation vs. 
other tracks of alternative languages\n *\n * @extends TextTrackButton\n */\nclass ChaptersButton extends TextTrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function to call when this function is ready.\n */\n constructor(player, options, ready) {\n super(player, options, ready);\n this.setIcon('chapters');\n this.selectCurrentItem_ = () => {\n this.items.forEach(item => {\n item.selected(this.track_.activeCues[0] === item.cue);\n });\n };\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-chapters-button ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-chapters-button ${super.buildWrapperCSSClass()}`;\n }\n\n /**\n * Update the menu based on the current state of its items.\n *\n * @param {Event} [event]\n * An event that triggered this function to run.\n *\n * @listens TextTrackList#addtrack\n * @listens TextTrackList#removetrack\n * @listens TextTrackList#change\n */\n update(event) {\n if (event && event.track && event.track.kind !== 'chapters') {\n return;\n }\n const track = this.findChaptersTrack();\n if (track !== this.track_) {\n this.setTrack(track);\n super.update();\n } else if (!this.items || track && track.cues && track.cues.length !== this.items.length) {\n // Update the menu initially or if the number of cues has changed since set\n super.update();\n }\n }\n\n /**\n * Set the currently selected track for the chapters button.\n *\n * @param {TextTrack} track\n * The new track to select. Nothing will change if this is the currently selected\n * track.\n */\n setTrack(track) {\n if (this.track_ === track) {\n return;\n }\n if (!this.updateHandler_) {\n this.updateHandler_ = this.update.bind(this);\n }\n\n // here this.track_ refers to the old track instance\n if (this.track_) {\n const remoteTextTrackEl = this.player_.remoteTextTrackEls().getTrackElementByTrack_(this.track_);\n if (remoteTextTrackEl) {\n remoteTextTrackEl.removeEventListener('load', this.updateHandler_);\n }\n this.track_.removeEventListener('cuechange', this.selectCurrentItem_);\n this.track_ = null;\n }\n this.track_ = track;\n\n // here this.track_ refers to the new track instance\n if (this.track_) {\n this.track_.mode = 'hidden';\n const remoteTextTrackEl = this.player_.remoteTextTrackEls().getTrackElementByTrack_(this.track_);\n if (remoteTextTrackEl) {\n remoteTextTrackEl.addEventListener('load', this.updateHandler_);\n }\n this.track_.addEventListener('cuechange', this.selectCurrentItem_);\n }\n }\n\n /**\n * Find the track object that is currently in use by this ChaptersButton\n *\n * @return {TextTrack|undefined}\n * The current track or undefined if none was found.\n */\n findChaptersTrack() {\n const tracks = this.player_.textTracks() || [];\n for (let i = tracks.length - 1; i >= 0; i--) {\n // We will always choose the last track as our chaptersTrack\n const track = tracks[i];\n if (track.kind === this.kind_) {\n return track;\n }\n }\n }\n\n /**\n * Get the caption for the ChaptersButton based on the track label. 
This will also\n * use the current tracks localized kind as a fallback if a label does not exist.\n *\n * @return {string}\n * The tracks current label or the localized track kind.\n */\n getMenuCaption() {\n if (this.track_ && this.track_.label) {\n return this.track_.label;\n }\n return this.localize(toTitleCase$1(this.kind_));\n }\n\n /**\n * Create menu from chapter track\n *\n * @return { import('../../menu/menu').default }\n * New menu for the chapter buttons\n */\n createMenu() {\n this.options_.title = this.getMenuCaption();\n return super.createMenu();\n }\n\n /**\n * Create a menu item for each text track\n *\n * @return { import('./text-track-menu-item').default[] }\n * Array of menu items\n */\n createItems() {\n const items = [];\n if (!this.track_) {\n return items;\n }\n const cues = this.track_.cues;\n if (!cues) {\n return items;\n }\n for (let i = 0, l = cues.length; i < l; i++) {\n const cue = cues[i];\n const mi = new ChaptersTrackMenuItem(this.player_, {\n track: this.track_,\n cue\n });\n items.push(mi);\n }\n return items;\n }\n}\n\n/**\n * `kind` of TextTrack to look for to associate it with this menu.\n *\n * @type {string}\n * @private\n */\nChaptersButton.prototype.kind_ = 'chapters';\n\n/**\n * The text that should display over the `ChaptersButton`s controls. Added for localization.\n *\n * @type {string}\n * @protected\n */\nChaptersButton.prototype.controlText_ = 'Chapters';\nComponent$1.registerComponent('ChaptersButton', ChaptersButton);\n\n/**\n * @file descriptions-button.js\n */\n\n/**\n * The button component for toggling and selecting descriptions\n *\n * @extends TextTrackButton\n */\nclass DescriptionsButton extends TextTrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function to call when this component is ready.\n */\n constructor(player, options, ready) {\n super(player, options, ready);\n this.setIcon('audio-description');\n const tracks = player.textTracks();\n const changeHandler = bind_(this, this.handleTracksChange);\n tracks.addEventListener('change', changeHandler);\n this.on('dispose', function () {\n tracks.removeEventListener('change', changeHandler);\n });\n }\n\n /**\n * Handle text track change\n *\n * @param {Event} event\n * The event that caused this function to run\n *\n * @listens TextTrackList#change\n */\n handleTracksChange(event) {\n const tracks = this.player().textTracks();\n let disabled = false;\n\n // Check whether a track of a different kind is showing\n for (let i = 0, l = tracks.length; i < l; i++) {\n const track = tracks[i];\n if (track.kind !== this.kind_ && track.mode === 'showing') {\n disabled = true;\n break;\n }\n }\n\n // If another track is showing, disable this menu button\n if (disabled) {\n this.disable();\n } else {\n this.enable();\n }\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-descriptions-button ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-descriptions-button ${super.buildWrapperCSSClass()}`;\n }\n}\n\n/**\n * `kind` of TextTrack to look for to associate it with this menu.\n *\n * @type {string}\n * @private\n */\nDescriptionsButton.prototype.kind_ = 'descriptions';\n\n/**\n * The text that should display over the 
`DescriptionsButton`s controls. Added for localization.\n *\n * @type {string}\n * @protected\n */\nDescriptionsButton.prototype.controlText_ = 'Descriptions';\nComponent$1.registerComponent('DescriptionsButton', DescriptionsButton);\n\n/**\n * @file subtitles-button.js\n */\n\n/**\n * The button component for toggling and selecting subtitles\n *\n * @extends TextTrackButton\n */\nclass SubtitlesButton extends TextTrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function to call when this component is ready.\n */\n constructor(player, options, ready) {\n super(player, options, ready);\n this.setIcon('subtitles');\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-subtitles-button ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-subtitles-button ${super.buildWrapperCSSClass()}`;\n }\n}\n\n/**\n * `kind` of TextTrack to look for to associate it with this menu.\n *\n * @type {string}\n * @private\n */\nSubtitlesButton.prototype.kind_ = 'subtitles';\n\n/**\n * The text that should display over the `SubtitlesButton`s controls. Added for localization.\n *\n * @type {string}\n * @protected\n */\nSubtitlesButton.prototype.controlText_ = 'Subtitles';\nComponent$1.registerComponent('SubtitlesButton', SubtitlesButton);\n\n/**\n * @file caption-settings-menu-item.js\n */\n\n/**\n * The menu item for caption track settings menu\n *\n * @extends TextTrackMenuItem\n */\nclass CaptionSettingsMenuItem extends TextTrackMenuItem {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n options.track = {\n player,\n kind: options.kind,\n label: options.kind + ' settings',\n selectable: false,\n default: false,\n mode: 'disabled'\n };\n\n // CaptionSettingsMenuItem has no concept of 'selected'\n options.selectable = false;\n options.name = 'CaptionSettingsMenuItem';\n super(player, options);\n this.addClass('vjs-texttrack-settings');\n this.controlText(', opens ' + options.kind + ' settings dialog');\n }\n\n /**\n * This gets called when an `CaptionSettingsMenuItem` is \"clicked\". 
See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n this.player().getChild('textTrackSettings').open();\n }\n\n /**\n * Update control text and label on languagechange\n */\n handleLanguagechange() {\n this.$('.vjs-menu-item-text').textContent = this.player_.localize(this.options_.kind + ' settings');\n super.handleLanguagechange();\n }\n}\nComponent$1.registerComponent('CaptionSettingsMenuItem', CaptionSettingsMenuItem);\n\n/**\n * @file captions-button.js\n */\n\n/**\n * The button component for toggling and selecting captions\n *\n * @extends TextTrackButton\n */\nclass CaptionsButton extends TextTrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function to call when this component is ready.\n */\n constructor(player, options, ready) {\n super(player, options, ready);\n this.setIcon('captions');\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-captions-button ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-captions-button ${super.buildWrapperCSSClass()}`;\n }\n\n /**\n * Create caption menu items\n *\n * @return {CaptionSettingsMenuItem[]}\n * The array of current menu items.\n */\n createItems() {\n const items = [];\n if (!(this.player().tech_ && this.player().tech_.featuresNativeTextTracks) && this.player().getChild('textTrackSettings')) {\n items.push(new CaptionSettingsMenuItem(this.player_, {\n kind: this.kind_\n }));\n this.hideThreshold_ += 1;\n }\n return super.createItems(items);\n }\n}\n\n/**\n * `kind` of TextTrack to look for to associate it with this menu.\n *\n * @type {string}\n * @private\n */\nCaptionsButton.prototype.kind_ = 'captions';\n\n/**\n * The text that should display over the `CaptionsButton`s controls. 
Added for localization.\n *\n * @type {string}\n * @protected\n */\nCaptionsButton.prototype.controlText_ = 'Captions';\nComponent$1.registerComponent('CaptionsButton', CaptionsButton);\n\n/**\n * @file subs-caps-menu-item.js\n */\n\n/**\n * SubsCapsMenuItem has an [cc] icon to distinguish captions from subtitles\n * in the SubsCapsMenu.\n *\n * @extends TextTrackMenuItem\n */\nclass SubsCapsMenuItem extends TextTrackMenuItem {\n createEl(type, props, attrs) {\n const el = super.createEl(type, props, attrs);\n const parentSpan = el.querySelector('.vjs-menu-item-text');\n if (this.options_.track.kind === 'captions') {\n if (this.player_.options_.experimentalSvgIcons) {\n this.setIcon('captions', el);\n } else {\n parentSpan.appendChild(createEl('span', {\n className: 'vjs-icon-placeholder'\n }, {\n 'aria-hidden': true\n }));\n }\n parentSpan.appendChild(createEl('span', {\n className: 'vjs-control-text',\n // space added as the text will visually flow with the\n // label\n textContent: ` ${this.localize('Captions')}`\n }));\n }\n return el;\n }\n}\nComponent$1.registerComponent('SubsCapsMenuItem', SubsCapsMenuItem);\n\n/**\n * @file sub-caps-button.js\n */\n\n/**\n * The button component for toggling and selecting captions and/or subtitles\n *\n * @extends TextTrackButton\n */\nclass SubsCapsButton extends TextTrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * The function to call when this component is ready.\n */\n constructor(player, options = {}) {\n super(player, options);\n\n // Although North America uses \"captions\" in most cases for\n // \"captions and subtitles\" other locales use \"subtitles\"\n this.label_ = 'subtitles';\n this.setIcon('subtitles');\n if (['en', 'en-us', 'en-ca', 'fr-ca'].indexOf(this.player_.language_) > -1) {\n this.label_ = 'captions';\n this.setIcon('captions');\n }\n this.menuButton_.controlText(toTitleCase$1(this.label_));\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-subs-caps-button ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-subs-caps-button ${super.buildWrapperCSSClass()}`;\n }\n\n /**\n * Create caption/subtitles menu items\n *\n * @return {CaptionSettingsMenuItem[]}\n * The array of current menu items.\n */\n createItems() {\n let items = [];\n if (!(this.player().tech_ && this.player().tech_.featuresNativeTextTracks) && this.player().getChild('textTrackSettings')) {\n items.push(new CaptionSettingsMenuItem(this.player_, {\n kind: this.label_\n }));\n this.hideThreshold_ += 1;\n }\n items = super.createItems(items, SubsCapsMenuItem);\n return items;\n }\n}\n\n/**\n * `kind`s of TextTrack to look for to associate it with this menu.\n *\n * @type {array}\n * @private\n */\nSubsCapsButton.prototype.kinds_ = ['captions', 'subtitles'];\n\n/**\n * The text that should display over the `SubsCapsButton`s controls.\n *\n *\n * @type {string}\n * @protected\n */\nSubsCapsButton.prototype.controlText_ = 'Subtitles';\nComponent$1.registerComponent('SubsCapsButton', SubsCapsButton);\n\n/**\n * @file audio-track-menu-item.js\n */\n\n/**\n * An {@link AudioTrack} {@link MenuItem}\n *\n * @extends MenuItem\n */\nclass AudioTrackMenuItem extends MenuItem {\n /**\n * Creates an instance of this 
class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n const track = options.track;\n const tracks = player.audioTracks();\n\n // Modify options for parent MenuItem class's init.\n options.label = track.label || track.language || 'Unknown';\n options.selected = track.enabled;\n super(player, options);\n this.track = track;\n this.addClass(`vjs-${track.kind}-menu-item`);\n const changeHandler = (...args) => {\n this.handleTracksChange.apply(this, args);\n };\n tracks.addEventListener('change', changeHandler);\n this.on('dispose', () => {\n tracks.removeEventListener('change', changeHandler);\n });\n }\n createEl(type, props, attrs) {\n const el = super.createEl(type, props, attrs);\n const parentSpan = el.querySelector('.vjs-menu-item-text');\n if (['main-desc', 'description'].indexOf(this.options_.track.kind) >= 0) {\n parentSpan.appendChild(createEl('span', {\n className: 'vjs-icon-placeholder'\n }, {\n 'aria-hidden': true\n }));\n parentSpan.appendChild(createEl('span', {\n className: 'vjs-control-text',\n textContent: ' ' + this.localize('Descriptions')\n }));\n }\n return el;\n }\n\n /**\n * This gets called when an `AudioTrackMenuItem is \"clicked\". See {@link ClickableComponent}\n * for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n super.handleClick(event);\n\n // the audio track list will automatically toggle other tracks\n // off for us.\n this.track.enabled = true;\n\n // when native audio tracks are used, we want to make sure that other tracks are turned off\n if (this.player_.tech_.featuresNativeAudioTracks) {\n const tracks = this.player_.audioTracks();\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n\n // skip the current track since we enabled it above\n if (track === this.track) {\n continue;\n }\n track.enabled = track === this.track;\n }\n }\n }\n\n /**\n * Handle any {@link AudioTrack} change.\n *\n * @param {Event} [event]\n * The {@link AudioTrackList#change} event that caused this to run.\n *\n * @listens AudioTrackList#change\n */\n handleTracksChange(event) {\n this.selected(this.track.enabled);\n }\n}\nComponent$1.registerComponent('AudioTrackMenuItem', AudioTrackMenuItem);\n\n/**\n * @file audio-track-button.js\n */\n\n/**\n * The base class for buttons that toggle specific {@link AudioTrack} types.\n *\n * @extends TrackButton\n */\nclass AudioTrackButton extends TrackButton {\n /**\n * Creates an instance of this class.\n *\n * @param {Player} player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options={}]\n * The key/value store of player options.\n */\n constructor(player, options = {}) {\n options.tracks = player.audioTracks();\n super(player, options);\n this.setIcon('audio');\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-audio-button ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-audio-button ${super.buildWrapperCSSClass()}`;\n }\n\n /**\n * Create a menu item for each audio track\n *\n * @param {AudioTrackMenuItem[]} [items=[]]\n * An array of existing menu items to use.\n *\n * @return 
{AudioTrackMenuItem[]}\n * An array of menu items\n */\n createItems(items = []) {\n // if there's only one audio track, there no point in showing it\n this.hideThreshold_ = 1;\n const tracks = this.player_.audioTracks();\n for (let i = 0; i < tracks.length; i++) {\n const track = tracks[i];\n items.push(new AudioTrackMenuItem(this.player_, {\n track,\n // MenuItem is selectable\n selectable: true,\n // MenuItem is NOT multiSelectable (i.e. only one can be marked \"selected\" at a time)\n multiSelectable: false\n }));\n }\n return items;\n }\n}\n\n/**\n * The text that should display over the `AudioTrackButton`s controls. Added for localization.\n *\n * @type {string}\n * @protected\n */\nAudioTrackButton.prototype.controlText_ = 'Audio Track';\nComponent$1.registerComponent('AudioTrackButton', AudioTrackButton);\n\n/**\n * @file playback-rate-menu-item.js\n */\n\n/**\n * The specific menu item type for selecting a playback rate.\n *\n * @extends MenuItem\n */\nclass PlaybackRateMenuItem extends MenuItem {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n const label = options.rate;\n const rate = parseFloat(label, 10);\n\n // Modify options for parent MenuItem class's init.\n options.label = label;\n options.selected = rate === player.playbackRate();\n options.selectable = true;\n options.multiSelectable = false;\n super(player, options);\n this.label = label;\n this.rate = rate;\n this.on(player, 'ratechange', e => this.update(e));\n }\n\n /**\n * This gets called when an `PlaybackRateMenuItem` is \"clicked\". See\n * {@link ClickableComponent} for more detailed information on what a click can be.\n *\n * @param {Event} [event]\n * The `keydown`, `tap`, or `click` event that caused this function to be\n * called.\n *\n * @listens tap\n * @listens click\n */\n handleClick(event) {\n super.handleClick();\n this.player().playbackRate(this.rate);\n }\n\n /**\n * Update the PlaybackRateMenuItem when the playbackrate changes.\n *\n * @param {Event} [event]\n * The `ratechange` event that caused this function to run.\n *\n * @listens Player#ratechange\n */\n update(event) {\n this.selected(this.player().playbackRate() === this.rate);\n }\n}\n\n/**\n * The text that should display over the `PlaybackRateMenuItem`s controls. 
Added for localization.\n *\n * @type {string}\n * @private\n */\nPlaybackRateMenuItem.prototype.contentElType = 'button';\nComponent$1.registerComponent('PlaybackRateMenuItem', PlaybackRateMenuItem);\n\n/**\n * @file playback-rate-menu-button.js\n */\n\n/**\n * The component for controlling the playback rate.\n *\n * @extends MenuButton\n */\nclass PlaybackRateMenuButton extends MenuButton {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.menuButton_.el_.setAttribute('aria-describedby', this.labelElId_);\n this.updateVisibility();\n this.updateLabel();\n this.on(player, 'loadstart', e => this.updateVisibility(e));\n this.on(player, 'ratechange', e => this.updateLabel(e));\n this.on(player, 'playbackrateschange', e => this.handlePlaybackRateschange(e));\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n const el = super.createEl();\n this.labelElId_ = 'vjs-playback-rate-value-label-' + this.id_;\n this.labelEl_ = createEl('div', {\n className: 'vjs-playback-rate-value',\n id: this.labelElId_,\n textContent: '1x'\n });\n el.appendChild(this.labelEl_);\n return el;\n }\n dispose() {\n this.labelEl_ = null;\n super.dispose();\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-playback-rate ${super.buildCSSClass()}`;\n }\n buildWrapperCSSClass() {\n return `vjs-playback-rate ${super.buildWrapperCSSClass()}`;\n }\n\n /**\n * Create the list of menu items. 
Specific to each subclass.\n *\n */\n createItems() {\n const rates = this.playbackRates();\n const items = [];\n for (let i = rates.length - 1; i >= 0; i--) {\n items.push(new PlaybackRateMenuItem(this.player(), {\n rate: rates[i] + 'x'\n }));\n }\n return items;\n }\n\n /**\n * On playbackrateschange, update the menu to account for the new items.\n *\n * @listens Player#playbackrateschange\n */\n handlePlaybackRateschange(event) {\n this.update();\n }\n\n /**\n * Get possible playback rates\n *\n * @return {Array}\n * All possible playback rates\n */\n playbackRates() {\n const player = this.player();\n return player.playbackRates && player.playbackRates() || [];\n }\n\n /**\n * Get whether playback rates is supported by the tech\n * and an array of playback rates exists\n *\n * @return {boolean}\n * Whether changing playback rate is supported\n */\n playbackRateSupported() {\n return this.player().tech_ && this.player().tech_.featuresPlaybackRate && this.playbackRates() && this.playbackRates().length > 0;\n }\n\n /**\n * Hide playback rate controls when they're no playback rate options to select\n *\n * @param {Event} [event]\n * The event that caused this function to run.\n *\n * @listens Player#loadstart\n */\n updateVisibility(event) {\n if (this.playbackRateSupported()) {\n this.removeClass('vjs-hidden');\n } else {\n this.addClass('vjs-hidden');\n }\n }\n\n /**\n * Update button label when rate changed\n *\n * @param {Event} [event]\n * The event that caused this function to run.\n *\n * @listens Player#ratechange\n */\n updateLabel(event) {\n if (this.playbackRateSupported()) {\n this.labelEl_.textContent = this.player().playbackRate() + 'x';\n }\n }\n}\n\n/**\n * The text that should display over the `PlaybackRateMenuButton`s controls.\n *\n * Added for localization.\n *\n * @type {string}\n * @protected\n */\nPlaybackRateMenuButton.prototype.controlText_ = 'Playback Rate';\nComponent$1.registerComponent('PlaybackRateMenuButton', PlaybackRateMenuButton);\n\n/**\n * @file spacer.js\n */\n\n/**\n * Just an empty spacer element that can be used as an append point for plugins, etc.\n * Also can be used to create space between elements when necessary.\n *\n * @extends Component\n */\nclass Spacer extends Component$1 {\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-spacer ${super.buildCSSClass()}`;\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl(tag = 'div', props = {}, attributes = {}) {\n if (!props.className) {\n props.className = this.buildCSSClass();\n }\n return super.createEl(tag, props, attributes);\n }\n}\nComponent$1.registerComponent('Spacer', Spacer);\n\n/**\n * @file custom-control-spacer.js\n */\n\n/**\n * Spacer specifically meant to be used as an insertion point for new plugins, etc.\n *\n * @extends Spacer\n */\nclass CustomControlSpacer extends Spacer {\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n */\n buildCSSClass() {\n return `vjs-custom-control-spacer ${super.buildCSSClass()}`;\n }\n\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: this.buildCSSClass(),\n // No-flex/table-cell mode requires there be some content\n // in the cell to fill the remaining space of the table.\n textContent: 
'\\u00a0'\n });\n }\n}\nComponent$1.registerComponent('CustomControlSpacer', CustomControlSpacer);\n\n/**\n * @file control-bar.js\n */\n\n/**\n * Container of main controls.\n *\n * @extends Component\n */\nclass ControlBar extends Component$1 {\n /**\n * Create the `Component`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n return super.createEl('div', {\n className: 'vjs-control-bar',\n dir: 'ltr'\n });\n }\n}\n\n/**\n * Default options for `ControlBar`\n *\n * @type {Object}\n * @private\n */\nControlBar.prototype.options_ = {\n children: ['playToggle', 'skipBackward', 'skipForward', 'volumePanel', 'currentTimeDisplay', 'timeDivider', 'durationDisplay', 'progressControl', 'liveDisplay', 'seekToLive', 'remainingTimeDisplay', 'customControlSpacer', 'playbackRateMenuButton', 'chaptersButton', 'descriptionsButton', 'subsCapsButton', 'audioTrackButton', 'pictureInPictureToggle', 'fullscreenToggle']\n};\nComponent$1.registerComponent('ControlBar', ControlBar);\n\n/**\n * @file error-display.js\n */\n\n/**\n * A display that indicates an error has occurred. This means that the video\n * is unplayable.\n *\n * @extends ModalDialog\n */\nclass ErrorDisplay extends ModalDialog {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n super(player, options);\n this.on(player, 'error', e => {\n this.open(e);\n });\n }\n\n /**\n * Builds the default DOM `className`.\n *\n * @return {string}\n * The DOM `className` for this object.\n *\n * @deprecated Since version 5.\n */\n buildCSSClass() {\n return `vjs-error-display ${super.buildCSSClass()}`;\n }\n\n /**\n * Gets the localized error message based on the `Player`s error.\n *\n * @return {string}\n * The `Player`s error message localized or an empty string.\n */\n content() {\n const error = this.player().error();\n return error ? this.localize(error.message) : '';\n }\n}\n\n/**\n * The default options for an `ErrorDisplay`.\n *\n * @private\n */\nErrorDisplay.prototype.options_ = Object.assign({}, ModalDialog.prototype.options_, {\n pauseOnOpen: false,\n fillAlways: true,\n temporary: false,\n uncloseable: true\n});\nComponent$1.registerComponent('ErrorDisplay', ErrorDisplay);\n\n/**\n * @file text-track-settings.js\n */\nconst LOCAL_STORAGE_KEY$1 = 'vjs-text-track-settings';\nconst COLOR_BLACK = ['#000', 'Black'];\nconst COLOR_BLUE = ['#00F', 'Blue'];\nconst COLOR_CYAN = ['#0FF', 'Cyan'];\nconst COLOR_GREEN = ['#0F0', 'Green'];\nconst COLOR_MAGENTA = ['#F0F', 'Magenta'];\nconst COLOR_RED = ['#F00', 'Red'];\nconst COLOR_WHITE = ['#FFF', 'White'];\nconst COLOR_YELLOW = ['#FF0', 'Yellow'];\nconst OPACITY_OPAQUE = ['1', 'Opaque'];\nconst OPACITY_SEMI = ['0.5', 'Semi-Transparent'];\nconst OPACITY_TRANS = ['0', 'Transparent'];\n\n// Configuration for the various elements in the DOM of this component.\n//\n// Possible keys include:\n//\n// `default`:\n// The default option index. 
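// ---------------------------------------------------------------------------
// Usage sketch (illustrative): the `children` array above is the default
// control bar layout; individual children can be switched off through player
// options without subclassing ControlBar. Which children to drop is an example
// choice, and the player id is an assumption.
const slimPlayer = videojs('my-video', {
  controlBar: {
    pictureInPictureToggle: false,
    remainingTimeDisplay: false
  }
});
// ---------------------------------------------------------------------------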
Only needs to be provided if not zero.\n// `parser`:\n// A function which is used to parse the value from the selected option in\n// a customized way.\n// `selector`:\n// The selector used to find the associated element.\nconst selectConfigs = {\n backgroundColor: {\n selector: '.vjs-bg-color > select',\n id: 'captions-background-color-%s',\n label: 'Color',\n options: [COLOR_BLACK, COLOR_WHITE, COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_YELLOW, COLOR_MAGENTA, COLOR_CYAN]\n },\n backgroundOpacity: {\n selector: '.vjs-bg-opacity > select',\n id: 'captions-background-opacity-%s',\n label: 'Opacity',\n options: [OPACITY_OPAQUE, OPACITY_SEMI, OPACITY_TRANS]\n },\n color: {\n selector: '.vjs-text-color > select',\n id: 'captions-foreground-color-%s',\n label: 'Color',\n options: [COLOR_WHITE, COLOR_BLACK, COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_YELLOW, COLOR_MAGENTA, COLOR_CYAN]\n },\n edgeStyle: {\n selector: '.vjs-edge-style > select',\n id: '%s',\n label: 'Text Edge Style',\n options: [['none', 'None'], ['raised', 'Raised'], ['depressed', 'Depressed'], ['uniform', 'Uniform'], ['dropshadow', 'Drop shadow']]\n },\n fontFamily: {\n selector: '.vjs-font-family > select',\n id: 'captions-font-family-%s',\n label: 'Font Family',\n options: [['proportionalSansSerif', 'Proportional Sans-Serif'], ['monospaceSansSerif', 'Monospace Sans-Serif'], ['proportionalSerif', 'Proportional Serif'], ['monospaceSerif', 'Monospace Serif'], ['casual', 'Casual'], ['script', 'Script'], ['small-caps', 'Small Caps']]\n },\n fontPercent: {\n selector: '.vjs-font-percent > select',\n id: 'captions-font-size-%s',\n label: 'Font Size',\n options: [['0.50', '50%'], ['0.75', '75%'], ['1.00', '100%'], ['1.25', '125%'], ['1.50', '150%'], ['1.75', '175%'], ['2.00', '200%'], ['3.00', '300%'], ['4.00', '400%']],\n default: 2,\n parser: v => v === '1.00' ? 
null : Number(v)\n },\n textOpacity: {\n selector: '.vjs-text-opacity > select',\n id: 'captions-foreground-opacity-%s',\n label: 'Opacity',\n options: [OPACITY_OPAQUE, OPACITY_SEMI]\n },\n // Options for this object are defined below.\n windowColor: {\n selector: '.vjs-window-color > select',\n id: 'captions-window-color-%s',\n label: 'Color'\n },\n // Options for this object are defined below.\n windowOpacity: {\n selector: '.vjs-window-opacity > select',\n id: 'captions-window-opacity-%s',\n label: 'Opacity',\n options: [OPACITY_TRANS, OPACITY_SEMI, OPACITY_OPAQUE]\n }\n};\nselectConfigs.windowColor.options = selectConfigs.backgroundColor.options;\n\n/**\n * Get the actual value of an option.\n *\n * @param {string} value\n * The value to get\n *\n * @param {Function} [parser]\n * Optional function to adjust the value.\n *\n * @return {*}\n * - Will be `undefined` if no value exists\n * - Will be `undefined` if the given value is \"none\".\n * - Will be the actual value otherwise.\n *\n * @private\n */\nfunction parseOptionValue(value, parser) {\n if (parser) {\n value = parser(value);\n }\n if (value && value !== 'none') {\n return value;\n }\n}\n\n/**\n * Gets the value of the selected element within a element.\n *\n * @param {Element} el\n * the element to look in\n *\n * @param {Function} [parser]\n * Optional function to adjust the value.\n *\n * @return {*}\n * - Will be `undefined` if no value exists\n * - Will be `undefined` if the given value is \"none\".\n * - Will be the actual value otherwise.\n *\n * @private\n */\nfunction getSelectedOptionValue(el, parser) {\n const value = el.options[el.options.selectedIndex].value;\n return parseOptionValue(value, parser);\n}\n\n/**\n * Sets the selected element within a element based on a\n * given value.\n *\n * @param {Element} el\n * The element to look in.\n *\n * @param {string} value\n * the property to look on.\n *\n * @param {Function} [parser]\n * Optional function to adjust the value before comparing.\n *\n * @private\n */\nfunction setSelectedOption(el, value, parser) {\n if (!value) {\n return;\n }\n for (let i = 0; i < el.options.length; i++) {\n if (parseOptionValue(el.options[i].value, parser) === value) {\n el.selectedIndex = i;\n break;\n }\n }\n}\n\n/**\n * Manipulate Text Tracks settings.\n *\n * @extends ModalDialog\n */\nclass TextTrackSettings extends ModalDialog {\n /**\n * Creates an instance of this class.\n *\n * @param { import('../player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n */\n constructor(player, options) {\n options.temporary = false;\n super(player, options);\n this.updateDisplay = this.updateDisplay.bind(this);\n\n // fill the modal and pretend we have opened it\n this.fill();\n this.hasBeenOpened_ = this.hasBeenFilled_ = true;\n this.endDialog = createEl('p', {\n className: 'vjs-control-text',\n textContent: this.localize('End of dialog window.')\n });\n this.el().appendChild(this.endDialog);\n this.setDefaults();\n\n // Grab `persistTextTrackSettings` from the player options if not passed in child options\n if (options.persistTextTrackSettings === undefined) {\n this.options_.persistTextTrackSettings = this.options_.playerOptions.persistTextTrackSettings;\n }\n this.on(this.$('.vjs-done-button'), 'click', () => {\n this.saveSettings();\n this.close();\n });\n this.on(this.$('.vjs-default-button'), 'click', () => {\n this.setDefaults();\n this.updateDisplay();\n });\n each(selectConfigs, 
config => {\n this.on(this.$(config.selector), 'change', this.updateDisplay);\n });\n if (this.options_.persistTextTrackSettings) {\n this.restoreSettings();\n }\n }\n dispose() {\n this.endDialog = null;\n super.dispose();\n }\n\n /**\n * Create a element with configured options.\n *\n * @param {string} key\n * Configuration key to use during creation.\n *\n * @param {string} [legendId]\n * Id of associated .\n *\n * @param {string} [type=label]\n * Type of labelling element, `label` or `legend`\n *\n * @return {string}\n * An HTML string.\n *\n * @private\n */\n createElSelect_(key, legendId = '', type = 'label') {\n const config = selectConfigs[key];\n const id = config.id.replace('%s', this.id_);\n const selectLabelledbyIds = [legendId, id].join(' ').trim();\n const guid = `vjs_select_${newGUID()}`;\n return [`<${type} id=\"${id}\"${type === 'label' ? ` for=\"${guid}\" class=\"vjs-label\"` : ''}>`, this.localize(config.label), `${type}>`, ``].concat(config.options.map(o => {\n const optionId = id + '-' + o[1].replace(/\\W+/g, '');\n return [``, this.localize(o[1]), ' '].join('');\n })).concat(' ').join('');\n }\n\n /**\n * Create foreground color element for the component\n *\n * @return {string}\n * An HTML string.\n *\n * @private\n */\n createElFgColor_() {\n const legendId = `captions-text-legend-${this.id_}`;\n return [' ', ``, this.localize('Text'), ' ', '', this.createElSelect_('color', legendId), ' ', '', this.createElSelect_('textOpacity', legendId), ' ', ' '].join('');\n }\n\n /**\n * Create background color element for the component\n *\n * @return {string}\n * An HTML string.\n *\n * @private\n */\n createElBgColor_() {\n const legendId = `captions-background-${this.id_}`;\n return ['', ``, this.localize('Text Background'), ' ', '', this.createElSelect_('backgroundColor', legendId), ' ', '', this.createElSelect_('backgroundOpacity', legendId), ' ', ' '].join('');\n }\n\n /**\n * Create window color element for the component\n *\n * @return {string}\n * An HTML string.\n *\n * @private\n */\n createElWinColor_() {\n const legendId = `captions-window-${this.id_}`;\n return ['', ``, this.localize('Caption Area Background'), ' ', '', this.createElSelect_('windowColor', legendId), ' ', '', this.createElSelect_('windowOpacity', legendId), ' ', ' '].join('');\n }\n\n /**\n * Create color elements for the component\n *\n * @return {Element}\n * The element that was created\n *\n * @private\n */\n createElColors_() {\n return createEl('div', {\n className: 'vjs-track-settings-colors',\n innerHTML: [this.createElFgColor_(), this.createElBgColor_(), this.createElWinColor_()].join('')\n });\n }\n\n /**\n * Create font elements for the component\n *\n * @return {Element}\n * The element that was created.\n *\n * @private\n */\n createElFont_() {\n return createEl('div', {\n className: 'vjs-track-settings-font',\n innerHTML: ['', this.createElSelect_('fontPercent', '', 'legend'), ' ', '', this.createElSelect_('edgeStyle', '', 'legend'), ' ', '', this.createElSelect_('fontFamily', '', 'legend'), ' '].join('')\n });\n }\n\n /**\n * Create controls for the component\n *\n * @return {Element}\n * The element that was created.\n *\n * @private\n */\n createElControls_() {\n const defaultsDescription = this.localize('restore all settings to the default values');\n return createEl('div', {\n className: 'vjs-track-settings-controls',\n innerHTML: [``, this.localize('Reset'), ` ${defaultsDescription} `, ' ', `${this.localize('Done')} `].join('')\n });\n }\n content() {\n return 
[this.createElColors_(), this.createElFont_(), this.createElControls_()];\n }\n label() {\n return this.localize('Caption Settings Dialog');\n }\n description() {\n return this.localize('Beginning of dialog window. Escape will cancel and close the window.');\n }\n buildCSSClass() {\n return super.buildCSSClass() + ' vjs-text-track-settings';\n }\n\n /**\n * Gets an object of text track settings (or null).\n *\n * @return {Object}\n * An object with config values parsed from the DOM or localStorage.\n */\n getValues() {\n return reduce(selectConfigs, (accum, config, key) => {\n const value = getSelectedOptionValue(this.$(config.selector), config.parser);\n if (value !== undefined) {\n accum[key] = value;\n }\n return accum;\n }, {});\n }\n\n /**\n * Sets text track settings from an object of values.\n *\n * @param {Object} values\n * An object with config values parsed from the DOM or localStorage.\n */\n setValues(values) {\n each(selectConfigs, (config, key) => {\n setSelectedOption(this.$(config.selector), values[key], config.parser);\n });\n }\n\n /**\n * Sets all `` elements to their default values.\n */\n setDefaults() {\n each(selectConfigs, config => {\n const index = config.hasOwnProperty('default') ? config.default : 0;\n this.$(config.selector).selectedIndex = index;\n });\n }\n\n /**\n * Restore texttrack settings from localStorage\n */\n restoreSettings() {\n let values;\n try {\n values = JSON.parse(window$1.localStorage.getItem(LOCAL_STORAGE_KEY$1));\n } catch (err) {\n log$1.warn(err);\n }\n if (values) {\n this.setValues(values);\n }\n }\n\n /**\n * Save text track settings to localStorage\n */\n saveSettings() {\n if (!this.options_.persistTextTrackSettings) {\n return;\n }\n const values = this.getValues();\n try {\n if (Object.keys(values).length) {\n window$1.localStorage.setItem(LOCAL_STORAGE_KEY$1, JSON.stringify(values));\n } else {\n window$1.localStorage.removeItem(LOCAL_STORAGE_KEY$1);\n }\n } catch (err) {\n log$1.warn(err);\n }\n }\n\n /**\n * Update display of text track settings\n */\n updateDisplay() {\n const ttDisplay = this.player_.getChild('textTrackDisplay');\n if (ttDisplay) {\n ttDisplay.updateDisplay();\n }\n }\n\n /**\n * conditionally blur the element and refocus the captions button\n *\n * @private\n */\n conditionalBlur_() {\n this.previouslyActiveEl_ = null;\n const cb = this.player_.controlBar;\n const subsCapsBtn = cb && cb.subsCapsButton;\n const ccBtn = cb && cb.captionsButton;\n if (subsCapsBtn) {\n subsCapsBtn.focus();\n } else if (ccBtn) {\n ccBtn.focus();\n }\n }\n\n /**\n * Repopulate dialog with new localizations on languagechange\n */\n handleLanguagechange() {\n this.fill();\n }\n}\nComponent$1.registerComponent('TextTrackSettings', TextTrackSettings);\n\n/**\n * @file resize-manager.js\n */\n\n/**\n * A Resize Manager. It is in charge of triggering `playerresize` on the player in the right conditions.\n *\n * It'll either create an iframe and use a debounced resize handler on it or use the new {@link https://wicg.github.io/ResizeObserver/|ResizeObserver}.\n *\n * If the ResizeObserver is available natively, it will be used. 
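// ---------------------------------------------------------------------------
// Usage sketch (illustrative): saveSettings()/restoreSettings() above only use
// localStorage (key 'vjs-text-track-settings') when persistence is enabled, so
// opting in is a single player option. The player id is an assumption.
const persistentPlayer = videojs('my-video', {
  persistTextTrackSettings: true
});
// ---------------------------------------------------------------------------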
A polyfill can be passed in as an option.\n * If a `playerresize` event is not needed, the ResizeManager component can be removed from the player, see the example below.\n *\n * @example How to disable the resize manager \n * const player = videojs('#vid', {\n * resizeManager: false\n * });\n *\n * @see {@link https://wicg.github.io/ResizeObserver/|ResizeObserver specification}\n *\n * @extends Component\n */\nclass ResizeManager extends Component$1 {\n /**\n * Create the ResizeManager.\n *\n * @param {Object} player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of ResizeManager options.\n *\n * @param {Object} [options.ResizeObserver]\n * A polyfill for ResizeObserver can be passed in here.\n * If this is set to null it will ignore the native ResizeObserver and fall back to the iframe fallback.\n */\n constructor(player, options) {\n let RESIZE_OBSERVER_AVAILABLE = options.ResizeObserver || window$1.ResizeObserver;\n\n // if `null` was passed, we want to disable the ResizeObserver\n if (options.ResizeObserver === null) {\n RESIZE_OBSERVER_AVAILABLE = false;\n }\n\n // Only create an element when ResizeObserver isn't available\n const options_ = merge$1({\n createEl: !RESIZE_OBSERVER_AVAILABLE,\n reportTouchActivity: false\n }, options);\n super(player, options_);\n this.ResizeObserver = options.ResizeObserver || window$1.ResizeObserver;\n this.loadListener_ = null;\n this.resizeObserver_ = null;\n this.debouncedHandler_ = debounce(() => {\n this.resizeHandler();\n }, 100, false, this);\n if (RESIZE_OBSERVER_AVAILABLE) {\n this.resizeObserver_ = new this.ResizeObserver(this.debouncedHandler_);\n this.resizeObserver_.observe(player.el());\n } else {\n this.loadListener_ = () => {\n if (!this.el_ || !this.el_.contentWindow) {\n return;\n }\n const debouncedHandler_ = this.debouncedHandler_;\n let unloadListener_ = this.unloadListener_ = function () {\n off(this, 'resize', debouncedHandler_);\n off(this, 'unload', unloadListener_);\n unloadListener_ = null;\n };\n\n // safari and edge can unload the iframe before resizemanager dispose\n // we have to dispose of event handlers correctly before that happens\n on(this.el_.contentWindow, 'unload', unloadListener_);\n on(this.el_.contentWindow, 'resize', debouncedHandler_);\n };\n this.one('load', this.loadListener_);\n }\n }\n createEl() {\n return super.createEl('iframe', {\n className: 'vjs-resize-manager',\n tabIndex: -1,\n title: this.localize('No content')\n }, {\n 'aria-hidden': 'true'\n });\n }\n\n /**\n * Called when a resize is triggered on the iframe or a resize is observed via the ResizeObserver\n *\n * @fires Player#playerresize\n */\n resizeHandler() {\n /**\n * Called when the player size has changed\n *\n * @event Player#playerresize\n * @type {Event}\n */\n // make sure player is still around to trigger\n // prevents this from causing an error after dispose\n if (!this.player_ || !this.player_.trigger) {\n return;\n }\n this.player_.trigger('playerresize');\n }\n dispose() {\n if (this.debouncedHandler_) {\n this.debouncedHandler_.cancel();\n }\n if (this.resizeObserver_) {\n if (this.player_.el()) {\n this.resizeObserver_.unobserve(this.player_.el());\n }\n this.resizeObserver_.disconnect();\n }\n if (this.loadListener_) {\n this.off('load', this.loadListener_);\n }\n if (this.el_ && this.el_.contentWindow && this.unloadListener_) {\n this.unloadListener_.call(this.el_.contentWindow);\n }\n this.ResizeObserver = null;\n this.resizeObserver = null;\n 
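// release the remaining handler and load-listener references so they can be garbage collected\n 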
this.debouncedHandler_ = null;\n this.loadListener_ = null;\n super.dispose();\n }\n}\nComponent$1.registerComponent('ResizeManager', ResizeManager);\n\nconst defaults = {\n trackingThreshold: 20,\n liveTolerance: 15\n};\n\n/*\n track when we are at the live edge, and other helpers for live playback */\n\n/**\n * A class for checking live current time and determining when the player\n * is at or behind the live edge.\n */\nclass LiveTracker extends Component$1 {\n /**\n * Creates an instance of this class.\n *\n * @param { import('./player').default } player\n * The `Player` that this class should be attached to.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {number} [options.trackingThreshold=20]\n * Number of seconds of live window (seekableEnd - seekableStart) that\n * media needs to have before the liveui will be shown.\n *\n * @param {number} [options.liveTolerance=15]\n * Number of seconds behind live that we have to be\n * before we will be considered non-live. Note that this will only\n * be used when playing at the live edge. This allows large seekable end\n * changes to not effect whether we are live or not.\n */\n constructor(player, options) {\n // LiveTracker does not need an element\n const options_ = merge$1(defaults, options, {\n createEl: false\n });\n super(player, options_);\n this.trackLiveHandler_ = () => this.trackLive_();\n this.handlePlay_ = e => this.handlePlay(e);\n this.handleFirstTimeupdate_ = e => this.handleFirstTimeupdate(e);\n this.handleSeeked_ = e => this.handleSeeked(e);\n this.seekToLiveEdge_ = e => this.seekToLiveEdge(e);\n this.reset_();\n this.on(this.player_, 'durationchange', e => this.handleDurationchange(e));\n // we should try to toggle tracking on canplay as native playback engines, like Safari\n // may not have the proper values for things like seekableEnd until then\n this.on(this.player_, 'canplay', () => this.toggleTracking());\n }\n\n /**\n * all the functionality for tracking when seek end changes\n * and for tracking how far past seek end we should be\n */\n trackLive_() {\n const seekable = this.player_.seekable();\n\n // skip undefined seekable\n if (!seekable || !seekable.length) {\n return;\n }\n const newTime = Number(window$1.performance.now().toFixed(4));\n const deltaTime = this.lastTime_ === -1 ? 0 : (newTime - this.lastTime_) / 1000;\n this.lastTime_ = newTime;\n this.pastSeekEnd_ = this.pastSeekEnd() + deltaTime;\n const liveCurrentTime = this.liveCurrentTime();\n const currentTime = this.player_.currentTime();\n\n // we are behind live if any are true\n // 1. the player is paused\n // 2. the user seeked to a location 2 seconds away from live\n // 3. the difference between live and current time is greater\n // liveTolerance which defaults to 15s\n let isBehind = this.player_.paused() || this.seekedBehindLive_ || Math.abs(liveCurrentTime - currentTime) > this.options_.liveTolerance;\n\n // we cannot be behind if\n // 1. until we have not seen a timeupdate yet\n // 2. 
liveCurrentTime is Infinity, which happens on Android and Native Safari\n if (!this.timeupdateSeen_ || liveCurrentTime === Infinity) {\n isBehind = false;\n }\n if (isBehind !== this.behindLiveEdge_) {\n this.behindLiveEdge_ = isBehind;\n this.trigger('liveedgechange');\n }\n }\n\n /**\n * handle a durationchange event on the player\n * and start/stop tracking accordingly.\n */\n handleDurationchange() {\n this.toggleTracking();\n }\n\n /**\n * start/stop tracking\n */\n toggleTracking() {\n if (this.player_.duration() === Infinity && this.liveWindow() >= this.options_.trackingThreshold) {\n if (this.player_.options_.liveui) {\n this.player_.addClass('vjs-liveui');\n }\n this.startTracking();\n } else {\n this.player_.removeClass('vjs-liveui');\n this.stopTracking();\n }\n }\n\n /**\n * start tracking live playback\n */\n startTracking() {\n if (this.isTracking()) {\n return;\n }\n\n // If we haven't seen a timeupdate, we need to check whether playback\n // began before this component started tracking. This can happen commonly\n // when using autoplay.\n if (!this.timeupdateSeen_) {\n this.timeupdateSeen_ = this.player_.hasStarted();\n }\n this.trackingInterval_ = this.setInterval(this.trackLiveHandler_, UPDATE_REFRESH_INTERVAL);\n this.trackLive_();\n this.on(this.player_, ['play', 'pause'], this.trackLiveHandler_);\n if (!this.timeupdateSeen_) {\n this.one(this.player_, 'play', this.handlePlay_);\n this.one(this.player_, 'timeupdate', this.handleFirstTimeupdate_);\n } else {\n this.on(this.player_, 'seeked', this.handleSeeked_);\n }\n }\n\n /**\n * handle the first timeupdate on the player if it wasn't already playing\n * when live tracker started tracking.\n */\n handleFirstTimeupdate() {\n this.timeupdateSeen_ = true;\n this.on(this.player_, 'seeked', this.handleSeeked_);\n }\n\n /**\n * Keep track of what time a seek starts, and listen for seeked\n * to find where a seek ends.\n */\n handleSeeked() {\n const timeDiff = Math.abs(this.liveCurrentTime() - this.player_.currentTime());\n this.seekedBehindLive_ = this.nextSeekedFromUser_ && timeDiff > 2;\n this.nextSeekedFromUser_ = false;\n this.trackLive_();\n }\n\n /**\n * handle the first play on the player, and make sure that we seek\n * right to the live edge.\n */\n handlePlay() {\n this.one(this.player_, 'timeupdate', this.seekToLiveEdge_);\n }\n\n /**\n * Stop tracking, and set all internal variables to\n * their initial value.\n */\n reset_() {\n this.lastTime_ = -1;\n this.pastSeekEnd_ = 0;\n this.lastSeekEnd_ = -1;\n this.behindLiveEdge_ = true;\n this.timeupdateSeen_ = false;\n this.seekedBehindLive_ = false;\n this.nextSeekedFromUser_ = false;\n this.clearInterval(this.trackingInterval_);\n this.trackingInterval_ = null;\n this.off(this.player_, ['play', 'pause'], this.trackLiveHandler_);\n this.off(this.player_, 'seeked', this.handleSeeked_);\n this.off(this.player_, 'play', this.handlePlay_);\n this.off(this.player_, 'timeupdate', this.handleFirstTimeupdate_);\n this.off(this.player_, 'timeupdate', this.seekToLiveEdge_);\n }\n\n /**\n * The next seeked event is from the user. 
Meaning that any seek\n * > 2s behind live will be considered behind live for real and\n * liveTolerance will be ignored.\n */\n nextSeekedFromUser() {\n this.nextSeekedFromUser_ = true;\n }\n\n /**\n * stop tracking live playback\n */\n stopTracking() {\n if (!this.isTracking()) {\n return;\n }\n this.reset_();\n this.trigger('liveedgechange');\n }\n\n /**\n * A helper to get the player seekable end\n * so that we don't have to null check everywhere\n *\n * @return {number}\n * The furthest seekable end or Infinity.\n */\n seekableEnd() {\n const seekable = this.player_.seekable();\n const seekableEnds = [];\n let i = seekable ? seekable.length : 0;\n while (i--) {\n seekableEnds.push(seekable.end(i));\n }\n\n // grab the furthest seekable end after sorting, or if there are none\n // default to Infinity\n return seekableEnds.length ? seekableEnds.sort()[seekableEnds.length - 1] : Infinity;\n }\n\n /**\n * A helper to get the player seekable start\n * so that we don't have to null check everywhere\n *\n * @return {number}\n * The earliest seekable start or 0.\n */\n seekableStart() {\n const seekable = this.player_.seekable();\n const seekableStarts = [];\n let i = seekable ? seekable.length : 0;\n while (i--) {\n seekableStarts.push(seekable.start(i));\n }\n\n // grab the first seekable start after sorting, or if there are none\n // default to 0\n return seekableStarts.length ? seekableStarts.sort()[0] : 0;\n }\n\n /**\n * Get the live time window aka\n * the amount of time between seekable start and\n * live current time.\n *\n * @return {number}\n * The amount of seconds that are seekable in\n * the live video.\n */\n liveWindow() {\n const liveCurrentTime = this.liveCurrentTime();\n\n // if liveCurrenTime is Infinity then we don't have a liveWindow at all\n if (liveCurrentTime === Infinity) {\n return 0;\n }\n return liveCurrentTime - this.seekableStart();\n }\n\n /**\n * Determines if the player is live, only checks if this component\n * is tracking live playback or not\n *\n * @return {boolean}\n * Whether liveTracker is tracking\n */\n isLive() {\n return this.isTracking();\n }\n\n /**\n * Determines if currentTime is at the live edge and won't fall behind\n * on each seekableendchange\n *\n * @return {boolean}\n * Whether playback is at the live edge\n */\n atLiveEdge() {\n return !this.behindLiveEdge();\n }\n\n /**\n * get what we expect the live current time to be\n *\n * @return {number}\n * The expected live current time\n */\n liveCurrentTime() {\n return this.pastSeekEnd() + this.seekableEnd();\n }\n\n /**\n * The number of seconds that have occurred after seekable end\n * changed. 
This will be reset to 0 once seekable end changes.\n *\n * @return {number}\n * Seconds past the current seekable end\n */\n pastSeekEnd() {\n const seekableEnd = this.seekableEnd();\n if (this.lastSeekEnd_ !== -1 && seekableEnd !== this.lastSeekEnd_) {\n this.pastSeekEnd_ = 0;\n }\n this.lastSeekEnd_ = seekableEnd;\n return this.pastSeekEnd_;\n }\n\n /**\n * If we are currently behind the live edge, aka currentTime will be\n * behind on a seekableendchange\n *\n * @return {boolean}\n * If we are behind the live edge\n */\n behindLiveEdge() {\n return this.behindLiveEdge_;\n }\n\n /**\n * Whether live tracker is currently tracking or not.\n */\n isTracking() {\n return typeof this.trackingInterval_ === 'number';\n }\n\n /**\n * Seek to the live edge if we are behind the live edge\n */\n seekToLiveEdge() {\n this.seekedBehindLive_ = false;\n if (this.atLiveEdge()) {\n return;\n }\n this.nextSeekedFromUser_ = false;\n this.player_.currentTime(this.liveCurrentTime());\n }\n\n /**\n * Dispose of liveTracker\n */\n dispose() {\n this.stopTracking();\n super.dispose();\n }\n}\nComponent$1.registerComponent('LiveTracker', LiveTracker);\n\n/**\n * Displays an element over the player which contains an optional title and\n * description for the current content.\n *\n * Much of the code for this component originated in the now obsolete\n * videojs-dock plugin: https://github.com/brightcove/videojs-dock/\n *\n * @extends Component\n */\nclass TitleBar extends Component$1 {\n constructor(player, options) {\n super(player, options);\n this.on('statechanged', e => this.updateDom_());\n this.updateDom_();\n }\n\n /**\n * Create the `TitleBar`'s DOM element\n *\n * @return {Element}\n * The element that was created.\n */\n createEl() {\n this.els = {\n title: createEl('div', {\n className: 'vjs-title-bar-title',\n id: `vjs-title-bar-title-${newGUID()}`\n }),\n description: createEl('div', {\n className: 'vjs-title-bar-description',\n id: `vjs-title-bar-description-${newGUID()}`\n })\n };\n return createEl('div', {\n className: 'vjs-title-bar'\n }, {}, values(this.els));\n }\n\n /**\n * Updates the DOM based on the component's state object.\n */\n updateDom_() {\n const tech = this.player_.tech_;\n const techEl = tech && tech.el_;\n const techAriaAttrs = {\n title: 'aria-labelledby',\n description: 'aria-describedby'\n };\n ['title', 'description'].forEach(k => {\n const value = this.state[k];\n const el = this.els[k];\n const techAriaAttr = techAriaAttrs[k];\n emptyEl(el);\n if (value) {\n textContent(el, value);\n }\n\n // If there is a tech element available, update its ARIA attributes\n // according to whether a title and/or description have been provided.\n if (techEl) {\n techEl.removeAttribute(techAriaAttr);\n if (value) {\n techEl.setAttribute(techAriaAttr, el.id);\n }\n }\n });\n if (this.state.title || this.state.description) {\n this.show();\n } else {\n this.hide();\n }\n }\n\n /**\n * Update the contents of the title bar component with new title and\n * description text.\n *\n * If both title and description are missing, the title bar will be hidden.\n *\n * If either title or description are present, the title bar will be visible.\n *\n * NOTE: Any previously set value will be preserved. 
To unset a previously\n * set value, you must pass an empty string or null.\n *\n * For example:\n *\n * ```\n * update({title: 'foo', description: 'bar'}) // title: 'foo', description: 'bar'\n * update({description: 'bar2'}) // title: 'foo', description: 'bar2'\n * update({title: ''}) // title: '', description: 'bar2'\n * update({title: 'foo', description: null}) // title: 'foo', description: null\n * ```\n *\n * @param {Object} [options={}]\n * An options object. When empty, the title bar will be hidden.\n *\n * @param {string} [options.title]\n * A title to display in the title bar.\n *\n * @param {string} [options.description]\n * A description to display in the title bar.\n */\n update(options) {\n this.setState(options);\n }\n\n /**\n * Dispose the component.\n */\n dispose() {\n const tech = this.player_.tech_;\n const techEl = tech && tech.el_;\n if (techEl) {\n techEl.removeAttribute('aria-labelledby');\n techEl.removeAttribute('aria-describedby');\n }\n super.dispose();\n this.els = null;\n }\n}\nComponent$1.registerComponent('TitleBar', TitleBar);\n\n/**\n * This function is used to fire a sourceset when there is something\n * similar to `mediaEl.load()` being called. It will try to find the source via\n * the `src` attribute and then the `` elements. It will then fire `sourceset`\n * with the source that was found or empty string if we cannot know. If it cannot\n * find a source then `sourceset` will not be fired.\n *\n * @param { import('./html5').default } tech\n * The tech object that sourceset was setup on\n *\n * @return {boolean}\n * returns false if the sourceset was not fired and true otherwise.\n */\nconst sourcesetLoad = tech => {\n const el = tech.el();\n\n // if `el.src` is set, that source will be loaded.\n if (el.hasAttribute('src')) {\n tech.triggerSourceset(el.src);\n return true;\n }\n\n /**\n * Since there isn't a src property on the media element, source elements will be used for\n * implementing the source selection algorithm. This happens asynchronously and\n * for most cases were there is more than one source we cannot tell what source will\n * be loaded, without re-implementing the source selection algorithm. At this time we are not\n * going to do that. There are three special cases that we do handle here though:\n *\n * 1. If there are no sources, do not fire `sourceset`.\n * 2. If there is only one `` with a `src` property/attribute that is our `src`\n * 3. 
If there is more than one `` but all of them have the same `src` url.\n * That will be our src.\n */\n const sources = tech.$$('source');\n const srcUrls = [];\n let src = '';\n\n // if there are no sources, do not fire sourceset\n if (!sources.length) {\n return false;\n }\n\n // only count valid/non-duplicate source elements\n for (let i = 0; i < sources.length; i++) {\n const url = sources[i].src;\n if (url && srcUrls.indexOf(url) === -1) {\n srcUrls.push(url);\n }\n }\n\n // there were no valid sources\n if (!srcUrls.length) {\n return false;\n }\n\n // there is only one valid source element url\n // use that\n if (srcUrls.length === 1) {\n src = srcUrls[0];\n }\n tech.triggerSourceset(src);\n return true;\n};\n\n/**\n * our implementation of an `innerHTML` descriptor for browsers\n * that do not have one.\n */\nconst innerHTMLDescriptorPolyfill = Object.defineProperty({}, 'innerHTML', {\n get() {\n return this.cloneNode(true).innerHTML;\n },\n set(v) {\n // make a dummy node to use innerHTML on\n const dummy = document.createElement(this.nodeName.toLowerCase());\n\n // set innerHTML to the value provided\n dummy.innerHTML = v;\n\n // make a document fragment to hold the nodes from dummy\n const docFrag = document.createDocumentFragment();\n\n // copy all of the nodes created by the innerHTML on dummy\n // to the document fragment\n while (dummy.childNodes.length) {\n docFrag.appendChild(dummy.childNodes[0]);\n }\n\n // remove content\n this.innerText = '';\n\n // now we add all of that html in one by appending the\n // document fragment. This is how innerHTML does it.\n window$1.Element.prototype.appendChild.call(this, docFrag);\n\n // then return the result that innerHTML's setter would\n return this.innerHTML;\n }\n});\n\n/**\n * Get a property descriptor given a list of priorities and the\n * property to get.\n */\nconst getDescriptor = (priority, prop) => {\n let descriptor = {};\n for (let i = 0; i < priority.length; i++) {\n descriptor = Object.getOwnPropertyDescriptor(priority[i], prop);\n if (descriptor && descriptor.set && descriptor.get) {\n break;\n }\n }\n descriptor.enumerable = true;\n descriptor.configurable = true;\n return descriptor;\n};\nconst getInnerHTMLDescriptor = tech => getDescriptor([tech.el(), window$1.HTMLMediaElement.prototype, window$1.Element.prototype, innerHTMLDescriptorPolyfill], 'innerHTML');\n\n/**\n * Patches browser internal functions so that we can tell synchronously\n * if a `` was appended to the media element. 
For some reason this\n * causes a `sourceset` if the the media element is ready and has no source.\n * This happens when:\n * - The page has just loaded and the media element does not have a source.\n * - The media element was emptied of all sources, then `load()` was called.\n *\n * It does this by patching the following functions/properties when they are supported:\n *\n * - `append()` - can be used to add a `` element to the media element\n * - `appendChild()` - can be used to add a `` element to the media element\n * - `insertAdjacentHTML()` - can be used to add a `` element to the media element\n * - `innerHTML` - can be used to add a `` element to the media element\n *\n * @param {Html5} tech\n * The tech object that sourceset is being setup on.\n */\nconst firstSourceWatch = function (tech) {\n const el = tech.el();\n\n // make sure firstSourceWatch isn't setup twice.\n if (el.resetSourceWatch_) {\n return;\n }\n const old = {};\n const innerDescriptor = getInnerHTMLDescriptor(tech);\n const appendWrapper = appendFn => (...args) => {\n const retval = appendFn.apply(el, args);\n sourcesetLoad(tech);\n return retval;\n };\n ['append', 'appendChild', 'insertAdjacentHTML'].forEach(k => {\n if (!el[k]) {\n return;\n }\n\n // store the old function\n old[k] = el[k];\n\n // call the old function with a sourceset if a source\n // was loaded\n el[k] = appendWrapper(old[k]);\n });\n Object.defineProperty(el, 'innerHTML', merge$1(innerDescriptor, {\n set: appendWrapper(innerDescriptor.set)\n }));\n el.resetSourceWatch_ = () => {\n el.resetSourceWatch_ = null;\n Object.keys(old).forEach(k => {\n el[k] = old[k];\n });\n Object.defineProperty(el, 'innerHTML', innerDescriptor);\n };\n\n // on the first sourceset, we need to revert our changes\n tech.one('sourceset', el.resetSourceWatch_);\n};\n\n/**\n * our implementation of a `src` descriptor for browsers\n * that do not have one\n */\nconst srcDescriptorPolyfill = Object.defineProperty({}, 'src', {\n get() {\n if (this.hasAttribute('src')) {\n return getAbsoluteURL(window$1.Element.prototype.getAttribute.call(this, 'src'));\n }\n return '';\n },\n set(v) {\n window$1.Element.prototype.setAttribute.call(this, 'src', v);\n return v;\n }\n});\nconst getSrcDescriptor = tech => getDescriptor([tech.el(), window$1.HTMLMediaElement.prototype, srcDescriptorPolyfill], 'src');\n\n/**\n * setup `sourceset` handling on the `Html5` tech. 
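Note that this is opt-in: the `Html5` constructor only calls it when the tech was created with the `enableSourceset` option, and it returns early unless `featuresSourceset` is true. A rough consumer sketch (assuming `enableSourceset` is also accepted as a top-level player option and that the player re-emits the tech's `sourceset` event):\n *\n * const player = videojs('#vid', {enableSourceset: true});\n * player.on('sourceset', e => console.log('source changed to', e.src));\n *\n * 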
This function\n * patches the following element properties/functions:\n *\n * - `src` - to determine when `src` is set\n * - `setAttribute()` - to determine when `src` is set\n * - `load()` - this re-triggers the source selection algorithm, and can\n * cause a sourceset.\n *\n * If there is no source when we are adding `sourceset` support or during a `load()`\n * we also patch the functions listed in `firstSourceWatch`.\n *\n * @param {Html5} tech\n * The tech to patch\n */\nconst setupSourceset = function (tech) {\n if (!tech.featuresSourceset) {\n return;\n }\n const el = tech.el();\n\n // make sure sourceset isn't setup twice.\n if (el.resetSourceset_) {\n return;\n }\n const srcDescriptor = getSrcDescriptor(tech);\n const oldSetAttribute = el.setAttribute;\n const oldLoad = el.load;\n Object.defineProperty(el, 'src', merge$1(srcDescriptor, {\n set: v => {\n const retval = srcDescriptor.set.call(el, v);\n\n // we use the getter here to get the actual value set on src\n tech.triggerSourceset(el.src);\n return retval;\n }\n }));\n el.setAttribute = (n, v) => {\n const retval = oldSetAttribute.call(el, n, v);\n if (/src/i.test(n)) {\n tech.triggerSourceset(el.src);\n }\n return retval;\n };\n el.load = () => {\n const retval = oldLoad.call(el);\n\n // if load was called, but there was no source to fire\n // sourceset on. We have to watch for a source append\n // as that can trigger a `sourceset` when the media element\n // has no source\n if (!sourcesetLoad(tech)) {\n tech.triggerSourceset('');\n firstSourceWatch(tech);\n }\n return retval;\n };\n if (el.currentSrc) {\n tech.triggerSourceset(el.currentSrc);\n } else if (!sourcesetLoad(tech)) {\n firstSourceWatch(tech);\n }\n el.resetSourceset_ = () => {\n el.resetSourceset_ = null;\n el.load = oldLoad;\n el.setAttribute = oldSetAttribute;\n Object.defineProperty(el, 'src', srcDescriptor);\n if (el.resetSourceWatch_) {\n el.resetSourceWatch_();\n }\n };\n};\n\n/**\n * @file html5.js\n */\n\n/**\n * HTML5 Media Controller - Wrapper for HTML5 Media API\n *\n * @mixes Tech~SourceHandlerAdditions\n * @extends Tech\n */\nclass Html5 extends Tech {\n /**\n * Create an instance of this Tech.\n *\n * @param {Object} [options]\n * The key/value store of player options.\n *\n * @param {Function} [ready]\n * Callback function to call when the `HTML5` Tech is ready.\n */\n constructor(options, ready) {\n super(options, ready);\n const source = options.source;\n let crossoriginTracks = false;\n this.featuresVideoFrameCallback = this.featuresVideoFrameCallback && this.el_.tagName === 'VIDEO';\n\n // Set the source if one is provided\n // 1) Check if the source is new (if not, we want to keep the original so playback isn't interrupted)\n // 2) Check to see if the network state of the tag was failed at init, and if so, reset the source\n // anyway so the error gets fired.\n if (source && (this.el_.currentSrc !== source.src || options.tag && options.tag.initNetworkState_ === 3)) {\n this.setSource(source);\n } else {\n this.handleLateInit_(this.el_);\n }\n\n // setup sourceset after late sourceset/init\n if (options.enableSourceset) {\n this.setupSourcesetHandling_();\n }\n this.isScrubbing_ = false;\n if (this.el_.hasChildNodes()) {\n const nodes = this.el_.childNodes;\n let nodesLength = nodes.length;\n const removeNodes = [];\n while (nodesLength--) {\n const node = nodes[nodesLength];\n const nodeName = node.nodeName.toLowerCase();\n if (nodeName === 'track') {\n if (!this.featuresNativeTextTracks) {\n // Empty video tag tracks so the built-in player 
doesn't use them also.\n // This may not be fast enough to stop HTML5 browsers from reading the tags\n // so we'll need to turn off any default tracks if we're manually doing\n // captions and subtitles. videoElement.textTracks\n removeNodes.push(node);\n } else {\n // store HTMLTrackElement and TextTrack to remote list\n this.remoteTextTrackEls().addTrackElement_(node);\n this.remoteTextTracks().addTrack(node.track);\n this.textTracks().addTrack(node.track);\n if (!crossoriginTracks && !this.el_.hasAttribute('crossorigin') && isCrossOrigin(node.src)) {\n crossoriginTracks = true;\n }\n }\n }\n }\n for (let i = 0; i < removeNodes.length; i++) {\n this.el_.removeChild(removeNodes[i]);\n }\n }\n this.proxyNativeTracks_();\n if (this.featuresNativeTextTracks && crossoriginTracks) {\n log$1.warn('Text Tracks are being loaded from another origin but the crossorigin attribute isn\\'t used.\\n' + 'This may prevent text tracks from loading.');\n }\n\n // prevent iOS Safari from disabling metadata text tracks during native playback\n this.restoreMetadataTracksInIOSNativePlayer_();\n\n // Determine if native controls should be used\n // Our goal should be to get the custom controls on mobile solid everywhere\n // so we can remove this all together. Right now this will block custom\n // controls on touch enabled laptops like the Chrome Pixel\n if ((TOUCH_ENABLED || IS_IPHONE) && options.nativeControlsForTouch === true) {\n this.setControls(true);\n }\n\n // on iOS, we want to proxy `webkitbeginfullscreen` and `webkitendfullscreen`\n // into a `fullscreenchange` event\n this.proxyWebkitFullscreen_();\n this.triggerReady();\n }\n\n /**\n * Dispose of `HTML5` media element and remove all tracks.\n */\n dispose() {\n if (this.el_ && this.el_.resetSourceset_) {\n this.el_.resetSourceset_();\n }\n Html5.disposeMediaElement(this.el_);\n this.options_ = null;\n\n // tech will handle clearing of the emulated track list\n super.dispose();\n }\n\n /**\n * Modify the media element so that we can detect when\n * the source is changed. Fires `sourceset` just after the source has changed\n */\n setupSourcesetHandling_() {\n setupSourceset(this);\n }\n\n /**\n * When a captions track is enabled in the iOS Safari native player, all other\n * tracks are disabled (including metadata tracks), which nulls all of their\n * associated cue points. 
This will restore metadata tracks to their pre-fullscreen\n * state in those cases so that cue points are not needlessly lost.\n *\n * @private\n */\n restoreMetadataTracksInIOSNativePlayer_() {\n const textTracks = this.textTracks();\n let metadataTracksPreFullscreenState;\n\n // captures a snapshot of every metadata track's current state\n const takeMetadataTrackSnapshot = () => {\n metadataTracksPreFullscreenState = [];\n for (let i = 0; i < textTracks.length; i++) {\n const track = textTracks[i];\n if (track.kind === 'metadata') {\n metadataTracksPreFullscreenState.push({\n track,\n storedMode: track.mode\n });\n }\n }\n };\n\n // snapshot each metadata track's initial state, and update the snapshot\n // each time there is a track 'change' event\n takeMetadataTrackSnapshot();\n textTracks.addEventListener('change', takeMetadataTrackSnapshot);\n this.on('dispose', () => textTracks.removeEventListener('change', takeMetadataTrackSnapshot));\n const restoreTrackMode = () => {\n for (let i = 0; i < metadataTracksPreFullscreenState.length; i++) {\n const storedTrack = metadataTracksPreFullscreenState[i];\n if (storedTrack.track.mode === 'disabled' && storedTrack.track.mode !== storedTrack.storedMode) {\n storedTrack.track.mode = storedTrack.storedMode;\n }\n }\n // we only want this handler to be executed on the first 'change' event\n textTracks.removeEventListener('change', restoreTrackMode);\n };\n\n // when we enter fullscreen playback, stop updating the snapshot and\n // restore all track modes to their pre-fullscreen state\n this.on('webkitbeginfullscreen', () => {\n textTracks.removeEventListener('change', takeMetadataTrackSnapshot);\n\n // remove the listener before adding it just in case it wasn't previously removed\n textTracks.removeEventListener('change', restoreTrackMode);\n textTracks.addEventListener('change', restoreTrackMode);\n });\n\n // start updating the snapshot again after leaving fullscreen\n this.on('webkitendfullscreen', () => {\n // remove the listener before adding it just in case it wasn't previously removed\n textTracks.removeEventListener('change', takeMetadataTrackSnapshot);\n textTracks.addEventListener('change', takeMetadataTrackSnapshot);\n\n // remove the restoreTrackMode handler in case it wasn't triggered during fullscreen playback\n textTracks.removeEventListener('change', restoreTrackMode);\n });\n }\n\n /**\n * Attempt to force override of tracks for the given type\n *\n * @param {string} type - Track type to override, possible values include 'Audio',\n * 'Video', and 'Text'.\n * @param {boolean} override - If set to true native audio/video will be overridden,\n * otherwise native audio/video will potentially be used.\n * @private\n */\n overrideNative_(type, override) {\n // If there is no behavioral change don't add/remove listeners\n if (override !== this[`featuresNative${type}Tracks`]) {\n return;\n }\n const lowerCaseType = type.toLowerCase();\n if (this[`${lowerCaseType}TracksListeners_`]) {\n Object.keys(this[`${lowerCaseType}TracksListeners_`]).forEach(eventName => {\n const elTracks = this.el()[`${lowerCaseType}Tracks`];\n elTracks.removeEventListener(eventName, this[`${lowerCaseType}TracksListeners_`][eventName]);\n });\n }\n this[`featuresNative${type}Tracks`] = !override;\n this[`${lowerCaseType}TracksListeners_`] = null;\n this.proxyNativeTracksForType_(lowerCaseType);\n }\n\n /**\n * Attempt to force override of native audio tracks.\n *\n * @param {boolean} override - If set to true native audio will be overridden,\n * otherwise native 
audio will potentially be used.\n */\n overrideNativeAudioTracks(override) {\n this.overrideNative_('Audio', override);\n }\n\n /**\n * Attempt to force override of native video tracks.\n *\n * @param {boolean} override - If set to true native video will be overridden,\n * otherwise native video will potentially be used.\n */\n overrideNativeVideoTracks(override) {\n this.overrideNative_('Video', override);\n }\n\n /**\n * Proxy native track list events for the given type to our track\n * lists if the browser we are playing in supports that type of track list.\n *\n * @param {string} name - Track type; values include 'audio', 'video', and 'text'\n * @private\n */\n proxyNativeTracksForType_(name) {\n const props = NORMAL[name];\n const elTracks = this.el()[props.getterName];\n const techTracks = this[props.getterName]();\n if (!this[`featuresNative${props.capitalName}Tracks`] || !elTracks || !elTracks.addEventListener) {\n return;\n }\n const listeners = {\n change: e => {\n const event = {\n type: 'change',\n target: techTracks,\n currentTarget: techTracks,\n srcElement: techTracks\n };\n techTracks.trigger(event);\n\n // if we are a text track change event, we should also notify the\n // remote text track list. This can potentially cause a false positive\n // if we were to get a change event on a non-remote track and\n // we triggered the event on the remote text track list which doesn't\n // contain that track. However, best practices mean looping through the\n // list of tracks and searching for the appropriate mode value, so,\n // this shouldn't pose an issue\n if (name === 'text') {\n this[REMOTE.remoteText.getterName]().trigger(event);\n }\n },\n addtrack(e) {\n techTracks.addTrack(e.track);\n },\n removetrack(e) {\n techTracks.removeTrack(e.track);\n }\n };\n const removeOldTracks = function () {\n const removeTracks = [];\n for (let i = 0; i < techTracks.length; i++) {\n let found = false;\n for (let j = 0; j < elTracks.length; j++) {\n if (elTracks[j] === techTracks[i]) {\n found = true;\n break;\n }\n }\n if (!found) {\n removeTracks.push(techTracks[i]);\n }\n }\n while (removeTracks.length) {\n techTracks.removeTrack(removeTracks.shift());\n }\n };\n this[props.getterName + 'Listeners_'] = listeners;\n Object.keys(listeners).forEach(eventName => {\n const listener = listeners[eventName];\n elTracks.addEventListener(eventName, listener);\n this.on('dispose', e => elTracks.removeEventListener(eventName, listener));\n });\n\n // Remove (native) tracks that are not used anymore\n this.on('loadstart', removeOldTracks);\n this.on('dispose', e => this.off('loadstart', removeOldTracks));\n }\n\n /**\n * Proxy all native track list events to our track lists if the browser we are playing\n * in supports that type of track list.\n *\n * @private\n */\n proxyNativeTracks_() {\n NORMAL.names.forEach(name => {\n this.proxyNativeTracksForType_(name);\n });\n }\n\n /**\n * Create the `Html5` Tech's DOM element.\n *\n * @return {Element}\n * The element that gets created.\n */\n createEl() {\n let el = this.options_.tag;\n\n // Check if this browser supports moving the element into the box.\n // On the iPhone video will break if you move the element,\n // So we have to create a brand new element.\n // If we ingested the player div, we do not need to move the media element.\n if (!el || !(this.options_.playerElIngest || this.movingMediaElementInDOM)) {\n // If the original tag is still there, clone and remove it.\n if (el) {\n const clone = el.cloneNode(true);\n if (el.parentNode) {\n 
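// put the clone in the original tag's place; the original element is disposed of just below\n 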
el.parentNode.insertBefore(clone, el);\n }\n Html5.disposeMediaElement(el);\n el = clone;\n } else {\n el = document.createElement('video');\n\n // determine if native controls should be used\n const tagAttributes = this.options_.tag && getAttributes(this.options_.tag);\n const attributes = merge$1({}, tagAttributes);\n if (!TOUCH_ENABLED || this.options_.nativeControlsForTouch !== true) {\n delete attributes.controls;\n }\n setAttributes(el, Object.assign(attributes, {\n id: this.options_.techId,\n class: 'vjs-tech'\n }));\n }\n el.playerId = this.options_.playerId;\n }\n if (typeof this.options_.preload !== 'undefined') {\n setAttribute(el, 'preload', this.options_.preload);\n }\n if (this.options_.disablePictureInPicture !== undefined) {\n el.disablePictureInPicture = this.options_.disablePictureInPicture;\n }\n\n // Update specific tag settings, in case they were overridden\n // `autoplay` has to be *last* so that `muted` and `playsinline` are present\n // when iOS/Safari or other browsers attempt to autoplay.\n const settingsAttrs = ['loop', 'muted', 'playsinline', 'autoplay'];\n for (let i = 0; i < settingsAttrs.length; i++) {\n const attr = settingsAttrs[i];\n const value = this.options_[attr];\n if (typeof value !== 'undefined') {\n if (value) {\n setAttribute(el, attr, attr);\n } else {\n removeAttribute(el, attr);\n }\n el[attr] = value;\n }\n }\n return el;\n }\n\n /**\n * This will be triggered if the loadstart event has already fired, before videojs was\n * ready. Two known examples of when this can happen are:\n * 1. If we're loading the playback object after it has started loading\n * 2. The media is already playing the (often with autoplay on) then\n *\n * This function will fire another loadstart so that videojs can catchup.\n *\n * @fires Tech#loadstart\n *\n * @return {undefined}\n * returns nothing.\n */\n handleLateInit_(el) {\n if (el.networkState === 0 || el.networkState === 3) {\n // The video element hasn't started loading the source yet\n // or didn't find a source\n return;\n }\n if (el.readyState === 0) {\n // NetworkState is set synchronously BUT loadstart is fired at the\n // end of the current stack, usually before setInterval(fn, 0).\n // So at this point we know loadstart may have already fired or is\n // about to fire, and either way the player hasn't seen it yet.\n // We don't want to fire loadstart prematurely here and cause a\n // double loadstart so we'll wait and see if it happens between now\n // and the next loop, and fire it if not.\n // HOWEVER, we also want to make sure it fires before loadedmetadata\n // which could also happen between now and the next loop, so we'll\n // watch for that also.\n let loadstartFired = false;\n const setLoadstartFired = function () {\n loadstartFired = true;\n };\n this.on('loadstart', setLoadstartFired);\n const triggerLoadstart = function () {\n // We did miss the original loadstart. Make sure the player\n // sees loadstart before loadedmetadata\n if (!loadstartFired) {\n this.trigger('loadstart');\n }\n };\n this.on('loadedmetadata', triggerLoadstart);\n this.ready(function () {\n this.off('loadstart', setLoadstartFired);\n this.off('loadedmetadata', triggerLoadstart);\n if (!loadstartFired) {\n // We did miss the original native loadstart. 
Fire it now.\n this.trigger('loadstart');\n }\n });\n return;\n }\n\n // From here on we know that loadstart already fired and we missed it.\n // The other readyState events aren't as much of a problem if we double\n // them, so not going to go to as much trouble as loadstart to prevent\n // that unless we find reason to.\n const eventsToTrigger = ['loadstart'];\n\n // loadedmetadata: newly equal to HAVE_METADATA (1) or greater\n eventsToTrigger.push('loadedmetadata');\n\n // loadeddata: newly increased to HAVE_CURRENT_DATA (2) or greater\n if (el.readyState >= 2) {\n eventsToTrigger.push('loadeddata');\n }\n\n // canplay: newly increased to HAVE_FUTURE_DATA (3) or greater\n if (el.readyState >= 3) {\n eventsToTrigger.push('canplay');\n }\n\n // canplaythrough: newly equal to HAVE_ENOUGH_DATA (4)\n if (el.readyState >= 4) {\n eventsToTrigger.push('canplaythrough');\n }\n\n // We still need to give the player time to add event listeners\n this.ready(function () {\n eventsToTrigger.forEach(function (type) {\n this.trigger(type);\n }, this);\n });\n }\n\n /**\n * Set whether we are scrubbing or not.\n * This is used to decide whether we should use `fastSeek` or not.\n * `fastSeek` is used to provide trick play on Safari browsers.\n *\n * @param {boolean} isScrubbing\n * - true for we are currently scrubbing\n * - false for we are no longer scrubbing\n */\n setScrubbing(isScrubbing) {\n this.isScrubbing_ = isScrubbing;\n }\n\n /**\n * Get whether we are scrubbing or not.\n *\n * @return {boolean} isScrubbing\n * - true for we are currently scrubbing\n * - false for we are no longer scrubbing\n */\n scrubbing() {\n return this.isScrubbing_;\n }\n\n /**\n * Set current time for the `HTML5` tech.\n *\n * @param {number} seconds\n * Set the current time of the media to this.\n */\n setCurrentTime(seconds) {\n try {\n if (this.isScrubbing_ && this.el_.fastSeek && IS_ANY_SAFARI) {\n this.el_.fastSeek(seconds);\n } else {\n this.el_.currentTime = seconds;\n }\n } catch (e) {\n log$1(e, 'Video is not ready. 
(Video.js)');\n // this.warning(VideoJS.warnings.videoNotReady);\n }\n }\n\n /**\n * Get the current duration of the HTML5 media element.\n *\n * @return {number}\n * The duration of the media or 0 if there is no duration.\n */\n duration() {\n // Android Chrome will report duration as Infinity for VOD HLS until after\n // playback has started, which triggers the live display erroneously.\n // Return NaN if playback has not started and trigger a durationupdate once\n // the duration can be reliably known.\n if (this.el_.duration === Infinity && IS_ANDROID && IS_CHROME && this.el_.currentTime === 0) {\n // Wait for the first `timeupdate` with currentTime > 0 - there may be\n // several with 0\n const checkProgress = () => {\n if (this.el_.currentTime > 0) {\n // Trigger durationchange for genuinely live video\n if (this.el_.duration === Infinity) {\n this.trigger('durationchange');\n }\n this.off('timeupdate', checkProgress);\n }\n };\n this.on('timeupdate', checkProgress);\n return NaN;\n }\n return this.el_.duration || NaN;\n }\n\n /**\n * Get the current width of the HTML5 media element.\n *\n * @return {number}\n * The width of the HTML5 media element.\n */\n width() {\n return this.el_.offsetWidth;\n }\n\n /**\n * Get the current height of the HTML5 media element.\n *\n * @return {number}\n * The height of the HTML5 media element.\n */\n height() {\n return this.el_.offsetHeight;\n }\n\n /**\n * Proxy iOS `webkitbeginfullscreen` and `webkitendfullscreen` into\n * `fullscreenchange` event.\n *\n * @private\n * @fires fullscreenchange\n * @listens webkitendfullscreen\n * @listens webkitbeginfullscreen\n * @listens webkitbeginfullscreen\n */\n proxyWebkitFullscreen_() {\n if (!('webkitDisplayingFullscreen' in this.el_)) {\n return;\n }\n const endFn = function () {\n this.trigger('fullscreenchange', {\n isFullscreen: false\n });\n // Safari will sometimes set controls on the videoelement when existing fullscreen.\n if (this.el_.controls && !this.options_.nativeControlsForTouch && this.controls()) {\n this.el_.controls = false;\n }\n };\n const beginFn = function () {\n if ('webkitPresentationMode' in this.el_ && this.el_.webkitPresentationMode !== 'picture-in-picture') {\n this.one('webkitendfullscreen', endFn);\n this.trigger('fullscreenchange', {\n isFullscreen: true,\n // set a flag in case another tech triggers fullscreenchange\n nativeIOSFullscreen: true\n });\n }\n };\n this.on('webkitbeginfullscreen', beginFn);\n this.on('dispose', () => {\n this.off('webkitbeginfullscreen', beginFn);\n this.off('webkitendfullscreen', endFn);\n });\n }\n\n /**\n * Check if fullscreen is supported on the video el.\n *\n * @return {boolean}\n * - True if fullscreen is supported.\n * - False if fullscreen is not supported.\n */\n supportsFullScreen() {\n return typeof this.el_.webkitEnterFullScreen === 'function';\n }\n\n /**\n * Request that the `HTML5` Tech enter fullscreen.\n */\n enterFullScreen() {\n const video = this.el_;\n if (video.paused && video.networkState <= video.HAVE_METADATA) {\n // attempt to prime the video element for programmatic access\n // this isn't necessary on the desktop but shouldn't hurt\n silencePromise(this.el_.play());\n\n // playing and pausing synchronously during the transition to fullscreen\n // can get iOS ~6.1 devices into a play/pause loop\n this.setTimeout(function () {\n video.pause();\n try {\n video.webkitEnterFullScreen();\n } catch (e) {\n this.trigger('fullscreenerror', e);\n }\n }, 0);\n } else {\n try {\n video.webkitEnterFullScreen();\n } catch (e) {\n 
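// surface the native failure to listeners as a 'fullscreenerror' event\n 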
this.trigger('fullscreenerror', e);\n }\n }\n }\n\n /**\n * Request that the `HTML5` Tech exit fullscreen.\n */\n exitFullScreen() {\n if (!this.el_.webkitDisplayingFullscreen) {\n this.trigger('fullscreenerror', new Error('The video is not fullscreen'));\n return;\n }\n this.el_.webkitExitFullScreen();\n }\n\n /**\n * Create a floating video window always on top of other windows so that users may\n * continue consuming media while they interact with other content sites, or\n * applications on their device.\n *\n * @see [Spec]{@link https://wicg.github.io/picture-in-picture}\n *\n * @return {Promise}\n * A promise with a Picture-in-Picture window.\n */\n requestPictureInPicture() {\n return this.el_.requestPictureInPicture();\n }\n\n /**\n * Native requestVideoFrameCallback if supported by browser/tech, or fallback\n * Don't use rVCF on Safari when DRM is playing, as it doesn't fire\n * Needs to be checked later than the constructor\n * This will be a false positive for clear sources loaded after a Fairplay source\n *\n * @param {function} cb function to call\n * @return {number} id of request\n */\n requestVideoFrameCallback(cb) {\n if (this.featuresVideoFrameCallback && !this.el_.webkitKeys) {\n return this.el_.requestVideoFrameCallback(cb);\n }\n return super.requestVideoFrameCallback(cb);\n }\n\n /**\n * Native or fallback requestVideoFrameCallback\n *\n * @param {number} id request id to cancel\n */\n cancelVideoFrameCallback(id) {\n if (this.featuresVideoFrameCallback && !this.el_.webkitKeys) {\n this.el_.cancelVideoFrameCallback(id);\n } else {\n super.cancelVideoFrameCallback(id);\n }\n }\n\n /**\n * A getter/setter for the `Html5` Tech's source object.\n * > Note: Please use {@link Html5#setSource}\n *\n * @param {Tech~SourceObject} [src]\n * The source object you want to set on the `HTML5` techs element.\n *\n * @return {Tech~SourceObject|undefined}\n * - The current source object when a source is not passed in.\n * - undefined when setting\n *\n * @deprecated Since version 5.\n */\n src(src) {\n if (src === undefined) {\n return this.el_.src;\n }\n\n // Setting src through `src` instead of `setSrc` will be deprecated\n this.setSrc(src);\n }\n\n /**\n * Reset the tech by removing all sources and then calling\n * {@link Html5.resetMediaElement}.\n */\n reset() {\n Html5.resetMediaElement(this.el_);\n }\n\n /**\n * Get the current source on the `HTML5` Tech. Falls back to returning the source from\n * the HTML5 media element.\n *\n * @return {Tech~SourceObject}\n * The current source object from the HTML5 tech. 
With a fallback to the\n * elements source.\n */\n currentSrc() {\n if (this.currentSource_) {\n return this.currentSource_.src;\n }\n return this.el_.currentSrc;\n }\n\n /**\n * Set controls attribute for the HTML5 media Element.\n *\n * @param {string} val\n * Value to set the controls attribute to\n */\n setControls(val) {\n this.el_.controls = !!val;\n }\n\n /**\n * Create and returns a remote {@link TextTrack} object.\n *\n * @param {string} kind\n * `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata)\n *\n * @param {string} [label]\n * Label to identify the text track\n *\n * @param {string} [language]\n * Two letter language abbreviation\n *\n * @return {TextTrack}\n * The TextTrack that gets created.\n */\n addTextTrack(kind, label, language) {\n if (!this.featuresNativeTextTracks) {\n return super.addTextTrack(kind, label, language);\n }\n return this.el_.addTextTrack(kind, label, language);\n }\n\n /**\n * Creates either native TextTrack or an emulated TextTrack depending\n * on the value of `featuresNativeTextTracks`\n *\n * @param {Object} options\n * The object should contain the options to initialize the TextTrack with.\n *\n * @param {string} [options.kind]\n * `TextTrack` kind (subtitles, captions, descriptions, chapters, or metadata).\n *\n * @param {string} [options.label]\n * Label to identify the text track\n *\n * @param {string} [options.language]\n * Two letter language abbreviation.\n *\n * @param {boolean} [options.default]\n * Default this track to on.\n *\n * @param {string} [options.id]\n * The internal id to assign this track.\n *\n * @param {string} [options.src]\n * A source url for the track.\n *\n * @return {HTMLTrackElement}\n * The track element that gets created.\n */\n createRemoteTextTrack(options) {\n if (!this.featuresNativeTextTracks) {\n return super.createRemoteTextTrack(options);\n }\n const htmlTrackElement = document.createElement('track');\n if (options.kind) {\n htmlTrackElement.kind = options.kind;\n }\n if (options.label) {\n htmlTrackElement.label = options.label;\n }\n if (options.language || options.srclang) {\n htmlTrackElement.srclang = options.language || options.srclang;\n }\n if (options.default) {\n htmlTrackElement.default = options.default;\n }\n if (options.id) {\n htmlTrackElement.id = options.id;\n }\n if (options.src) {\n htmlTrackElement.src = options.src;\n }\n return htmlTrackElement;\n }\n\n /**\n * Creates a remote text track object and returns an html track element.\n *\n * @param {Object} options The object should contain values for\n * kind, language, label, and src (location of the WebVTT file)\n * @param {boolean} [manualCleanup=false] if set to true, the TextTrack\n * will not be removed from the TextTrackList and HtmlTrackElementList\n * after a source change\n * @return {HTMLTrackElement} An Html Track Element.\n * This can be an emulated {@link HTMLTrackElement} or a native one.\n *\n */\n addRemoteTextTrack(options, manualCleanup) {\n const htmlTrackElement = super.addRemoteTextTrack(options, manualCleanup);\n if (this.featuresNativeTextTracks) {\n this.el().appendChild(htmlTrackElement);\n }\n return htmlTrackElement;\n }\n\n /**\n * Remove remote `TextTrack` from `TextTrackList` object\n *\n * @param {TextTrack} track\n * `TextTrack` object to remove\n */\n removeRemoteTextTrack(track) {\n super.removeRemoteTextTrack(track);\n if (this.featuresNativeTextTracks) {\n const tracks = this.$$('track');\n let i = tracks.length;\n while (i--) {\n if (track === tracks[i] || track === 
tracks[i].track) {\n this.el().removeChild(tracks[i]);\n }\n }\n }\n }\n\n /**\n * Gets available media playback quality metrics as specified by the W3C's Media\n * Playback Quality API.\n *\n * @see [Spec]{@link https://wicg.github.io/media-playback-quality}\n *\n * @return {Object}\n * An object with supported media playback quality metrics\n */\n getVideoPlaybackQuality() {\n if (typeof this.el().getVideoPlaybackQuality === 'function') {\n return this.el().getVideoPlaybackQuality();\n }\n const videoPlaybackQuality = {};\n if (typeof this.el().webkitDroppedFrameCount !== 'undefined' && typeof this.el().webkitDecodedFrameCount !== 'undefined') {\n videoPlaybackQuality.droppedVideoFrames = this.el().webkitDroppedFrameCount;\n videoPlaybackQuality.totalVideoFrames = this.el().webkitDecodedFrameCount;\n }\n if (window$1.performance) {\n videoPlaybackQuality.creationTime = window$1.performance.now();\n }\n return videoPlaybackQuality;\n }\n}\n\n/* HTML5 Support Testing ---------------------------------------------------- */\n\n/**\n * Element for testing browser HTML5 media capabilities\n *\n * @type {Element}\n * @constant\n * @private\n */\ndefineLazyProperty(Html5, 'TEST_VID', function () {\n if (!isReal()) {\n return;\n }\n const video = document.createElement('video');\n const track = document.createElement('track');\n track.kind = 'captions';\n track.srclang = 'en';\n track.label = 'English';\n video.appendChild(track);\n return video;\n});\n\n/**\n * Check if HTML5 media is supported by this browser/device.\n *\n * @return {boolean}\n * - True if HTML5 media is supported.\n * - False if HTML5 media is not supported.\n */\nHtml5.isSupported = function () {\n // IE with no Media Player is a LIAR! (#984)\n try {\n Html5.TEST_VID.volume = 0.5;\n } catch (e) {\n return false;\n }\n return !!(Html5.TEST_VID && Html5.TEST_VID.canPlayType);\n};\n\n/**\n * Check if the tech can support the given type\n *\n * @param {string} type\n * The mimetype to check\n * @return {string} 'probably', 'maybe', or '' (empty string)\n */\nHtml5.canPlayType = function (type) {\n return Html5.TEST_VID.canPlayType(type);\n};\n\n/**\n * Check if the tech can support the given source\n *\n * @param {Object} srcObj\n * The source object\n * @param {Object} options\n * The options passed to the tech\n * @return {string} 'probably', 'maybe', or '' (empty string)\n */\nHtml5.canPlaySource = function (srcObj, options) {\n return Html5.canPlayType(srcObj.type);\n};\n\n/**\n * Check if the volume can be changed in this browser/device.\n * Volume cannot be changed in a lot of mobile devices.\n * Specifically, it can't be changed from 1 on iOS.\n *\n * @return {boolean}\n * - True if volume can be controlled\n * - False otherwise\n */\nHtml5.canControlVolume = function () {\n // IE will error if Windows Media Player not installed #3315\n try {\n const volume = Html5.TEST_VID.volume;\n Html5.TEST_VID.volume = volume / 2 + 0.1;\n const canControl = volume !== Html5.TEST_VID.volume;\n\n // With the introduction of iOS 15, there are cases where the volume is read as\n // changed but reverts back to its original state at the start of the next tick.\n // To determine whether volume can be controlled on iOS,\n // a timeout is set and the volume is checked asynchronously.\n // Since `features` doesn't currently work asynchronously, the value is manually set.\n if (canControl && IS_IOS) {\n window$1.setTimeout(() => {\n if (Html5 && Html5.prototype) {\n Html5.prototype.featuresVolumeControl = volume !== Html5.TEST_VID.volume;\n }\n 
});\n\n // default iOS to false, which will be updated in the timeout above.\n return false;\n }\n return canControl;\n } catch (e) {\n return false;\n }\n};\n\n/**\n * Check if the volume can be muted in this browser/device.\n * Some devices, e.g. iOS, don't allow changing volume\n * but permits muting/unmuting.\n *\n * @return {boolean}\n * - True if volume can be muted\n * - False otherwise\n */\nHtml5.canMuteVolume = function () {\n try {\n const muted = Html5.TEST_VID.muted;\n\n // in some versions of iOS muted property doesn't always\n // work, so we want to set both property and attribute\n Html5.TEST_VID.muted = !muted;\n if (Html5.TEST_VID.muted) {\n setAttribute(Html5.TEST_VID, 'muted', 'muted');\n } else {\n removeAttribute(Html5.TEST_VID, 'muted', 'muted');\n }\n return muted !== Html5.TEST_VID.muted;\n } catch (e) {\n return false;\n }\n};\n\n/**\n * Check if the playback rate can be changed in this browser/device.\n *\n * @return {boolean}\n * - True if playback rate can be controlled\n * - False otherwise\n */\nHtml5.canControlPlaybackRate = function () {\n // Playback rate API is implemented in Android Chrome, but doesn't do anything\n // https://github.com/videojs/video.js/issues/3180\n if (IS_ANDROID && IS_CHROME && CHROME_VERSION < 58) {\n return false;\n }\n // IE will error if Windows Media Player not installed #3315\n try {\n const playbackRate = Html5.TEST_VID.playbackRate;\n Html5.TEST_VID.playbackRate = playbackRate / 2 + 0.1;\n return playbackRate !== Html5.TEST_VID.playbackRate;\n } catch (e) {\n return false;\n }\n};\n\n/**\n * Check if we can override a video/audio elements attributes, with\n * Object.defineProperty.\n *\n * @return {boolean}\n * - True if builtin attributes can be overridden\n * - False otherwise\n */\nHtml5.canOverrideAttributes = function () {\n // if we cannot overwrite the src/innerHTML property, there is no support\n // iOS 7 safari for instance cannot do this.\n try {\n const noop = () => {};\n Object.defineProperty(document.createElement('video'), 'src', {\n get: noop,\n set: noop\n });\n Object.defineProperty(document.createElement('audio'), 'src', {\n get: noop,\n set: noop\n });\n Object.defineProperty(document.createElement('video'), 'innerHTML', {\n get: noop,\n set: noop\n });\n Object.defineProperty(document.createElement('audio'), 'innerHTML', {\n get: noop,\n set: noop\n });\n } catch (e) {\n return false;\n }\n return true;\n};\n\n/**\n * Check to see if native `TextTrack`s are supported by this browser/device.\n *\n * @return {boolean}\n * - True if native `TextTrack`s are supported.\n * - False otherwise\n */\nHtml5.supportsNativeTextTracks = function () {\n return IS_ANY_SAFARI || IS_IOS && IS_CHROME;\n};\n\n/**\n * Check to see if native `VideoTrack`s are supported by this browser/device\n *\n * @return {boolean}\n * - True if native `VideoTrack`s are supported.\n * - False otherwise\n */\nHtml5.supportsNativeVideoTracks = function () {\n return !!(Html5.TEST_VID && Html5.TEST_VID.videoTracks);\n};\n\n/**\n * Check to see if native `AudioTrack`s are supported by this browser/device\n *\n * @return {boolean}\n * - True if native `AudioTrack`s are supported.\n * - False otherwise\n */\nHtml5.supportsNativeAudioTracks = function () {\n return !!(Html5.TEST_VID && Html5.TEST_VID.audioTracks);\n};\n\n/**\n * An array of events available on the Html5 tech.\n *\n * @private\n * @type {Array}\n */\nHtml5.Events = ['loadstart', 'suspend', 'abort', 'error', 'emptied', 'stalled', 'loadedmetadata', 'loadeddata', 'canplay', 
'canplaythrough', 'playing', 'waiting', 'seeking', 'seeked', 'ended', 'durationchange', 'timeupdate', 'progress', 'play', 'pause', 'ratechange', 'resize', 'volumechange'];\n\n/**\n * Boolean indicating whether the `Tech` supports volume control.\n *\n * @type {boolean}\n * @default {@link Html5.canControlVolume}\n */\n/**\n * Boolean indicating whether the `Tech` supports muting volume.\n *\n * @type {boolean}\n * @default {@link Html5.canMuteVolume}\n */\n\n/**\n * Boolean indicating whether the `Tech` supports changing the speed at which the media\n * plays. Examples:\n * - Set player to play 2x (twice) as fast\n * - Set player to play 0.5x (half) as fast\n *\n * @type {boolean}\n * @default {@link Html5.canControlPlaybackRate}\n */\n\n/**\n * Boolean indicating whether the `Tech` supports the `sourceset` event.\n *\n * @type {boolean}\n * @default\n */\n/**\n * Boolean indicating whether the `HTML5` tech currently supports native `TextTrack`s.\n *\n * @type {boolean}\n * @default {@link Html5.supportsNativeTextTracks}\n */\n/**\n * Boolean indicating whether the `HTML5` tech currently supports native `VideoTrack`s.\n *\n * @type {boolean}\n * @default {@link Html5.supportsNativeVideoTracks}\n */\n/**\n * Boolean indicating whether the `HTML5` tech currently supports native `AudioTrack`s.\n *\n * @type {boolean}\n * @default {@link Html5.supportsNativeAudioTracks}\n */\n[['featuresMuteControl', 'canMuteVolume'], ['featuresPlaybackRate', 'canControlPlaybackRate'], ['featuresSourceset', 'canOverrideAttributes'], ['featuresNativeTextTracks', 'supportsNativeTextTracks'], ['featuresNativeVideoTracks', 'supportsNativeVideoTracks'], ['featuresNativeAudioTracks', 'supportsNativeAudioTracks']].forEach(function ([key, fn]) {\n defineLazyProperty(Html5.prototype, key, () => Html5[fn](), true);\n});\nHtml5.prototype.featuresVolumeControl = Html5.canControlVolume();\n\n/**\n * Boolean indicating whether the `HTML5` tech currently supports the media element\n * moving in the DOM. iOS breaks if you move the media element, so this is set this to\n * false there. Everywhere else this should be true.\n *\n * @type {boolean}\n * @default\n */\nHtml5.prototype.movingMediaElementInDOM = !IS_IOS;\n\n// TODO: Previous comment: No longer appears to be used. Can probably be removed.\n// Is this true?\n/**\n * Boolean indicating whether the `HTML5` tech currently supports automatic media resize\n * when going into fullscreen.\n *\n * @type {boolean}\n * @default\n */\nHtml5.prototype.featuresFullscreenResize = true;\n\n/**\n * Boolean indicating whether the `HTML5` tech currently supports the progress event.\n * If this is false, manual `progress` events will be triggered instead.\n *\n * @type {boolean}\n * @default\n */\nHtml5.prototype.featuresProgressEvents = true;\n\n/**\n * Boolean indicating whether the `HTML5` tech currently supports the timeupdate event.\n * If this is false, manual `timeupdate` events will be triggered instead.\n *\n * @default\n */\nHtml5.prototype.featuresTimeupdateEvents = true;\n\n/**\n * Whether the HTML5 el supports `requestVideoFrameCallback`\n *\n * @type {boolean}\n */\nHtml5.prototype.featuresVideoFrameCallback = !!(Html5.TEST_VID && Html5.TEST_VID.requestVideoFrameCallback);\nHtml5.disposeMediaElement = function (el) {\n if (!el) {\n return;\n }\n if (el.parentNode) {\n el.parentNode.removeChild(el);\n }\n\n // remove any child track or source nodes to prevent their loading\n while (el.hasChildNodes()) {\n el.removeChild(el.firstChild);\n }\n\n // remove any src reference. 
not setting `src=''` because that causes a warning\n // in firefox\n el.removeAttribute('src');\n\n // force the media element to update its loading state by calling load()\n // however IE on Windows 7N has a bug that throws an error so need a try/catch (#793)\n if (typeof el.load === 'function') {\n // wrapping in an iife so it's not deoptimized (#1060#discussion_r10324473)\n (function () {\n try {\n el.load();\n } catch (e) {\n // not supported\n }\n })();\n }\n};\nHtml5.resetMediaElement = function (el) {\n if (!el) {\n return;\n }\n const sources = el.querySelectorAll('source');\n let i = sources.length;\n while (i--) {\n el.removeChild(sources[i]);\n }\n\n // remove any src reference.\n // not setting `src=''` because that throws an error\n el.removeAttribute('src');\n if (typeof el.load === 'function') {\n // wrapping in an iife so it's not deoptimized (#1060#discussion_r10324473)\n (function () {\n try {\n el.load();\n } catch (e) {\n // satisfy linter\n }\n })();\n }\n};\n\n/* Native HTML5 element property wrapping ----------------------------------- */\n// Wrap native boolean attributes with getters that check both property and attribute\n// The list is as followed:\n// muted, defaultMuted, autoplay, controls, loop, playsinline\n[\n/**\n * Get the value of `muted` from the media element. `muted` indicates\n * that the volume for the media should be set to silent. This does not actually change\n * the `volume` attribute.\n *\n * @method Html5#muted\n * @return {boolean}\n * - True if the value of `volume` should be ignored and the audio set to silent.\n * - False if the value of `volume` should be used.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-muted}\n */\n'muted',\n/**\n * Get the value of `defaultMuted` from the media element. `defaultMuted` indicates\n * whether the media should start muted or not. Only changes the default state of the\n * media. `muted` and `defaultMuted` can have different values. {@link Html5#muted} indicates the\n * current state.\n *\n * @method Html5#defaultMuted\n * @return {boolean}\n * - The value of `defaultMuted` from the media element.\n * - True indicates that the media should start muted.\n * - False indicates that the media should not start muted\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-defaultmuted}\n */\n'defaultMuted',\n/**\n * Get the value of `autoplay` from the media element. `autoplay` indicates\n * that the media should start to play as soon as the page is ready.\n *\n * @method Html5#autoplay\n * @return {boolean}\n * - The value of `autoplay` from the media element.\n * - True indicates that the media should start as soon as the page loads.\n * - False indicates that the media should not start as soon as the page loads.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-autoplay}\n */\n'autoplay',\n/**\n * Get the value of `controls` from the media element. `controls` indicates\n * whether the native media controls should be shown or hidden.\n *\n * @method Html5#controls\n * @return {boolean}\n * - The value of `controls` from the media element.\n * - True indicates that native controls should be showing.\n * - False indicates that native controls should be hidden.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-controls}\n */\n'controls',\n/**\n * Get the value of `loop` from the media element. 
`loop` indicates\n * that the media should return to the start of the media and continue playing once\n * it reaches the end.\n *\n * @method Html5#loop\n * @return {boolean}\n * - The value of `loop` from the media element.\n * - True indicates that playback should seek back to start once\n * the end of a media is reached.\n * - False indicates that playback should not loop back to the start when the\n * end of the media is reached.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-loop}\n */\n'loop',\n/**\n * Get the value of `playsinline` from the media element. `playsinline` indicates\n * to the browser that non-fullscreen playback is preferred when fullscreen\n * playback is the native default, such as in iOS Safari.\n *\n * @method Html5#playsinline\n * @return {boolean}\n * - The value of `playsinline` from the media element.\n * - True indicates that the media should play inline.\n * - False indicates that the media should not play inline.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/#attr-video-playsinline}\n */\n'playsinline'].forEach(function (prop) {\n Html5.prototype[prop] = function () {\n return this.el_[prop] || this.el_.hasAttribute(prop);\n };\n});\n\n// Wrap native boolean attributes with setters that set both property and attribute\n// The list is as follows:\n// setMuted, setDefaultMuted, setAutoplay, setLoop, setPlaysinline\n// setControls is special-cased above\n[\n/**\n * Set the value of `muted` on the media element. `muted` indicates that the current\n * audio level should be silent.\n *\n * @method Html5#setMuted\n * @param {boolean} muted\n * - True if the audio should be set to silent\n * - False otherwise\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-muted}\n */\n'muted',\n/**\n * Set the value of `defaultMuted` on the media element. `defaultMuted` indicates that the current\n * audio level should be silent, but will only affect the muted level on initial playback.\n *\n * @method Html5.prototype.setDefaultMuted\n * @param {boolean} defaultMuted\n * - True if the audio should be set to silent\n * - False otherwise\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-defaultmuted}\n */\n'defaultMuted',\n/**\n * Set the value of `autoplay` on the media element. `autoplay` indicates\n * that the media should start to play as soon as the page is ready.\n *\n * @method Html5#setAutoplay\n * @param {boolean} autoplay\n * - True indicates that the media should start as soon as the page loads.\n * - False indicates that the media should not start as soon as the page loads.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-autoplay}\n */\n'autoplay',\n/**\n * Set the value of `loop` on the media element. `loop` indicates\n * that the media should return to the start of the media and continue playing once\n * it reaches the end.\n *\n * @method Html5#setLoop\n * @param {boolean} loop\n * - True indicates that playback should seek back to start once\n * the end of a media is reached.\n * - False indicates that playback should not loop back to the start when the\n * end of the media is reached.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-loop}\n */\n'loop',\n/**\n * Set the value of `playsinline` on the media element. 
`playsinline` indicates\n * to the browser that non-fullscreen playback is preferred when fullscreen\n * playback is the native default, such as in iOS Safari.\n *\n * @method Html5#setPlaysinline\n * @param {boolean} playsinline\n * - True indicates that the media should play inline.\n * - False indicates that the media should not play inline.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/#attr-video-playsinline}\n */\n'playsinline'].forEach(function (prop) {\n Html5.prototype['set' + toTitleCase$1(prop)] = function (v) {\n this.el_[prop] = v;\n if (v) {\n this.el_.setAttribute(prop, prop);\n } else {\n this.el_.removeAttribute(prop);\n }\n };\n});\n\n// Wrap native properties with a getter\n// The list is as follows\n// paused, currentTime, buffered, volume, poster, preload, error, seeking\n// seekable, ended, playbackRate, defaultPlaybackRate, disablePictureInPicture\n// played, networkState, readyState, videoWidth, videoHeight, crossOrigin\n[\n/**\n * Get the value of `paused` from the media element. `paused` indicates whether the media element\n * is currently paused or not.\n *\n * @method Html5#paused\n * @return {boolean}\n * The value of `paused` from the media element.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-paused}\n */\n'paused',\n/**\n * Get the value of `currentTime` from the media element. `currentTime` indicates\n * the current second that the media is at in playback.\n *\n * @method Html5#currentTime\n * @return {number}\n * The value of `currentTime` from the media element.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-currenttime}\n */\n'currentTime',\n/**\n * Get the value of `buffered` from the media element. `buffered` is a `TimeRange`\n * object that represents the parts of the media that are already downloaded and\n * available for playback.\n *\n * @method Html5#buffered\n * @return {TimeRange}\n * The value of `buffered` from the media element.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-buffered}\n */\n'buffered',\n/**\n * Get the value of `volume` from the media element. `volume` indicates\n * the current playback volume of audio for a media. `volume` will be a value from 0\n * (silent) to 1 (loudest and default).\n *\n * @method Html5#volume\n * @return {number}\n * The value of `volume` from the media element. Value will be between 0-1.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-a-volume}\n */\n'volume',\n/**\n * Get the value of `poster` from the media element. `poster` indicates\n * the url of an image file that can/will be shown when no media data is available.\n *\n * @method Html5#poster\n * @return {string}\n * The value of `poster` from the media element. Value will be a url to an\n * image.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-video-poster}\n */\n'poster',\n/**\n * Get the value of `preload` from the media element. `preload` indicates\n * what should download before the media is interacted with. It can have the following\n * values:\n * - none: nothing should be downloaded\n * - metadata: poster and the first few frames of the media may be downloaded to get\n * media dimensions and other metadata\n * - auto: allow the media and metadata for the media to be downloaded before\n * interaction\n *\n * @method Html5#preload\n * @return {string}\n * The value of `preload` from the media element. 
Will be 'none', 'metadata',\n * or 'auto'.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-preload}\n */\n'preload',\n/**\n * Get the value of the `error` from the media element. `error` indicates any\n * MediaError that may have occurred during playback. If error returns null there is no\n * current error.\n *\n * @method Html5#error\n * @return {MediaError|null}\n * The value of `error` from the media element. Will be `MediaError` if there\n * is a current error and null otherwise.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-error}\n */\n'error',\n/**\n * Get the value of `seeking` from the media element. `seeking` indicates whether the\n * media is currently seeking to a new position or not.\n *\n * @method Html5#seeking\n * @return {boolean}\n * - The value of `seeking` from the media element.\n * - True indicates that the media is currently seeking to a new position.\n * - False indicates that the media is not seeking to a new position at this time.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-seeking}\n */\n'seeking',\n/**\n * Get the value of `seekable` from the media element. `seekable` returns a\n * `TimeRange` object indicating ranges of time that can currently be `seeked` to.\n *\n * @method Html5#seekable\n * @return {TimeRange}\n * The value of `seekable` from the media element. A `TimeRange` object\n * indicating the current ranges of time that can be seeked to.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-seekable}\n */\n'seekable',\n/**\n * Get the value of `ended` from the media element. `ended` indicates whether\n * the media has reached the end or not.\n *\n * @method Html5#ended\n * @return {boolean}\n * - The value of `ended` from the media element.\n * - True indicates that the media has ended.\n * - False indicates that the media has not ended.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-ended}\n */\n'ended',\n/**\n * Get the value of `playbackRate` from the media element. `playbackRate` indicates\n * the rate at which the media is currently playing back. Examples:\n * - if playbackRate is set to 2, media will play twice as fast.\n * - if playbackRate is set to 0.5, media will play half as fast.\n *\n * @method Html5#playbackRate\n * @return {number}\n * The value of `playbackRate` from the media element. A number indicating\n * the current playback speed of the media, where 1 is normal speed.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-playbackrate}\n */\n'playbackRate',\n/**\n * Get the value of `defaultPlaybackRate` from the media element. `defaultPlaybackRate` indicates\n * the rate at which the media is currently playing back. This value will not indicate the current\n * `playbackRate` after playback has started, use {@link Html5#playbackRate} for that.\n *\n * Examples:\n * - if defaultPlaybackRate is set to 2, media will play twice as fast.\n * - if defaultPlaybackRate is set to 0.5, media will play half as fast.\n *\n * @method Html5.prototype.defaultPlaybackRate\n * @return {number}\n * The value of `defaultPlaybackRate` from the media element. 
A number indicating\n * the current playback speed of the media, where 1 is normal speed.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-playbackrate}\n */\n'defaultPlaybackRate',\n/**\n * Get the value of 'disablePictureInPicture' from the video element.\n *\n * @method Html5#disablePictureInPicture\n * @return {boolean} value\n * - The value of `disablePictureInPicture` from the video element.\n * - True indicates that the video can't be played in Picture-In-Picture mode\n * - False indicates that the video can be played in Picture-In-Picture mode\n *\n * @see [Spec]{@link https://w3c.github.io/picture-in-picture/#disable-pip}\n */\n'disablePictureInPicture',\n/**\n * Get the value of `played` from the media element. `played` returns a `TimeRange`\n * object representing points in the media timeline that have been played.\n *\n * @method Html5#played\n * @return {TimeRange}\n * The value of `played` from the media element. A `TimeRange` object indicating\n * the ranges of time that have been played.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-played}\n */\n'played',\n/**\n * Get the value of `networkState` from the media element. `networkState` indicates\n * the current network state. It returns an enumeration from the following list:\n * - 0: NETWORK_EMPTY\n * - 1: NETWORK_IDLE\n * - 2: NETWORK_LOADING\n * - 3: NETWORK_NO_SOURCE\n *\n * @method Html5#networkState\n * @return {number}\n * The value of `networkState` from the media element. This will be a number\n * from the list in the description.\n *\n * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-networkstate}\n */\n'networkState',\n/**\n * Get the value of `readyState` from the media element. `readyState` indicates\n * the current state of the media element. It returns an enumeration from the\n * following list:\n * - 0: HAVE_NOTHING\n * - 1: HAVE_METADATA\n * - 2: HAVE_CURRENT_DATA\n * - 3: HAVE_FUTURE_DATA\n * - 4: HAVE_ENOUGH_DATA\n *\n * @method Html5#readyState\n * @return {number}\n * The value of `readyState` from the media element. This will be a number\n * from the list in the description.\n *\n * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#ready-states}\n */\n'readyState',\n/**\n * Get the value of `videoWidth` from the video element. `videoWidth` indicates\n * the current width of the video in css pixels.\n *\n * @method Html5#videoWidth\n * @return {number}\n * The value of `videoWidth` from the video element. This will be a number\n * in css pixels.\n *\n * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-video-videowidth}\n */\n'videoWidth',\n/**\n * Get the value of `videoHeight` from the video element. `videoHeight` indicates\n * the current height of the video in css pixels.\n *\n * @method Html5#videoHeight\n * @return {number}\n * The value of `videoHeight` from the video element. This will be a number\n * in css pixels.\n *\n * @see [Spec] {@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-video-videowidth}\n */\n'videoHeight',\n/**\n * Get the value of `crossOrigin` from the media element. 
`crossOrigin` indicates\n * to the browser whether it should send cookies along with the requests for the\n * different assets/playlists.\n *\n * @method Html5#crossOrigin\n * @return {string}\n * - anonymous indicates that the media should not send cookies.\n * - use-credentials indicates that the media should send cookies along with the requests.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/#attr-media-crossorigin}\n */\n'crossOrigin'].forEach(function (prop) {\n Html5.prototype[prop] = function () {\n return this.el_[prop];\n };\n});\n\n// Wrap native properties with a setter in this format:\n// set + toTitleCase(name)\n// The list is as follows:\n// setVolume, setSrc, setPoster, setPreload, setPlaybackRate, setDefaultPlaybackRate,\n// setDisablePictureInPicture, setCrossOrigin\n[\n/**\n * Set the value of `volume` on the media element. `volume` indicates the current\n * audio level as a percentage in decimal form. This means that 1 is 100%, 0.5 is 50%, and\n * so on.\n *\n * @method Html5#setVolume\n * @param {number} percentAsDecimal\n * The volume percent as a decimal. Valid range is from 0-1.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-a-volume}\n */\n'volume',\n/**\n * Set the value of `src` on the media element. `src` indicates the current\n * {@link Tech~SourceObject} for the media.\n *\n * @method Html5#setSrc\n * @param {Tech~SourceObject} src\n * The source object to set as the current source.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-src}\n */\n'src',\n/**\n * Set the value of `poster` on the media element. `poster` is the url to\n * an image file that can/will be shown when no media data is available.\n *\n * @method Html5#setPoster\n * @param {string} poster\n * The url to an image that should be used as the `poster` for the media\n * element.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-poster}\n */\n'poster',\n/**\n * Set the value of `preload` on the media element. `preload` indicates\n * what should download before the media is interacted with. It can have the following\n * values:\n * - none: nothing should be downloaded\n * - metadata: poster and the first few frames of the media may be downloaded to get\n * media dimensions and other metadata\n * - auto: allow the media and metadata for the media to be downloaded before\n * interaction\n *\n * @method Html5#setPreload\n * @param {string} preload\n * The value of `preload` to set on the media element. Must be 'none', 'metadata',\n * or 'auto'.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#attr-media-preload}\n */\n'preload',\n/**\n * Set the value of `playbackRate` on the media element. `playbackRate` indicates\n * the rate at which the media should play back. Examples:\n * - if playbackRate is set to 2, media will play twice as fast.\n * - if playbackRate is set to 0.5, media will play half as fast.\n *\n * @method Html5#setPlaybackRate\n * @return {number}\n * The value of `playbackRate` from the media element. A number indicating\n * the current playback speed of the media, where 1 is normal speed.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-playbackrate}\n */\n'playbackRate',\n/**\n * Set the value of `defaultPlaybackRate` on the media element. `defaultPlaybackRate` indicates\n * the rate at which the media should play back upon initial startup. Changing this value\n * after a video has started will do nothing. 
Instead you should use {@link Html5#setPlaybackRate}.\n *\n * Example Values:\n * - if playbackRate is set to 2, media will play twice as fast.\n * - if playbackRate is set to 0.5, media will play half as fast.\n *\n * @method Html5.prototype.setDefaultPlaybackRate\n * @return {number}\n * The value of `defaultPlaybackRate` from the media element. A number indicating\n * the current playback speed of the media, where 1 is normal speed.\n *\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-defaultplaybackrate}\n */\n'defaultPlaybackRate',\n/**\n * Prevents the browser from suggesting a Picture-in-Picture context menu\n * or from requesting Picture-in-Picture automatically in some cases.\n *\n * @method Html5#setDisablePictureInPicture\n * @param {boolean} value\n * The true value will disable Picture-in-Picture mode.\n *\n * @see [Spec]{@link https://w3c.github.io/picture-in-picture/#disable-pip}\n */\n'disablePictureInPicture',\n/**\n * Set the value of `crossOrigin` on the media element. `crossOrigin` indicates\n * to the browser whether it should send cookies along with the requests for the\n * different assets/playlists.\n *\n * @method Html5#setCrossOrigin\n * @param {string} crossOrigin\n * - anonymous indicates that the media should not send cookies.\n * - use-credentials indicates that the media should send cookies along with the requests.\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/#attr-media-crossorigin}\n */\n'crossOrigin'].forEach(function (prop) {\n Html5.prototype['set' + toTitleCase$1(prop)] = function (v) {\n this.el_[prop] = v;\n };\n});\n\n// wrap native functions with a function\n// The list is as follows:\n// pause, load, play\n[\n/**\n * A wrapper around the media element's `pause` function. This will call the `HTML5`\n * media element's `pause` function.\n *\n * @method Html5#pause\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-pause}\n */\n'pause',\n/**\n * A wrapper around the media element's `load` function. This will call the `HTML5`s\n * media element `load` function.\n *\n * @method Html5#load\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-load}\n */\n'load',\n/**\n * A wrapper around the media element's `play` function. 
This will call the `HTML5`s\n * media element `play` function.\n *\n * @method Html5#play\n * @see [Spec]{@link https://www.w3.org/TR/html5/embedded-content-0.html#dom-media-play}\n */\n'play'].forEach(function (prop) {\n Html5.prototype[prop] = function () {\n return this.el_[prop]();\n };\n});\nTech.withSourceHandlers(Html5);\n\n/**\n * Native source handler for Html5, simply passes the source to the media element.\n *\n * @property {Tech~SourceObject} source\n * The source object\n *\n * @property {Html5} tech\n * The instance of the HTML5 tech.\n */\nHtml5.nativeSourceHandler = {};\n\n/**\n * Check if the media element can play the given mime type.\n *\n * @param {string} type\n * The mimetype to check\n *\n * @return {string}\n * 'probably', 'maybe', or '' (empty string)\n */\nHtml5.nativeSourceHandler.canPlayType = function (type) {\n // IE without MediaPlayer throws an error (#519)\n try {\n return Html5.TEST_VID.canPlayType(type);\n } catch (e) {\n return '';\n }\n};\n\n/**\n * Check if the media element can handle a source natively.\n *\n * @param {Tech~SourceObject} source\n * The source object\n *\n * @param {Object} [options]\n * Options to be passed to the tech.\n *\n * @return {string}\n * 'probably', 'maybe', or '' (empty string).\n */\nHtml5.nativeSourceHandler.canHandleSource = function (source, options) {\n // If a type was provided we should rely on that\n if (source.type) {\n return Html5.nativeSourceHandler.canPlayType(source.type);\n\n // If no type, fall back to checking 'video/[EXTENSION]'\n } else if (source.src) {\n const ext = getFileExtension(source.src);\n return Html5.nativeSourceHandler.canPlayType(`video/${ext}`);\n }\n return '';\n};\n\n/**\n * Pass the source to the native media element.\n *\n * @param {Tech~SourceObject} source\n * The source object\n *\n * @param {Html5} tech\n * The instance of the Html5 tech\n *\n * @param {Object} [options]\n * The options to pass to the source\n */\nHtml5.nativeSourceHandler.handleSource = function (source, tech, options) {\n tech.setSrc(source.src);\n};\n\n/**\n * A noop for the native dispose function, as cleanup is not needed.\n */\nHtml5.nativeSourceHandler.dispose = function () {};\n\n// Register the native source handler\nHtml5.registerSourceHandler(Html5.nativeSourceHandler);\nTech.registerTech('Html5', Html5);\n\n/**\n * @file player.js\n */\n\n// The following tech events are simply re-triggered\n// on the player when they happen\nconst TECH_EVENTS_RETRIGGER = [\n/**\n * Fired while the user agent is downloading media data.\n *\n * @event Player#progress\n * @type {Event}\n */\n/**\n * Retrigger the `progress` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechProgress_\n * @fires Player#progress\n * @listens Tech#progress\n */\n'progress',\n/**\n * Fires when the loading of an audio/video is aborted.\n *\n * @event Player#abort\n * @type {Event}\n */\n/**\n * Retrigger the `abort` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechAbort_\n * @fires Player#abort\n * @listens Tech#abort\n */\n'abort',\n/**\n * Fires when the browser is intentionally not getting media data.\n *\n * @event Player#suspend\n * @type {Event}\n */\n/**\n * Retrigger the `suspend` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechSuspend_\n * @fires Player#suspend\n * @listens Tech#suspend\n */\n'suspend',\n/**\n * Fires when the current playlist is empty.\n *\n * @event Player#emptied\n * @type {Event}\n 
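*\n * (Added note, illustrative sketch only: application code can listen for this\n * re-triggered event with `player.on('emptied', () => { ... })`.)\n 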
*/\n/**\n * Retrigger the `emptied` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechEmptied_\n * @fires Player#emptied\n * @listens Tech#emptied\n */\n'emptied',\n/**\n * Fires when the browser is trying to get media data, but data is not available.\n *\n * @event Player#stalled\n * @type {Event}\n */\n/**\n * Retrigger the `stalled` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechStalled_\n * @fires Player#stalled\n * @listens Tech#stalled\n */\n'stalled',\n/**\n * Fires when the browser has loaded meta data for the audio/video.\n *\n * @event Player#loadedmetadata\n * @type {Event}\n */\n/**\n * Retrigger the `loadedmetadata` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechLoadedmetadata_\n * @fires Player#loadedmetadata\n * @listens Tech#loadedmetadata\n */\n'loadedmetadata',\n/**\n * Fires when the browser has loaded the current frame of the audio/video.\n *\n * @event Player#loadeddata\n * @type {event}\n */\n/**\n * Retrigger the `loadeddata` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechLoaddeddata_\n * @fires Player#loadeddata\n * @listens Tech#loadeddata\n */\n'loadeddata',\n/**\n * Fires when the current playback position has changed.\n *\n * @event Player#timeupdate\n * @type {event}\n */\n/**\n * Retrigger the `timeupdate` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechTimeUpdate_\n * @fires Player#timeupdate\n * @listens Tech#timeupdate\n */\n'timeupdate',\n/**\n * Fires when the video's intrinsic dimensions change\n *\n * @event Player#resize\n * @type {event}\n */\n/**\n * Retrigger the `resize` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechResize_\n * @fires Player#resize\n * @listens Tech#resize\n */\n'resize',\n/**\n * Fires when the volume has been changed\n *\n * @event Player#volumechange\n * @type {event}\n */\n/**\n * Retrigger the `volumechange` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechVolumechange_\n * @fires Player#volumechange\n * @listens Tech#volumechange\n */\n'volumechange',\n/**\n * Fires when the text track has been changed\n *\n * @event Player#texttrackchange\n * @type {event}\n */\n/**\n * Retrigger the `texttrackchange` event that was triggered by the {@link Tech}.\n *\n * @private\n * @method Player#handleTechTexttrackchange_\n * @fires Player#texttrackchange\n * @listens Tech#texttrackchange\n */\n'texttrackchange'];\n\n// events to queue when playback rate is zero\n// this is a hash for the sole purpose of mapping non-camel-cased event names\n// to camel-cased function names\nconst TECH_EVENTS_QUEUE = {\n canplay: 'CanPlay',\n canplaythrough: 'CanPlayThrough',\n playing: 'Playing',\n seeked: 'Seeked'\n};\nconst BREAKPOINT_ORDER = ['tiny', 'xsmall', 'small', 'medium', 'large', 'xlarge', 'huge'];\nconst BREAKPOINT_CLASSES = {};\n\n// grep: vjs-layout-tiny\n// grep: vjs-layout-x-small\n// grep: vjs-layout-small\n// grep: vjs-layout-medium\n// grep: vjs-layout-large\n// grep: vjs-layout-x-large\n// grep: vjs-layout-huge\nBREAKPOINT_ORDER.forEach(k => {\n const v = k.charAt(0) === 'x' ? 
`x-${k.substring(1)}` : k;\n BREAKPOINT_CLASSES[k] = `vjs-layout-${v}`;\n});\nconst DEFAULT_BREAKPOINTS = {\n tiny: 210,\n xsmall: 320,\n small: 425,\n medium: 768,\n large: 1440,\n xlarge: 2560,\n huge: Infinity\n};\n\n/**\n * An instance of the `Player` class is created when any of the Video.js setup methods\n * are used to initialize a video.\n *\n * After an instance has been created it can be accessed globally in three ways:\n * 1. By calling `videojs.getPlayer('example_video_1');`\n * 2. By calling `videojs('example_video_1');` (not recommended)\n * 2. By using it directly via `videojs.players.example_video_1;`\n *\n * @extends Component\n * @global\n */\nclass Player extends Component$1 {\n /**\n * Create an instance of this class.\n *\n * @param {Element} tag\n * The original video DOM element used for configuring options.\n *\n * @param {Object} [options]\n * Object of option names and values.\n *\n * @param {Function} [ready]\n * Ready callback function.\n */\n constructor(tag, options, ready) {\n // Make sure tag ID exists\n // also here.. probably better\n tag.id = tag.id || options.id || `vjs_video_${newGUID()}`;\n\n // Set Options\n // The options argument overrides options set in the video tag\n // which overrides globally set options.\n // This latter part coincides with the load order\n // (tag must exist before Player)\n options = Object.assign(Player.getTagSettings(tag), options);\n\n // Delay the initialization of children because we need to set up\n // player properties first, and can't use `this` before `super()`\n options.initChildren = false;\n\n // Same with creating the element\n options.createEl = false;\n\n // don't auto mixin the evented mixin\n options.evented = false;\n\n // we don't want the player to report touch activity on itself\n // see enableTouchActivity in Component\n options.reportTouchActivity = false;\n\n // If language is not set, get the closest lang attribute\n if (!options.language) {\n const closest = tag.closest('[lang]');\n if (closest) {\n options.language = closest.getAttribute('lang');\n }\n }\n\n // Run base component initializing with new options\n super(null, options, ready);\n\n // Create bound methods for document listeners.\n this.boundDocumentFullscreenChange_ = e => this.documentFullscreenChange_(e);\n this.boundFullWindowOnEscKey_ = e => this.fullWindowOnEscKey(e);\n this.boundUpdateStyleEl_ = e => this.updateStyleEl_(e);\n this.boundApplyInitTime_ = e => this.applyInitTime_(e);\n this.boundUpdateCurrentBreakpoint_ = e => this.updateCurrentBreakpoint_(e);\n this.boundHandleTechClick_ = e => this.handleTechClick_(e);\n this.boundHandleTechDoubleClick_ = e => this.handleTechDoubleClick_(e);\n this.boundHandleTechTouchStart_ = e => this.handleTechTouchStart_(e);\n this.boundHandleTechTouchMove_ = e => this.handleTechTouchMove_(e);\n this.boundHandleTechTouchEnd_ = e => this.handleTechTouchEnd_(e);\n this.boundHandleTechTap_ = e => this.handleTechTap_(e);\n\n // default isFullscreen_ to false\n this.isFullscreen_ = false;\n\n // create logger\n this.log = createLogger(this.id_);\n\n // Hold our own reference to fullscreen api so it can be mocked in tests\n this.fsApi_ = FullscreenApi;\n\n // Tracks when a tech changes the poster\n this.isPosterFromTech_ = false;\n\n // Holds callback info that gets queued when playback rate is zero\n // and a seek is happening\n this.queuedCallbacks_ = [];\n\n // Turn off API access because we're loading a new tech that might load asynchronously\n this.isReady_ = false;\n\n // Init state 
hasStarted_\n this.hasStarted_ = false;\n\n // Init state userActive_\n this.userActive_ = false;\n\n // Init debugEnabled_\n this.debugEnabled_ = false;\n\n // Init state audioOnlyMode_\n this.audioOnlyMode_ = false;\n\n // Init state audioPosterMode_\n this.audioPosterMode_ = false;\n\n // Init state audioOnlyCache_\n this.audioOnlyCache_ = {\n playerHeight: null,\n hiddenChildren: []\n };\n\n // if the global option object was accidentally blown away by\n // someone, bail early with an informative error\n if (!this.options_ || !this.options_.techOrder || !this.options_.techOrder.length) {\n throw new Error('No techOrder specified. Did you overwrite ' + 'videojs.options instead of just changing the ' + 'properties you want to override?');\n }\n\n // Store the original tag used to set options\n this.tag = tag;\n\n // Store the tag attributes used to restore html5 element\n this.tagAttributes = tag && getAttributes(tag);\n\n // Update current language\n this.language(this.options_.language);\n\n // Update Supported Languages\n if (options.languages) {\n // Normalise player option languages to lowercase\n const languagesToLower = {};\n Object.getOwnPropertyNames(options.languages).forEach(function (name) {\n languagesToLower[name.toLowerCase()] = options.languages[name];\n });\n this.languages_ = languagesToLower;\n } else {\n this.languages_ = Player.prototype.options_.languages;\n }\n this.resetCache_();\n\n // Set poster\n /** @type string */\n this.poster_ = options.poster || '';\n\n // Set controls\n /** @type {boolean} */\n this.controls_ = !!options.controls;\n\n // Original tag settings stored in options\n // now remove immediately so native controls don't flash.\n // May be turned back on by HTML5 tech if nativeControlsForTouch is true\n tag.controls = false;\n tag.removeAttribute('controls');\n this.changingSrc_ = false;\n this.playCallbacks_ = [];\n this.playTerminatedQueue_ = [];\n\n // the attribute overrides the option\n if (tag.hasAttribute('autoplay')) {\n this.autoplay(true);\n } else {\n // otherwise use the setter to validate and\n // set the correct value.\n this.autoplay(this.options_.autoplay);\n }\n\n // check plugins\n if (options.plugins) {\n Object.keys(options.plugins).forEach(name => {\n if (typeof this[name] !== 'function') {\n throw new Error(`plugin \"${name}\" does not exist`);\n }\n });\n }\n\n /*\n * Store the internal state of scrubbing\n *\n * @private\n * @return {Boolean} True if the user is scrubbing\n */\n this.scrubbing_ = false;\n this.el_ = this.createEl();\n\n // Make this an evented object and use `el_` as its event bus.\n evented(this, {\n eventBusKey: 'el_'\n });\n\n // listen to document and player fullscreenchange handlers so we receive those events\n // before a user can receive them so we can update isFullscreen appropriately.\n // make sure that we listen to fullscreenchange events before everything else to make sure that\n // our isFullscreen method is updated properly for internal components as well as external.\n if (this.fsApi_.requestFullscreen) {\n on(document, this.fsApi_.fullscreenchange, this.boundDocumentFullscreenChange_);\n this.on(this.fsApi_.fullscreenchange, this.boundDocumentFullscreenChange_);\n }\n if (this.fluid_) {\n this.on(['playerreset', 'resize'], this.boundUpdateStyleEl_);\n }\n // We also want to pass the original player options to each component and plugin\n // as well so they don't need to reach back into the player for options later.\n // We also need to do another copy of this.options_ so we don't end up 
with\n // an infinite loop.\n const playerOptionsCopy = merge$1(this.options_);\n\n // Load plugins\n if (options.plugins) {\n Object.keys(options.plugins).forEach(name => {\n this[name](options.plugins[name]);\n });\n }\n\n // Enable debug mode to fire debugon event for all plugins.\n if (options.debug) {\n this.debug(true);\n }\n this.options_.playerOptions = playerOptionsCopy;\n this.middleware_ = [];\n this.playbackRates(options.playbackRates);\n if (options.experimentalSvgIcons) {\n // Add SVG Sprite to the DOM\n const parser = new window$1.DOMParser();\n const parsedSVG = parser.parseFromString(icons, 'image/svg+xml');\n const errorNode = parsedSVG.querySelector('parsererror');\n if (errorNode) {\n log$1.warn('Failed to load SVG Icons. Falling back to Font Icons.');\n this.options_.experimentalSvgIcons = null;\n } else {\n const sprite = parsedSVG.documentElement;\n sprite.style.display = 'none';\n this.el_.appendChild(sprite);\n this.addClass('vjs-svg-icons-enabled');\n }\n }\n this.initChildren();\n\n // Set isAudio based on whether or not an audio tag was used\n this.isAudio(tag.nodeName.toLowerCase() === 'audio');\n\n // Update controls className. Can't do this when the controls are initially\n // set because the element doesn't exist yet.\n if (this.controls()) {\n this.addClass('vjs-controls-enabled');\n } else {\n this.addClass('vjs-controls-disabled');\n }\n\n // Set ARIA label and region role depending on player type\n this.el_.setAttribute('role', 'region');\n if (this.isAudio()) {\n this.el_.setAttribute('aria-label', this.localize('Audio Player'));\n } else {\n this.el_.setAttribute('aria-label', this.localize('Video Player'));\n }\n if (this.isAudio()) {\n this.addClass('vjs-audio');\n }\n\n // TODO: Make this smarter. Toggle user state between touching/mousing\n // using events, since devices can have both touch and mouse events.\n // TODO: Make this check be performed again when the window switches between monitors\n // (See https://github.com/videojs/video.js/issues/5683)\n if (TOUCH_ENABLED) {\n this.addClass('vjs-touch-enabled');\n }\n\n // iOS Safari has broken hover handling\n if (!IS_IOS) {\n this.addClass('vjs-workinghover');\n }\n\n // Make player easily findable by ID\n Player.players[this.id_] = this;\n\n // Add a major version class to aid css in plugins\n const majorVersion = version$6.split('.')[0];\n this.addClass(`vjs-v${majorVersion}`);\n\n // When the player is first initialized, trigger activity so components\n // like the control bar show themselves if needed\n this.userActive(true);\n this.reportUserActivity();\n this.one('play', e => this.listenForUserActivity_(e));\n this.on('keydown', e => this.handleKeyDown(e));\n this.on('languagechange', e => this.handleLanguagechange(e));\n this.breakpoints(this.options_.breakpoints);\n this.responsive(this.options_.responsive);\n\n // Calling both the audio mode methods after the player is fully\n // setup to be able to listen to the events triggered by them\n this.on('ready', () => {\n // Calling the audioPosterMode method first so that\n // the audioOnlyMode can take precedence when both options are set to true\n this.audioPosterMode(this.options_.audioPosterMode);\n this.audioOnlyMode(this.options_.audioOnlyMode);\n });\n }\n\n /**\n * Destroys the video player and does any necessary cleanup.\n *\n * This is especially helpful if you are dynamically adding and removing videos\n * to/from the DOM.\n *\n * @fires Player#dispose\n */\n dispose() {\n /**\n * Called when the player is being disposed of.\n *\n 
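* (Added note, illustrative only: external integrations can hook cleanup here, e.g.\n * `player.on('dispose', () => clearInterval(myPollTimer))`, where `myPollTimer` is a\n * hypothetical app-level timer handle.)\n *\n 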
* @event Player#dispose\n * @type {Event}\n */\n this.trigger('dispose');\n // prevent dispose from being called twice\n this.off('dispose');\n\n // Make sure all player-specific document listeners are unbound. This is\n off(document, this.fsApi_.fullscreenchange, this.boundDocumentFullscreenChange_);\n off(document, 'keydown', this.boundFullWindowOnEscKey_);\n if (this.styleEl_ && this.styleEl_.parentNode) {\n this.styleEl_.parentNode.removeChild(this.styleEl_);\n this.styleEl_ = null;\n }\n\n // Kill reference to this player\n Player.players[this.id_] = null;\n if (this.tag && this.tag.player) {\n this.tag.player = null;\n }\n if (this.el_ && this.el_.player) {\n this.el_.player = null;\n }\n if (this.tech_) {\n this.tech_.dispose();\n this.isPosterFromTech_ = false;\n this.poster_ = '';\n }\n if (this.playerElIngest_) {\n this.playerElIngest_ = null;\n }\n if (this.tag) {\n this.tag = null;\n }\n clearCacheForPlayer(this);\n\n // remove all event handlers for track lists\n // all tracks and track listeners are removed on\n // tech dispose\n ALL.names.forEach(name => {\n const props = ALL[name];\n const list = this[props.getterName]();\n\n // if it is not a native list\n // we have to manually remove event listeners\n if (list && list.off) {\n list.off();\n }\n });\n\n // the actual .el_ is removed here, or replaced if\n super.dispose({\n restoreEl: this.options_.restoreEl\n });\n }\n\n /**\n * Create the `Player`'s DOM element.\n *\n * @return {Element}\n * The DOM element that gets created.\n */\n createEl() {\n let tag = this.tag;\n let el;\n let playerElIngest = this.playerElIngest_ = tag.parentNode && tag.parentNode.hasAttribute && tag.parentNode.hasAttribute('data-vjs-player');\n const divEmbed = this.tag.tagName.toLowerCase() === 'video-js';\n if (playerElIngest) {\n el = this.el_ = tag.parentNode;\n } else if (!divEmbed) {\n el = this.el_ = super.createEl('div');\n }\n\n // Copy over all the attributes from the tag, including ID and class\n // ID will now reference player box, not the video tag\n const attrs = getAttributes(tag);\n if (divEmbed) {\n el = this.el_ = tag;\n tag = this.tag = document.createElement('video');\n while (el.children.length) {\n tag.appendChild(el.firstChild);\n }\n if (!hasClass(el, 'video-js')) {\n addClass(el, 'video-js');\n }\n el.appendChild(tag);\n playerElIngest = this.playerElIngest_ = el;\n // move properties over from our custom `video-js` element\n // to our new `video` element. 
This will move things like\n // `src` or `controls` that were set via js before the player\n // was initialized.\n Object.keys(el).forEach(k => {\n try {\n tag[k] = el[k];\n } catch (e) {\n // we got a a property like outerHTML which we can't actually copy, ignore it\n }\n });\n }\n\n // set tabindex to -1 to remove the video element from the focus order\n tag.setAttribute('tabindex', '-1');\n attrs.tabindex = '-1';\n\n // Workaround for #4583 on Chrome (on Windows) with JAWS.\n // See https://github.com/FreedomScientific/VFO-standards-support/issues/78\n // Note that we can't detect if JAWS is being used, but this ARIA attribute\n // doesn't change behavior of Chrome if JAWS is not being used\n if (IS_CHROME && IS_WINDOWS) {\n tag.setAttribute('role', 'application');\n attrs.role = 'application';\n }\n\n // Remove width/height attrs from tag so CSS can make it 100% width/height\n tag.removeAttribute('width');\n tag.removeAttribute('height');\n if ('width' in attrs) {\n delete attrs.width;\n }\n if ('height' in attrs) {\n delete attrs.height;\n }\n Object.getOwnPropertyNames(attrs).forEach(function (attr) {\n // don't copy over the class attribute to the player element when we're in a div embed\n // the class is already set up properly in the divEmbed case\n // and we want to make sure that the `video-js` class doesn't get lost\n if (!(divEmbed && attr === 'class')) {\n el.setAttribute(attr, attrs[attr]);\n }\n if (divEmbed) {\n tag.setAttribute(attr, attrs[attr]);\n }\n });\n\n // Update tag id/class for use as HTML5 playback tech\n // Might think we should do this after embedding in container so .vjs-tech class\n // doesn't flash 100% width/height, but class only applies with .video-js parent\n tag.playerId = tag.id;\n tag.id += '_html5_api';\n tag.className = 'vjs-tech';\n\n // Make player findable on elements\n tag.player = el.player = this;\n // Default state of video is paused\n this.addClass('vjs-paused');\n const deviceClassNames = ['IS_SMART_TV', 'IS_TIZEN', 'IS_WEBOS', 'IS_ANDROID', 'IS_IPAD', 'IS_IPHONE'].filter(key => browser[key]).map(key => {\n return 'vjs-device-' + key.substring(3).toLowerCase().replace(/\\_/g, '-');\n });\n this.addClass(...deviceClassNames);\n\n // Add a style element in the player that we'll use to set the width/height\n // of the player in a way that's still overridable by CSS, just like the\n // video element\n if (window$1.VIDEOJS_NO_DYNAMIC_STYLE !== true) {\n this.styleEl_ = createStyleElement('vjs-styles-dimensions');\n const defaultsStyleEl = $('.vjs-styles-defaults');\n const head = $('head');\n head.insertBefore(this.styleEl_, defaultsStyleEl ? 
defaultsStyleEl.nextSibling : head.firstChild);\n }\n this.fill_ = false;\n this.fluid_ = false;\n\n // Pass in the width/height/aspectRatio options which will update the style el\n this.width(this.options_.width);\n this.height(this.options_.height);\n this.fill(this.options_.fill);\n this.fluid(this.options_.fluid);\n this.aspectRatio(this.options_.aspectRatio);\n // support both crossOrigin and crossorigin to reduce confusion and issues around the name\n this.crossOrigin(this.options_.crossOrigin || this.options_.crossorigin);\n\n // Hide any links within the video/audio tag,\n // because IE doesn't hide them completely from screen readers.\n const links = tag.getElementsByTagName('a');\n for (let i = 0; i < links.length; i++) {\n const linkEl = links.item(i);\n addClass(linkEl, 'vjs-hidden');\n linkEl.setAttribute('hidden', 'hidden');\n }\n\n // insertElFirst seems to cause the networkState to flicker from 3 to 2, so\n // keep track of the original for later so we can know if the source originally failed\n tag.initNetworkState_ = tag.networkState;\n\n // Wrap video tag in div (el/box) container\n if (tag.parentNode && !playerElIngest) {\n tag.parentNode.insertBefore(el, tag);\n }\n\n // insert the tag as the first child of the player element\n // then manually add it to the children array so that this.addChild\n // will work properly for other components\n //\n // Breaks iPhone, fixed in HTML5 setup.\n prependTo(tag, el);\n this.children_.unshift(tag);\n\n // Set lang attr on player to ensure CSS :lang() in consistent with player\n // if it's been set to something different to the doc\n this.el_.setAttribute('lang', this.language_);\n this.el_.setAttribute('translate', 'no');\n this.el_ = el;\n return el;\n }\n\n /**\n * Get or set the `Player`'s crossOrigin option. For the HTML5 player, this\n * sets the `crossOrigin` property on the `` tag to control the CORS\n * behavior.\n *\n * @see [Video Element Attributes]{@link https://developer.mozilla.org/en-US/docs/Web/HTML/Element/video#attr-crossorigin}\n *\n * @param {string|null} [value]\n * The value to set the `Player`'s crossOrigin to. If an argument is\n * given, must be one of `'anonymous'` or `'use-credentials'`, or 'null'.\n *\n * @return {string|null|undefined}\n * - The current crossOrigin value of the `Player` when getting.\n * - undefined when setting\n */\n crossOrigin(value) {\n // `null` can be set to unset a value\n if (typeof value === 'undefined') {\n return this.techGet_('crossOrigin');\n }\n if (value !== null && value !== 'anonymous' && value !== 'use-credentials') {\n log$1.warn(`crossOrigin must be null, \"anonymous\" or \"use-credentials\", given \"${value}\"`);\n return;\n }\n this.techCall_('setCrossOrigin', value);\n if (this.posterImage) {\n this.posterImage.crossOrigin(value);\n }\n return;\n }\n\n /**\n * A getter/setter for the `Player`'s width. Returns the player's configured value.\n * To get the current width use `currentWidth()`.\n *\n * @param {number|string} [value]\n * CSS value to set the `Player`'s width to.\n *\n * @return {number|undefined}\n * - The current width of the `Player` when getting.\n * - Nothing when setting\n */\n width(value) {\n return this.dimension('width', value);\n }\n\n /**\n * A getter/setter for the `Player`'s height. 
Returns the player's configured value.\n * To get the current height use `currentheight()`.\n *\n * @param {number|string} [value]\n * CSS value to set the `Player`'s height to.\n *\n * @return {number|undefined}\n * - The current height of the `Player` when getting.\n * - Nothing when setting\n */\n height(value) {\n return this.dimension('height', value);\n }\n\n /**\n * A getter/setter for the `Player`'s width & height.\n *\n * @param {string} dimension\n * This string can be:\n * - 'width'\n * - 'height'\n *\n * @param {number|string} [value]\n * Value for dimension specified in the first argument.\n *\n * @return {number}\n * The dimension arguments value when getting (width/height).\n */\n dimension(dimension, value) {\n const privDimension = dimension + '_';\n if (value === undefined) {\n return this[privDimension] || 0;\n }\n if (value === '' || value === 'auto') {\n // If an empty string is given, reset the dimension to be automatic\n this[privDimension] = undefined;\n this.updateStyleEl_();\n return;\n }\n const parsedVal = parseFloat(value);\n if (isNaN(parsedVal)) {\n log$1.error(`Improper value \"${value}\" supplied for for ${dimension}`);\n return;\n }\n this[privDimension] = parsedVal;\n this.updateStyleEl_();\n }\n\n /**\n * A getter/setter/toggler for the vjs-fluid `className` on the `Player`.\n *\n * Turning this on will turn off fill mode.\n *\n * @param {boolean} [bool]\n * - A value of true adds the class.\n * - A value of false removes the class.\n * - No value will be a getter.\n *\n * @return {boolean|undefined}\n * - The value of fluid when getting.\n * - `undefined` when setting.\n */\n fluid(bool) {\n if (bool === undefined) {\n return !!this.fluid_;\n }\n this.fluid_ = !!bool;\n if (isEvented(this)) {\n this.off(['playerreset', 'resize'], this.boundUpdateStyleEl_);\n }\n if (bool) {\n this.addClass('vjs-fluid');\n this.fill(false);\n addEventedCallback(this, () => {\n this.on(['playerreset', 'resize'], this.boundUpdateStyleEl_);\n });\n } else {\n this.removeClass('vjs-fluid');\n }\n this.updateStyleEl_();\n }\n\n /**\n * A getter/setter/toggler for the vjs-fill `className` on the `Player`.\n *\n * Turning this on will turn off fluid mode.\n *\n * @param {boolean} [bool]\n * - A value of true adds the class.\n * - A value of false removes the class.\n * - No value will be a getter.\n *\n * @return {boolean|undefined}\n * - The value of fluid when getting.\n * - `undefined` when setting.\n */\n fill(bool) {\n if (bool === undefined) {\n return !!this.fill_;\n }\n this.fill_ = !!bool;\n if (bool) {\n this.addClass('vjs-fill');\n this.fluid(false);\n } else {\n this.removeClass('vjs-fill');\n }\n }\n\n /**\n * Get/Set the aspect ratio\n *\n * @param {string} [ratio]\n * Aspect ratio for player\n *\n * @return {string|undefined}\n * returns the current aspect ratio when getting\n */\n\n /**\n * A getter/setter for the `Player`'s aspect ratio.\n *\n * @param {string} [ratio]\n * The value to set the `Player`'s aspect ratio to.\n *\n * @return {string|undefined}\n * - The current aspect ratio of the `Player` when getting.\n * - undefined when setting\n */\n aspectRatio(ratio) {\n if (ratio === undefined) {\n return this.aspectRatio_;\n }\n\n // Check for width:height format\n if (!/^\\d+\\:\\d+$/.test(ratio)) {\n throw new Error('Improper value supplied for aspect ratio. 
The format should be width:height, for example 16:9.');\n }\n this.aspectRatio_ = ratio;\n\n // We're assuming if you set an aspect ratio you want fluid mode,\n // because in fixed mode you could calculate width and height yourself.\n this.fluid(true);\n this.updateStyleEl_();\n }\n\n /**\n * Update styles of the `Player` element (height, width and aspect ratio).\n *\n * @private\n * @listens Tech#loadedmetadata\n */\n updateStyleEl_() {\n if (window$1.VIDEOJS_NO_DYNAMIC_STYLE === true) {\n const width = typeof this.width_ === 'number' ? this.width_ : this.options_.width;\n const height = typeof this.height_ === 'number' ? this.height_ : this.options_.height;\n const techEl = this.tech_ && this.tech_.el();\n if (techEl) {\n if (width >= 0) {\n techEl.width = width;\n }\n if (height >= 0) {\n techEl.height = height;\n }\n }\n return;\n }\n let width;\n let height;\n let aspectRatio;\n let idClass;\n\n // The aspect ratio is either used directly or to calculate width and height.\n if (this.aspectRatio_ !== undefined && this.aspectRatio_ !== 'auto') {\n // Use any aspectRatio that's been specifically set\n aspectRatio = this.aspectRatio_;\n } else if (this.videoWidth() > 0) {\n // Otherwise try to get the aspect ratio from the video metadata\n aspectRatio = this.videoWidth() + ':' + this.videoHeight();\n } else {\n // Or use a default. The video element's is 2:1, but 16:9 is more common.\n aspectRatio = '16:9';\n }\n\n // Get the ratio as a decimal we can use to calculate dimensions\n const ratioParts = aspectRatio.split(':');\n const ratioMultiplier = ratioParts[1] / ratioParts[0];\n if (this.width_ !== undefined) {\n // Use any width that's been specifically set\n width = this.width_;\n } else if (this.height_ !== undefined) {\n // Or calculate the width from the aspect ratio if a height has been set\n width = this.height_ / ratioMultiplier;\n } else {\n // Or use the video's metadata, or use the video el's default of 300\n width = this.videoWidth() || 300;\n }\n if (this.height_ !== undefined) {\n // Use any height that's been specifically set\n height = this.height_;\n } else {\n // Otherwise calculate the height from the ratio and the width\n height = width * ratioMultiplier;\n }\n\n // Ensure the CSS class is valid by starting with an alpha character\n if (/^[^a-zA-Z]/.test(this.id())) {\n idClass = 'dimensions-' + this.id();\n } else {\n idClass = this.id() + '-dimensions';\n }\n\n // Ensure the right class is still on the player for the style element\n this.addClass(idClass);\n setTextContent(this.styleEl_, `\n .${idClass} {\n width: ${width}px;\n height: ${height}px;\n }\n\n .${idClass}.vjs-fluid:not(.vjs-audio-only-mode) {\n padding-top: ${ratioMultiplier * 100}%;\n }\n `);\n }\n\n /**\n * Load/Create an instance of playback {@link Tech} including element\n * and API methods. 
Then append the `Tech` element in `Player` as a child.\n *\n * @param {string} techName\n * name of the playback technology\n *\n * @param {string} source\n * video source\n *\n * @private\n */\n loadTech_(techName, source) {\n // Pause and remove current playback technology\n if (this.tech_) {\n this.unloadTech_();\n }\n const titleTechName = toTitleCase$1(techName);\n const camelTechName = techName.charAt(0).toLowerCase() + techName.slice(1);\n\n // get rid of the HTML5 video tag as soon as we are using another tech\n if (titleTechName !== 'Html5' && this.tag) {\n Tech.getTech('Html5').disposeMediaElement(this.tag);\n this.tag.player = null;\n this.tag = null;\n }\n this.techName_ = titleTechName;\n\n // Turn off API access because we're loading a new tech that might load asynchronously\n this.isReady_ = false;\n let autoplay = this.autoplay();\n\n // if autoplay is a string (or `true` with normalizeAutoplay: true) we pass false to the tech\n // because the player is going to handle autoplay on `loadstart`\n if (typeof this.autoplay() === 'string' || this.autoplay() === true && this.options_.normalizeAutoplay) {\n autoplay = false;\n }\n\n // Grab tech-specific options from player options and add source and parent element to use.\n const techOptions = {\n source,\n autoplay,\n 'nativeControlsForTouch': this.options_.nativeControlsForTouch,\n 'playerId': this.id(),\n 'techId': `${this.id()}_${camelTechName}_api`,\n 'playsinline': this.options_.playsinline,\n 'preload': this.options_.preload,\n 'loop': this.options_.loop,\n 'disablePictureInPicture': this.options_.disablePictureInPicture,\n 'muted': this.options_.muted,\n 'poster': this.poster(),\n 'language': this.language(),\n 'playerElIngest': this.playerElIngest_ || false,\n 'vtt.js': this.options_['vtt.js'],\n 'canOverridePoster': !!this.options_.techCanOverridePoster,\n 'enableSourceset': this.options_.enableSourceset\n };\n ALL.names.forEach(name => {\n const props = ALL[name];\n techOptions[props.getterName] = this[props.privateName];\n });\n Object.assign(techOptions, this.options_[titleTechName]);\n Object.assign(techOptions, this.options_[camelTechName]);\n Object.assign(techOptions, this.options_[techName.toLowerCase()]);\n if (this.tag) {\n techOptions.tag = this.tag;\n }\n if (source && source.src === this.cache_.src && this.cache_.currentTime > 0) {\n techOptions.startTime = this.cache_.currentTime;\n }\n\n // Initialize tech instance\n const TechClass = Tech.getTech(techName);\n if (!TechClass) {\n throw new Error(`No Tech named '${titleTechName}' exists! 
'${titleTechName}' should be registered using videojs.registerTech()'`);\n }\n this.tech_ = new TechClass(techOptions);\n\n // player.triggerReady is always async, so don't need this to be async\n this.tech_.ready(bind_(this, this.handleTechReady_), true);\n textTrackConverter.jsonToTextTracks(this.textTracksJson_ || [], this.tech_);\n\n // Listen to all HTML5-defined events and trigger them on the player\n TECH_EVENTS_RETRIGGER.forEach(event => {\n this.on(this.tech_, event, e => this[`handleTech${toTitleCase$1(event)}_`](e));\n });\n Object.keys(TECH_EVENTS_QUEUE).forEach(event => {\n this.on(this.tech_, event, eventObj => {\n if (this.tech_.playbackRate() === 0 && this.tech_.seeking()) {\n this.queuedCallbacks_.push({\n callback: this[`handleTech${TECH_EVENTS_QUEUE[event]}_`].bind(this),\n event: eventObj\n });\n return;\n }\n this[`handleTech${TECH_EVENTS_QUEUE[event]}_`](eventObj);\n });\n });\n this.on(this.tech_, 'loadstart', e => this.handleTechLoadStart_(e));\n this.on(this.tech_, 'sourceset', e => this.handleTechSourceset_(e));\n this.on(this.tech_, 'waiting', e => this.handleTechWaiting_(e));\n this.on(this.tech_, 'ended', e => this.handleTechEnded_(e));\n this.on(this.tech_, 'seeking', e => this.handleTechSeeking_(e));\n this.on(this.tech_, 'play', e => this.handleTechPlay_(e));\n this.on(this.tech_, 'pause', e => this.handleTechPause_(e));\n this.on(this.tech_, 'durationchange', e => this.handleTechDurationChange_(e));\n this.on(this.tech_, 'fullscreenchange', (e, data) => this.handleTechFullscreenChange_(e, data));\n this.on(this.tech_, 'fullscreenerror', (e, err) => this.handleTechFullscreenError_(e, err));\n this.on(this.tech_, 'enterpictureinpicture', e => this.handleTechEnterPictureInPicture_(e));\n this.on(this.tech_, 'leavepictureinpicture', e => this.handleTechLeavePictureInPicture_(e));\n this.on(this.tech_, 'error', e => this.handleTechError_(e));\n this.on(this.tech_, 'posterchange', e => this.handleTechPosterChange_(e));\n this.on(this.tech_, 'textdata', e => this.handleTechTextData_(e));\n this.on(this.tech_, 'ratechange', e => this.handleTechRateChange_(e));\n this.on(this.tech_, 'loadedmetadata', this.boundUpdateStyleEl_);\n this.usingNativeControls(this.techGet_('controls'));\n if (this.controls() && !this.usingNativeControls()) {\n this.addTechControlsListeners_();\n }\n\n // Add the tech element in the DOM if it was not already there\n // Make sure to not insert the original video element if using Html5\n if (this.tech_.el().parentNode !== this.el() && (titleTechName !== 'Html5' || !this.tag)) {\n prependTo(this.tech_.el(), this.el());\n }\n\n // Get rid of the original video tag reference after the first tech is loaded\n if (this.tag) {\n this.tag.player = null;\n this.tag = null;\n }\n }\n\n /**\n * Unload and dispose of the current playback {@link Tech}.\n *\n * @private\n */\n unloadTech_() {\n // Save the current text tracks so that we can reuse the same text tracks with the next tech\n ALL.names.forEach(name => {\n const props = ALL[name];\n this[props.privateName] = this[props.getterName]();\n });\n this.textTracksJson_ = textTrackConverter.textTracksToJson(this.tech_);\n this.isReady_ = false;\n this.tech_.dispose();\n this.tech_ = false;\n if (this.isPosterFromTech_) {\n this.poster_ = '';\n this.trigger('posterchange');\n }\n this.isPosterFromTech_ = false;\n }\n\n /**\n * Return a reference to the current {@link Tech}.\n * It will print a warning by default about the danger of using the tech directly\n * but any argument that is passed in will 
silence the warning.\n *\n * @param {*} [safety]\n * Anything passed in to silence the warning\n *\n * @return {Tech}\n * The Tech\n */\n tech(safety) {\n if (safety === undefined) {\n log$1.warn('Using the tech directly can be dangerous. I hope you know what you\\'re doing.\\n' + 'See https://github.com/videojs/video.js/issues/2617 for more info.\\n');\n }\n return this.tech_;\n }\n\n /**\n * An object that contains Video.js version.\n *\n * @typedef {Object} PlayerVersion\n *\n * @property {string} 'video.js' - Video.js version\n */\n\n /**\n * Returns an object with Video.js version.\n *\n * @return {PlayerVersion}\n * An object with Video.js version.\n */\n version() {\n return {\n 'video.js': version$6\n };\n }\n\n /**\n * Set up click and touch listeners for the playback element\n *\n * - On desktops: a click on the video itself will toggle playback\n * - On mobile devices: a click on the video toggles controls\n * which is done by toggling the user state between active and\n * inactive\n * - A tap can signal that a user has become active or has become inactive\n * e.g. a quick tap on an iPhone movie should reveal the controls. Another\n * quick tap should hide them again (signaling the user is in an inactive\n * viewing state)\n * - In addition to this, we still want the user to be considered inactive after\n * a few seconds of inactivity.\n *\n * > Note: the only part of iOS interaction we can't mimic with this setup\n * is a touch and hold on the video element counting as activity in order to\n * keep the controls showing, but that shouldn't be an issue. A touch and hold\n * on any controls will still keep the user active\n *\n * @private\n */\n addTechControlsListeners_() {\n // Make sure to remove all the previous listeners in case we are called multiple times.\n this.removeTechControlsListeners_();\n this.on(this.tech_, 'click', this.boundHandleTechClick_);\n this.on(this.tech_, 'dblclick', this.boundHandleTechDoubleClick_);\n\n // If the controls were hidden we don't want that to change without a tap event\n // so we'll check if the controls were already showing before reporting user\n // activity\n this.on(this.tech_, 'touchstart', this.boundHandleTechTouchStart_);\n this.on(this.tech_, 'touchmove', this.boundHandleTechTouchMove_);\n this.on(this.tech_, 'touchend', this.boundHandleTechTouchEnd_);\n\n // The tap listener needs to come after the touchend listener because the tap\n // listener cancels out any reportedUserActivity when setting userActive(false)\n this.on(this.tech_, 'tap', this.boundHandleTechTap_);\n }\n\n /**\n * Remove the listeners used for click and tap controls. 
This is needed for\n * toggling to controls disabled, where a tap/touch should do nothing.\n *\n * @private\n */\n removeTechControlsListeners_() {\n // We don't want to just use `this.off()` because there might be other needed\n // listeners added by techs that extend this.\n this.off(this.tech_, 'tap', this.boundHandleTechTap_);\n this.off(this.tech_, 'touchstart', this.boundHandleTechTouchStart_);\n this.off(this.tech_, 'touchmove', this.boundHandleTechTouchMove_);\n this.off(this.tech_, 'touchend', this.boundHandleTechTouchEnd_);\n this.off(this.tech_, 'click', this.boundHandleTechClick_);\n this.off(this.tech_, 'dblclick', this.boundHandleTechDoubleClick_);\n }\n\n /**\n * Player waits for the tech to be ready\n *\n * @private\n */\n handleTechReady_() {\n this.triggerReady();\n\n // Keep the same volume as before\n if (this.cache_.volume) {\n this.techCall_('setVolume', this.cache_.volume);\n }\n\n // Look if the tech found a higher resolution poster while loading\n this.handleTechPosterChange_();\n\n // Update the duration if available\n this.handleTechDurationChange_();\n }\n\n /**\n * Retrigger the `loadstart` event that was triggered by the {@link Tech}.\n *\n * @fires Player#loadstart\n * @listens Tech#loadstart\n * @private\n */\n handleTechLoadStart_() {\n // TODO: Update to use `emptied` event instead. See #1277.\n\n this.removeClass('vjs-ended', 'vjs-seeking');\n\n // reset the error state\n this.error(null);\n\n // Update the duration\n this.handleTechDurationChange_();\n if (!this.paused()) {\n /**\n * Fired when the user agent begins looking for media data\n *\n * @event Player#loadstart\n * @type {Event}\n */\n this.trigger('loadstart');\n } else {\n // reset the hasStarted state\n this.hasStarted(false);\n this.trigger('loadstart');\n }\n\n // autoplay happens after loadstart for the browser,\n // so we mimic that behavior\n this.manualAutoplay_(this.autoplay() === true && this.options_.normalizeAutoplay ? 'play' : this.autoplay());\n }\n\n /**\n * Handle autoplay string values, rather than the typical boolean\n * values that should be handled by the tech. Note that this is not\n * part of any specification. Valid values and what they do can be\n * found on the autoplay getter at Player#autoplay()\n */\n manualAutoplay_(type) {\n if (!this.tech_ || typeof type !== 'string') {\n return;\n }\n\n // Save original muted() value, set muted to true, and attempt to play().\n // On promise rejection, restore muted from saved value\n const resolveMuted = () => {\n const previouslyMuted = this.muted();\n this.muted(true);\n const restoreMuted = () => {\n this.muted(previouslyMuted);\n };\n\n // restore muted on play terminatation\n this.playTerminatedQueue_.push(restoreMuted);\n const mutedPromise = this.play();\n if (!isPromise(mutedPromise)) {\n return;\n }\n return mutedPromise.catch(err => {\n restoreMuted();\n throw new Error(`Rejection at manualAutoplay. Restoring muted value. ${err ? 
err : ''}`);\n });\n };\n let promise;\n\n // if muted defaults to true\n // the only thing we can do is call play\n if (type === 'any' && !this.muted()) {\n promise = this.play();\n if (isPromise(promise)) {\n promise = promise.catch(resolveMuted);\n }\n } else if (type === 'muted' && !this.muted()) {\n promise = resolveMuted();\n } else {\n promise = this.play();\n }\n if (!isPromise(promise)) {\n return;\n }\n return promise.then(() => {\n this.trigger({\n type: 'autoplay-success',\n autoplay: type\n });\n }).catch(() => {\n this.trigger({\n type: 'autoplay-failure',\n autoplay: type\n });\n });\n }\n\n /**\n * Update the internal source caches so that we return the correct source from\n * `src()`, `currentSource()`, and `currentSources()`.\n *\n * > Note: `currentSources` will not be updated if the source that is passed in exists\n * in the current `currentSources` cache.\n *\n *\n * @param {Tech~SourceObject} srcObj\n * A string or object source to update our caches to.\n */\n updateSourceCaches_(srcObj = '') {\n let src = srcObj;\n let type = '';\n if (typeof src !== 'string') {\n src = srcObj.src;\n type = srcObj.type;\n }\n\n // make sure all the caches are set to default values\n // to prevent null checking\n this.cache_.source = this.cache_.source || {};\n this.cache_.sources = this.cache_.sources || [];\n\n // try to get the type of the src that was passed in\n if (src && !type) {\n type = findMimetype(this, src);\n }\n\n // update `currentSource` cache always\n this.cache_.source = merge$1({}, srcObj, {\n src,\n type\n });\n const matchingSources = this.cache_.sources.filter(s => s.src && s.src === src);\n const sourceElSources = [];\n const sourceEls = this.$$('source');\n const matchingSourceEls = [];\n for (let i = 0; i < sourceEls.length; i++) {\n const sourceObj = getAttributes(sourceEls[i]);\n sourceElSources.push(sourceObj);\n if (sourceObj.src && sourceObj.src === src) {\n matchingSourceEls.push(sourceObj.src);\n }\n }\n\n // if we have matching source els but not matching sources\n // the current source cache is not up to date\n if (matchingSourceEls.length && !matchingSources.length) {\n this.cache_.sources = sourceElSources;\n // if we don't have matching source or source els set the\n // sources cache to the `currentSource` cache\n } else if (!matchingSources.length) {\n this.cache_.sources = [this.cache_.source];\n }\n\n // update the tech `src` cache\n this.cache_.src = src;\n }\n\n /**\n * *EXPERIMENTAL* Fired when the source is set or changed on the {@link Tech}\n * causing the media element to reload.\n *\n * It will fire for the initial source and each subsequent source.\n * This event is a custom event from Video.js and is triggered by the {@link Tech}.\n *\n * The event object for this event contains a `src` property that will contain the source\n * that was available when the event was triggered. 
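A minimal illustrative sketch of listening for it (assumes a player\n * created with the `enableSourceset` option; the element id is a placeholder):\n *\n * ```js\n * var myPlayer = videojs('some-player-id', {enableSourceset: true});\n *\n * myPlayer.on('sourceset', function(e) {\n *   // e.src may be an empty string if the new source is not yet known.\n *   console.log('source set to:', e.src);\n * });\n * ```\n *\n * 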
This is generally only necessary if Video.js\n * is switching techs while the source was being changed.\n *\n * It is also fired when `load` is called on the player (or media element)\n * because the {@link https://html.spec.whatwg.org/multipage/media.html#dom-media-load|specification for `load`}\n * says that the resource selection algorithm needs to be aborted and restarted.\n * In this case, it is very likely that the `src` property will be set to the\n * empty string `\"\"` to indicate we do not know what the source will be but\n * that it is changing.\n *\n * *This event is currently still experimental and may change in minor releases.*\n * __To use this, pass `enableSourceset` option to the player.__\n *\n * @event Player#sourceset\n * @type {Event}\n * @prop {string} src\n * The source url available when the `sourceset` was triggered.\n * It will be an empty string if we cannot know what the source is\n * but know that the source will change.\n */\n /**\n * Retrigger the `sourceset` event that was triggered by the {@link Tech}.\n *\n * @fires Player#sourceset\n * @listens Tech#sourceset\n * @private\n */\n handleTechSourceset_(event) {\n // only update the source cache when the source\n // was not updated using the player api\n if (!this.changingSrc_) {\n let updateSourceCaches = src => this.updateSourceCaches_(src);\n const playerSrc = this.currentSource().src;\n const eventSrc = event.src;\n\n // if we have a playerSrc that is not a blob, and a tech src that is a blob\n if (playerSrc && !/^blob:/.test(playerSrc) && /^blob:/.test(eventSrc)) {\n // if both the tech source and the player source were updated we assume\n // something like @videojs/http-streaming did the sourceset and skip updating the source cache.\n if (!this.lastSource_ || this.lastSource_.tech !== eventSrc && this.lastSource_.player !== playerSrc) {\n updateSourceCaches = () => {};\n }\n }\n\n // update the source to the initial source right away\n // in some cases this will be empty string\n updateSourceCaches(eventSrc);\n\n // if the `sourceset` `src` was an empty string\n // wait for a `loadstart` to update the cache to `currentSrc`.\n // If a sourceset happens before a `loadstart`, we reset the state\n if (!event.src) {\n this.tech_.any(['sourceset', 'loadstart'], e => {\n // if a sourceset happens before a `loadstart` there\n // is nothing to do as this `handleTechSourceset_`\n // will be called again and this will be handled there.\n if (e.type === 'sourceset') {\n return;\n }\n const techSrc = this.techGet_('currentSrc');\n this.lastSource_.tech = techSrc;\n this.updateSourceCaches_(techSrc);\n });\n }\n }\n this.lastSource_ = {\n player: this.currentSource().src,\n tech: event.src\n };\n this.trigger({\n src: event.src,\n type: 'sourceset'\n });\n }\n\n /**\n * Add/remove the vjs-has-started class\n *\n *\n * @param {boolean} request\n * - true: adds the class\n * - false: remove the class\n *\n * @return {boolean}\n * the boolean value of hasStarted_\n */\n hasStarted(request) {\n if (request === undefined) {\n // act as getter, if we have no request to change\n return this.hasStarted_;\n }\n if (request === this.hasStarted_) {\n return;\n }\n this.hasStarted_ = request;\n if (this.hasStarted_) {\n this.addClass('vjs-has-started');\n } else {\n this.removeClass('vjs-has-started');\n }\n }\n\n /**\n * Fired whenever the media begins or resumes playback\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-play}\n * @fires Player#play\n * @listens Tech#play\n * 
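Consumers would typically listen for the retriggered event on the player\n * itself, for example (an illustrative sketch only; the element id is a placeholder):\n *\n * ```js\n * var myPlayer = videojs('some-player-id');\n *\n * myPlayer.on('play', function() {\n *   // Playback has started or resumed.\n *   console.log('playing at', myPlayer.currentTime());\n * });\n * ```\n *\n * 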
@private\n */\n handleTechPlay_() {\n this.removeClass('vjs-ended', 'vjs-paused');\n this.addClass('vjs-playing');\n\n // hide the poster when the user hits play\n this.hasStarted(true);\n /**\n * Triggered whenever an {@link Tech#play} event happens. Indicates that\n * playback has started or resumed.\n *\n * @event Player#play\n * @type {Event}\n */\n this.trigger('play');\n }\n\n /**\n * Retrigger the `ratechange` event that was triggered by the {@link Tech}.\n *\n * If there were any events queued while the playback rate was zero, fire\n * those events now.\n *\n * @private\n * @method Player#handleTechRateChange_\n * @fires Player#ratechange\n * @listens Tech#ratechange\n */\n handleTechRateChange_() {\n if (this.tech_.playbackRate() > 0 && this.cache_.lastPlaybackRate === 0) {\n this.queuedCallbacks_.forEach(queued => queued.callback(queued.event));\n this.queuedCallbacks_ = [];\n }\n this.cache_.lastPlaybackRate = this.tech_.playbackRate();\n /**\n * Fires when the playing speed of the audio/video is changed\n *\n * @event Player#ratechange\n * @type {event}\n */\n this.trigger('ratechange');\n }\n\n /**\n * Retrigger the `waiting` event that was triggered by the {@link Tech}.\n *\n * @fires Player#waiting\n * @listens Tech#waiting\n * @private\n */\n handleTechWaiting_() {\n this.addClass('vjs-waiting');\n /**\n * A readyState change on the DOM element has caused playback to stop.\n *\n * @event Player#waiting\n * @type {Event}\n */\n this.trigger('waiting');\n\n // Browsers may emit a timeupdate event after a waiting event. In order to prevent\n // premature removal of the waiting class, wait for the time to change.\n const timeWhenWaiting = this.currentTime();\n const timeUpdateListener = () => {\n if (timeWhenWaiting !== this.currentTime()) {\n this.removeClass('vjs-waiting');\n this.off('timeupdate', timeUpdateListener);\n }\n };\n this.on('timeupdate', timeUpdateListener);\n }\n\n /**\n * Retrigger the `canplay` event that was triggered by the {@link Tech}.\n * > Note: This is not consistent between browsers. See #1351\n *\n * @fires Player#canplay\n * @listens Tech#canplay\n * @private\n */\n handleTechCanPlay_() {\n this.removeClass('vjs-waiting');\n /**\n * The media has a readyState of HAVE_FUTURE_DATA or greater.\n *\n * @event Player#canplay\n * @type {Event}\n */\n this.trigger('canplay');\n }\n\n /**\n * Retrigger the `canplaythrough` event that was triggered by the {@link Tech}.\n *\n * @fires Player#canplaythrough\n * @listens Tech#canplaythrough\n * @private\n */\n handleTechCanPlayThrough_() {\n this.removeClass('vjs-waiting');\n /**\n * The media has a readyState of HAVE_ENOUGH_DATA or greater. 
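For example, a consumer might wait for this state before acting on the media\n * (an illustrative sketch only; the element id is a placeholder):\n *\n * ```js\n * var myPlayer = videojs('some-player-id');\n *\n * myPlayer.on('canplaythrough', function() {\n *   // Enough data is buffered that playback should not need to stall.\n * });\n * ```\n *\n * 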
This means that the\n * entire media file can be played without buffering.\n *\n * @event Player#canplaythrough\n * @type {Event}\n */\n this.trigger('canplaythrough');\n }\n\n /**\n * Retrigger the `playing` event that was triggered by the {@link Tech}.\n *\n * @fires Player#playing\n * @listens Tech#playing\n * @private\n */\n handleTechPlaying_() {\n this.removeClass('vjs-waiting');\n /**\n * The media is no longer blocked from playback, and has started playing.\n *\n * @event Player#playing\n * @type {Event}\n */\n this.trigger('playing');\n }\n\n /**\n * Retrigger the `seeking` event that was triggered by the {@link Tech}.\n *\n * @fires Player#seeking\n * @listens Tech#seeking\n * @private\n */\n handleTechSeeking_() {\n this.addClass('vjs-seeking');\n /**\n * Fired whenever the player is jumping to a new time\n *\n * @event Player#seeking\n * @type {Event}\n */\n this.trigger('seeking');\n }\n\n /**\n * Retrigger the `seeked` event that was triggered by the {@link Tech}.\n *\n * @fires Player#seeked\n * @listens Tech#seeked\n * @private\n */\n handleTechSeeked_() {\n this.removeClass('vjs-seeking', 'vjs-ended');\n /**\n * Fired when the player has finished jumping to a new time\n *\n * @event Player#seeked\n * @type {Event}\n */\n this.trigger('seeked');\n }\n\n /**\n * Retrigger the `pause` event that was triggered by the {@link Tech}.\n *\n * @fires Player#pause\n * @listens Tech#pause\n * @private\n */\n handleTechPause_() {\n this.removeClass('vjs-playing');\n this.addClass('vjs-paused');\n /**\n * Fired whenever the media has been paused\n *\n * @event Player#pause\n * @type {Event}\n */\n this.trigger('pause');\n }\n\n /**\n * Retrigger the `ended` event that was triggered by the {@link Tech}.\n *\n * @fires Player#ended\n * @listens Tech#ended\n * @private\n */\n handleTechEnded_() {\n this.addClass('vjs-ended');\n this.removeClass('vjs-waiting');\n if (this.options_.loop) {\n this.currentTime(0);\n this.play();\n } else if (!this.paused()) {\n this.pause();\n }\n\n /**\n * Fired when the end of the media resource is reached (currentTime == duration)\n *\n * @event Player#ended\n * @type {Event}\n */\n this.trigger('ended');\n }\n\n /**\n * Fired when the duration of the media resource is first known or changed\n *\n * @listens Tech#durationchange\n * @private\n */\n handleTechDurationChange_() {\n this.duration(this.techGet_('duration'));\n }\n\n /**\n * Handle a click on the media element to play/pause\n *\n * @param {Event} event\n * the event that caused this function to trigger\n *\n * @listens Tech#click\n * @private\n */\n handleTechClick_(event) {\n // When controls are disabled a click should not toggle playback because\n // the click is considered a control\n if (!this.controls_) {\n return;\n }\n if (this.options_ === undefined || this.options_.userActions === undefined || this.options_.userActions.click === undefined || this.options_.userActions.click !== false) {\n if (this.options_ !== undefined && this.options_.userActions !== undefined && typeof this.options_.userActions.click === 'function') {\n this.options_.userActions.click.call(this, event);\n } else if (this.paused()) {\n silencePromise(this.play());\n } else {\n this.pause();\n }\n }\n }\n\n /**\n * Handle a double-click on the media element to enter/exit fullscreen\n *\n * @param {Event} event\n * the event that caused this function to trigger\n *\n * @listens Tech#dblclick\n * @private\n */\n handleTechDoubleClick_(event) {\n if (!this.controls_) {\n return;\n }\n\n // we do not want to toggle 
fullscreen state\n // when double-clicking inside a control bar or a modal\n const inAllowedEls = Array.prototype.some.call(this.$$('.vjs-control-bar, .vjs-modal-dialog'), el => el.contains(event.target));\n if (!inAllowedEls) {\n /*\n * options.userActions.doubleClick\n *\n * If `undefined` or `true`, double-click toggles fullscreen if controls are present\n * Set to `false` to disable double-click handling\n * Set to a function to substitute an external double-click handler\n */\n if (this.options_ === undefined || this.options_.userActions === undefined || this.options_.userActions.doubleClick === undefined || this.options_.userActions.doubleClick !== false) {\n if (this.options_ !== undefined && this.options_.userActions !== undefined && typeof this.options_.userActions.doubleClick === 'function') {\n this.options_.userActions.doubleClick.call(this, event);\n } else if (this.isFullscreen()) {\n this.exitFullscreen();\n } else {\n this.requestFullscreen();\n }\n }\n }\n }\n\n /**\n * Handle a tap on the media element. It will toggle the user\n * activity state, which hides and shows the controls.\n *\n * @listens Tech#tap\n * @private\n */\n handleTechTap_() {\n this.userActive(!this.userActive());\n }\n\n /**\n * Handle touch to start\n *\n * @listens Tech#touchstart\n * @private\n */\n handleTechTouchStart_() {\n this.userWasActive = this.userActive();\n }\n\n /**\n * Handle touch to move\n *\n * @listens Tech#touchmove\n * @private\n */\n handleTechTouchMove_() {\n if (this.userWasActive) {\n this.reportUserActivity();\n }\n }\n\n /**\n * Handle touch to end\n *\n * @param {Event} event\n * the touchend event that triggered\n * this function\n *\n * @listens Tech#touchend\n * @private\n */\n handleTechTouchEnd_(event) {\n // Stop the mouse events from also happening\n if (event.cancelable) {\n event.preventDefault();\n }\n }\n\n /**\n * @private\n */\n toggleFullscreenClass_() {\n if (this.isFullscreen()) {\n this.addClass('vjs-fullscreen');\n } else {\n this.removeClass('vjs-fullscreen');\n }\n }\n\n /**\n * when the document fschange event triggers it calls this\n */\n documentFullscreenChange_(e) {\n const targetPlayer = e.target.player;\n\n // if another player was fullscreen\n // do a null check for targetPlayer because older firefox's would put document as e.target\n if (targetPlayer && targetPlayer !== this) {\n return;\n }\n const el = this.el();\n let isFs = document[this.fsApi_.fullscreenElement] === el;\n if (!isFs && el.matches) {\n isFs = el.matches(':' + this.fsApi_.fullscreen);\n }\n this.isFullscreen(isFs);\n }\n\n /**\n * Handle Tech Fullscreen Change\n *\n * @param {Event} event\n * the fullscreenchange event that triggered this function\n *\n * @param {Object} data\n * the data that was sent with the event\n *\n * @private\n * @listens Tech#fullscreenchange\n * @fires Player#fullscreenchange\n */\n handleTechFullscreenChange_(event, data) {\n if (data) {\n if (data.nativeIOSFullscreen) {\n this.addClass('vjs-ios-native-fs');\n this.tech_.one('webkitendfullscreen', () => {\n this.removeClass('vjs-ios-native-fs');\n });\n }\n this.isFullscreen(data.isFullscreen);\n }\n }\n handleTechFullscreenError_(event, err) {\n this.trigger('fullscreenerror', err);\n }\n\n /**\n * @private\n */\n togglePictureInPictureClass_() {\n if (this.isInPictureInPicture()) {\n this.addClass('vjs-picture-in-picture');\n } else {\n this.removeClass('vjs-picture-in-picture');\n }\n }\n\n /**\n * Handle Tech Enter Picture-in-Picture.\n *\n * @param {Event} event\n * the enterpictureinpicture 
event that triggered this function\n *\n * @private\n * @listens Tech#enterpictureinpicture\n */\n handleTechEnterPictureInPicture_(event) {\n this.isInPictureInPicture(true);\n }\n\n /**\n * Handle Tech Leave Picture-in-Picture.\n *\n * @param {Event} event\n * the leavepictureinpicture event that triggered this function\n *\n * @private\n * @listens Tech#leavepictureinpicture\n */\n handleTechLeavePictureInPicture_(event) {\n this.isInPictureInPicture(false);\n }\n\n /**\n * Fires when an error occurred during the loading of an audio/video.\n *\n * @private\n * @listens Tech#error\n */\n handleTechError_() {\n const error = this.tech_.error();\n if (error) {\n this.error(error);\n }\n }\n\n /**\n * Retrigger the `textdata` event that was triggered by the {@link Tech}.\n *\n * @fires Player#textdata\n * @listens Tech#textdata\n * @private\n */\n handleTechTextData_() {\n let data = null;\n if (arguments.length > 1) {\n data = arguments[1];\n }\n\n /**\n * Fires when we get a textdata event from tech\n *\n * @event Player#textdata\n * @type {Event}\n */\n this.trigger('textdata', data);\n }\n\n /**\n * Get object for cached values.\n *\n * @return {Object}\n * get the current object cache\n */\n getCache() {\n return this.cache_;\n }\n\n /**\n * Resets the internal cache object.\n *\n * Using this function outside the player constructor or reset method may\n * have unintended side-effects.\n *\n * @private\n */\n resetCache_() {\n this.cache_ = {\n // Right now, the currentTime is not _really_ cached because it is always\n // retrieved from the tech (see: currentTime). However, for completeness,\n // we set it to zero here to ensure that if we do start actually caching\n // it, we reset it along with everything else.\n currentTime: 0,\n initTime: 0,\n inactivityTimeout: this.options_.inactivityTimeout,\n duration: NaN,\n lastVolume: 1,\n lastPlaybackRate: this.defaultPlaybackRate(),\n media: null,\n src: '',\n source: {},\n sources: [],\n playbackRates: [],\n volume: 1\n };\n }\n\n /**\n * Pass values to the playback tech\n *\n * @param {string} [method]\n * the method to call\n *\n * @param {Object} [arg]\n * the argument to pass\n *\n * @private\n */\n techCall_(method, arg) {\n // If it's not ready yet, call method when it is\n\n this.ready(function () {\n if (method in allowedSetters) {\n return set(this.middleware_, this.tech_, method, arg);\n } else if (method in allowedMediators) {\n return mediate(this.middleware_, this.tech_, method, arg);\n }\n try {\n if (this.tech_) {\n this.tech_[method](arg);\n }\n } catch (e) {\n log$1(e);\n throw e;\n }\n }, true);\n }\n\n /**\n * Mediate attempt to call playback tech method\n * and return the value of the method called.\n *\n * @param {string} method\n * Tech method\n *\n * @return {*}\n * Value returned by the tech method called, undefined if tech\n * is not ready or tech method is not present\n *\n * @private\n */\n techGet_(method) {\n if (!this.tech_ || !this.tech_.isReady_) {\n return;\n }\n if (method in allowedGetters) {\n return get(this.middleware_, this.tech_, method);\n } else if (method in allowedMediators) {\n return mediate(this.middleware_, this.tech_, method);\n }\n\n // Log error when playback tech object is present but method\n // is undefined or unavailable\n try {\n return this.tech_[method]();\n } catch (e) {\n // When building additional tech libs, an expected method may not be defined yet\n if (this.tech_[method] === undefined) {\n log$1(`Video.js: ${method} method not defined for ${this.techName_} playback 
technology.`, e);\n throw e;\n }\n\n // When a method isn't available on the object it throws a TypeError\n if (e.name === 'TypeError') {\n log$1(`Video.js: ${method} unavailable on ${this.techName_} playback technology element.`, e);\n this.tech_.isReady_ = false;\n throw e;\n }\n\n // If error unknown, just log and throw\n log$1(e);\n throw e;\n }\n }\n\n /**\n * Attempt to begin playback at the first opportunity.\n *\n * @return {Promise|undefined}\n * Returns a promise if the browser supports Promises (or one\n * was passed in as an option). This promise will be resolved on\n * the return value of play. If this is undefined it will fulfill the\n * promise chain otherwise the promise chain will be fulfilled when\n * the promise from play is fulfilled.\n */\n play() {\n return new Promise(resolve => {\n this.play_(resolve);\n });\n }\n\n /**\n * The actual logic for play, takes a callback that will be resolved on the\n * return value of play. This allows us to resolve to the play promise if there\n * is one on modern browsers.\n *\n * @private\n * @param {Function} [callback]\n * The callback that should be called when the techs play is actually called\n */\n play_(callback = silencePromise) {\n this.playCallbacks_.push(callback);\n const isSrcReady = Boolean(!this.changingSrc_ && (this.src() || this.currentSrc()));\n const isSafariOrIOS = Boolean(IS_ANY_SAFARI || IS_IOS);\n\n // treat calls to play_ somewhat like the `one` event function\n if (this.waitToPlay_) {\n this.off(['ready', 'loadstart'], this.waitToPlay_);\n this.waitToPlay_ = null;\n }\n\n // if the player/tech is not ready or the src itself is not ready\n // queue up a call to play on `ready` or `loadstart`\n if (!this.isReady_ || !isSrcReady) {\n this.waitToPlay_ = e => {\n this.play_();\n };\n this.one(['ready', 'loadstart'], this.waitToPlay_);\n\n // if we are in Safari, there is a high chance that loadstart will trigger after the gesture timeperiod\n // in that case, we need to prime the video element by calling load so it'll be ready in time\n if (!isSrcReady && isSafariOrIOS) {\n this.load();\n }\n return;\n }\n\n // If the player/tech is ready and we have a source, we can attempt playback.\n const val = this.techGet_('play');\n\n // For native playback, reset the progress bar if we get a play call from a replay.\n const isNativeReplay = isSafariOrIOS && this.hasClass('vjs-ended');\n if (isNativeReplay) {\n this.resetProgressBar_();\n }\n // play was terminated if the returned value is null\n if (val === null) {\n this.runPlayTerminatedQueue_();\n } else {\n this.runPlayCallbacks_(val);\n }\n }\n\n /**\n * These functions will be run when if play is terminated. If play\n * runPlayCallbacks_ is run these function will not be run. This allows us\n * to differentiate between a terminated play and an actual call to play.\n */\n runPlayTerminatedQueue_() {\n const queue = this.playTerminatedQueue_.slice(0);\n this.playTerminatedQueue_ = [];\n queue.forEach(function (q) {\n q();\n });\n }\n\n /**\n * When a callback to play is delayed we have to run these\n * callbacks when play is actually called on the tech. 
This function\n * runs the callbacks that were delayed and accepts the return value\n * from the tech.\n *\n * @param {undefined|Promise} val\n * The return value from the tech.\n */\n runPlayCallbacks_(val) {\n const callbacks = this.playCallbacks_.slice(0);\n this.playCallbacks_ = [];\n // clear play terminatedQueue since we finished a real play\n this.playTerminatedQueue_ = [];\n callbacks.forEach(function (cb) {\n cb(val);\n });\n }\n\n /**\n * Pause the video playback\n */\n pause() {\n this.techCall_('pause');\n }\n\n /**\n * Check if the player is paused or has yet to play\n *\n * @return {boolean}\n * - false: if the media is currently playing\n * - true: if media is not currently playing\n */\n paused() {\n // The initial state of paused should be true (in Safari it's actually false)\n return this.techGet_('paused') === false ? false : true;\n }\n\n /**\n * Get a TimeRange object representing the current ranges of time that the user\n * has played.\n *\n * @return { import('./utils/time').TimeRange }\n * A time range object that represents all the increments of time that have\n * been played.\n */\n played() {\n return this.techGet_('played') || createTimeRanges$1(0, 0);\n }\n\n /**\n * Sets or returns whether or not the user is \"scrubbing\". Scrubbing is\n * when the user has clicked the progress bar handle and is\n * dragging it along the progress bar.\n *\n * @param {boolean} [isScrubbing]\n * whether the user is or is not scrubbing\n *\n * @return {boolean|undefined}\n * - The value of scrubbing when getting\n * - Nothing when setting\n */\n scrubbing(isScrubbing) {\n if (typeof isScrubbing === 'undefined') {\n return this.scrubbing_;\n }\n this.scrubbing_ = !!isScrubbing;\n this.techCall_('setScrubbing', this.scrubbing_);\n if (isScrubbing) {\n this.addClass('vjs-scrubbing');\n } else {\n this.removeClass('vjs-scrubbing');\n }\n }\n\n /**\n * Get or set the current time (in seconds)\n *\n * @param {number|string} [seconds]\n * The time to seek to in seconds\n *\n * @return {number|undefined}\n * - the current time in seconds when getting\n * - Nothing when setting\n */\n currentTime(seconds) {\n if (seconds === undefined) {\n // cache last currentTime and return. default to 0 seconds\n //\n // Caching the currentTime is meant to prevent a massive amount of reads on the tech's\n // currentTime when scrubbing, but may not provide much performance benefit after all.\n // Should be tested. 
Also something has to read the actual current time or the cache will\n // never get updated.\n this.cache_.currentTime = this.techGet_('currentTime') || 0;\n return this.cache_.currentTime;\n }\n if (seconds < 0) {\n seconds = 0;\n }\n if (!this.isReady_ || this.changingSrc_ || !this.tech_ || !this.tech_.isReady_) {\n this.cache_.initTime = seconds;\n this.off('canplay', this.boundApplyInitTime_);\n this.one('canplay', this.boundApplyInitTime_);\n return;\n }\n this.techCall_('setCurrentTime', seconds);\n this.cache_.initTime = 0;\n if (isFinite(seconds)) {\n this.cache_.currentTime = Number(seconds);\n }\n }\n\n /**\n * Apply the value of initTime stored in cache as currentTime.\n *\n * @private\n */\n applyInitTime_() {\n this.currentTime(this.cache_.initTime);\n }\n\n /**\n * Normally gets the length in time of the video in seconds;\n * in all but the rarest use cases an argument will NOT be passed to the method\n *\n * > **NOTE**: The video must have started loading before the duration can be\n * known, and depending on preload behaviour may not be known until the video starts\n * playing.\n *\n * @fires Player#durationchange\n *\n * @param {number} [seconds]\n * The duration of the video to set in seconds\n *\n * @return {number|undefined}\n * - The duration of the video in seconds when getting\n * - Nothing when setting\n */\n duration(seconds) {\n if (seconds === undefined) {\n // return NaN if the duration is not known\n return this.cache_.duration !== undefined ? this.cache_.duration : NaN;\n }\n seconds = parseFloat(seconds);\n\n // Standardize on Infinity for signaling video is live\n if (seconds < 0) {\n seconds = Infinity;\n }\n if (seconds !== this.cache_.duration) {\n // Cache the last set value for optimized scrubbing\n this.cache_.duration = seconds;\n if (seconds === Infinity) {\n this.addClass('vjs-live');\n } else {\n this.removeClass('vjs-live');\n }\n if (!isNaN(seconds)) {\n // Do not fire durationchange unless the duration value is known.\n // @see [Spec]{@link https://www.w3.org/TR/2011/WD-html5-20110113/video.html#media-element-load-algorithm}\n\n /**\n * @event Player#durationchange\n * @type {Event}\n */\n this.trigger('durationchange');\n }\n }\n }\n\n /**\n * Calculates how much time is left in the video. Not part\n * of the native video API.\n *\n * @return {number}\n * The time remaining in seconds\n */\n remainingTime() {\n return this.duration() - this.currentTime();\n }\n\n /**\n * A remaining time function that is intended to be used when\n * the time is to be displayed directly to the user.\n *\n * @return {number}\n * The rounded time remaining in seconds\n */\n remainingTimeDisplay() {\n return Math.floor(this.duration()) - Math.floor(this.currentTime());\n }\n\n //\n // Kind of like an array of portions of the video that have been downloaded.\n\n /**\n * Get a TimeRange object with an array of the times of the video\n * that have been downloaded. 
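For example (an illustrative sketch only; the element id is a placeholder):\n *\n * ```js\n * var myPlayer = videojs('some-player-id');\n *\n * var ranges = myPlayer.buffered();\n *\n * // Log each buffered range in seconds.\n * for (var i = 0; i < ranges.length; i++) {\n *   console.log(ranges.start(i), ranges.end(i));\n * }\n * ```\n *\n * 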
If you just want the percent of the\n * video that's been downloaded, use bufferedPercent.\n *\n * @see [Buffered Spec]{@link http://dev.w3.org/html5/spec/video.html#dom-media-buffered}\n *\n * @return { import('./utils/time').TimeRange }\n * A mock {@link TimeRanges} object (following HTML spec)\n */\n buffered() {\n let buffered = this.techGet_('buffered');\n if (!buffered || !buffered.length) {\n buffered = createTimeRanges$1(0, 0);\n }\n return buffered;\n }\n\n /**\n * Get the TimeRanges of the media that are currently available\n * for seeking to.\n *\n * @see [Seekable Spec]{@link https://html.spec.whatwg.org/multipage/media.html#dom-media-seekable}\n *\n * @return { import('./utils/time').TimeRange }\n * A mock {@link TimeRanges} object (following HTML spec)\n */\n seekable() {\n let seekable = this.techGet_('seekable');\n if (!seekable || !seekable.length) {\n seekable = createTimeRanges$1(0, 0);\n }\n return seekable;\n }\n\n /**\n * Returns whether the player is in the \"seeking\" state.\n *\n * @return {boolean} True if the player is in the seeking state, false if not.\n */\n seeking() {\n return this.techGet_('seeking');\n }\n\n /**\n * Returns whether the player is in the \"ended\" state.\n *\n * @return {boolean} True if the player is in the ended state, false if not.\n */\n ended() {\n return this.techGet_('ended');\n }\n\n /**\n * Returns the current state of network activity for the element, from\n * the codes in the list below.\n * - NETWORK_EMPTY (numeric value 0)\n * The element has not yet been initialised. All attributes are in\n * their initial states.\n * - NETWORK_IDLE (numeric value 1)\n * The element's resource selection algorithm is active and has\n * selected a resource, but it is not actually using the network at\n * this time.\n * - NETWORK_LOADING (numeric value 2)\n * The user agent is actively trying to download data.\n * - NETWORK_NO_SOURCE (numeric value 3)\n * The element's resource selection algorithm is active, but it has\n * not yet found a resource to use.\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#network-states\n * @return {number} the current network activity state\n */\n networkState() {\n return this.techGet_('networkState');\n }\n\n /**\n * Returns a value that expresses the current state of the element\n * with respect to rendering the current playback position, from the\n * codes in the list below.\n * - HAVE_NOTHING (numeric value 0)\n * No information regarding the media resource is available.\n * - HAVE_METADATA (numeric value 1)\n * Enough of the resource has been obtained that the duration of the\n * resource is available.\n * - HAVE_CURRENT_DATA (numeric value 2)\n * Data for the immediate current playback position is available.\n * - HAVE_FUTURE_DATA (numeric value 3)\n * Data for the immediate current playback position is available, as\n * well as enough data for the user agent to advance the current\n * playback position in the direction of playback.\n * - HAVE_ENOUGH_DATA (numeric value 4)\n * The user agent estimates that enough data is available for\n * playback to proceed uninterrupted.\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-readystate\n * @return {number} the current playback rendering state\n */\n readyState() {\n return this.techGet_('readyState');\n }\n\n /**\n * Get the percent (as a decimal) of the video that's been downloaded.\n * This method is not a part of the native HTML video API.\n *\n * @return {number}\n * A decimal between 0 and 1 
representing the percent\n * that is buffered 0 being 0% and 1 being 100%\n */\n bufferedPercent() {\n return bufferedPercent(this.buffered(), this.duration());\n }\n\n /**\n * Get the ending time of the last buffered time range\n * This is used in the progress bar to encapsulate all time ranges.\n *\n * @return {number}\n * The end of the last buffered time range\n */\n bufferedEnd() {\n const buffered = this.buffered();\n const duration = this.duration();\n let end = buffered.end(buffered.length - 1);\n if (end > duration) {\n end = duration;\n }\n return end;\n }\n\n /**\n * Get or set the current volume of the media\n *\n * @param {number} [percentAsDecimal]\n * The new volume as a decimal percent:\n * - 0 is muted/0%/off\n * - 1.0 is 100%/full\n * - 0.5 is half volume or 50%\n *\n * @return {number|undefined}\n * The current volume as a percent when getting\n */\n volume(percentAsDecimal) {\n let vol;\n if (percentAsDecimal !== undefined) {\n // Force value to between 0 and 1\n vol = Math.max(0, Math.min(1, percentAsDecimal));\n this.cache_.volume = vol;\n this.techCall_('setVolume', vol);\n if (vol > 0) {\n this.lastVolume_(vol);\n }\n return;\n }\n\n // Default to 1 when returning current volume.\n vol = parseFloat(this.techGet_('volume'));\n return isNaN(vol) ? 1 : vol;\n }\n\n /**\n * Get the current muted state, or turn mute on or off\n *\n * @param {boolean} [muted]\n * - true to mute\n * - false to unmute\n *\n * @return {boolean|undefined}\n * - true if mute is on and getting\n * - false if mute is off and getting\n * - nothing if setting\n */\n muted(muted) {\n if (muted !== undefined) {\n this.techCall_('setMuted', muted);\n return;\n }\n return this.techGet_('muted') || false;\n }\n\n /**\n * Get the current defaultMuted state, or turn defaultMuted on or off. defaultMuted\n * indicates the state of muted on initial playback.\n *\n * ```js\n * var myPlayer = videojs('some-player-id');\n *\n * myPlayer.src(\"http://www.example.com/path/to/video.mp4\");\n *\n * // get, should be false\n * console.log(myPlayer.defaultMuted());\n * // set to true\n * myPlayer.defaultMuted(true);\n * // get should be true\n * console.log(myPlayer.defaultMuted());\n * ```\n *\n * @param {boolean} [defaultMuted]\n * - true to mute\n * - false to unmute\n *\n * @return {boolean|undefined}\n * - true if defaultMuted is on and getting\n * - false if defaultMuted is off and getting\n * - Nothing when setting\n */\n defaultMuted(defaultMuted) {\n if (defaultMuted !== undefined) {\n this.techCall_('setDefaultMuted', defaultMuted);\n }\n return this.techGet_('defaultMuted') || false;\n }\n\n /**\n * Get the last volume, or set it\n *\n * @param {number} [percentAsDecimal]\n * The new last volume as a decimal percent:\n * - 0 is muted/0%/off\n * - 1.0 is 100%/full\n * - 0.5 is half volume or 50%\n *\n * @return {number|undefined}\n * - The current value of lastVolume as a percent when getting\n * - Nothing when setting\n *\n * @private\n */\n lastVolume_(percentAsDecimal) {\n if (percentAsDecimal !== undefined && percentAsDecimal !== 0) {\n this.cache_.lastVolume = percentAsDecimal;\n return;\n }\n return this.cache_.lastVolume;\n }\n\n /**\n * Check if current tech can support native fullscreen\n * (e.g. 
with built in controls like iOS)\n *\n * @return {boolean}\n * if native fullscreen is supported\n */\n supportsFullScreen() {\n return this.techGet_('supportsFullScreen') || false;\n }\n\n /**\n * Check if the player is in fullscreen mode or tell the player that it\n * is or is not in fullscreen mode.\n *\n * > NOTE: As of the latest HTML5 spec, isFullscreen is no longer an official\n * property and instead document.fullscreenElement is used. But isFullscreen is\n * still a valuable property for internal player workings.\n *\n * @param {boolean} [isFS]\n * Set the players current fullscreen state\n *\n * @return {boolean|undefined}\n * - true if fullscreen is on and getting\n * - false if fullscreen is off and getting\n * - Nothing when setting\n */\n isFullscreen(isFS) {\n if (isFS !== undefined) {\n const oldValue = this.isFullscreen_;\n this.isFullscreen_ = Boolean(isFS);\n\n // if we changed fullscreen state and we're in prefixed mode, trigger fullscreenchange\n // this is the only place where we trigger fullscreenchange events for older browsers\n // fullWindow mode is treated as a prefixed event and will get a fullscreenchange event as well\n if (this.isFullscreen_ !== oldValue && this.fsApi_.prefixed) {\n /**\n * @event Player#fullscreenchange\n * @type {Event}\n */\n this.trigger('fullscreenchange');\n }\n this.toggleFullscreenClass_();\n return;\n }\n return this.isFullscreen_;\n }\n\n /**\n * Increase the size of the video to full screen\n * In some browsers, full screen is not supported natively, so it enters\n * \"full window mode\", where the video fills the browser window.\n * In browsers and devices that support native full screen, sometimes the\n * browser's default controls will be shown, and not the Video.js custom skin.\n * This includes most mobile devices (iOS, Android) and older versions of\n * Safari.\n *\n * @param {Object} [fullscreenOptions]\n * Override the player fullscreen options\n *\n * @fires Player#fullscreenchange\n */\n requestFullscreen(fullscreenOptions) {\n if (this.isInPictureInPicture()) {\n this.exitPictureInPicture();\n }\n const self = this;\n return new Promise((resolve, reject) => {\n function offHandler() {\n self.off('fullscreenerror', errorHandler);\n self.off('fullscreenchange', changeHandler);\n }\n function changeHandler() {\n offHandler();\n resolve();\n }\n function errorHandler(e, err) {\n offHandler();\n reject(err);\n }\n self.one('fullscreenchange', changeHandler);\n self.one('fullscreenerror', errorHandler);\n const promise = self.requestFullscreenHelper_(fullscreenOptions);\n if (promise) {\n promise.then(offHandler, offHandler);\n promise.then(resolve, reject);\n }\n });\n }\n requestFullscreenHelper_(fullscreenOptions) {\n let fsOptions;\n\n // Only pass fullscreen options to requestFullscreen in spec-compliant browsers.\n // Use defaults or player configured option unless passed directly to this method.\n if (!this.fsApi_.prefixed) {\n fsOptions = this.options_.fullscreen && this.options_.fullscreen.options || {};\n if (fullscreenOptions !== undefined) {\n fsOptions = fullscreenOptions;\n }\n }\n\n // This method works as follows:\n // 1. if a fullscreen api is available, use it\n // 1. call requestFullscreen with potential options\n // 2. if we got a promise from above, use it to update isFullscreen()\n // 2. otherwise, if the tech supports fullscreen, call `enterFullScreen` on it.\n // This is particularly used for iPhone, older iPads, and non-safari browser on iOS.\n // 3. 
otherwise, use \"fullWindow\" mode\n if (this.fsApi_.requestFullscreen) {\n const promise = this.el_[this.fsApi_.requestFullscreen](fsOptions);\n\n // Even on browsers with promise support this may not return a promise\n if (promise) {\n promise.then(() => this.isFullscreen(true), () => this.isFullscreen(false));\n }\n return promise;\n } else if (this.tech_.supportsFullScreen() && !this.options_.preferFullWindow === true) {\n // we can't take the video.js controls fullscreen but we can go fullscreen\n // with native controls\n this.techCall_('enterFullScreen');\n } else {\n // fullscreen isn't supported so we'll just stretch the video element to\n // fill the viewport\n this.enterFullWindow();\n }\n }\n\n /**\n * Return the video to its normal size after having been in full screen mode\n *\n * @fires Player#fullscreenchange\n */\n exitFullscreen() {\n const self = this;\n return new Promise((resolve, reject) => {\n function offHandler() {\n self.off('fullscreenerror', errorHandler);\n self.off('fullscreenchange', changeHandler);\n }\n function changeHandler() {\n offHandler();\n resolve();\n }\n function errorHandler(e, err) {\n offHandler();\n reject(err);\n }\n self.one('fullscreenchange', changeHandler);\n self.one('fullscreenerror', errorHandler);\n const promise = self.exitFullscreenHelper_();\n if (promise) {\n promise.then(offHandler, offHandler);\n // map the promise to our resolve/reject methods\n promise.then(resolve, reject);\n }\n });\n }\n exitFullscreenHelper_() {\n if (this.fsApi_.requestFullscreen) {\n const promise = document[this.fsApi_.exitFullscreen]();\n\n // Even on browsers with promise support this may not return a promise\n if (promise) {\n // we're splitting the promise here, so, we want to catch the\n // potential error so that this chain doesn't have unhandled errors\n silencePromise(promise.then(() => this.isFullscreen(false)));\n }\n return promise;\n } else if (this.tech_.supportsFullScreen() && !this.options_.preferFullWindow === true) {\n this.techCall_('exitFullScreen');\n } else {\n this.exitFullWindow();\n }\n }\n\n /**\n * When fullscreen isn't supported we can stretch the\n * video container to as wide as the browser will let us.\n *\n * @fires Player#enterFullWindow\n */\n enterFullWindow() {\n this.isFullscreen(true);\n this.isFullWindow = true;\n\n // Storing original doc overflow value to return to when fullscreen is off\n this.docOrigOverflow = document.documentElement.style.overflow;\n\n // Add listener for esc key to exit fullscreen\n on(document, 'keydown', this.boundFullWindowOnEscKey_);\n\n // Hide any scroll bars\n document.documentElement.style.overflow = 'hidden';\n\n // Apply fullscreen styles\n addClass(document.body, 'vjs-full-window');\n\n /**\n * @event Player#enterFullWindow\n * @type {Event}\n */\n this.trigger('enterFullWindow');\n }\n\n /**\n * Check for call to either exit full window or\n * full screen on ESC key\n *\n * @param {string} event\n * Event to check for key press\n */\n fullWindowOnEscKey(event) {\n if (keycode.isEventKey(event, 'Esc')) {\n if (this.isFullscreen() === true) {\n if (!this.isFullWindow) {\n this.exitFullscreen();\n } else {\n this.exitFullWindow();\n }\n }\n }\n }\n\n /**\n * Exit full window\n *\n * @fires Player#exitFullWindow\n */\n exitFullWindow() {\n this.isFullscreen(false);\n this.isFullWindow = false;\n off(document, 'keydown', this.boundFullWindowOnEscKey_);\n\n // Unhide scroll bars.\n document.documentElement.style.overflow = this.docOrigOverflow;\n\n // Remove fullscreen styles\n 
removeClass(document.body, 'vjs-full-window');\n\n // Resize the box, controller, and poster to original sizes\n // this.positionAll();\n /**\n * @event Player#exitFullWindow\n * @type {Event}\n */\n this.trigger('exitFullWindow');\n }\n\n /**\n * Get or set disable Picture-in-Picture mode.\n *\n * @param {boolean} [value]\n * - true will disable Picture-in-Picture mode\n * - false will enable Picture-in-Picture mode\n */\n disablePictureInPicture(value) {\n if (value === undefined) {\n return this.techGet_('disablePictureInPicture');\n }\n this.techCall_('setDisablePictureInPicture', value);\n this.options_.disablePictureInPicture = value;\n this.trigger('disablepictureinpicturechanged');\n }\n\n /**\n * Check if the player is in Picture-in-Picture mode or tell the player that it\n * is or is not in Picture-in-Picture mode.\n *\n * @param {boolean} [isPiP]\n * Set the players current Picture-in-Picture state\n *\n * @return {boolean|undefined}\n * - true if Picture-in-Picture is on and getting\n * - false if Picture-in-Picture is off and getting\n * - nothing if setting\n */\n isInPictureInPicture(isPiP) {\n if (isPiP !== undefined) {\n this.isInPictureInPicture_ = !!isPiP;\n this.togglePictureInPictureClass_();\n return;\n }\n return !!this.isInPictureInPicture_;\n }\n\n /**\n * Create a floating video window always on top of other windows so that users may\n * continue consuming media while they interact with other content sites, or\n * applications on their device.\n *\n * This can use document picture-in-picture or element picture in picture\n *\n * Set `enableDocumentPictureInPicture` to `true` to use docPiP on a supported browser\n * Else set `disablePictureInPicture` to `false` to disable elPiP on a supported browser\n *\n *\n * @see [Spec]{@link https://w3c.github.io/picture-in-picture/}\n * @see [Spec]{@link https://wicg.github.io/document-picture-in-picture/}\n *\n * @fires Player#enterpictureinpicture\n *\n * @return {Promise}\n * A promise with a Picture-in-Picture window.\n */\n requestPictureInPicture() {\n if (this.options_.enableDocumentPictureInPicture && window$1.documentPictureInPicture) {\n const pipContainer = document.createElement(this.el().tagName);\n pipContainer.classList = this.el().classList;\n pipContainer.classList.add('vjs-pip-container');\n if (this.posterImage) {\n pipContainer.appendChild(this.posterImage.el().cloneNode(true));\n }\n if (this.titleBar) {\n pipContainer.appendChild(this.titleBar.el().cloneNode(true));\n }\n pipContainer.appendChild(createEl('p', {\n className: 'vjs-pip-text'\n }, {}, this.localize('Playing in picture-in-picture')));\n return window$1.documentPictureInPicture.requestWindow({\n // The aspect ratio won't be correct, Chrome bug https://crbug.com/1407629\n width: this.videoWidth(),\n height: this.videoHeight()\n }).then(pipWindow => {\n copyStyleSheetsToWindow(pipWindow);\n this.el_.parentNode.insertBefore(pipContainer, this.el_);\n pipWindow.document.body.appendChild(this.el_);\n pipWindow.document.body.classList.add('vjs-pip-window');\n this.player_.isInPictureInPicture(true);\n this.player_.trigger({\n type: 'enterpictureinpicture',\n pipWindow\n });\n\n // Listen for the PiP closing event to move the video back.\n pipWindow.addEventListener('pagehide', event => {\n const pipVideo = event.target.querySelector('.video-js');\n pipContainer.parentNode.replaceChild(pipVideo, pipContainer);\n this.player_.isInPictureInPicture(false);\n this.player_.trigger('leavepictureinpicture');\n });\n return pipWindow;\n });\n }\n if 
('pictureInPictureEnabled' in document && this.disablePictureInPicture() === false) {\n /**\n * This event fires when the player enters picture in picture mode\n *\n * @event Player#enterpictureinpicture\n * @type {Event}\n */\n return this.techGet_('requestPictureInPicture');\n }\n return Promise.reject('No PiP mode is available');\n }\n\n /**\n * Exit Picture-in-Picture mode.\n *\n * @see [Spec]{@link https://wicg.github.io/picture-in-picture}\n *\n * @fires Player#leavepictureinpicture\n *\n * @return {Promise}\n * A promise.\n */\n exitPictureInPicture() {\n if (window$1.documentPictureInPicture && window$1.documentPictureInPicture.window) {\n // With documentPictureInPicture, Player#leavepictureinpicture is fired in the pagehide handler\n window$1.documentPictureInPicture.window.close();\n return Promise.resolve();\n }\n if ('pictureInPictureEnabled' in document) {\n /**\n * This event fires when the player leaves picture in picture mode\n *\n * @event Player#leavepictureinpicture\n * @type {Event}\n */\n return document.exitPictureInPicture();\n }\n }\n\n /**\n * Called when this Player has focus and a key gets pressed down, or when\n * any Component of this player receives a key press that it doesn't handle.\n * This allows player-wide hotkeys (either as defined below, or optionally\n * by an external function).\n *\n * @param {KeyboardEvent} event\n * The `keydown` event that caused this function to be called.\n *\n * @listens keydown\n */\n handleKeyDown(event) {\n const {\n userActions\n } = this.options_;\n\n // Bail out if hotkeys are not configured.\n if (!userActions || !userActions.hotkeys) {\n return;\n }\n\n // Function that determines whether or not to exclude an element from\n // hotkeys handling.\n const excludeElement = el => {\n const tagName = el.tagName.toLowerCase();\n\n // The first and easiest test is for `contenteditable` elements.\n if (el.isContentEditable) {\n return true;\n }\n\n // Inputs matching these types will still trigger hotkey handling as\n // they are not text inputs.\n const allowedInputTypes = ['button', 'checkbox', 'hidden', 'radio', 'reset', 'submit'];\n if (tagName === 'input') {\n return allowedInputTypes.indexOf(el.type) === -1;\n }\n\n // The final test is by tag name. These tags will be excluded entirely.\n const excludedTags = ['textarea'];\n return excludedTags.indexOf(tagName) !== -1;\n };\n\n // Bail out if the user is focused on an interactive form element.\n if (excludeElement(this.el_.ownerDocument.activeElement)) {\n return;\n }\n if (typeof userActions.hotkeys === 'function') {\n userActions.hotkeys.call(this, event);\n } else {\n this.handleHotkeys(event);\n }\n }\n\n /**\n * Called when this Player receives a hotkey keydown event.\n * Supported player-wide hotkeys are:\n *\n * f - toggle fullscreen\n * m - toggle mute\n * k or Space - toggle play/pause\n *\n * @param {Event} event\n * The `keydown` event that caused this function to be called.\n */\n handleHotkeys(event) {\n const hotkeys = this.options_.userActions ? 
this.options_.userActions.hotkeys : {};\n\n // set fullscreenKey, muteKey, playPauseKey from `hotkeys`, use defaults if not set\n const {\n fullscreenKey = keydownEvent => keycode.isEventKey(keydownEvent, 'f'),\n muteKey = keydownEvent => keycode.isEventKey(keydownEvent, 'm'),\n playPauseKey = keydownEvent => keycode.isEventKey(keydownEvent, 'k') || keycode.isEventKey(keydownEvent, 'Space')\n } = hotkeys;\n if (fullscreenKey.call(this, event)) {\n event.preventDefault();\n event.stopPropagation();\n const FSToggle = Component$1.getComponent('FullscreenToggle');\n if (document[this.fsApi_.fullscreenEnabled] !== false) {\n FSToggle.prototype.handleClick.call(this, event);\n }\n } else if (muteKey.call(this, event)) {\n event.preventDefault();\n event.stopPropagation();\n const MuteToggle = Component$1.getComponent('MuteToggle');\n MuteToggle.prototype.handleClick.call(this, event);\n } else if (playPauseKey.call(this, event)) {\n event.preventDefault();\n event.stopPropagation();\n const PlayToggle = Component$1.getComponent('PlayToggle');\n PlayToggle.prototype.handleClick.call(this, event);\n }\n }\n\n /**\n * Check whether the player can play a given mimetype\n *\n * @see https://www.w3.org/TR/2011/WD-html5-20110113/video.html#dom-navigator-canplaytype\n *\n * @param {string} type\n * The mimetype to check\n *\n * @return {string}\n * 'probably', 'maybe', or '' (empty string)\n */\n canPlayType(type) {\n let can;\n\n // Loop through each playback technology in the options order\n for (let i = 0, j = this.options_.techOrder; i < j.length; i++) {\n const techName = j[i];\n let tech = Tech.getTech(techName);\n\n // Support old behavior of techs being registered as components.\n // Remove once that deprecated behavior is removed.\n if (!tech) {\n tech = Component$1.getComponent(techName);\n }\n\n // Check if the current tech is defined before continuing\n if (!tech) {\n log$1.error(`The \"${techName}\" tech is undefined. Skipped browser support check for that tech.`);\n continue;\n }\n\n // Check if the browser supports this technology\n if (tech.isSupported()) {\n can = tech.canPlayType(type);\n if (can) {\n return can;\n }\n }\n }\n return '';\n }\n\n /**\n * Select source based on tech-order or source-order\n * Uses source-order selection if `options.sourceOrder` is truthy. Otherwise,\n * defaults to tech-order selection\n *\n * @param {Array} sources\n * The sources for a media asset\n *\n * @return {Object|boolean}\n * Object of source and tech order or false\n */\n selectSource(sources) {\n // Get only the techs specified in `techOrder` that exist and are supported by the\n // current platform\n const techs = this.options_.techOrder.map(techName => {\n return [techName, Tech.getTech(techName)];\n }).filter(([techName, tech]) => {\n // Check if the current tech is defined before continuing\n if (tech) {\n // Check if the browser supports this technology\n return tech.isSupported();\n }\n log$1.error(`The \"${techName}\" tech is undefined. Skipped browser support check for that tech.`);\n return false;\n });\n\n // Iterate over each `innerArray` element once per `outerArray` element and execute\n // `tester` with both. 
If `tester` returns a non-falsy value, exit early and return\n // that value.\n const findFirstPassingTechSourcePair = function (outerArray, innerArray, tester) {\n let found;\n outerArray.some(outerChoice => {\n return innerArray.some(innerChoice => {\n found = tester(outerChoice, innerChoice);\n if (found) {\n return true;\n }\n });\n });\n return found;\n };\n let foundSourceAndTech;\n const flip = fn => (a, b) => fn(b, a);\n const finder = ([techName, tech], source) => {\n if (tech.canPlaySource(source, this.options_[techName.toLowerCase()])) {\n return {\n source,\n tech: techName\n };\n }\n };\n\n // Depending on the truthiness of `options.sourceOrder`, we swap the order of techs and sources\n // to select from them based on their priority.\n if (this.options_.sourceOrder) {\n // Source-first ordering\n foundSourceAndTech = findFirstPassingTechSourcePair(sources, techs, flip(finder));\n } else {\n // Tech-first ordering\n foundSourceAndTech = findFirstPassingTechSourcePair(techs, sources, finder);\n }\n return foundSourceAndTech || false;\n }\n\n /**\n * Executes source setting and getting logic\n *\n * @param {Tech~SourceObject|Tech~SourceObject[]|string} [source]\n * A SourceObject, an array of SourceObjects, or a string referencing\n * a URL to a media source. It is _highly recommended_ that an object\n * or array of objects is used here, so that source selection\n * algorithms can take the `type` into account.\n *\n * If not provided, this method acts as a getter.\n * @param {boolean} [isRetry]\n * Indicates whether this is being called internally as a result of a retry\n *\n * @return {string|undefined}\n * If the `source` argument is missing, returns the current source\n * URL. Otherwise, returns nothing/undefined.\n */\n handleSrc_(source, isRetry) {\n // getter usage\n if (typeof source === 'undefined') {\n return this.cache_.src || '';\n }\n\n // Reset retry behavior for new source\n if (this.resetRetryOnError_) {\n this.resetRetryOnError_();\n }\n\n // filter out invalid sources and turn our source into\n // an array of source objects\n const sources = filterSource(source);\n\n // if a source was passed in then it is invalid because\n // it was filtered to a zero length Array. 
So we have to\n // show an error\n if (!sources.length) {\n this.setTimeout(function () {\n this.error({\n code: 4,\n message: this.options_.notSupportedMessage\n });\n }, 0);\n return;\n }\n\n // initial sources\n this.changingSrc_ = true;\n\n // Only update the cached source list if we are not retrying a new source after error,\n // since in that case we want to include the failed source(s) in the cache\n if (!isRetry) {\n this.cache_.sources = sources;\n }\n this.updateSourceCaches_(sources[0]);\n\n // middlewareSource is the source after it has been changed by middleware\n setSource(this, sources[0], (middlewareSource, mws) => {\n this.middleware_ = mws;\n\n // since sourceSet is async we have to update the cache again after we select a source since\n // the source that is selected could be out of order from the cache update above this callback.\n if (!isRetry) {\n this.cache_.sources = sources;\n }\n this.updateSourceCaches_(middlewareSource);\n const err = this.src_(middlewareSource);\n if (err) {\n if (sources.length > 1) {\n return this.handleSrc_(sources.slice(1));\n }\n this.changingSrc_ = false;\n\n // We need to wrap this in a timeout to give folks a chance to add error event handlers\n this.setTimeout(function () {\n this.error({\n code: 4,\n message: this.options_.notSupportedMessage\n });\n }, 0);\n\n // we could not find an appropriate tech, but let's still notify the delegate that this is it\n // this needs a better comment about why this is needed\n this.triggerReady();\n return;\n }\n setTech(mws, this.tech_);\n });\n\n // Try another available source if this one fails before playback.\n if (sources.length > 1) {\n const retry = () => {\n // Remove the error modal\n this.error(null);\n this.handleSrc_(sources.slice(1), true);\n };\n const stopListeningForErrors = () => {\n this.off('error', retry);\n };\n this.one('error', retry);\n this.one('playing', stopListeningForErrors);\n this.resetRetryOnError_ = () => {\n this.off('error', retry);\n this.off('playing', stopListeningForErrors);\n };\n }\n }\n\n /**\n * Get or set the video source.\n *\n * @param {Tech~SourceObject|Tech~SourceObject[]|string} [source]\n * A SourceObject, an array of SourceObjects, or a string referencing\n * a URL to a media source. It is _highly recommended_ that an object\n * or array of objects is used here, so that source selection\n * algorithms can take the `type` into account.\n *\n * If not provided, this method acts as a getter.\n *\n * @return {string|undefined}\n * If the `source` argument is missing, returns the current source\n * URL. 
Otherwise, returns nothing/undefined.\n */\n src(source) {\n return this.handleSrc_(source, false);\n }\n\n /**\n * Set the source object on the tech, returns a boolean that indicates whether\n * there is a tech that can play the source or not\n *\n * @param {Tech~SourceObject} source\n * The source object to set on the Tech\n *\n * @return {boolean}\n * - True if there is no Tech to playback this source\n * - False otherwise\n *\n * @private\n */\n src_(source) {\n const sourceTech = this.selectSource([source]);\n if (!sourceTech) {\n return true;\n }\n if (!titleCaseEquals(sourceTech.tech, this.techName_)) {\n this.changingSrc_ = true;\n // load this technology with the chosen source\n this.loadTech_(sourceTech.tech, sourceTech.source);\n this.tech_.ready(() => {\n this.changingSrc_ = false;\n });\n return false;\n }\n\n // wait until the tech is ready to set the source\n // and set it synchronously if possible (#2326)\n this.ready(function () {\n // The setSource tech method was added with source handlers\n // so older techs won't support it\n // We need to check the direct prototype for the case where subclasses\n // of the tech do not support source handlers\n if (this.tech_.constructor.prototype.hasOwnProperty('setSource')) {\n this.techCall_('setSource', source);\n } else {\n this.techCall_('src', source.src);\n }\n this.changingSrc_ = false;\n }, true);\n return false;\n }\n\n /**\n * Begin loading the src data.\n */\n load() {\n // Workaround to use the load method with the VHS.\n // Does not cover the case when the load method is called directly from the mediaElement.\n if (this.tech_ && this.tech_.vhs) {\n this.src(this.currentSource());\n return;\n }\n this.techCall_('load');\n }\n\n /**\n * Reset the player. Loads the first tech in the techOrder,\n * removes all the text tracks in the existing `tech`,\n * and calls `reset` on the `tech`.\n */\n reset() {\n if (this.paused()) {\n this.doReset_();\n } else {\n const playPromise = this.play();\n silencePromise(playPromise.then(() => this.doReset_()));\n }\n }\n doReset_() {\n if (this.tech_) {\n this.tech_.clearTracks('text');\n }\n this.removeClass('vjs-playing');\n this.addClass('vjs-paused');\n this.resetCache_();\n this.poster('');\n this.loadTech_(this.options_.techOrder[0], null);\n this.techCall_('reset');\n this.resetControlBarUI_();\n this.error(null);\n if (this.titleBar) {\n this.titleBar.update({\n title: undefined,\n description: undefined\n });\n }\n if (isEvented(this)) {\n this.trigger('playerreset');\n }\n }\n\n /**\n * Reset Control Bar's UI by calling sub-methods that reset\n * all of Control Bar's components\n */\n resetControlBarUI_() {\n this.resetProgressBar_();\n this.resetPlaybackRate_();\n this.resetVolumeBar_();\n }\n\n /**\n * Reset tech's progress so progress bar is reset in the UI\n */\n resetProgressBar_() {\n this.currentTime(0);\n const {\n currentTimeDisplay,\n durationDisplay,\n progressControl,\n remainingTimeDisplay\n } = this.controlBar || {};\n const {\n seekBar\n } = progressControl || {};\n if (currentTimeDisplay) {\n currentTimeDisplay.updateContent();\n }\n if (durationDisplay) {\n durationDisplay.updateContent();\n }\n if (remainingTimeDisplay) {\n remainingTimeDisplay.updateContent();\n }\n if (seekBar) {\n seekBar.update();\n if (seekBar.loadProgressBar) {\n seekBar.loadProgressBar.update();\n }\n }\n }\n\n /**\n * Reset Playback ratio\n */\n resetPlaybackRate_() {\n this.playbackRate(this.defaultPlaybackRate());\n this.handleTechRateChange_();\n }\n\n /**\n * Reset Volume bar\n */\n 
resetVolumeBar_() {\n this.volume(1.0);\n this.trigger('volumechange');\n }\n\n /**\n * Returns all of the current source objects.\n *\n * @return {Tech~SourceObject[]}\n * The current source objects\n */\n currentSources() {\n const source = this.currentSource();\n const sources = [];\n\n // assume `{}` or `{ src }`\n if (Object.keys(source).length !== 0) {\n sources.push(source);\n }\n return this.cache_.sources || sources;\n }\n\n /**\n * Returns the current source object.\n *\n * @return {Tech~SourceObject}\n * The current source object\n */\n currentSource() {\n return this.cache_.source || {};\n }\n\n /**\n * Returns the fully qualified URL of the current source value e.g. http://mysite.com/video.mp4\n * Can be used in conjunction with `currentType` to assist in rebuilding the current source object.\n *\n * @return {string}\n * The current source\n */\n currentSrc() {\n return this.currentSource() && this.currentSource().src || '';\n }\n\n /**\n * Get the current source type e.g. video/mp4\n * This can allow you rebuild the current source object so that you could load the same\n * source and tech later\n *\n * @return {string}\n * The source MIME type\n */\n currentType() {\n return this.currentSource() && this.currentSource().type || '';\n }\n\n /**\n * Get or set the preload attribute\n *\n * @param {'none'|'auto'|'metadata'} [value]\n * Preload mode to pass to tech\n *\n * @return {string|undefined}\n * - The preload attribute value when getting\n * - Nothing when setting\n */\n preload(value) {\n if (value !== undefined) {\n this.techCall_('setPreload', value);\n this.options_.preload = value;\n return;\n }\n return this.techGet_('preload');\n }\n\n /**\n * Get or set the autoplay option. When this is a boolean it will\n * modify the attribute on the tech. When this is a string the attribute on\n * the tech will be removed and `Player` will handle autoplay on loadstarts.\n *\n * @param {boolean|'play'|'muted'|'any'} [value]\n * - true: autoplay using the browser behavior\n * - false: do not autoplay\n * - 'play': call play() on every loadstart\n * - 'muted': call muted() then play() on every loadstart\n * - 'any': call play() on every loadstart. if that fails call muted() then play().\n * - *: values other than those listed here will be set `autoplay` to true\n *\n * @return {boolean|string|undefined}\n * - The current value of autoplay when getting\n * - Nothing when setting\n */\n autoplay(value) {\n // getter usage\n if (value === undefined) {\n return this.options_.autoplay || false;\n }\n let techAutoplay;\n\n // if the value is a valid string set it to that, or normalize `true` to 'play', if need be\n if (typeof value === 'string' && /(any|play|muted)/.test(value) || value === true && this.options_.normalizeAutoplay) {\n this.options_.autoplay = value;\n this.manualAutoplay_(typeof value === 'string' ? value : 'play');\n techAutoplay = false;\n\n // any falsy value sets autoplay to false in the browser,\n // lets do the same\n } else if (!value) {\n this.options_.autoplay = false;\n\n // any other value (ie truthy) sets autoplay to true\n } else {\n this.options_.autoplay = true;\n }\n techAutoplay = typeof techAutoplay === 'undefined' ? this.options_.autoplay : techAutoplay;\n\n // if we don't have a tech then we do not queue up\n // a setAutoplay call on tech ready. 
We do this because the\n // autoplay option will be passed in the constructor and we\n // do not need to set it twice\n if (this.tech_) {\n this.techCall_('setAutoplay', techAutoplay);\n }\n }\n\n /**\n * Set or unset the playsinline attribute.\n * Playsinline tells the browser that non-fullscreen playback is preferred.\n *\n * @param {boolean} [value]\n * - true means that we should try to play inline by default\n * - false means that we should use the browser's default playback mode,\n * which in most cases is inline. iOS Safari is a notable exception\n * and plays fullscreen by default.\n *\n * @return {string|undefined}\n * - the current value of playsinline\n * - Nothing when setting\n *\n * @see [Spec]{@link https://html.spec.whatwg.org/#attr-video-playsinline}\n */\n playsinline(value) {\n if (value !== undefined) {\n this.techCall_('setPlaysinline', value);\n this.options_.playsinline = value;\n }\n return this.techGet_('playsinline');\n }\n\n /**\n * Get or set the loop attribute on the video element.\n *\n * @param {boolean} [value]\n * - true means that we should loop the video\n * - false means that we should not loop the video\n *\n * @return {boolean|undefined}\n * - The current value of loop when getting\n * - Nothing when setting\n */\n loop(value) {\n if (value !== undefined) {\n this.techCall_('setLoop', value);\n this.options_.loop = value;\n return;\n }\n return this.techGet_('loop');\n }\n\n /**\n * Get or set the poster image source url\n *\n * @fires Player#posterchange\n *\n * @param {string} [src]\n * Poster image source URL\n *\n * @return {string|undefined}\n * - The current value of poster when getting\n * - Nothing when setting\n */\n poster(src) {\n if (src === undefined) {\n return this.poster_;\n }\n\n // The correct way to remove a poster is to set as an empty string\n // other falsey values will throw errors\n if (!src) {\n src = '';\n }\n if (src === this.poster_) {\n return;\n }\n\n // update the internal poster variable\n this.poster_ = src;\n\n // update the tech's poster\n this.techCall_('setPoster', src);\n this.isPosterFromTech_ = false;\n\n // alert components that the poster has been set\n /**\n * This event fires when the poster image is changed on the player.\n *\n * @event Player#posterchange\n * @type {Event}\n */\n this.trigger('posterchange');\n }\n\n /**\n * Some techs (e.g. YouTube) can provide a poster source in an\n * asynchronous way. We want the poster component to use this\n * poster source so that it covers up the tech's controls.\n * (YouTube's play button). 
However we only want to use this\n * source if the player user hasn't set a poster through\n * the normal APIs.\n *\n * @fires Player#posterchange\n * @listens Tech#posterchange\n * @private\n */\n handleTechPosterChange_() {\n if ((!this.poster_ || this.options_.techCanOverridePoster) && this.tech_ && this.tech_.poster) {\n const newPoster = this.tech_.poster() || '';\n if (newPoster !== this.poster_) {\n this.poster_ = newPoster;\n this.isPosterFromTech_ = true;\n\n // Let components know the poster has changed\n this.trigger('posterchange');\n }\n }\n }\n\n /**\n * Get or set whether or not the controls are showing.\n *\n * @fires Player#controlsenabled\n *\n * @param {boolean} [bool]\n * - true to turn controls on\n * - false to turn controls off\n *\n * @return {boolean|undefined}\n * - The current value of controls when getting\n * - Nothing when setting\n */\n controls(bool) {\n if (bool === undefined) {\n return !!this.controls_;\n }\n bool = !!bool;\n\n // Don't trigger a change event unless it actually changed\n if (this.controls_ === bool) {\n return;\n }\n this.controls_ = bool;\n if (this.usingNativeControls()) {\n this.techCall_('setControls', bool);\n }\n if (this.controls_) {\n this.removeClass('vjs-controls-disabled');\n this.addClass('vjs-controls-enabled');\n /**\n * @event Player#controlsenabled\n * @type {Event}\n */\n this.trigger('controlsenabled');\n if (!this.usingNativeControls()) {\n this.addTechControlsListeners_();\n }\n } else {\n this.removeClass('vjs-controls-enabled');\n this.addClass('vjs-controls-disabled');\n /**\n * @event Player#controlsdisabled\n * @type {Event}\n */\n this.trigger('controlsdisabled');\n if (!this.usingNativeControls()) {\n this.removeTechControlsListeners_();\n }\n }\n }\n\n /**\n * Toggle native controls on/off. Native controls are the controls built into\n * devices (e.g. default iPhone controls) or other techs\n * (e.g. 
Vimeo Controls)\n * **This should only be set by the current tech, because only the tech knows\n * if it can support native controls**\n *\n * @fires Player#usingnativecontrols\n * @fires Player#usingcustomcontrols\n *\n * @param {boolean} [bool]\n * - true to turn native controls on\n * - false to turn native controls off\n *\n * @return {boolean|undefined}\n * - The current value of native controls when getting\n * - Nothing when setting\n */\n usingNativeControls(bool) {\n if (bool === undefined) {\n return !!this.usingNativeControls_;\n }\n bool = !!bool;\n\n // Don't trigger a change event unless it actually changed\n if (this.usingNativeControls_ === bool) {\n return;\n }\n this.usingNativeControls_ = bool;\n if (this.usingNativeControls_) {\n this.addClass('vjs-using-native-controls');\n\n /**\n * player is using the native device controls\n *\n * @event Player#usingnativecontrols\n * @type {Event}\n */\n this.trigger('usingnativecontrols');\n } else {\n this.removeClass('vjs-using-native-controls');\n\n /**\n * player is using the custom HTML controls\n *\n * @event Player#usingcustomcontrols\n * @type {Event}\n */\n this.trigger('usingcustomcontrols');\n }\n }\n\n /**\n * Set or get the current MediaError\n *\n * @fires Player#error\n *\n * @param {MediaError|string|number} [err]\n * A MediaError or a string/number to be turned\n * into a MediaError\n *\n * @return {MediaError|null|undefined}\n * - The current MediaError when getting (or null)\n * - Nothing when setting\n */\n error(err) {\n if (err === undefined) {\n return this.error_ || null;\n }\n\n // allow hooks to modify error object\n hooks('beforeerror').forEach(hookFunction => {\n const newErr = hookFunction(this, err);\n if (!(isObject(newErr) && !Array.isArray(newErr) || typeof newErr === 'string' || typeof newErr === 'number' || newErr === null)) {\n this.log.error('please return a value that MediaError expects in beforeerror hooks');\n return;\n }\n err = newErr;\n });\n\n // Suppress the first error message for no compatible source until\n // user interaction\n if (this.options_.suppressNotSupportedError && err && err.code === 4) {\n const triggerSuppressedError = function () {\n this.error(err);\n };\n this.options_.suppressNotSupportedError = false;\n this.any(['click', 'touchstart'], triggerSuppressedError);\n this.one('loadstart', function () {\n this.off(['click', 'touchstart'], triggerSuppressedError);\n });\n return;\n }\n\n // restoring to default\n if (err === null) {\n this.error_ = null;\n this.removeClass('vjs-error');\n if (this.errorDisplay) {\n this.errorDisplay.close();\n }\n return;\n }\n this.error_ = new MediaError(err);\n\n // add the vjs-error classname to the player\n this.addClass('vjs-error');\n\n // log the name of the error type and any message\n // IE11 logs \"[object object]\" and required you to expand message to see error object\n log$1.error(`(CODE:${this.error_.code} ${MediaError.errorTypes[this.error_.code]})`, this.error_.message, this.error_);\n\n /**\n * @event Player#error\n * @type {Event}\n */\n this.trigger('error');\n\n // notify hooks of the per player error\n hooks('error').forEach(hookFunction => hookFunction(this, this.error_));\n return;\n }\n\n /**\n * Report user activity\n *\n * @param {Object} event\n * Event object\n */\n reportUserActivity(event) {\n this.userActivity_ = true;\n }\n\n /**\n * Get/set if user is active\n *\n * @fires Player#useractive\n * @fires Player#userinactive\n *\n * @param {boolean} [bool]\n * - true if the user is active\n * - false if the 
user is inactive\n *\n * @return {boolean|undefined}\n * - The current value of userActive when getting\n * - Nothing when setting\n */\n userActive(bool) {\n if (bool === undefined) {\n return this.userActive_;\n }\n bool = !!bool;\n if (bool === this.userActive_) {\n return;\n }\n this.userActive_ = bool;\n if (this.userActive_) {\n this.userActivity_ = true;\n this.removeClass('vjs-user-inactive');\n this.addClass('vjs-user-active');\n /**\n * @event Player#useractive\n * @type {Event}\n */\n this.trigger('useractive');\n return;\n }\n\n // Chrome/Safari/IE have bugs where when you change the cursor it can\n // trigger a mousemove event. This causes an issue when you're hiding\n // the cursor when the user is inactive, and a mousemove signals user\n // activity. Making it impossible to go into inactive mode. Specifically\n // this happens in fullscreen when we really need to hide the cursor.\n //\n // When this gets resolved in ALL browsers it can be removed\n // https://code.google.com/p/chromium/issues/detail?id=103041\n if (this.tech_) {\n this.tech_.one('mousemove', function (e) {\n e.stopPropagation();\n e.preventDefault();\n });\n }\n this.userActivity_ = false;\n this.removeClass('vjs-user-active');\n this.addClass('vjs-user-inactive');\n /**\n * @event Player#userinactive\n * @type {Event}\n */\n this.trigger('userinactive');\n }\n\n /**\n * Listen for user activity based on timeout value\n *\n * @private\n */\n listenForUserActivity_() {\n let mouseInProgress;\n let lastMoveX;\n let lastMoveY;\n const handleActivity = bind_(this, this.reportUserActivity);\n const handleMouseMove = function (e) {\n // #1068 - Prevent mousemove spamming\n // Chrome Bug: https://code.google.com/p/chromium/issues/detail?id=366970\n if (e.screenX !== lastMoveX || e.screenY !== lastMoveY) {\n lastMoveX = e.screenX;\n lastMoveY = e.screenY;\n handleActivity();\n }\n };\n const handleMouseDown = function () {\n handleActivity();\n // For as long as the they are touching the device or have their mouse down,\n // we consider them active even if they're not moving their finger or mouse.\n // So we want to continue to update that they are active\n this.clearInterval(mouseInProgress);\n // Setting userActivity=true now and setting the interval to the same time\n // as the activityCheck interval (250) should ensure we never miss the\n // next activityCheck\n mouseInProgress = this.setInterval(handleActivity, 250);\n };\n const handleMouseUpAndMouseLeave = function (event) {\n handleActivity();\n // Stop the interval that maintains activity if the mouse/touch is down\n this.clearInterval(mouseInProgress);\n };\n\n // Any mouse movement will be considered user activity\n this.on('mousedown', handleMouseDown);\n this.on('mousemove', handleMouseMove);\n this.on('mouseup', handleMouseUpAndMouseLeave);\n this.on('mouseleave', handleMouseUpAndMouseLeave);\n const controlBar = this.getChild('controlBar');\n\n // Fixes bug on Android & iOS where when tapping progressBar (when control bar is displayed)\n // controlBar would no longer be hidden by default timeout.\n if (controlBar && !IS_IOS && !IS_ANDROID) {\n controlBar.on('mouseenter', function (event) {\n if (this.player().options_.inactivityTimeout !== 0) {\n this.player().cache_.inactivityTimeout = this.player().options_.inactivityTimeout;\n }\n this.player().options_.inactivityTimeout = 0;\n });\n controlBar.on('mouseleave', function (event) {\n this.player().options_.inactivityTimeout = this.player().cache_.inactivityTimeout;\n });\n }\n\n // Listen for 
keyboard navigation\n // Shouldn't need to use inProgress interval because of key repeat\n this.on('keydown', handleActivity);\n this.on('keyup', handleActivity);\n\n // Run an interval every 250 milliseconds instead of stuffing everything into\n // the mousemove/touchmove function itself, to prevent performance degradation.\n // `this.reportUserActivity` simply sets this.userActivity_ to true, which\n // then gets picked up by this loop\n // http://ejohn.org/blog/learning-from-twitter/\n let inactivityTimeout;\n\n /** @this Player */\n const activityCheck = function () {\n // Check to see if mouse/touch activity has happened\n if (!this.userActivity_) {\n return;\n }\n\n // Reset the activity tracker\n this.userActivity_ = false;\n\n // If the user state was inactive, set the state to active\n this.userActive(true);\n\n // Clear any existing inactivity timeout to start the timer over\n this.clearTimeout(inactivityTimeout);\n const timeout = this.options_.inactivityTimeout;\n if (timeout <= 0) {\n return;\n }\n\n // In milliseconds, if no more activity has occurred the\n // user will be considered inactive\n inactivityTimeout = this.setTimeout(function () {\n // Protect against the case where the inactivityTimeout can trigger just\n // before the next user activity is picked up by the activity check loop\n // causing a flicker\n if (!this.userActivity_) {\n this.userActive(false);\n }\n }, timeout);\n };\n this.setInterval(activityCheck, 250);\n }\n\n /**\n * Gets or sets the current playback rate. A playback rate of\n * 1.0 represents normal speed and 0.5 would indicate half-speed\n * playback, for instance.\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-playbackrate\n *\n * @param {number} [rate]\n * New playback rate to set.\n *\n * @return {number|undefined}\n * - The current playback rate when getting or 1.0\n * - Nothing when setting\n */\n playbackRate(rate) {\n if (rate !== undefined) {\n // NOTE: this.cache_.lastPlaybackRate is set from the tech handler\n // that is registered above\n this.techCall_('setPlaybackRate', rate);\n return;\n }\n if (this.tech_ && this.tech_.featuresPlaybackRate) {\n return this.cache_.lastPlaybackRate || this.techGet_('playbackRate');\n }\n return 1.0;\n }\n\n /**\n * Gets or sets the current default playback rate. 
A default playback rate of\n * 1.0 represents normal speed and 0.5 would indicate half-speed playback, for instance.\n * defaultPlaybackRate will only represent what the initial playbackRate of a video was, not\n * not the current playbackRate.\n *\n * @see https://html.spec.whatwg.org/multipage/embedded-content.html#dom-media-defaultplaybackrate\n *\n * @param {number} [rate]\n * New default playback rate to set.\n *\n * @return {number|undefined}\n * - The default playback rate when getting or 1.0\n * - Nothing when setting\n */\n defaultPlaybackRate(rate) {\n if (rate !== undefined) {\n return this.techCall_('setDefaultPlaybackRate', rate);\n }\n if (this.tech_ && this.tech_.featuresPlaybackRate) {\n return this.techGet_('defaultPlaybackRate');\n }\n return 1.0;\n }\n\n /**\n * Gets or sets the audio flag\n *\n * @param {boolean} [bool]\n * - true signals that this is an audio player\n * - false signals that this is not an audio player\n *\n * @return {boolean|undefined}\n * - The current value of isAudio when getting\n * - Nothing when setting\n */\n isAudio(bool) {\n if (bool !== undefined) {\n this.isAudio_ = !!bool;\n return;\n }\n return !!this.isAudio_;\n }\n enableAudioOnlyUI_() {\n // Update styling immediately to show the control bar so we can get its height\n this.addClass('vjs-audio-only-mode');\n const playerChildren = this.children();\n const controlBar = this.getChild('ControlBar');\n const controlBarHeight = controlBar && controlBar.currentHeight();\n\n // Hide all player components except the control bar. Control bar components\n // needed only for video are hidden with CSS\n playerChildren.forEach(child => {\n if (child === controlBar) {\n return;\n }\n if (child.el_ && !child.hasClass('vjs-hidden')) {\n child.hide();\n this.audioOnlyCache_.hiddenChildren.push(child);\n }\n });\n this.audioOnlyCache_.playerHeight = this.currentHeight();\n\n // Set the player height the same as the control bar\n this.height(controlBarHeight);\n this.trigger('audioonlymodechange');\n }\n disableAudioOnlyUI_() {\n this.removeClass('vjs-audio-only-mode');\n\n // Show player components that were previously hidden\n this.audioOnlyCache_.hiddenChildren.forEach(child => child.show());\n\n // Reset player height\n this.height(this.audioOnlyCache_.playerHeight);\n this.trigger('audioonlymodechange');\n }\n\n /**\n * Get the current audioOnlyMode state or set audioOnlyMode to true or false.\n *\n * Setting this to `true` will hide all player components except the control bar,\n * as well as control bar components needed only for video.\n *\n * @param {boolean} [value]\n * The value to set audioOnlyMode to.\n *\n * @return {Promise|boolean}\n * A Promise is returned when setting the state, and a boolean when getting\n * the present state\n */\n audioOnlyMode(value) {\n if (typeof value !== 'boolean' || value === this.audioOnlyMode_) {\n return this.audioOnlyMode_;\n }\n this.audioOnlyMode_ = value;\n\n // Enable Audio Only Mode\n if (value) {\n const exitPromises = [];\n\n // Fullscreen and PiP are not supported in audioOnlyMode, so exit if we need to.\n if (this.isInPictureInPicture()) {\n exitPromises.push(this.exitPictureInPicture());\n }\n if (this.isFullscreen()) {\n exitPromises.push(this.exitFullscreen());\n }\n if (this.audioPosterMode()) {\n exitPromises.push(this.audioPosterMode(false));\n }\n return Promise.all(exitPromises).then(() => this.enableAudioOnlyUI_());\n }\n\n // Disable Audio Only Mode\n return Promise.resolve().then(() => this.disableAudioOnlyUI_());\n }\n 
enablePosterModeUI_() {\n // Hide the video element and show the poster image to enable posterModeUI\n const tech = this.tech_ && this.tech_;\n tech.hide();\n this.addClass('vjs-audio-poster-mode');\n this.trigger('audiopostermodechange');\n }\n disablePosterModeUI_() {\n // Show the video element and hide the poster image to disable posterModeUI\n const tech = this.tech_ && this.tech_;\n tech.show();\n this.removeClass('vjs-audio-poster-mode');\n this.trigger('audiopostermodechange');\n }\n\n /**\n * Get the current audioPosterMode state or set audioPosterMode to true or false\n *\n * @param {boolean} [value]\n * The value to set audioPosterMode to.\n *\n * @return {Promise|boolean}\n * A Promise is returned when setting the state, and a boolean when getting\n * the present state\n */\n audioPosterMode(value) {\n if (typeof value !== 'boolean' || value === this.audioPosterMode_) {\n return this.audioPosterMode_;\n }\n this.audioPosterMode_ = value;\n if (value) {\n if (this.audioOnlyMode()) {\n const audioOnlyModePromise = this.audioOnlyMode(false);\n return audioOnlyModePromise.then(() => {\n // enable audio poster mode after audio only mode is disabled\n this.enablePosterModeUI_();\n });\n }\n return Promise.resolve().then(() => {\n // enable audio poster mode\n this.enablePosterModeUI_();\n });\n }\n return Promise.resolve().then(() => {\n // disable audio poster mode\n this.disablePosterModeUI_();\n });\n }\n\n /**\n * A helper method for adding a {@link TextTrack} to our\n * {@link TextTrackList}.\n *\n * In addition to the W3C settings we allow adding additional info through options.\n *\n * @see http://www.w3.org/html/wg/drafts/html/master/embedded-content-0.html#dom-media-addtexttrack\n *\n * @param {string} [kind]\n * the kind of TextTrack you are adding\n *\n * @param {string} [label]\n * the label to give the TextTrack label\n *\n * @param {string} [language]\n * the language to set on the TextTrack\n *\n * @return {TextTrack|undefined}\n * the TextTrack that was added or undefined\n * if there is no tech\n */\n addTextTrack(kind, label, language) {\n if (this.tech_) {\n return this.tech_.addTextTrack(kind, label, language);\n }\n }\n\n /**\n * Create a remote {@link TextTrack} and an {@link HTMLTrackElement}.\n *\n * @param {Object} options\n * Options to pass to {@link HTMLTrackElement} during creation. 
See\n * {@link HTMLTrackElement} for object properties that you should use.\n *\n * @param {boolean} [manualCleanup=false] if set to true, the TextTrack will not be removed\n * from the TextTrackList and HtmlTrackElementList\n * after a source change\n *\n * @return { import('./tracks/html-track-element').default }\n * the HTMLTrackElement that was created and added\n * to the HtmlTrackElementList and the remote\n * TextTrackList\n *\n */\n addRemoteTextTrack(options, manualCleanup) {\n if (this.tech_) {\n return this.tech_.addRemoteTextTrack(options, manualCleanup);\n }\n }\n\n /**\n * Remove a remote {@link TextTrack} from the respective\n * {@link TextTrackList} and {@link HtmlTrackElementList}.\n *\n * @param {Object} track\n * Remote {@link TextTrack} to remove\n *\n * @return {undefined}\n * does not return anything\n */\n removeRemoteTextTrack(obj = {}) {\n let {\n track\n } = obj;\n if (!track) {\n track = obj;\n }\n\n // destructure the input into an object with a track argument, defaulting to arguments[0]\n // default the whole argument to an empty object if nothing was passed in\n\n if (this.tech_) {\n return this.tech_.removeRemoteTextTrack(track);\n }\n }\n\n /**\n * Gets available media playback quality metrics as specified by the W3C's Media\n * Playback Quality API.\n *\n * @see [Spec]{@link https://wicg.github.io/media-playback-quality}\n *\n * @return {Object|undefined}\n * An object with supported media playback quality metrics or undefined if there\n * is no tech or the tech does not support it.\n */\n getVideoPlaybackQuality() {\n return this.techGet_('getVideoPlaybackQuality');\n }\n\n /**\n * Get video width\n *\n * @return {number}\n * current video width\n */\n videoWidth() {\n return this.tech_ && this.tech_.videoWidth && this.tech_.videoWidth() || 0;\n }\n\n /**\n * Get video height\n *\n * @return {number}\n * current video height\n */\n videoHeight() {\n return this.tech_ && this.tech_.videoHeight && this.tech_.videoHeight() || 0;\n }\n\n /**\n * Set or get the player's language code.\n *\n * Changing the language will trigger\n * [languagechange]{@link Player#event:languagechange}\n * which Components can use to update control text.\n * ClickableComponent will update its control text by default on\n * [languagechange]{@link Player#event:languagechange}.\n *\n * @fires Player#languagechange\n *\n * @param {string} [code]\n * the language code to set the player to\n *\n * @return {string|undefined}\n * - The current language code when getting\n * - Nothing when setting\n */\n language(code) {\n if (code === undefined) {\n return this.language_;\n }\n if (this.language_ !== String(code).toLowerCase()) {\n this.language_ = String(code).toLowerCase();\n\n // during first init, it's possible some things won't be evented\n if (isEvented(this)) {\n /**\n * fires when the player language change\n *\n * @event Player#languagechange\n * @type {Event}\n */\n this.trigger('languagechange');\n }\n }\n }\n\n /**\n * Get the player's language dictionary\n * Merge every time, because a newly added plugin might call videojs.addLanguage() at any time\n * Languages specified directly in the player options have precedence\n *\n * @return {Array}\n * An array of of supported languages\n */\n languages() {\n return merge$1(Player.prototype.options_.languages, this.languages_);\n }\n\n /**\n * returns a JavaScript object representing the current track\n * information. 
**DOES not return it as JSON**\n *\n * @return {Object}\n * Object representing the current of track info\n */\n toJSON() {\n const options = merge$1(this.options_);\n const tracks = options.tracks;\n options.tracks = [];\n for (let i = 0; i < tracks.length; i++) {\n let track = tracks[i];\n\n // deep merge tracks and null out player so no circular references\n track = merge$1(track);\n track.player = undefined;\n options.tracks[i] = track;\n }\n return options;\n }\n\n /**\n * Creates a simple modal dialog (an instance of the {@link ModalDialog}\n * component) that immediately overlays the player with arbitrary\n * content and removes itself when closed.\n *\n * @param {string|Function|Element|Array|null} content\n * Same as {@link ModalDialog#content}'s param of the same name.\n * The most straight-forward usage is to provide a string or DOM\n * element.\n *\n * @param {Object} [options]\n * Extra options which will be passed on to the {@link ModalDialog}.\n *\n * @return {ModalDialog}\n * the {@link ModalDialog} that was created\n */\n createModal(content, options) {\n options = options || {};\n options.content = content || '';\n const modal = new ModalDialog(this, options);\n this.addChild(modal);\n modal.on('dispose', () => {\n this.removeChild(modal);\n });\n modal.open();\n return modal;\n }\n\n /**\n * Change breakpoint classes when the player resizes.\n *\n * @private\n */\n updateCurrentBreakpoint_() {\n if (!this.responsive()) {\n return;\n }\n const currentBreakpoint = this.currentBreakpoint();\n const currentWidth = this.currentWidth();\n for (let i = 0; i < BREAKPOINT_ORDER.length; i++) {\n const candidateBreakpoint = BREAKPOINT_ORDER[i];\n const maxWidth = this.breakpoints_[candidateBreakpoint];\n if (currentWidth <= maxWidth) {\n // The current breakpoint did not change, nothing to do.\n if (currentBreakpoint === candidateBreakpoint) {\n return;\n }\n\n // Only remove a class if there is a current breakpoint.\n if (currentBreakpoint) {\n this.removeClass(BREAKPOINT_CLASSES[currentBreakpoint]);\n }\n this.addClass(BREAKPOINT_CLASSES[candidateBreakpoint]);\n this.breakpoint_ = candidateBreakpoint;\n break;\n }\n }\n }\n\n /**\n * Removes the current breakpoint.\n *\n * @private\n */\n removeCurrentBreakpoint_() {\n const className = this.currentBreakpointClass();\n this.breakpoint_ = '';\n if (className) {\n this.removeClass(className);\n }\n }\n\n /**\n * Get or set breakpoints on the player.\n *\n * Calling this method with an object or `true` will remove any previous\n * custom breakpoints and start from the defaults again.\n *\n * @param {Object|boolean} [breakpoints]\n * If an object is given, it can be used to provide custom\n * breakpoints. 
If `true` is given, will set default breakpoints.\n * If this argument is not given, will simply return the current\n * breakpoints.\n *\n * @param {number} [breakpoints.tiny]\n * The maximum width for the \"vjs-layout-tiny\" class.\n *\n * @param {number} [breakpoints.xsmall]\n * The maximum width for the \"vjs-layout-x-small\" class.\n *\n * @param {number} [breakpoints.small]\n * The maximum width for the \"vjs-layout-small\" class.\n *\n * @param {number} [breakpoints.medium]\n * The maximum width for the \"vjs-layout-medium\" class.\n *\n * @param {number} [breakpoints.large]\n * The maximum width for the \"vjs-layout-large\" class.\n *\n * @param {number} [breakpoints.xlarge]\n * The maximum width for the \"vjs-layout-x-large\" class.\n *\n * @param {number} [breakpoints.huge]\n * The maximum width for the \"vjs-layout-huge\" class.\n *\n * @return {Object}\n * An object mapping breakpoint names to maximum width values.\n */\n breakpoints(breakpoints) {\n // Used as a getter.\n if (breakpoints === undefined) {\n return Object.assign(this.breakpoints_);\n }\n this.breakpoint_ = '';\n this.breakpoints_ = Object.assign({}, DEFAULT_BREAKPOINTS, breakpoints);\n\n // When breakpoint definitions change, we need to update the currently\n // selected breakpoint.\n this.updateCurrentBreakpoint_();\n\n // Clone the breakpoints before returning.\n return Object.assign(this.breakpoints_);\n }\n\n /**\n * Get or set a flag indicating whether or not this player should adjust\n * its UI based on its dimensions.\n *\n * @param {boolean} [value]\n * Should be `true` if the player should adjust its UI based on its\n * dimensions; otherwise, should be `false`.\n *\n * @return {boolean|undefined}\n * Will be `true` if this player should adjust its UI based on its\n * dimensions; otherwise, will be `false`.\n * Nothing if setting\n */\n responsive(value) {\n // Used as a getter.\n if (value === undefined) {\n return this.responsive_;\n }\n value = Boolean(value);\n const current = this.responsive_;\n\n // Nothing changed.\n if (value === current) {\n return;\n }\n\n // The value actually changed, set it.\n this.responsive_ = value;\n\n // Start listening for breakpoints and set the initial breakpoint if the\n // player is now responsive.\n if (value) {\n this.on('playerresize', this.boundUpdateCurrentBreakpoint_);\n this.updateCurrentBreakpoint_();\n\n // Stop listening for breakpoints if the player is no longer responsive.\n } else {\n this.off('playerresize', this.boundUpdateCurrentBreakpoint_);\n this.removeCurrentBreakpoint_();\n }\n return value;\n }\n\n /**\n * Get current breakpoint name, if any.\n *\n * @return {string}\n * If there is currently a breakpoint set, returns a the key from the\n * breakpoints object matching it. Otherwise, returns an empty string.\n */\n currentBreakpoint() {\n return this.breakpoint_;\n }\n\n /**\n * Get the current breakpoint class name.\n *\n * @return {string}\n * The matching class name (e.g. `\"vjs-layout-tiny\"` or\n * `\"vjs-layout-large\"`) for the current breakpoint. 
Empty string if\n * there is no current breakpoint.\n */\n currentBreakpointClass() {\n return BREAKPOINT_CLASSES[this.breakpoint_] || '';\n }\n\n /**\n * An object that describes a single piece of media.\n *\n * Properties that are not part of this type description will be retained; so,\n * this can be viewed as a generic metadata storage mechanism as well.\n *\n * @see {@link https://wicg.github.io/mediasession/#the-mediametadata-interface}\n * @typedef {Object} Player~MediaObject\n *\n * @property {string} [album]\n * Unused, except if this object is passed to the `MediaSession`\n * API.\n *\n * @property {string} [artist]\n * Unused, except if this object is passed to the `MediaSession`\n * API.\n *\n * @property {Object[]} [artwork]\n * Unused, except if this object is passed to the `MediaSession`\n * API. If not specified, will be populated via the `poster`, if\n * available.\n *\n * @property {string} [poster]\n * URL to an image that will display before playback.\n *\n * @property {Tech~SourceObject|Tech~SourceObject[]|string} [src]\n * A single source object, an array of source objects, or a string\n * referencing a URL to a media source. It is _highly recommended_\n * that an object or array of objects is used here, so that source\n * selection algorithms can take the `type` into account.\n *\n * @property {string} [title]\n * Unused, except if this object is passed to the `MediaSession`\n * API.\n *\n * @property {Object[]} [textTracks]\n * An array of objects to be used to create text tracks, following\n * the {@link https://www.w3.org/TR/html50/embedded-content-0.html#the-track-element|native track element format}.\n * For ease of removal, these will be created as \"remote\" text\n * tracks and set to automatically clean up on source changes.\n *\n * These objects may have properties like `src`, `kind`, `label`,\n * and `language`, see {@link Tech#createRemoteTextTrack}.\n */\n\n /**\n * Populate the player using a {@link Player~MediaObject|MediaObject}.\n *\n * @param {Player~MediaObject} media\n * A media object.\n *\n * @param {Function} ready\n * A callback to be called when the player is ready.\n */\n loadMedia(media, ready) {\n if (!media || typeof media !== 'object') {\n return;\n }\n const crossOrigin = this.crossOrigin();\n this.reset();\n\n // Clone the media object so it cannot be mutated from outside.\n this.cache_.media = merge$1(media);\n const {\n artist,\n artwork,\n description,\n poster,\n src,\n textTracks,\n title\n } = this.cache_.media;\n\n // If `artwork` is not given, create it using `poster`.\n if (!artwork && poster) {\n this.cache_.media.artwork = [{\n src: poster,\n type: getMimetype(poster)\n }];\n }\n if (crossOrigin) {\n this.crossOrigin(crossOrigin);\n }\n if (src) {\n this.src(src);\n }\n if (poster) {\n this.poster(poster);\n }\n if (Array.isArray(textTracks)) {\n textTracks.forEach(tt => this.addRemoteTextTrack(tt, false));\n }\n if (this.titleBar) {\n this.titleBar.update({\n title,\n description: description || artist || ''\n });\n }\n this.ready(ready);\n }\n\n /**\n * Get a clone of the current {@link Player~MediaObject} for this player.\n *\n * If the `loadMedia` method has not been used, will attempt to return a\n * {@link Player~MediaObject} based on the current state of the player.\n *\n * @return {Player~MediaObject}\n */\n getMedia() {\n if (!this.cache_.media) {\n const poster = this.poster();\n const src = this.currentSources();\n const textTracks = Array.prototype.map.call(this.remoteTextTracks(), tt => ({\n kind: tt.kind,\n 
label: tt.label,\n language: tt.language,\n src: tt.src\n }));\n const media = {\n src,\n textTracks\n };\n if (poster) {\n media.poster = poster;\n media.artwork = [{\n src: media.poster,\n type: getMimetype(media.poster)\n }];\n }\n return media;\n }\n return merge$1(this.cache_.media);\n }\n\n /**\n * Gets tag settings\n *\n * @param {Element} tag\n * The player tag\n *\n * @return {Object}\n * An object containing all of the settings\n * for a player tag\n */\n static getTagSettings(tag) {\n const baseOptions = {\n sources: [],\n tracks: []\n };\n const tagOptions = getAttributes(tag);\n const dataSetup = tagOptions['data-setup'];\n if (hasClass(tag, 'vjs-fill')) {\n tagOptions.fill = true;\n }\n if (hasClass(tag, 'vjs-fluid')) {\n tagOptions.fluid = true;\n }\n\n // Check if data-setup attr exists.\n if (dataSetup !== null) {\n // Parse options JSON\n // If empty string, make it a parsable json object.\n const [err, data] = safeParseTuple(dataSetup || '{}');\n if (err) {\n log$1.error(err);\n }\n Object.assign(tagOptions, data);\n }\n Object.assign(baseOptions, tagOptions);\n\n // Get tag children settings\n if (tag.hasChildNodes()) {\n const children = tag.childNodes;\n for (let i = 0, j = children.length; i < j; i++) {\n const child = children[i];\n // Change case needed: http://ejohn.org/blog/nodename-case-sensitivity/\n const childName = child.nodeName.toLowerCase();\n if (childName === 'source') {\n baseOptions.sources.push(getAttributes(child));\n } else if (childName === 'track') {\n baseOptions.tracks.push(getAttributes(child));\n }\n }\n }\n return baseOptions;\n }\n\n /**\n * Set debug mode to enable/disable logs at info level.\n *\n * @param {boolean} enabled\n * @fires Player#debugon\n * @fires Player#debugoff\n * @return {boolean|undefined}\n */\n debug(enabled) {\n if (enabled === undefined) {\n return this.debugEnabled_;\n }\n if (enabled) {\n this.trigger('debugon');\n this.previousLogLevel_ = this.log.level;\n this.log.level('debug');\n this.debugEnabled_ = true;\n } else {\n this.trigger('debugoff');\n this.log.level(this.previousLogLevel_);\n this.previousLogLevel_ = undefined;\n this.debugEnabled_ = false;\n }\n }\n\n /**\n * Set or get current playback rates.\n * Takes an array and updates the playback rates menu with the new items.\n * Pass in an empty array to hide the menu.\n * Values other than arrays are ignored.\n *\n * @fires Player#playbackrateschange\n * @param {number[]} [newRates]\n * The new rates that the playback rates menu should update to.\n * An empty array will hide the menu\n * @return {number[]} When used as a getter will return the current playback rates\n */\n playbackRates(newRates) {\n if (newRates === undefined) {\n return this.cache_.playbackRates;\n }\n\n // ignore any value that isn't an array\n if (!Array.isArray(newRates)) {\n return;\n }\n\n // ignore any arrays that don't only contain numbers\n if (!newRates.every(rate => typeof rate === 'number')) {\n return;\n }\n this.cache_.playbackRates = newRates;\n\n /**\n * fires when the playback rates in a player are changed\n *\n * @event Player#playbackrateschange\n * @type {Event}\n */\n this.trigger('playbackrateschange');\n }\n}\n\n/**\n * Get the {@link VideoTrackList}\n *\n * @link https://html.spec.whatwg.org/multipage/embedded-content.html#videotracklist\n *\n * @return {VideoTrackList}\n * the current video track list\n *\n * @method Player.prototype.videoTracks\n */\n\n/**\n * Get the {@link AudioTrackList}\n *\n * @link 
https://html.spec.whatwg.org/multipage/embedded-content.html#audiotracklist\n *\n * @return {AudioTrackList}\n * the current audio track list\n *\n * @method Player.prototype.audioTracks\n */\n\n/**\n * Get the {@link TextTrackList}\n *\n * @link http://www.w3.org/html/wg/drafts/html/master/embedded-content-0.html#dom-media-texttracks\n *\n * @return {TextTrackList}\n * the current text track list\n *\n * @method Player.prototype.textTracks\n */\n\n/**\n * Get the remote {@link TextTrackList}\n *\n * @return {TextTrackList}\n * The current remote text track list\n *\n * @method Player.prototype.remoteTextTracks\n */\n\n/**\n * Get the remote {@link HtmlTrackElementList} tracks.\n *\n * @return {HtmlTrackElementList}\n * The current remote text track element list\n *\n * @method Player.prototype.remoteTextTrackEls\n */\n\nALL.names.forEach(function (name) {\n const props = ALL[name];\n Player.prototype[props.getterName] = function () {\n if (this.tech_) {\n return this.tech_[props.getterName]();\n }\n\n // if we have not yet loadTech_, we create {video,audio,text}Tracks_\n // these will be passed to the tech during loading\n this[props.privateName] = this[props.privateName] || new props.ListClass();\n return this[props.privateName];\n };\n});\n\n/**\n * Get or set the `Player`'s crossorigin option. For the HTML5 player, this\n * sets the `crossOrigin` property on the `` tag to control the CORS\n * behavior.\n *\n * @see [Video Element Attributes]{@link https://developer.mozilla.org/en-US/docs/Web/HTML/Element/video#attr-crossorigin}\n *\n * @param {string} [value]\n * The value to set the `Player`'s crossorigin to. If an argument is\n * given, must be one of `anonymous` or `use-credentials`.\n *\n * @return {string|undefined}\n * - The current crossorigin value of the `Player` when getting.\n * - undefined when setting\n */\nPlayer.prototype.crossorigin = Player.prototype.crossOrigin;\n\n/**\n * Global enumeration of players.\n *\n * The keys are the player IDs and the values are either the {@link Player}\n * instance or `null` for disposed players.\n *\n * @type {Object}\n */\nPlayer.players = {};\nconst navigator = window$1.navigator;\n\n/*\n * Player instance options, surfaced using options\n * options = Player.prototype.options_\n * Make changes in options, not here.\n *\n * @type {Object}\n * @private\n */\nPlayer.prototype.options_ = {\n // Default order of fallback technology\n techOrder: Tech.defaultTechOrder_,\n html5: {},\n // enable sourceset by default\n enableSourceset: true,\n // default inactivity timeout\n inactivityTimeout: 2000,\n // default playback rates\n playbackRates: [],\n // Add playback rate selection by adding rates\n // 'playbackRates': [0.5, 1, 1.5, 2],\n liveui: false,\n // Included control sets\n children: ['mediaLoader', 'posterImage', 'titleBar', 'textTrackDisplay', 'loadingSpinner', 'bigPlayButton', 'liveTracker', 'controlBar', 'errorDisplay', 'textTrackSettings', 'resizeManager'],\n language: navigator && (navigator.languages && navigator.languages[0] || navigator.userLanguage || navigator.language) || 'en',\n // locales and their language translations\n languages: {},\n // Default message to show when a video cannot be played.\n notSupportedMessage: 'No compatible source was found for this media.',\n normalizeAutoplay: false,\n fullscreen: {\n options: {\n navigationUI: 'hide'\n }\n },\n breakpoints: {},\n responsive: false,\n audioOnlyMode: false,\n audioPosterMode: false,\n // Default smooth seeking to false\n enableSmoothSeeking: 
false\n};\nTECH_EVENTS_RETRIGGER.forEach(function (event) {\n Player.prototype[`handleTech${toTitleCase$1(event)}_`] = function () {\n return this.trigger(event);\n };\n});\n\n/**\n * Fired when the player has initial duration and dimension information\n *\n * @event Player#loadedmetadata\n * @type {Event}\n */\n\n/**\n * Fired when the player has downloaded data at the current playback position\n *\n * @event Player#loadeddata\n * @type {Event}\n */\n\n/**\n * Fired when the current playback position has changed *\n * During playback this is fired every 15-250 milliseconds, depending on the\n * playback technology in use.\n *\n * @event Player#timeupdate\n * @type {Event}\n */\n\n/**\n * Fired when the volume changes\n *\n * @event Player#volumechange\n * @type {Event}\n */\n\n/**\n * Reports whether or not a player has a plugin available.\n *\n * This does not report whether or not the plugin has ever been initialized\n * on this player. For that, [usingPlugin]{@link Player#usingPlugin}.\n *\n * @method Player#hasPlugin\n * @param {string} name\n * The name of a plugin.\n *\n * @return {boolean}\n * Whether or not this player has the requested plugin available.\n */\n\n/**\n * Reports whether or not a player is using a plugin by name.\n *\n * For basic plugins, this only reports whether the plugin has _ever_ been\n * initialized on this player.\n *\n * @method Player#usingPlugin\n * @param {string} name\n * The name of a plugin.\n *\n * @return {boolean}\n * Whether or not this player is using the requested plugin.\n */\n\nComponent$1.registerComponent('Player', Player);\n\n/**\n * @file plugin.js\n */\n\n/**\n * The base plugin name.\n *\n * @private\n * @constant\n * @type {string}\n */\nconst BASE_PLUGIN_NAME = 'plugin';\n\n/**\n * The key on which a player's active plugins cache is stored.\n *\n * @private\n * @constant\n * @type {string}\n */\nconst PLUGIN_CACHE_KEY = 'activePlugins_';\n\n/**\n * Stores registered plugins in a private space.\n *\n * @private\n * @type {Object}\n */\nconst pluginStorage = {};\n\n/**\n * Reports whether or not a plugin has been registered.\n *\n * @private\n * @param {string} name\n * The name of a plugin.\n *\n * @return {boolean}\n * Whether or not the plugin has been registered.\n */\nconst pluginExists = name => pluginStorage.hasOwnProperty(name);\n\n/**\n * Get a single registered plugin by name.\n *\n * @private\n * @param {string} name\n * The name of a plugin.\n *\n * @return {typeof Plugin|Function|undefined}\n * The plugin (or undefined).\n */\nconst getPlugin = name => pluginExists(name) ? pluginStorage[name] : undefined;\n\n/**\n * Marks a plugin as \"active\" on a player.\n *\n * Also, ensures that the player has an object for tracking active plugins.\n *\n * @private\n * @param {Player} player\n * A Video.js player instance.\n *\n * @param {string} name\n * The name of a plugin.\n */\nconst markPluginAsActive = (player, name) => {\n player[PLUGIN_CACHE_KEY] = player[PLUGIN_CACHE_KEY] || {};\n player[PLUGIN_CACHE_KEY][name] = true;\n};\n\n/**\n * Triggers a pair of plugin setup events.\n *\n * @private\n * @param {Player} player\n * A Video.js player instance.\n *\n * @param {PluginEventHash} hash\n * A plugin event hash.\n *\n * @param {boolean} [before]\n * If true, prefixes the event name with \"before\". In other words,\n * use this to trigger \"beforepluginsetup\" instead of \"pluginsetup\".\n */\nconst triggerSetupEvent = (player, hash, before) => {\n const eventName = (before ? 
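\n // (Added note) eventName resolves to 'beforepluginsetup' or 'pluginsetup'; the second trigger\n // call below also fires a name-suffixed variant such as 'pluginsetup:examplePlugin' (illustrative name).\n 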
'before' : '') + 'pluginsetup';\n player.trigger(eventName, hash);\n player.trigger(eventName + ':' + hash.name, hash);\n};\n\n/**\n * Takes a basic plugin function and returns a wrapper function which marks\n * on the player that the plugin has been activated.\n *\n * @private\n * @param {string} name\n * The name of the plugin.\n *\n * @param {Function} plugin\n * The basic plugin.\n *\n * @return {Function}\n * A wrapper function for the given plugin.\n */\nconst createBasicPlugin = function (name, plugin) {\n const basicPluginWrapper = function () {\n // We trigger the \"beforepluginsetup\" and \"pluginsetup\" events on the player\n // regardless, but we want the hash to be consistent with the hash provided\n // for advanced plugins.\n //\n // The only potentially counter-intuitive thing here is the `instance` in\n // the \"pluginsetup\" event is the value returned by the `plugin` function.\n triggerSetupEvent(this, {\n name,\n plugin,\n instance: null\n }, true);\n const instance = plugin.apply(this, arguments);\n markPluginAsActive(this, name);\n triggerSetupEvent(this, {\n name,\n plugin,\n instance\n });\n return instance;\n };\n Object.keys(plugin).forEach(function (prop) {\n basicPluginWrapper[prop] = plugin[prop];\n });\n return basicPluginWrapper;\n};\n\n/**\n * Takes a plugin sub-class and returns a factory function for generating\n * instances of it.\n *\n * This factory function will replace itself with an instance of the requested\n * sub-class of Plugin.\n *\n * @private\n * @param {string} name\n * The name of the plugin.\n *\n * @param {Plugin} PluginSubClass\n * The advanced plugin.\n *\n * @return {Function}\n */\nconst createPluginFactory = (name, PluginSubClass) => {\n // Add a `name` property to the plugin prototype so that each plugin can\n // refer to itself by name.\n PluginSubClass.prototype.name = name;\n return function (...args) {\n triggerSetupEvent(this, {\n name,\n plugin: PluginSubClass,\n instance: null\n }, true);\n const instance = new PluginSubClass(...[this, ...args]);\n\n // The plugin is replaced by a function that returns the current instance.\n this[name] = () => instance;\n triggerSetupEvent(this, instance.getEventHash());\n return instance;\n };\n};\n\n/**\n * Parent class for all advanced plugins.\n *\n * @mixes module:evented~EventedMixin\n * @mixes module:stateful~StatefulMixin\n * @fires Player#beforepluginsetup\n * @fires Player#beforepluginsetup:$name\n * @fires Player#pluginsetup\n * @fires Player#pluginsetup:$name\n * @listens Player#dispose\n * @throws {Error}\n * If attempting to instantiate the base {@link Plugin} class\n * directly instead of via a sub-class.\n */\nclass Plugin {\n /**\n * Creates an instance of this class.\n *\n * Sub-classes should call `super` to ensure plugins are properly initialized.\n *\n * @param {Player} player\n * A Video.js player instance.\n */\n constructor(player) {\n if (this.constructor === Plugin) {\n throw new Error('Plugin must be sub-classed; not directly instantiated.');\n }\n this.player = player;\n if (!this.log) {\n this.log = this.player.log.createLogger(this.name);\n }\n\n // Make this object evented, but remove the added `trigger` method so we\n // use the prototype version instead.\n evented(this);\n delete this.trigger;\n stateful(this, this.constructor.defaultState);\n markPluginAsActive(player, this.name);\n\n // Auto-bind the dispose method so we can use it as a listener and unbind\n // it later easily.\n this.dispose = this.dispose.bind(this);\n\n // If the player is disposed, 
dispose the plugin.\n player.on('dispose', this.dispose);\n }\n\n /**\n * Get the version of the plugin that was set on .VERSION\n */\n version() {\n return this.constructor.VERSION;\n }\n\n /**\n * Each event triggered by plugins includes a hash of additional data with\n * conventional properties.\n *\n * This returns that object or mutates an existing hash.\n *\n * @param {Object} [hash={}]\n * An object to be used as an event hash.\n *\n * @return {PluginEventHash}\n * An event hash object with provided properties mixed-in.\n */\n getEventHash(hash = {}) {\n hash.name = this.name;\n hash.plugin = this.constructor;\n hash.instance = this;\n return hash;\n }\n\n /**\n * Triggers an event on the plugin object and overrides\n * {@link module:evented~EventedMixin.trigger|EventedMixin.trigger}.\n *\n * @param {string|Object} event\n * An event type or an object with a type property.\n *\n * @param {Object} [hash={}]\n * Additional data hash to merge with a\n * {@link PluginEventHash|PluginEventHash}.\n *\n * @return {boolean}\n * Whether or not default was prevented.\n */\n trigger(event, hash = {}) {\n return trigger(this.eventBusEl_, event, this.getEventHash(hash));\n }\n\n /**\n * Handles \"statechanged\" events on the plugin. No-op by default, override by\n * subclassing.\n *\n * @abstract\n * @param {Event} e\n * An event object provided by a \"statechanged\" event.\n *\n * @param {Object} e.changes\n * An object describing changes that occurred with the \"statechanged\"\n * event.\n */\n handleStateChanged(e) {}\n\n /**\n * Disposes a plugin.\n *\n * Subclasses can override this if they want, but for the sake of safety,\n * it's probably best to subscribe to the \"dispose\" event.\n *\n * @fires Plugin#dispose\n */\n dispose() {\n const {\n name,\n player\n } = this;\n\n /**\n * Signals that an advanced plugin is about to be disposed.\n *\n * @event Plugin#dispose\n * @type {Event}\n */\n this.trigger('dispose');\n this.off();\n player.off('dispose', this.dispose);\n\n // Eliminate any possible sources of leaking memory by clearing up\n // references between the player and the plugin instance and nulling out\n // the plugin's state and replacing methods with a function that throws.\n player[PLUGIN_CACHE_KEY][name] = false;\n this.player = this.state = null;\n\n // Finally, replace the plugin name on the player with a new factory\n // function, so that the plugin is ready to be set up again.\n player[name] = createPluginFactory(name, pluginStorage[name]);\n }\n\n /**\n * Determines if a plugin is a basic plugin (i.e. not a sub-class of `Plugin`).\n *\n * @param {string|Function} plugin\n * If a string, matches the name of a plugin. If a function, will be\n * tested directly.\n *\n * @return {boolean}\n * Whether or not a plugin is a basic plugin.\n */\n static isBasic(plugin) {\n const p = typeof plugin === 'string' ? getPlugin(plugin) : plugin;\n return typeof p === 'function' && !Plugin.prototype.isPrototypeOf(p.prototype);\n }\n\n
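 /*\n * Usage sketch (added; illustrative, not part of the original source). A hypothetical advanced\n * plugin sub-class and a hypothetical basic plugin, registered under assumed names:\n *\n *   class ExamplePlugin extends Plugin {\n *     constructor(player, options) {\n *       super(player);\n *       this.on(player, 'play', () => this.log('playing'));\n *     }\n *   }\n *   Plugin.registerPlugin('examplePlugin', ExamplePlugin); // advanced: later call player.examplePlugin()\n *   Plugin.registerPlugin('exampleBasic', function (options) {}); // basic: wrapper runs with `this` set to the player\n */\n\n /**\n * Register a Video.js plugin.\n *\n * @param {string} name\n * The name of the plugin to be registered. Must be a string and\n * must not match an existing plugin or a method on the `Player`\n * prototype.\n *\n * @param {typeof Plugin|Function} plugin\n * A sub-class of `Plugin` or a function for basic plugins.\n *\n * @return {typeof Plugin|Function}\n * For advanced plugins, a factory function for that plugin. 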
For\n * basic plugins, a wrapper function that initializes the plugin.\n */\n static registerPlugin(name, plugin) {\n if (typeof name !== 'string') {\n throw new Error(`Illegal plugin name, \"${name}\", must be a string, was ${typeof name}.`);\n }\n if (pluginExists(name)) {\n log$1.warn(`A plugin named \"${name}\" already exists. You may want to avoid re-registering plugins!`);\n } else if (Player.prototype.hasOwnProperty(name)) {\n throw new Error(`Illegal plugin name, \"${name}\", cannot share a name with an existing player method!`);\n }\n if (typeof plugin !== 'function') {\n throw new Error(`Illegal plugin for \"${name}\", must be a function, was ${typeof plugin}.`);\n }\n pluginStorage[name] = plugin;\n\n // Add a player prototype method for all sub-classed plugins (but not for\n // the base Plugin class).\n if (name !== BASE_PLUGIN_NAME) {\n if (Plugin.isBasic(plugin)) {\n Player.prototype[name] = createBasicPlugin(name, plugin);\n } else {\n Player.prototype[name] = createPluginFactory(name, plugin);\n }\n }\n return plugin;\n }\n\n /**\n * De-register a Video.js plugin.\n *\n * @param {string} name\n * The name of the plugin to be de-registered. Must be a string that\n * matches an existing plugin.\n *\n * @throws {Error}\n * If an attempt is made to de-register the base plugin.\n */\n static deregisterPlugin(name) {\n if (name === BASE_PLUGIN_NAME) {\n throw new Error('Cannot de-register base plugin.');\n }\n if (pluginExists(name)) {\n delete pluginStorage[name];\n delete Player.prototype[name];\n }\n }\n\n /**\n * Gets an object containing multiple Video.js plugins.\n *\n * @param {Array} [names]\n * If provided, should be an array of plugin names. Defaults to _all_\n * plugin names.\n *\n * @return {Object|undefined}\n * An object containing plugin(s) associated with their name(s) or\n * `undefined` if no matching plugins exist).\n */\n static getPlugins(names = Object.keys(pluginStorage)) {\n let result;\n names.forEach(name => {\n const plugin = getPlugin(name);\n if (plugin) {\n result = result || {};\n result[name] = plugin;\n }\n });\n return result;\n }\n\n /**\n * Gets a plugin's version, if available\n *\n * @param {string} name\n * The name of a plugin.\n *\n * @return {string}\n * The plugin's version or an empty string.\n */\n static getPluginVersion(name) {\n const plugin = getPlugin(name);\n return plugin && plugin.VERSION || '';\n }\n}\n\n/**\n * Gets a plugin by name if it exists.\n *\n * @static\n * @method getPlugin\n * @memberOf Plugin\n * @param {string} name\n * The name of a plugin.\n *\n * @returns {typeof Plugin|Function|undefined}\n * The plugin (or `undefined`).\n */\nPlugin.getPlugin = getPlugin;\n\n/**\n * The name of the base plugin class as it is registered.\n *\n * @type {string}\n */\nPlugin.BASE_PLUGIN_NAME = BASE_PLUGIN_NAME;\nPlugin.registerPlugin(BASE_PLUGIN_NAME, Plugin);\n\n/**\n * Documented in player.js\n *\n * @ignore\n */\nPlayer.prototype.usingPlugin = function (name) {\n return !!this[PLUGIN_CACHE_KEY] && this[PLUGIN_CACHE_KEY][name] === true;\n};\n\n/**\n * Documented in player.js\n *\n * @ignore\n */\nPlayer.prototype.hasPlugin = function (name) {\n return !!pluginExists(name);\n};\n\n/**\n * Signals that a plugin is about to be set up on a player.\n *\n * @event Player#beforepluginsetup\n * @type {PluginEventHash}\n */\n\n/**\n * Signals that a plugin is about to be set up on a player - by name. 
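\n * (Added, illustrative) A hypothetical listener for this per-name event, assuming a plugin\n * registered as 'examplePlugin':\n *\n *   player.on('beforepluginsetup:examplePlugin', (e, hash) => console.log(hash.name));\n *\n * 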
The name\n * is the name of the plugin.\n *\n * @event Player#beforepluginsetup:$name\n * @type {PluginEventHash}\n */\n\n/**\n * Signals that a plugin has just been set up on a player.\n *\n * @event Player#pluginsetup\n * @type {PluginEventHash}\n */\n\n/**\n * Signals that a plugin has just been set up on a player - by name. The name\n * is the name of the plugin.\n *\n * @event Player#pluginsetup:$name\n * @type {PluginEventHash}\n */\n\n/**\n * @typedef {Object} PluginEventHash\n *\n * @property {string} instance\n * For basic plugins, the return value of the plugin function. For\n * advanced plugins, the plugin instance on which the event is fired.\n *\n * @property {string} name\n * The name of the plugin.\n *\n * @property {string} plugin\n * For basic plugins, the plugin function. For advanced plugins, the\n * plugin class/constructor.\n */\n\n/**\n * @file deprecate.js\n * @module deprecate\n */\n\n/**\n * Decorate a function with a deprecation message the first time it is called.\n *\n * @param {string} message\n * A deprecation message to log the first time the returned function\n * is called.\n *\n * @param {Function} fn\n * The function to be deprecated.\n *\n * @return {Function}\n * A wrapper function that will log a deprecation warning the first\n * time it is called. The return value will be the return value of\n * the wrapped function.\n */\nfunction deprecate(message, fn) {\n let warned = false;\n return function (...args) {\n if (!warned) {\n log$1.warn(message);\n }\n warned = true;\n return fn.apply(this, args);\n };\n}\n\n/**\n * Internal function used to mark a function as deprecated in the next major\n * version with consistent messaging.\n *\n * @param {number} major The major version where it will be removed\n * @param {string} oldName The old function name\n * @param {string} newName The new function name\n * @param {Function} fn The function to deprecate\n * @return {Function} The decorated function\n */\nfunction deprecateForMajor(major, oldName, newName, fn) {\n return deprecate(`${oldName} is deprecated and will be removed in ${major}.0; please use ${newName} instead.`, fn);\n}\n\nvar VjsErrors = {\n UnsupportedSidxContainer: 'unsupported-sidx-container-error',\n DashManifestSidxParsingError: 'dash-manifest-sidx-parsing-error',\n HlsPlaylistRequestError: 'hls-playlist-request-error',\n SegmentUnsupportedMediaFormat: 'segment-unsupported-media-format-error',\n UnsupportedMediaInitialization: 'unsupported-media-initialization-error',\n SegmentSwitchError: 'segment-switch-error',\n SegmentExceedsSourceBufferQuota: 'segment-exceeds-source-buffer-quota-error',\n SegmentAppendError: 'segment-append-error',\n VttLoadError: 'vtt-load-error',\n VttCueParsingError: 'vtt-cue-parsing-error',\n // Errors used in contrib-ads:\n AdsBeforePrerollError: 'ads-before-preroll-error',\n AdsPrerollError: 'ads-preroll-error',\n AdsMidrollError: 'ads-midroll-error',\n AdsPostrollError: 'ads-postroll-error',\n AdsMacroReplacementFailed: 'ads-macro-replacement-failed',\n AdsResumeContentFailed: 'ads-resume-content-failed',\n // Errors used in contrib-eme:\n EMEFailedToRequestMediaKeySystemAccess: 'eme-failed-request-media-key-system-access',\n EMEFailedToCreateMediaKeys: 'eme-failed-create-media-keys',\n EMEFailedToAttachMediaKeysToVideoElement: 'eme-failed-attach-media-keys-to-video',\n EMEFailedToCreateMediaKeySession: 'eme-failed-create-media-key-session',\n EMEFailedToSetServerCertificate: 'eme-failed-set-server-certificate',\n EMEFailedToGenerateLicenseRequest: 
'eme-failed-generate-license-request',\n EMEFailedToUpdateSessionWithReceivedLicenseKeys: 'eme-failed-update-session',\n EMEFailedToCloseSession: 'eme-failed-close-session',\n EMEFailedToRemoveKeysFromSession: 'eme-failed-remove-keys',\n EMEFailedToLoadSessionBySessionId: 'eme-failed-load-session'\n};\n\n/**\n * @file video.js\n * @module videojs\n */\n\n/**\n * Normalize an `id` value by trimming off a leading `#`\n *\n * @private\n * @param {string} id\n * A string, maybe with a leading `#`.\n *\n * @return {string}\n * The string, without any leading `#`.\n */\nconst normalizeId = id => id.indexOf('#') === 0 ? id.slice(1) : id;\n\n/**\n * A callback that is called when a component is ready. Does not have any\n * parameters and any callback value will be ignored. See: {@link Component~ReadyCallback}\n *\n * @callback ReadyCallback\n */\n\n/**\n * The `videojs()` function doubles as the main function for users to create a\n * {@link Player} instance as well as the main library namespace.\n *\n * It can also be used as a getter for a pre-existing {@link Player} instance.\n * However, we _strongly_ recommend using `videojs.getPlayer()` for this\n * purpose because it avoids any potential for unintended initialization.\n *\n * Due to [limitations](https://github.com/jsdoc3/jsdoc/issues/955#issuecomment-313829149)\n * of our JSDoc template, we cannot properly document this as both a function\n * and a namespace, so its function signature is documented here.\n *\n * #### Arguments\n * ##### id\n * string|Element, **required**\n *\n * Video element or video element ID.\n *\n * ##### options\n * Object, optional\n *\n * Options object for providing settings.\n * See: [Options Guide](https://docs.videojs.com/tutorial-options.html).\n *\n * ##### ready\n * {@link Component~ReadyCallback}, optional\n *\n * A function to be called when the {@link Player} and {@link Tech} are ready.\n *\n * #### Return Value\n *\n * The `videojs()` function returns a {@link Player} instance.\n *\n * @namespace\n *\n * @borrows AudioTrack as AudioTrack\n * @borrows Component.getComponent as getComponent\n * @borrows module:events.on as on\n * @borrows module:events.one as one\n * @borrows module:events.off as off\n * @borrows module:events.trigger as trigger\n * @borrows EventTarget as EventTarget\n * @borrows module:middleware.use as use\n * @borrows Player.players as players\n * @borrows Plugin.registerPlugin as registerPlugin\n * @borrows Plugin.deregisterPlugin as deregisterPlugin\n * @borrows Plugin.getPlugins as getPlugins\n * @borrows Plugin.getPlugin as getPlugin\n * @borrows Plugin.getPluginVersion as getPluginVersion\n * @borrows Tech.getTech as getTech\n * @borrows Tech.registerTech as registerTech\n * @borrows TextTrack as TextTrack\n * @borrows VideoTrack as VideoTrack\n *\n * @param {string|Element} id\n * Video element or video element ID.\n *\n * @param {Object} [options]\n * Options object for providing settings.\n * See: [Options Guide](https://docs.videojs.com/tutorial-options.html).\n *\n * @param {ReadyCallback} [ready]\n * A function to be called when the {@link Player} and {@link Tech} are\n * ready.\n *\n * @return {Player}\n * The `videojs()` function returns a {@link Player|Player} instance.\n */\nfunction videojs(id, options, ready) {\n let player = videojs.getPlayer(id);\n if (player) {\n if (options) {\n log$1.warn(`Player \"${id}\" is already initialised. 
Options will not be applied.`);\n }\n if (ready) {\n player.ready(ready);\n }\n return player;\n }\n const el = typeof id === 'string' ? $('#' + normalizeId(id)) : id;\n if (!isEl(el)) {\n throw new TypeError('The element or ID supplied is not valid. (videojs)');\n }\n\n // document.body.contains(el) will only check if el is contained within that one document.\n // This causes problems for elements in iframes.\n // Instead, use the element's ownerDocument instead of the global document.\n // This will make sure that the element is indeed in the dom of that document.\n // Additionally, check that the document in question has a default view.\n // If the document is no longer attached to the dom, the defaultView of the document will be null.\n // If element is inside Shadow DOM (e.g. is part of a Custom element), ownerDocument.body\n // always returns false. Instead, use the Shadow DOM root.\n const inShadowDom = 'getRootNode' in el ? el.getRootNode() instanceof window$1.ShadowRoot : false;\n const rootNode = inShadowDom ? el.getRootNode() : el.ownerDocument.body;\n if (!el.ownerDocument.defaultView || !rootNode.contains(el)) {\n log$1.warn('The element supplied is not included in the DOM');\n }\n options = options || {};\n\n // Store a copy of the el before modification, if it is to be restored in destroy()\n // If div ingest, store the parent div\n if (options.restoreEl === true) {\n options.restoreEl = (el.parentNode && el.parentNode.hasAttribute('data-vjs-player') ? el.parentNode : el).cloneNode(true);\n }\n hooks('beforesetup').forEach(hookFunction => {\n const opts = hookFunction(el, merge$1(options));\n if (!isObject(opts) || Array.isArray(opts)) {\n log$1.error('please return an object in beforesetup hooks');\n return;\n }\n options = merge$1(options, opts);\n });\n\n // We get the current \"Player\" component here in case an integration has\n // replaced it with a custom player.\n const PlayerComponent = Component$1.getComponent('Player');\n player = new PlayerComponent(el, options, ready);\n hooks('setup').forEach(hookFunction => hookFunction(player));\n return player;\n}\nvideojs.hooks_ = hooks_;\nvideojs.hooks = hooks;\nvideojs.hook = hook;\nvideojs.hookOnce = hookOnce;\nvideojs.removeHook = removeHook;\n\n// Add default styles\nif (window$1.VIDEOJS_NO_DYNAMIC_STYLE !== true && isReal()) {\n let style = $('.vjs-styles-defaults');\n if (!style) {\n style = createStyleElement('vjs-styles-defaults');\n const head = $('head');\n if (head) {\n head.insertBefore(style, head.firstChild);\n }\n setTextContent(style, `\n .video-js {\n width: 300px;\n height: 150px;\n }\n\n .vjs-fluid:not(.vjs-audio-only-mode) {\n padding-top: 56.25%\n }\n `);\n }\n}\n\n// Run Auto-load players\n// You have to wait at least once in case this script is loaded after your\n// video in the DOM (weird behavior only with minified version)\nautoSetupTimeout(1, videojs);\n\n/**\n * Current Video.js version. Follows [semantic versioning](https://semver.org/).\n *\n * @type {string}\n */\nvideojs.VERSION = version$6;\n\n/**\n * The global options object. 
These are the settings that take effect\n * if no overrides are specified when the player is created.\n *\n * @type {Object}\n */\nvideojs.options = Player.prototype.options_;\n\n/**\n * Get an object with the currently created players, keyed by player ID\n *\n * @return {Object}\n * The created players\n */\nvideojs.getPlayers = () => Player.players;\n\n/**\n * Get a single player based on an ID or DOM element.\n *\n * This is useful if you want to check if an element or ID has an associated\n * Video.js player, but not create one if it doesn't.\n *\n * @param {string|Element} id\n * An HTML element - `<video>`, `<audio>`, or `<video-js>` -\n * or a string matching the `id` of such an element.\n *\n * @return {Player|undefined}\n * A player instance or `undefined` if there is no player instance\n * matching the argument.\n */\nvideojs.getPlayer = id => {\n const players = Player.players;\n let tag;\n if (typeof id === 'string') {\n const nId = normalizeId(id);\n const player = players[nId];\n if (player) {\n return player;\n }\n tag = $('#' + nId);\n } else {\n tag = id;\n }\n if (isEl(tag)) {\n const {\n player,\n playerId\n } = tag;\n\n // Element may have a `player` property referring to an already created\n // player instance. If so, return that.\n if (player || players[playerId]) {\n return player || players[playerId];\n }\n }\n};\n\n/**\n * Returns an array of all current players.\n *\n * @return {Array}\n * An array of all players. The array will be in the order that\n * `Object.keys` provides, which could potentially vary between\n * JavaScript engines.\n *\n */\nvideojs.getAllPlayers = () =>\n// Disposed players leave a key with a `null` value, so we need to make sure\n// we filter those out.\nObject.keys(Player.players).map(k => Player.players[k]).filter(Boolean);\nvideojs.players = Player.players;\nvideojs.getComponent = Component$1.getComponent;\n\n
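/*\n * Usage sketch (added; illustrative, not part of the original source; 'my-video' is an assumed\n * element id):\n *\n *   // Look up an existing player without creating one, then fall back to creating it.\n *   const player = videojs.getPlayer('my-video') || videojs('my-video', { controls: true });\n *   videojs.getAllPlayers().length; // >= 1 once a player exists\n */\n\n/**\n * Register a component so it can be referred to by name. Used when adding to other\n * components, either through addChild `component.addChild('myComponent')` or through\n * default children options `{ children: ['myComponent'] }`.\n *\n * > NOTE: You could also just initialize the component before adding.\n * `component.addChild(new MyComponent());`\n *\n * @param {string} name\n * The class name of the component\n *\n * @param {typeof Component} comp\n * The component class\n *\n * @return {typeof Component}\n * The newly registered component\n */\nvideojs.registerComponent = (name, comp) => {\n if (Tech.isTech(comp)) {\n log$1.warn(`The ${name} tech was registered as a component. 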
It should instead be registered using videojs.registerTech(name, tech)`);\n }\n return Component$1.registerComponent.call(Component$1, name, comp);\n};\nvideojs.getTech = Tech.getTech;\nvideojs.registerTech = Tech.registerTech;\nvideojs.use = use;\n\n/**\n * An object that can be returned by a middleware to signify\n * that the middleware is being terminated.\n *\n * @type {object}\n * @property {object} middleware.TERMINATOR\n */\nObject.defineProperty(videojs, 'middleware', {\n value: {},\n writeable: false,\n enumerable: true\n});\nObject.defineProperty(videojs.middleware, 'TERMINATOR', {\n value: TERMINATOR,\n writeable: false,\n enumerable: true\n});\n\n/**\n * A reference to the {@link module:browser|browser utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:browser|browser}\n */\nvideojs.browser = browser;\n\n/**\n * A reference to the {@link module:obj|obj utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:obj|obj}\n */\nvideojs.obj = Obj;\n\n/**\n * Deprecated reference to the {@link module:obj.merge|merge function}\n *\n * @type {Function}\n * @see {@link module:obj.merge|merge}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.obj.merge instead.\n */\nvideojs.mergeOptions = deprecateForMajor(9, 'videojs.mergeOptions', 'videojs.obj.merge', merge$1);\n\n/**\n * Deprecated reference to the {@link module:obj.defineLazyProperty|defineLazyProperty function}\n *\n * @type {Function}\n * @see {@link module:obj.defineLazyProperty|defineLazyProperty}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.obj.defineLazyProperty instead.\n */\nvideojs.defineLazyProperty = deprecateForMajor(9, 'videojs.defineLazyProperty', 'videojs.obj.defineLazyProperty', defineLazyProperty);\n\n/**\n * Deprecated reference to the {@link module:fn.bind_|fn.bind_ function}\n *\n * @type {Function}\n * @see {@link module:fn.bind_|fn.bind_}\n * @deprecated Deprecated and will be removed in 9.0. Please use native Function.prototype.bind instead.\n */\nvideojs.bind = deprecateForMajor(9, 'videojs.bind', 'native Function.prototype.bind', bind_);\nvideojs.registerPlugin = Plugin.registerPlugin;\nvideojs.deregisterPlugin = Plugin.deregisterPlugin;\n\n/**\n * Deprecated method to register a plugin with Video.js\n *\n * @deprecated Deprecated and will be removed in 9.0. 
Use videojs.registerPlugin() instead.\n *\n * @param {string} name\n * The plugin name\n*\n * @param {typeof Plugin|Function} plugin\n * The plugin sub-class or function\n *\n * @return {typeof Plugin|Function}\n */\nvideojs.plugin = (name, plugin) => {\n log$1.warn('videojs.plugin() is deprecated; use videojs.registerPlugin() instead');\n return Plugin.registerPlugin(name, plugin);\n};\nvideojs.getPlugins = Plugin.getPlugins;\nvideojs.getPlugin = Plugin.getPlugin;\nvideojs.getPluginVersion = Plugin.getPluginVersion;\n\n/**\n * Adding languages so that they're available to all players.\n * Example: `videojs.addLanguage('es', { 'Hello': 'Hola' });`\n *\n * @param {string} code\n * The language code or dictionary property\n *\n * @param {Object} data\n * The data values to be translated\n *\n * @return {Object}\n * The resulting language dictionary object\n */\nvideojs.addLanguage = function (code, data) {\n code = ('' + code).toLowerCase();\n videojs.options.languages = merge$1(videojs.options.languages, {\n [code]: data\n });\n return videojs.options.languages[code];\n};\n\n/**\n * A reference to the {@link module:log|log utility module} as an object.\n *\n * @type {Function}\n * @see {@link module:log|log}\n */\nvideojs.log = log$1;\nvideojs.createLogger = createLogger;\n\n/**\n * A reference to the {@link module:time|time utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:time|time}\n */\nvideojs.time = Time;\n\n/**\n * Deprecated reference to the {@link module:time.createTimeRanges|createTimeRanges function}\n *\n * @type {Function}\n * @see {@link module:time.createTimeRanges|createTimeRanges}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.time.createTimeRanges instead.\n */\nvideojs.createTimeRange = deprecateForMajor(9, 'videojs.createTimeRange', 'videojs.time.createTimeRanges', createTimeRanges$1);\n\n/**\n * Deprecated reference to the {@link module:time.createTimeRanges|createTimeRanges function}\n *\n * @type {Function}\n * @see {@link module:time.createTimeRanges|createTimeRanges}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.time.createTimeRanges instead.\n */\nvideojs.createTimeRanges = deprecateForMajor(9, 'videojs.createTimeRanges', 'videojs.time.createTimeRanges', createTimeRanges$1);\n\n/**\n * Deprecated reference to the {@link module:time.formatTime|formatTime function}\n *\n * @type {Function}\n * @see {@link module:time.formatTime|formatTime}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.time.format instead.\n */\nvideojs.formatTime = deprecateForMajor(9, 'videojs.formatTime', 'videojs.time.formatTime', formatTime);\n\n/**\n * Deprecated reference to the {@link module:time.setFormatTime|setFormatTime function}\n *\n * @type {Function}\n * @see {@link module:time.setFormatTime|setFormatTime}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.time.setFormat instead.\n */\nvideojs.setFormatTime = deprecateForMajor(9, 'videojs.setFormatTime', 'videojs.time.setFormatTime', setFormatTime);\n\n/**\n * Deprecated reference to the {@link module:time.resetFormatTime|resetFormatTime function}\n *\n * @type {Function}\n * @see {@link module:time.resetFormatTime|resetFormatTime}\n * @deprecated Deprecated and will be removed in 9.0. 
Please use videojs.time.resetFormat instead.\n */\nvideojs.resetFormatTime = deprecateForMajor(9, 'videojs.resetFormatTime', 'videojs.time.resetFormatTime', resetFormatTime);\n\n/**\n * Deprecated reference to the {@link module:url.parseUrl|Url.parseUrl function}\n *\n * @type {Function}\n * @see {@link module:url.parseUrl|parseUrl}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.url.parseUrl instead.\n */\nvideojs.parseUrl = deprecateForMajor(9, 'videojs.parseUrl', 'videojs.url.parseUrl', parseUrl);\n\n/**\n * Deprecated reference to the {@link module:url.isCrossOrigin|Url.isCrossOrigin function}\n *\n * @type {Function}\n * @see {@link module:url.isCrossOrigin|isCrossOrigin}\n * @deprecated Deprecated and will be removed in 9.0. Please use videojs.url.isCrossOrigin instead.\n */\nvideojs.isCrossOrigin = deprecateForMajor(9, 'videojs.isCrossOrigin', 'videojs.url.isCrossOrigin', isCrossOrigin);\nvideojs.EventTarget = EventTarget$2;\nvideojs.any = any;\nvideojs.on = on;\nvideojs.one = one;\nvideojs.off = off;\nvideojs.trigger = trigger;\n\n/**\n * A cross-browser XMLHttpRequest wrapper.\n *\n * @function\n * @param {Object} options\n * Settings for the request.\n *\n * @return {XMLHttpRequest|XDomainRequest}\n * The request object.\n *\n * @see https://github.com/Raynos/xhr\n */\nvideojs.xhr = XHR;\nvideojs.TextTrack = TextTrack;\nvideojs.AudioTrack = AudioTrack;\nvideojs.VideoTrack = VideoTrack;\n['isEl', 'isTextNode', 'createEl', 'hasClass', 'addClass', 'removeClass', 'toggleClass', 'setAttributes', 'getAttributes', 'emptyEl', 'appendContent', 'insertContent'].forEach(k => {\n videojs[k] = function () {\n log$1.warn(`videojs.${k}() is deprecated; use videojs.dom.${k}() instead`);\n return Dom[k].apply(null, arguments);\n };\n});\nvideojs.computedStyle = deprecateForMajor(9, 'videojs.computedStyle', 'videojs.dom.computedStyle', computedStyle);\n\n/**\n * A reference to the {@link module:dom|DOM utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:dom|dom}\n */\nvideojs.dom = Dom;\n\n/**\n * A reference to the {@link module:fn|fn utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:fn|fn}\n */\nvideojs.fn = Fn;\n\n/**\n * A reference to the {@link module:num|num utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:num|num}\n */\nvideojs.num = Num;\n\n/**\n * A reference to the {@link module:str|str utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:str|str}\n */\nvideojs.str = Str;\n\n/**\n * A reference to the {@link module:url|URL utility module} as an object.\n *\n * @type {Object}\n * @see {@link module:url|url}\n */\nvideojs.url = Url;\n\n// The list of possible error types to occur in video.js\nvideojs.Error = VjsErrors;\n\n/*! 
@name videojs-contrib-quality-levels @version 4.1.0 @license Apache-2.0 */\n\n/**\n * A single QualityLevel.\n *\n * interface QualityLevel {\n * readonly attribute DOMString id;\n * attribute DOMString label;\n * readonly attribute long width;\n * readonly attribute long height;\n * readonly attribute long bitrate;\n * attribute boolean enabled;\n * };\n *\n * @class QualityLevel\n */\nclass QualityLevel {\n /**\n * Creates a QualityLevel\n *\n * @param {Representation|Object} representation The representation of the quality level\n * @param {string} representation.id Unique id of the QualityLevel\n * @param {number=} representation.width Resolution width of the QualityLevel\n * @param {number=} representation.height Resolution height of the QualityLevel\n * @param {number} representation.bandwidth Bitrate of the QualityLevel\n * @param {number=} representation.frameRate Frame-rate of the QualityLevel\n * @param {Function} representation.enabled Callback to enable/disable QualityLevel\n */\n constructor(representation) {\n let level = this; // eslint-disable-line\n\n level.id = representation.id;\n level.label = level.id;\n level.width = representation.width;\n level.height = representation.height;\n level.bitrate = representation.bandwidth;\n level.frameRate = representation.frameRate;\n level.enabled_ = representation.enabled;\n Object.defineProperty(level, 'enabled', {\n /**\n * Get whether the QualityLevel is enabled.\n *\n * @return {boolean} True if the QualityLevel is enabled.\n */\n get() {\n return level.enabled_();\n },\n /**\n * Enable or disable the QualityLevel.\n *\n * @param {boolean} enable true to enable QualityLevel, false to disable.\n */\n set(enable) {\n level.enabled_(enable);\n }\n });\n return level;\n }\n}\n\n/**\n * A list of QualityLevels.\n *\n * interface QualityLevelList : EventTarget {\n * getter QualityLevel (unsigned long index);\n * readonly attribute unsigned long length;\n * readonly attribute long selectedIndex;\n *\n * void addQualityLevel(QualityLevel qualityLevel)\n * void removeQualityLevel(QualityLevel remove)\n * QualityLevel? getQualityLevelById(DOMString id);\n *\n * attribute EventHandler onchange;\n * attribute EventHandler onaddqualitylevel;\n * attribute EventHandler onremovequalitylevel;\n * };\n *\n * @extends videojs.EventTarget\n * @class QualityLevelList\n */\nclass QualityLevelList extends videojs.EventTarget {\n /**\n * Creates a QualityLevelList.\n */\n constructor() {\n super();\n let list = this; // eslint-disable-line\n\n list.levels_ = [];\n list.selectedIndex_ = -1;\n\n /**\n * Get the index of the currently selected QualityLevel.\n *\n * @returns {number} The index of the selected QualityLevel. 
-1 if none selected.\n * @readonly\n */\n Object.defineProperty(list, 'selectedIndex', {\n get() {\n return list.selectedIndex_;\n }\n });\n\n /**\n * Get the length of the list of QualityLevels.\n *\n * @returns {number} The length of the list.\n * @readonly\n */\n Object.defineProperty(list, 'length', {\n get() {\n return list.levels_.length;\n }\n });\n list[Symbol.iterator] = () => list.levels_.values();\n return list;\n }\n\n /**\n * Adds a quality level to the list.\n *\n * @param {Representation|Object} representation The representation of the quality level\n * @param {string} representation.id Unique id of the QualityLevel\n * @param {number=} representation.width Resolution width of the QualityLevel\n * @param {number=} representation.height Resolution height of the QualityLevel\n * @param {number} representation.bandwidth Bitrate of the QualityLevel\n * @param {number=} representation.frameRate Frame-rate of the QualityLevel\n * @param {Function} representation.enabled Callback to enable/disable QualityLevel\n * @return {QualityLevel} the QualityLevel added to the list\n * @method addQualityLevel\n */\n addQualityLevel(representation) {\n let qualityLevel = this.getQualityLevelById(representation.id);\n\n // Do not add duplicate quality levels\n if (qualityLevel) {\n return qualityLevel;\n }\n const index = this.levels_.length;\n qualityLevel = new QualityLevel(representation);\n if (!('' + index in this)) {\n Object.defineProperty(this, index, {\n get() {\n return this.levels_[index];\n }\n });\n }\n this.levels_.push(qualityLevel);\n this.trigger({\n qualityLevel,\n type: 'addqualitylevel'\n });\n return qualityLevel;\n }\n\n /**\n * Removes a quality level from the list.\n *\n * @param {QualityLevel} qualityLevel The QualityLevel to remove from the list.\n * @return {QualityLevel|null} the QualityLevel removed or null if nothing removed\n * @method removeQualityLevel\n */\n removeQualityLevel(qualityLevel) {\n let removed = null;\n for (let i = 0, l = this.length; i < l; i++) {\n if (this[i] === qualityLevel) {\n removed = this.levels_.splice(i, 1)[0];\n if (this.selectedIndex_ === i) {\n this.selectedIndex_ = -1;\n } else if (this.selectedIndex_ > i) {\n this.selectedIndex_--;\n }\n break;\n }\n }\n if (removed) {\n this.trigger({\n qualityLevel,\n type: 'removequalitylevel'\n });\n }\n return removed;\n }\n\n /**\n * Searches for a QualityLevel with the given id.\n *\n * @param {string} id The id of the QualityLevel to find.\n * @return {QualityLevel|null} The QualityLevel with id, or null if not found.\n * @method getQualityLevelById\n */\n getQualityLevelById(id) {\n for (let i = 0, l = this.length; i < l; i++) {\n const level = this[i];\n if (level.id === id) {\n return level;\n }\n }\n return null;\n }\n\n /**\n * Resets the list of QualityLevels to empty\n *\n * @method dispose\n */\n dispose() {\n this.selectedIndex_ = -1;\n this.levels_.length = 0;\n }\n}\n\n/**\n * change - The selected QualityLevel has changed.\n * addqualitylevel - A QualityLevel has been added to the QualityLevelList.\n * removequalitylevel - A QualityLevel has been removed from the QualityLevelList.\n */\nQualityLevelList.prototype.allowedEvents_ = {\n change: 'change',\n addqualitylevel: 'addqualitylevel',\n removequalitylevel: 'removequalitylevel'\n};\n\n// emulate attribute EventHandler support to allow for feature detection\nfor (const event in QualityLevelList.prototype.allowedEvents_) {\n QualityLevelList.prototype['on' + event] = null;\n}\nvar version$5 = \"4.1.0\";\n\n/**\n * 
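Usage sketch for the resulting list (added; illustrative, not part of the original plugin docs):\n *\n *   const levels = player.qualityLevels();\n *   levels.on('addqualitylevel', ({qualityLevel}) => {\n *     qualityLevel.enabled = qualityLevel.height >= 720;\n *   });\n *   levels.selectedIndex; // index of the active level, or -1\n *\n * 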
Initialization function for the qualityLevels plugin. Sets up the QualityLevelList and\n * event handlers.\n *\n * @param {Player} player Player object.\n * @param {Object} options Plugin options object.\n * @return {QualityLevelList} a list of QualityLevels\n */\nconst initPlugin$1 = function (player, options) {\n const originalPluginFn = player.qualityLevels;\n const qualityLevelList = new QualityLevelList();\n const disposeHandler = function () {\n qualityLevelList.dispose();\n player.qualityLevels = originalPluginFn;\n player.off('dispose', disposeHandler);\n };\n player.on('dispose', disposeHandler);\n player.qualityLevels = () => qualityLevelList;\n player.qualityLevels.VERSION = version$5;\n return qualityLevelList;\n};\n\n/**\n * A video.js plugin.\n *\n * In the plugin function, the value of `this` is a video.js `Player`\n * instance. You cannot rely on the player being in a \"ready\" state here,\n * depending on how the plugin is invoked. This may or may not be important\n * to you; if not, remove the wait for \"ready\"!\n *\n * @param {Object} options Plugin options object\n * @return {QualityLevelList} a list of QualityLevels\n */\nconst qualityLevels = function (options) {\n return initPlugin$1(this, videojs.obj.merge({}, options));\n};\n\n// Register the plugin with video.js.\nvideojs.registerPlugin('qualityLevels', qualityLevels);\n\n// Include the version number.\nqualityLevels.VERSION = version$5;\n\n/*! @name @videojs/http-streaming @version 3.12.1 @license Apache-2.0 */\n\n/**\n * @file resolve-url.js - Handling how URLs are resolved and manipulated\n */\nconst resolveUrl = _resolveUrl;\n/**\n * If the xhr request was redirected, return the responseURL, otherwise,\n * return the original url.\n *\n * @api private\n *\n * @param {string} url - an url being requested\n * @param {XMLHttpRequest} req - xhr request result\n *\n * @return {string}\n */\n\nconst resolveManifestRedirect = (url, req) => {\n // To understand how the responseURL below is set and generated:\n // - https://fetch.spec.whatwg.org/#concept-response-url\n // - https://fetch.spec.whatwg.org/#atomic-http-redirect-handling\n if (req && req.responseURL && url !== req.responseURL) {\n return req.responseURL;\n }\n return url;\n};\nconst logger = source => {\n if (videojs.log.debug) {\n return videojs.log.debug.bind(videojs, 'VHS:', `${source} >`);\n }\n return function () {};\n};\n\n/**\n * Provides a compatibility layer between Video.js 7 and 8 API changes for VHS.\n */\n/**\n * Delegates to videojs.obj.merge (Video.js 8) or\n * videojs.mergeOptions (Video.js 7).\n */\n\nfunction merge(...args) {\n const context = videojs.obj || videojs;\n const fn = context.merge || context.mergeOptions;\n return fn.apply(context, args);\n}\n/**\n * Delegates to videojs.time.createTimeRanges (Video.js 8) or\n * videojs.createTimeRanges (Video.js 7).\n */\n\nfunction createTimeRanges(...args) {\n const context = videojs.time || videojs;\n const fn = context.createTimeRanges || context.createTimeRanges;\n return fn.apply(context, args);\n}\n/**\n * Converts provided buffered ranges to a descriptive string\n *\n * @param {TimeRanges} buffered - received buffered time ranges\n *\n * @return {string} - descriptive string\n */\n\nfunction bufferedRangesToString(buffered) {\n if (buffered.length === 0) {\n return 'Buffered Ranges are empty';\n }\n let bufferedRangesStr = 'Buffered Ranges: \\n';\n for (let i = 0; i < buffered.length; i++) {\n const start = buffered.start(i);\n const end = buffered.end(i);\n bufferedRangesStr += 
`${start} --> ${end}. Duration (${end - start})\\n`;\n }\n return bufferedRangesStr;\n}\n\n/**\n * ranges\n *\n * Utilities for working with TimeRanges.\n *\n */\n\nconst TIME_FUDGE_FACTOR = 1 / 30; // Comparisons between time values such as current time and the end of the buffered range\n// can be misleading because of precision differences or when the current media has poorly\n// aligned audio and video, which can cause values to be slightly off from what you would\n// expect. This value is what we consider to be safe to use in such comparisons to account\n// for these scenarios.\n\nconst SAFE_TIME_DELTA = TIME_FUDGE_FACTOR * 3;\nconst filterRanges = function (timeRanges, predicate) {\n const results = [];\n let i;\n if (timeRanges && timeRanges.length) {\n // Search for ranges that match the predicate\n for (i = 0; i < timeRanges.length; i++) {\n if (predicate(timeRanges.start(i), timeRanges.end(i))) {\n results.push([timeRanges.start(i), timeRanges.end(i)]);\n }\n }\n }\n return createTimeRanges(results);\n};\n/**\n * Attempts to find the buffered TimeRange that contains the specified\n * time.\n *\n * @param {TimeRanges} buffered - the TimeRanges object to query\n * @param {number} time - the time to filter on.\n * @return {TimeRanges} a new TimeRanges object\n */\n\nconst findRange = function (buffered, time) {\n return filterRanges(buffered, function (start, end) {\n return start - SAFE_TIME_DELTA <= time && end + SAFE_TIME_DELTA >= time;\n });\n};\n/**\n * Returns the TimeRanges that begin later than the specified time.\n *\n * @param {TimeRanges} timeRanges - the TimeRanges object to query\n * @param {number} time - the time to filter on.\n * @return {TimeRanges} a new TimeRanges object.\n */\n\nconst findNextRange = function (timeRanges, time) {\n return filterRanges(timeRanges, function (start) {\n return start - TIME_FUDGE_FACTOR >= time;\n });\n};\n/**\n * Returns gaps within a list of TimeRanges\n *\n * @param {TimeRanges} buffered - the TimeRanges object\n * @return {TimeRanges} a TimeRanges object of gaps\n */\n\nconst findGaps = function (buffered) {\n if (buffered.length < 2) {\n return createTimeRanges();\n }\n const ranges = [];\n for (let i = 1; i < buffered.length; i++) {\n const start = buffered.end(i - 1);\n const end = buffered.start(i);\n ranges.push([start, end]);\n }\n return createTimeRanges(ranges);\n};\n/**\n * Calculate the intersection of two TimeRanges\n *\n * @param {TimeRanges} bufferA\n * @param {TimeRanges} bufferB\n * @return {TimeRanges} The intersection of `bufferA` with `bufferB`\n */\n\nconst bufferIntersection = function (bufferA, bufferB) {\n let start = null;\n let end = null;\n let arity = 0;\n const extents = [];\n const ranges = [];\n if (!bufferA || !bufferA.length || !bufferB || !bufferB.length) {\n return createTimeRanges();\n } // Handle the case where we have both buffers and create an\n // intersection of the two\n\n let count = bufferA.length; // A) Gather up all start and end times\n\n while (count--) {\n extents.push({\n time: bufferA.start(count),\n type: 'start'\n });\n extents.push({\n time: bufferA.end(count),\n type: 'end'\n });\n }\n count = bufferB.length;\n while (count--) {\n extents.push({\n time: bufferB.start(count),\n type: 'start'\n });\n extents.push({\n time: bufferB.end(count),\n type: 'end'\n });\n } // B) Sort them by time\n\n extents.sort(function (a, b) {\n return a.time - b.time;\n }); // C) Go along one by one incrementing arity for start and decrementing\n // arity for ends\n\n
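 // (Added note, illustrative) The sweep below raises arity on each 'start' and lowers it on each\n // 'end'; reaching arity === 2 means both buffers cover that point, so an overlap begins, and\n // dropping back to 1 closes it. E.g. bufferA [0, 10] and bufferB [5, 15] intersect at [5, 10].\n for (count = 0; count < 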
extents.length; count++) {\n if (extents[count].type === 'start') {\n arity++; // D) If arity is ever incremented to 2 we are entering an\n // overlapping range\n\n if (arity === 2) {\n start = extents[count].time;\n }\n } else if (extents[count].type === 'end') {\n arity--; // E) If arity is ever decremented to 1 we are leaving an\n // overlapping range\n\n if (arity === 1) {\n end = extents[count].time;\n }\n } // F) Record overlapping ranges\n\n if (start !== null && end !== null) {\n ranges.push([start, end]);\n start = null;\n end = null;\n }\n }\n return createTimeRanges(ranges);\n};\n/**\n * Gets a human readable string for a TimeRange\n *\n * @param {TimeRange} range\n * @return {string} a human readable string\n */\n\nconst printableRange = range => {\n const strArr = [];\n if (!range || !range.length) {\n return '';\n }\n for (let i = 0; i < range.length; i++) {\n strArr.push(range.start(i) + ' => ' + range.end(i));\n }\n return strArr.join(', ');\n};\n/**\n * Calculates the amount of time left in seconds until the player hits the end of the\n * buffer and causes a rebuffer\n *\n * @param {TimeRange} buffered\n * The state of the buffer\n * @param {number} currentTime\n * The current time of the player\n * @param {number} playbackRate\n * The current playback rate of the player. Defaults to 1.\n * @return {number}\n * Time until the player has to start rebuffering in seconds.\n * @function timeUntilRebuffer\n */\n\nconst timeUntilRebuffer = function (buffered, currentTime, playbackRate = 1) {\n const bufferedEnd = buffered.length ? buffered.end(buffered.length - 1) : 0;\n return (bufferedEnd - currentTime) / playbackRate;\n};\n/**\n * Converts a TimeRanges object into an array representation\n *\n * @param {TimeRanges} timeRanges\n * @return {Array}\n */\n\nconst timeRangesToArray = timeRanges => {\n const timeRangesList = [];\n for (let i = 0; i < timeRanges.length; i++) {\n timeRangesList.push({\n start: timeRanges.start(i),\n end: timeRanges.end(i)\n });\n }\n return timeRangesList;\n};\n/**\n * Determines if two time range objects are different.\n *\n * @param {TimeRange} a\n * the first time range object to check\n *\n * @param {TimeRange} b\n * the second time range object to check\n *\n * @return {Boolean}\n * Whether the time range objects differ\n */\n\nconst isRangeDifferent = function (a, b) {\n // same object\n if (a === b) {\n return false;\n } // one or the other is undefined\n\n if (!a && b || !b && a) {\n return true;\n } // length is different\n\n if (a.length !== b.length) {\n return true;\n } // see if any start/end pair is different\n\n for (let i = 0; i < a.length; i++) {\n if (a.start(i) !== b.start(i) || a.end(i) !== b.end(i)) {\n return true;\n }\n } // if the length and every pair is the same\n // this is the same time range\n\n return false;\n};\nconst lastBufferedEnd = function (a) {\n if (!a || !a.length || !a.end) {\n return;\n }\n return a.end(a.length - 1);\n};\n/**\n * A utility function to add up the amount of time in a timeRange\n * after a specified startTime.\n * i.e. [[0, 10], [20, 40], [50, 60]] with a startTime of 0\n * would return 40 as there are 40 seconds after 0 in the timeRange\n *\n * @param {TimeRange} range\n * The range to check against\n * @param {number} startTime\n * The time in the time range that you should start counting from\n *\n * @return {number}\n * The number of seconds in the buffer past the specified time.\n */\n\nconst timeAheadOf = function (range, startTime) {\n let time = 0;\n if (!range || !range.length) {\n return 
time;\n }\n for (let i = 0; i < range.length; i++) {\n const start = range.start(i);\n const end = range.end(i); // startTime is after this range entirely\n\n if (startTime > end) {\n continue;\n } // startTime is within this range\n\n if (startTime > start && startTime <= end) {\n time += end - startTime;\n continue;\n } // startTime is before this range.\n\n time += end - start;\n }\n return time;\n};\n\n/**\n * @file playlist.js\n *\n * Playlist related utilities.\n */\n/**\n * Get the duration of a segment, with special cases for\n * llhls segments that do not have a duration yet.\n *\n * @param {Object} playlist\n * the playlist that the segment belongs to.\n * @param {Object} segment\n * the segment to get a duration for.\n *\n * @return {number}\n * the segment duration\n */\n\nconst segmentDurationWithParts = (playlist, segment) => {\n // if this isn't a preload segment\n // then we will have a segment duration that is accurate.\n if (!segment.preload) {\n return segment.duration;\n } // otherwise we have to add up parts and preload hints\n // to get an up to date duration.\n\n let result = 0;\n (segment.parts || []).forEach(function (p) {\n result += p.duration;\n }); // for preload hints we have to use partTargetDuration\n // as they won't even have a duration yet.\n\n (segment.preloadHints || []).forEach(function (p) {\n if (p.type === 'PART') {\n result += playlist.partTargetDuration;\n }\n });\n return result;\n};\n/**\n * A function to get a combined list of parts and segments with durations\n * and indexes.\n *\n * @param {Playlist} playlist the playlist to get the list for.\n *\n * @return {Array} The part/segment list.\n */\n\nconst getPartsAndSegments = playlist => (playlist.segments || []).reduce((acc, segment, si) => {\n if (segment.parts) {\n segment.parts.forEach(function (part, pi) {\n acc.push({\n duration: part.duration,\n segmentIndex: si,\n partIndex: pi,\n part,\n segment\n });\n });\n } else {\n acc.push({\n duration: segment.duration,\n segmentIndex: si,\n partIndex: null,\n segment,\n part: null\n });\n }\n return acc;\n}, []);\nconst getLastParts = media => {\n const lastSegment = media.segments && media.segments.length && media.segments[media.segments.length - 1];\n return lastSegment && lastSegment.parts || [];\n};\nconst getKnownPartCount = ({\n preloadSegment\n}) => {\n if (!preloadSegment) {\n return;\n }\n const {\n parts,\n preloadHints\n } = preloadSegment;\n let partCount = (preloadHints || []).reduce((count, hint) => count + (hint.type === 'PART' ? 1 : 0), 0);\n partCount += parts && parts.length ? 
parts.length : 0;\n return partCount;\n};\n/**\n * Get the number of seconds to delay from the end of a\n * live playlist.\n *\n * @param {Playlist} main the main playlist\n * @param {Playlist} media the media playlist\n * @return {number} the hold back in seconds.\n */\n\nconst liveEdgeDelay = (main, media) => {\n if (media.endList) {\n return 0;\n } // dash suggestedPresentationDelay trumps everything\n\n if (main && main.suggestedPresentationDelay) {\n return main.suggestedPresentationDelay;\n }\n const hasParts = getLastParts(media).length > 0; // look for \"part\" delays from ll-hls first\n\n if (hasParts && media.serverControl && media.serverControl.partHoldBack) {\n return media.serverControl.partHoldBack;\n } else if (hasParts && media.partTargetDuration) {\n return media.partTargetDuration * 3; // finally look for full segment delays\n } else if (media.serverControl && media.serverControl.holdBack) {\n return media.serverControl.holdBack;\n } else if (media.targetDuration) {\n return media.targetDuration * 3;\n }\n return 0;\n};\n/**\n * walk backward until we find a duration we can use\n * or return a failure\n *\n * @param {Playlist} playlist the playlist to walk through\n * @param {Number} endSequence the mediaSequence to stop walking on\n */\n\nconst backwardDuration = function (playlist, endSequence) {\n let result = 0;\n let i = endSequence - playlist.mediaSequence; // if a start time is available for segment immediately following\n // the interval, use it\n\n let segment = playlist.segments[i]; // Walk backward until we find the latest segment with timeline\n // information that is earlier than endSequence\n\n if (segment) {\n if (typeof segment.start !== 'undefined') {\n return {\n result: segment.start,\n precise: true\n };\n }\n if (typeof segment.end !== 'undefined') {\n return {\n result: segment.end - segment.duration,\n precise: true\n };\n }\n }\n while (i--) {\n segment = playlist.segments[i];\n if (typeof segment.end !== 'undefined') {\n return {\n result: result + segment.end,\n precise: true\n };\n }\n result += segmentDurationWithParts(playlist, segment);\n if (typeof segment.start !== 'undefined') {\n return {\n result: result + segment.start,\n precise: true\n };\n }\n }\n return {\n result,\n precise: false\n };\n};\n/**\n * walk forward until we find a duration we can use\n * or return a failure\n *\n * @param {Playlist} playlist the playlist to walk through\n * @param {number} endSequence the mediaSequence to stop walking on\n */\n\nconst forwardDuration = function (playlist, endSequence) {\n let result = 0;\n let segment;\n let i = endSequence - playlist.mediaSequence; // Walk forward until we find the earliest segment with timeline\n // information\n\n for (; i < playlist.segments.length; i++) {\n segment = playlist.segments[i];\n if (typeof segment.start !== 'undefined') {\n return {\n result: segment.start - result,\n precise: true\n };\n }\n result += segmentDurationWithParts(playlist, segment);\n if (typeof segment.end !== 'undefined') {\n return {\n result: segment.end - result,\n precise: true\n };\n }\n } // indicate we didn't find a useful duration estimate\n\n return {\n result: -1,\n precise: false\n };\n};\n/**\n * Calculate the media duration from the segments associated with a\n * playlist. The duration of a subinterval of the available segments\n * may be calculated by specifying an end index.\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} endSequence an exclusive upper boundary\n * for the playlist. 
Defaults to playlist length.\n * @param {number} expired the amount of time that has dropped\n * off the front of the playlist in a live scenario\n * @return {number} the duration between the first available segment\n * and end index.\n */\n\nconst intervalDuration = function (playlist, endSequence, expired) {\n if (typeof endSequence === 'undefined') {\n endSequence = playlist.mediaSequence + playlist.segments.length;\n }\n if (endSequence < playlist.mediaSequence) {\n return 0;\n } // do a backward walk to estimate the duration\n\n const backward = backwardDuration(playlist, endSequence);\n if (backward.precise) {\n // if we were able to base our duration estimate on timing\n // information provided directly from the Media Source, return\n // it\n return backward.result;\n } // walk forward to see if a precise duration estimate can be made\n // that way\n\n const forward = forwardDuration(playlist, endSequence);\n if (forward.precise) {\n // we found a segment that has been buffered and so it's\n // position is known precisely\n return forward.result;\n } // return the less-precise, playlist-based duration estimate\n\n return backward.result + expired;\n};\n/**\n * Calculates the duration of a playlist. If a start and end index\n * are specified, the duration will be for the subset of the media\n * timeline between those two indices. The total duration for live\n * playlists is always Infinity.\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} endSequence an exclusive upper\n * boundary for the playlist. Defaults to the playlist media\n * sequence number plus its length.\n * @param {number=} expired the amount of time that has\n * dropped off the front of the playlist in a live scenario\n * @return {number} the duration between the start index and end\n * index.\n */\n\nconst duration = function (playlist, endSequence, expired) {\n if (!playlist) {\n return 0;\n }\n if (typeof expired !== 'number') {\n expired = 0;\n } // if a slice of the total duration is not requested, use\n // playlist-level duration indicators when they're present\n\n if (typeof endSequence === 'undefined') {\n // if present, use the duration specified in the playlist\n if (playlist.totalDuration) {\n return playlist.totalDuration;\n } // duration should be Infinity for live playlists\n\n if (!playlist.endList) {\n return window$1.Infinity;\n }\n } // calculate the total duration based on the segment durations\n\n return intervalDuration(playlist, endSequence, expired);\n};\n/**\n * Calculate the time between two indexes in the current playlist\n * neight the start- nor the end-index need to be within the current\n * playlist in which case, the targetDuration of the playlist is used\n * to approximate the durations of the segments\n *\n * @param {Array} options.durationList list to iterate over for durations.\n * @param {number} options.defaultDuration duration to use for elements before or after the durationList\n * @param {number} options.startIndex partsAndSegments index to start\n * @param {number} options.endIndex partsAndSegments index to end.\n * @return {number} the number of seconds between startIndex and endIndex\n */\n\nconst sumDurations = function ({\n defaultDuration,\n durationList,\n startIndex,\n endIndex\n}) {\n let durations = 0;\n if (startIndex > endIndex) {\n [startIndex, endIndex] = [endIndex, startIndex];\n }\n if (startIndex < 0) {\n for (let i = startIndex; i < Math.min(0, endIndex); i++) {\n durations += defaultDuration;\n }\n startIndex = 0;\n }\n for (let i = 
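/* sum the listed durations between the clamped, ordered indexes */ 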
startIndex; i < endIndex; i++) {\n durations += durationList[i].duration;\n }\n return durations;\n};\n/**\n * Calculates the playlist end time\n *\n * @param {Object} playlist a media playlist object\n * @param {number=} expired the amount of time that has\n * dropped off the front of the playlist in a live scenario\n * @param {boolean|false} useSafeLiveEnd a boolean value indicating whether or not the\n * playlist end calculation should consider the safe live end\n * (truncate the playlist end by three segments). This is normally\n * used for calculating the end of the playlist's seekable range.\n * This takes into account the value of liveEdgePadding.\n * Setting liveEdgePadding to 0 is equivalent to setting this to false.\n * @param {number} liveEdgePadding a number indicating how far from the end of the playlist we should be in seconds.\n * If this is provided, it is used in the safe live end calculation.\n * Setting useSafeLiveEnd=false or liveEdgePadding=0 are equivalent.\n * Corresponds to suggestedPresentationDelay in DASH manifests.\n * @return {number} the end time of playlist\n * @function playlistEnd\n */\n\nconst playlistEnd = function (playlist, expired, useSafeLiveEnd, liveEdgePadding) {\n if (!playlist || !playlist.segments) {\n return null;\n }\n if (playlist.endList) {\n return duration(playlist);\n }\n if (expired === null) {\n return null;\n }\n expired = expired || 0;\n let lastSegmentEndTime = intervalDuration(playlist, playlist.mediaSequence + playlist.segments.length, expired);\n if (useSafeLiveEnd) {\n liveEdgePadding = typeof liveEdgePadding === 'number' ? liveEdgePadding : liveEdgeDelay(null, playlist);\n lastSegmentEndTime -= liveEdgePadding;\n } // don't return a time less than zero\n\n return Math.max(0, lastSegmentEndTime);\n};\n/**\n * Calculates the interval of time that is currently seekable in a\n * playlist. The returned time ranges are relative to the earliest\n * moment in the specified playlist that is still available. 
A full\n * seekable implementation for live streams would need to offset\n * these values by the duration of content that has expired from the\n * stream.\n *\n * @param {Object} playlist a media playlist object\n * dropped off the front of the playlist in a live scenario\n * @param {number=} expired the amount of time that has\n * dropped off the front of the playlist in a live scenario\n * @param {number} liveEdgePadding how far from the end of the playlist we should be in seconds.\n * Corresponds to suggestedPresentationDelay in DASH manifests.\n * @return {TimeRanges} the periods of time that are valid targets\n * for seeking\n */\n\nconst seekable = function (playlist, expired, liveEdgePadding) {\n const useSafeLiveEnd = true;\n const seekableStart = expired || 0;\n let seekableEnd = playlistEnd(playlist, expired, useSafeLiveEnd, liveEdgePadding);\n if (seekableEnd === null) {\n return createTimeRanges();\n } // Clamp seekable end since it can not be less than the seekable start\n\n if (seekableEnd < seekableStart) {\n seekableEnd = seekableStart;\n }\n return createTimeRanges(seekableStart, seekableEnd);\n};\n/**\n * Determine the index and estimated starting time of the segment that\n * contains a specified playback position in a media playlist.\n *\n * @param {Object} options.playlist the media playlist to query\n * @param {number} options.currentTime The number of seconds since the earliest\n * possible position to determine the containing segment for\n * @param {number} options.startTime the time when the segment/part starts\n * @param {number} options.startingSegmentIndex the segment index to start looking at.\n * @param {number?} [options.startingPartIndex] the part index to look at within the segment.\n *\n * @return {Object} an object with partIndex, segmentIndex, and startTime.\n */\n\nconst getMediaInfoForTime = function ({\n playlist,\n currentTime,\n startingSegmentIndex,\n startingPartIndex,\n startTime,\n exactManifestTimings\n}) {\n let time = currentTime - startTime;\n const partsAndSegments = getPartsAndSegments(playlist);\n let startIndex = 0;\n for (let i = 0; i < partsAndSegments.length; i++) {\n const partAndSegment = partsAndSegments[i];\n if (startingSegmentIndex !== partAndSegment.segmentIndex) {\n continue;\n } // skip this if part index does not match.\n\n if (typeof startingPartIndex === 'number' && typeof partAndSegment.partIndex === 'number' && startingPartIndex !== partAndSegment.partIndex) {\n continue;\n }\n startIndex = i;\n break;\n }\n if (time < 0) {\n // Walk backward from startIndex in the playlist, adding durations\n // until we find a segment that contains `time` and return it\n if (startIndex > 0) {\n for (let i = startIndex - 1; i >= 0; i--) {\n const partAndSegment = partsAndSegments[i];\n time += partAndSegment.duration;\n if (exactManifestTimings) {\n if (time < 0) {\n continue;\n }\n } else if (time + TIME_FUDGE_FACTOR <= 0) {\n continue;\n }\n return {\n partIndex: partAndSegment.partIndex,\n segmentIndex: partAndSegment.segmentIndex,\n startTime: startTime - sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: partsAndSegments,\n startIndex,\n endIndex: i\n })\n };\n }\n } // We were unable to find a good segment within the playlist\n // so select the first segment\n\n return {\n partIndex: partsAndSegments[0] && partsAndSegments[0].partIndex || null,\n segmentIndex: partsAndSegments[0] && partsAndSegments[0].segmentIndex || 0,\n startTime: currentTime\n };\n } // When startIndex is negative, we first walk forward 
to first segment\n // adding target durations. If we \"run out of time\" before getting to\n // the first segment, return the first segment\n\n if (startIndex < 0) {\n for (let i = startIndex; i < 0; i++) {\n time -= playlist.targetDuration;\n if (time < 0) {\n return {\n partIndex: partsAndSegments[0] && partsAndSegments[0].partIndex || null,\n segmentIndex: partsAndSegments[0] && partsAndSegments[0].segmentIndex || 0,\n startTime: currentTime\n };\n }\n }\n startIndex = 0;\n } // Walk forward from startIndex in the playlist, subtracting durations\n // until we find a segment that contains `time` and return it\n\n for (let i = startIndex; i < partsAndSegments.length; i++) {\n const partAndSegment = partsAndSegments[i];\n time -= partAndSegment.duration;\n const canUseFudgeFactor = partAndSegment.duration > TIME_FUDGE_FACTOR;\n const isExactlyAtTheEnd = time === 0;\n const isExtremelyCloseToTheEnd = canUseFudgeFactor && time + TIME_FUDGE_FACTOR >= 0;\n if (isExactlyAtTheEnd || isExtremelyCloseToTheEnd) {\n // 1) We are exactly at the end of the current segment.\n // 2) We are extremely close to the end of the current segment (The difference is less than 1 / 30).\n // We may encounter this situation when\n // we don't have exact match between segment duration info in the manifest and the actual duration of the segment\n // For example:\n // We appended 3 segments 10 seconds each, meaning we should have 30 sec buffered,\n // but we the actual buffered is 29.99999\n //\n // In both cases:\n // if we passed current time -> it means that we already played current segment\n // if we passed buffered.end -> it means that this segment is already loaded and buffered\n // we should select the next segment if we have one:\n if (i !== partsAndSegments.length - 1) {\n continue;\n }\n }\n if (exactManifestTimings) {\n if (time > 0) {\n continue;\n }\n } else if (time - TIME_FUDGE_FACTOR >= 0) {\n continue;\n }\n return {\n partIndex: partAndSegment.partIndex,\n segmentIndex: partAndSegment.segmentIndex,\n startTime: startTime + sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: partsAndSegments,\n startIndex,\n endIndex: i\n })\n };\n } // We are out of possible candidates so load the last one...\n\n return {\n segmentIndex: partsAndSegments[partsAndSegments.length - 1].segmentIndex,\n partIndex: partsAndSegments[partsAndSegments.length - 1].partIndex,\n startTime: currentTime\n };\n};\n/**\n * Check whether the playlist is excluded or not.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is excluded or not\n * @function isExcluded\n */\n\nconst isExcluded = function (playlist) {\n return playlist.excludeUntil && playlist.excludeUntil > Date.now();\n};\n/**\n * Check whether the playlist is compatible with current playback configuration or has\n * been excluded permanently for being incompatible.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is incompatible or not\n * @function isIncompatible\n */\n\nconst isIncompatible = function (playlist) {\n return playlist.excludeUntil && playlist.excludeUntil === Infinity;\n};\n/**\n * Check whether the playlist is enabled or not.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is enabled or not\n * @function isEnabled\n */\n\nconst isEnabled = function (playlist) {\n const excluded = isExcluded(playlist);\n return !playlist.disabled && !excluded;\n};\n/**\n * Check whether the 
playlist has been manually disabled through the representations api.\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist is disabled manually or not\n * @function isDisabled\n */\n\nconst isDisabled = function (playlist) {\n return playlist.disabled;\n};\n/**\n * Returns whether the current playlist is an AES encrypted HLS stream\n *\n * @return {boolean} true if it's an AES encrypted HLS stream\n */\n\nconst isAes = function (media) {\n for (let i = 0; i < media.segments.length; i++) {\n if (media.segments[i].key) {\n return true;\n }\n }\n return false;\n};\n/**\n * Checks if the playlist has a value for the specified attribute\n *\n * @param {string} attr\n * Attribute to check for\n * @param {Object} playlist\n * The media playlist object\n * @return {boolean}\n * Whether the playlist contains a value for the attribute or not\n * @function hasAttribute\n */\n\nconst hasAttribute = function (attr, playlist) {\n return playlist.attributes && playlist.attributes[attr];\n};\n/**\n * Estimates the time required to complete a segment download from the specified playlist\n *\n * @param {number} segmentDuration\n * Duration of requested segment\n * @param {number} bandwidth\n * Current measured bandwidth of the player\n * @param {Object} playlist\n * The media playlist object\n * @param {number=} bytesReceived\n * Number of bytes already received for the request. Defaults to 0\n * @return {number|NaN}\n * The estimated time to request the segment. NaN if bandwidth information for\n * the given playlist is unavailable\n * @function estimateSegmentRequestTime\n */\n\nconst estimateSegmentRequestTime = function (segmentDuration, bandwidth, playlist, bytesReceived = 0) {\n if (!hasAttribute('BANDWIDTH', playlist)) {\n return NaN;\n }\n const size = segmentDuration * playlist.attributes.BANDWIDTH;\n return (size - bytesReceived * 8) / bandwidth;\n};\n/*\n * Returns whether the current playlist is the lowest rendition\n *\n * @return {Boolean} true if on lowest rendition\n */\n\nconst isLowestEnabledRendition = (main, media) => {\n if (main.playlists.length === 1) {\n return true;\n }\n const currentBandwidth = media.attributes.BANDWIDTH || Number.MAX_VALUE;\n return main.playlists.filter(playlist => {\n if (!isEnabled(playlist)) {\n return false;\n }\n return (playlist.attributes.BANDWIDTH || 0) < currentBandwidth;\n }).length === 0;\n};\nconst playlistMatch = (a, b) => {\n // both playlits are null\n // or only one playlist is non-null\n // no match\n if (!a && !b || !a && b || a && !b) {\n return false;\n } // playlist objects are the same, match\n\n if (a === b) {\n return true;\n } // first try to use id as it should be the most\n // accurate\n\n if (a.id && b.id && a.id === b.id) {\n return true;\n } // next try to use reslovedUri as it should be the\n // second most accurate.\n\n if (a.resolvedUri && b.resolvedUri && a.resolvedUri === b.resolvedUri) {\n return true;\n } // finally try to use uri as it should be accurate\n // but might miss a few cases for relative uris\n\n if (a.uri && b.uri && a.uri === b.uri) {\n return true;\n }\n return false;\n};\nconst someAudioVariant = function (main, callback) {\n const AUDIO = main && main.mediaGroups && main.mediaGroups.AUDIO || {};\n let found = false;\n for (const groupName in AUDIO) {\n for (const label in AUDIO[groupName]) {\n found = callback(AUDIO[groupName][label]);\n if (found) {\n break;\n }\n }\n if (found) {\n break;\n }\n }\n return !!found;\n};\nconst isAudioOnly = main => {\n // we are 
audio only if we have no main playlists but do\n // have media group playlists.\n if (!main || !main.playlists || !main.playlists.length) {\n // without audio variants or playlists this\n // is not an audio only main.\n const found = someAudioVariant(main, variant => variant.playlists && variant.playlists.length || variant.uri);\n return found;\n } // if every playlist has only an audio codec it is audio only\n\n for (let i = 0; i < main.playlists.length; i++) {\n const playlist = main.playlists[i];\n const CODECS = playlist.attributes && playlist.attributes.CODECS; // all codecs are audio, this is an audio playlist.\n\n if (CODECS && CODECS.split(',').every(c => isAudioCodec(c))) {\n continue;\n } // playlist is in an audio group it is audio only\n\n const found = someAudioVariant(main, variant => playlistMatch(playlist, variant));\n if (found) {\n continue;\n } // if we make it here this playlist isn't audio and we\n // are not audio only\n\n return false;\n } // if we make it past every playlist without returning, then\n // this is an audio only playlist.\n\n return true;\n}; // exports\n\nvar Playlist = {\n liveEdgeDelay,\n duration,\n seekable,\n getMediaInfoForTime,\n isEnabled,\n isDisabled,\n isExcluded,\n isIncompatible,\n playlistEnd,\n isAes,\n hasAttribute,\n estimateSegmentRequestTime,\n isLowestEnabledRendition,\n isAudioOnly,\n playlistMatch,\n segmentDurationWithParts\n};\nconst {\n log\n} = videojs;\nconst createPlaylistID = (index, uri) => {\n return `${index}-${uri}`;\n}; // default function for creating a group id\n\nconst groupID = (type, group, label) => {\n return `placeholder-uri-${type}-${group}-${label}`;\n};\n/**\n * Parses a given m3u8 playlist\n *\n * @param {Function} [onwarn]\n * a function to call when the parser triggers a warning event.\n * @param {Function} [oninfo]\n * a function to call when the parser triggers an info event.\n * @param {string} manifestString\n * The downloaded manifest string\n * @param {Object[]} [customTagParsers]\n * An array of custom tag parsers for the m3u8-parser instance\n * @param {Object[]} [customTagMappers]\n * An array of custom tag mappers for the m3u8-parser instance\n * @param {boolean} [llhls]\n * Whether to keep ll-hls features in the manifest after parsing.\n * @return {Object}\n * The manifest object\n */\n\nconst parseManifest = ({\n onwarn,\n oninfo,\n manifestString,\n customTagParsers = [],\n customTagMappers = [],\n llhls\n}) => {\n const parser = new Parser();\n if (onwarn) {\n parser.on('warn', onwarn);\n }\n if (oninfo) {\n parser.on('info', oninfo);\n }\n customTagParsers.forEach(customParser => parser.addParser(customParser));\n customTagMappers.forEach(mapper => parser.addTagMapper(mapper));\n parser.push(manifestString);\n parser.end();\n const manifest = parser.manifest; // remove llhls features from the parsed manifest\n // if we don't want llhls support.\n\n if (!llhls) {\n ['preloadSegment', 'skip', 'serverControl', 'renditionReports', 'partInf', 'partTargetDuration'].forEach(function (k) {\n if (manifest.hasOwnProperty(k)) {\n delete manifest[k];\n }\n });\n if (manifest.segments) {\n manifest.segments.forEach(function (segment) {\n ['parts', 'preloadHints'].forEach(function (k) {\n if (segment.hasOwnProperty(k)) {\n delete segment[k];\n }\n });\n });\n }\n }\n if (!manifest.targetDuration) {\n let targetDuration = 10;\n if (manifest.segments && manifest.segments.length) {\n targetDuration = manifest.segments.reduce((acc, s) => Math.max(acc, s.duration), 0);\n }\n if (onwarn) {\n onwarn({\n 
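/* warn that #EXT-X-TARGETDURATION was missing; the fallback is the longest segment duration, or 10 seconds when there are no segments */ 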
message: `manifest has no targetDuration defaulting to ${targetDuration}`\n });\n }\n manifest.targetDuration = targetDuration;\n }\n const parts = getLastParts(manifest);\n if (parts.length && !manifest.partTargetDuration) {\n const partTargetDuration = parts.reduce((acc, p) => Math.max(acc, p.duration), 0);\n if (onwarn) {\n onwarn({\n message: `manifest has no partTargetDuration defaulting to ${partTargetDuration}`\n });\n log.error('LL-HLS manifest has parts but lacks required #EXT-X-PART-INF:PART-TARGET value. See https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis-09#section-4.4.3.7. Playback is not guaranteed.');\n }\n manifest.partTargetDuration = partTargetDuration;\n }\n return manifest;\n};\n/**\n * Loops through all supported media groups in main and calls the provided\n * callback for each group\n *\n * @param {Object} main\n * The parsed main manifest object\n * @param {Function} callback\n * Callback to call for each media group\n */\n\nconst forEachMediaGroup = (main, callback) => {\n if (!main.mediaGroups) {\n return;\n }\n ['AUDIO', 'SUBTITLES'].forEach(mediaType => {\n if (!main.mediaGroups[mediaType]) {\n return;\n }\n for (const groupKey in main.mediaGroups[mediaType]) {\n for (const labelKey in main.mediaGroups[mediaType][groupKey]) {\n const mediaProperties = main.mediaGroups[mediaType][groupKey][labelKey];\n callback(mediaProperties, mediaType, groupKey, labelKey);\n }\n }\n });\n};\n/**\n * Adds properties and attributes to the playlist to keep consistent functionality for\n * playlists throughout VHS.\n *\n * @param {Object} config\n * Arguments object\n * @param {Object} config.playlist\n * The media playlist\n * @param {string} [config.uri]\n * The uri to the media playlist (if media playlist is not from within a main\n * playlist)\n * @param {string} id\n * ID to use for the playlist\n */\n\nconst setupMediaPlaylist = ({\n playlist,\n uri,\n id\n}) => {\n playlist.id = id;\n playlist.playlistErrors_ = 0;\n if (uri) {\n // For media playlists, m3u8-parser does not have access to a URI, as HLS media\n // playlists do not contain their own source URI, but one is needed for consistency in\n // VHS.\n playlist.uri = uri;\n } // For HLS main playlists, even though certain attributes MUST be defined, the\n // stream may still be played without them.\n // For HLS media playlists, m3u8-parser does not attach an attributes object to the\n // manifest.\n //\n // To avoid undefined reference errors through the project, and make the code easier\n // to write/read, add an empty attributes object for these cases.\n\n playlist.attributes = playlist.attributes || {};\n};\n/**\n * Adds ID, resolvedUri, and attributes properties to each playlist of the main, where\n * necessary. In addition, creates playlist IDs for each playlist and adds playlist ID to\n * playlist references to the playlists array.\n *\n * @param {Object} main\n * The main playlist\n */\n\nconst setupMediaPlaylists = main => {\n let i = main.playlists.length;\n while (i--) {\n const playlist = main.playlists[i];\n setupMediaPlaylist({\n playlist,\n id: createPlaylistID(i, playlist.uri)\n });\n playlist.resolvedUri = resolveUrl(main.uri, playlist.uri);\n main.playlists[playlist.id] = playlist; // URI reference added for backwards compatibility\n\n main.playlists[playlist.uri] = playlist; // Although the spec states an #EXT-X-STREAM-INF tag MUST have a BANDWIDTH attribute,\n // the stream can be played without it. 
Although an attributes property may have been\n // added to the playlist to prevent undefined references, issue a warning to fix the\n // manifest.\n\n if (!playlist.attributes.BANDWIDTH) {\n log.warn('Invalid playlist STREAM-INF detected. Missing BANDWIDTH attribute.');\n }\n }\n};\n/**\n * Adds resolvedUri properties to each media group.\n *\n * @param {Object} main\n * The main playlist\n */\n\nconst resolveMediaGroupUris = main => {\n forEachMediaGroup(main, properties => {\n if (properties.uri) {\n properties.resolvedUri = resolveUrl(main.uri, properties.uri);\n }\n });\n};\n/**\n * Creates a main playlist wrapper to insert a sole media playlist into.\n *\n * @param {Object} media\n * Media playlist\n * @param {string} uri\n * The media URI\n *\n * @return {Object}\n * main playlist\n */\n\nconst mainForMedia = (media, uri) => {\n const id = createPlaylistID(0, uri);\n const main = {\n mediaGroups: {\n 'AUDIO': {},\n 'VIDEO': {},\n 'CLOSED-CAPTIONS': {},\n 'SUBTITLES': {}\n },\n uri: window$1.location.href,\n resolvedUri: window$1.location.href,\n playlists: [{\n uri,\n id,\n resolvedUri: uri,\n // m3u8-parser does not attach an attributes property to media playlists so make\n // sure that the property is attached to avoid undefined reference errors\n attributes: {}\n }]\n }; // set up ID reference\n\n main.playlists[id] = main.playlists[0]; // URI reference added for backwards compatibility\n\n main.playlists[uri] = main.playlists[0];\n return main;\n};\n/**\n * Does an in-place update of the main manifest to add updated playlist URI references\n * as well as other properties needed by VHS that aren't included by the parser.\n *\n * @param {Object} main\n * main manifest object\n * @param {string} uri\n * The source URI\n * @param {function} createGroupID\n * A function to determine how to create the groupID for mediaGroups\n */\n\nconst addPropertiesToMain = (main, uri, createGroupID = groupID) => {\n main.uri = uri;\n for (let i = 0; i < main.playlists.length; i++) {\n if (!main.playlists[i].uri) {\n // Set up phony URIs for the playlists since playlists are referenced by their URIs\n // throughout VHS, but some formats (e.g., DASH) don't have external URIs\n // TODO: consider adding dummy URIs in mpd-parser\n const phonyUri = `placeholder-uri-${i}`;\n main.playlists[i].uri = phonyUri;\n }\n }\n const audioOnlyMain = isAudioOnly(main);\n forEachMediaGroup(main, (properties, mediaType, groupKey, labelKey) => {\n // add a playlist array under properties\n if (!properties.playlists || !properties.playlists.length) {\n // If the manifest is audio only and this media group does not have a uri, check\n // if the media group is located in the main list of playlists. 
If it is, don't add\n // placeholder properties as it shouldn't be considered an alternate audio track.\n if (audioOnlyMain && mediaType === 'AUDIO' && !properties.uri) {\n for (let i = 0; i < main.playlists.length; i++) {\n const p = main.playlists[i];\n if (p.attributes && p.attributes.AUDIO && p.attributes.AUDIO === groupKey) {\n return;\n }\n }\n }\n properties.playlists = [_extends({}, properties)];\n }\n properties.playlists.forEach(function (p, i) {\n const groupId = createGroupID(mediaType, groupKey, labelKey, p);\n const id = createPlaylistID(i, groupId);\n if (p.uri) {\n p.resolvedUri = p.resolvedUri || resolveUrl(main.uri, p.uri);\n } else {\n // DEPRECATED, this has been added to prevent a breaking change.\n // previously we only ever had a single media group playlist, so\n // we mark the first playlist uri without prepending the index as we used to\n // ideally we would do all of the playlists the same way.\n p.uri = i === 0 ? groupId : id; // don't resolve a placeholder uri to an absolute url, just use\n // the placeholder again\n\n p.resolvedUri = p.uri;\n }\n p.id = p.id || id; // add an empty attributes object, all playlists are\n // expected to have this.\n\n p.attributes = p.attributes || {}; // setup ID and URI references (URI for backwards compatibility)\n\n main.playlists[p.id] = p;\n main.playlists[p.uri] = p;\n });\n });\n setupMediaPlaylists(main);\n resolveMediaGroupUris(main);\n};\nclass DateRangesStorage {\n constructor() {\n this.offset_ = null;\n this.pendingDateRanges_ = new Map();\n this.processedDateRanges_ = new Map();\n }\n setOffset(segments = []) {\n // already set\n if (this.offset_ !== null) {\n return;\n } // no segment to process\n\n if (!segments.length) {\n return;\n }\n const [firstSegment] = segments; // no program date time\n\n if (firstSegment.programDateTime === undefined) {\n return;\n } // Set offset as ProgramDateTime for the very first segment of the very first playlist load:\n\n this.offset_ = firstSegment.programDateTime / 1000;\n }\n setPendingDateRanges(dateRanges = []) {\n if (!dateRanges.length) {\n return;\n }\n const [dateRange] = dateRanges;\n const startTime = dateRange.startDate.getTime();\n this.trimProcessedDateRanges_(startTime);\n this.pendingDateRanges_ = dateRanges.reduce((map, pendingDateRange) => {\n map.set(pendingDateRange.id, pendingDateRange);\n return map;\n }, new Map());\n }\n processDateRange(dateRange) {\n this.pendingDateRanges_.delete(dateRange.id);\n this.processedDateRanges_.set(dateRange.id, dateRange);\n }\n getDateRangesToProcess() {\n if (this.offset_ === null) {\n return [];\n }\n const dateRangeClasses = {};\n const dateRangesToProcess = [];\n this.pendingDateRanges_.forEach((dateRange, id) => {\n if (this.processedDateRanges_.has(id)) {\n return;\n }\n dateRange.startTime = dateRange.startDate.getTime() / 1000 - this.offset_;\n dateRange.processDateRange = () => this.processDateRange(dateRange);\n dateRangesToProcess.push(dateRange);\n if (!dateRange.class) {\n return;\n }\n if (dateRangeClasses[dateRange.class]) {\n const length = dateRangeClasses[dateRange.class].push(dateRange);\n dateRange.classListIndex = length - 1;\n } else {\n dateRangeClasses[dateRange.class] = [dateRange];\n dateRange.classListIndex = 0;\n }\n });\n for (const dateRange of dateRangesToProcess) {\n const classList = dateRangeClasses[dateRange.class] || [];\n if (dateRange.endDate) {\n dateRange.endTime = dateRange.endDate.getTime() / 1000 - this.offset_;\n } else if (dateRange.endOnNext && classList[dateRange.classListIndex 
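/* END-ON-NEXT: this range ends where the next range of the same CLASS starts */ 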
+ 1]) {\n dateRange.endTime = classList[dateRange.classListIndex + 1].startTime;\n } else if (dateRange.duration) {\n dateRange.endTime = dateRange.startTime + dateRange.duration;\n } else if (dateRange.plannedDuration) {\n dateRange.endTime = dateRange.startTime + dateRange.plannedDuration;\n } else {\n dateRange.endTime = dateRange.startTime;\n }\n }\n return dateRangesToProcess;\n }\n trimProcessedDateRanges_(startTime) {\n const copy = new Map(this.processedDateRanges_);\n copy.forEach((dateRange, id) => {\n if (dateRange.startDate.getTime() < startTime) {\n this.processedDateRanges_.delete(id);\n }\n });\n }\n}\nconst {\n EventTarget: EventTarget$1\n} = videojs;\nconst addLLHLSQueryDirectives = (uri, media) => {\n if (media.endList || !media.serverControl) {\n return uri;\n }\n const parameters = {};\n if (media.serverControl.canBlockReload) {\n const {\n preloadSegment\n } = media; // next msn is a zero based value, length is not.\n\n let nextMSN = media.mediaSequence + media.segments.length; // If preload segment has parts then it is likely\n // that we are going to request a part of that preload segment.\n // the logic below is used to determine that.\n\n if (preloadSegment) {\n const parts = preloadSegment.parts || []; // _HLS_part is a zero based index\n\n const nextPart = getKnownPartCount(media) - 1; // if nextPart is > -1 and not equal to just the\n // length of parts, then we know we had part preload hints\n // and we need to add the _HLS_part= query\n\n if (nextPart > -1 && nextPart !== parts.length - 1) {\n // add existing parts to our preload hints\n // eslint-disable-next-line\n parameters._HLS_part = nextPart;\n } // this if statement makes sure that we request the msn\n // of the preload segment if:\n // 1. the preload segment had parts (and was not yet a full segment)\n // but was added to our segments array\n // 2. the preload segment had preload hints for parts that are not in\n // the manifest yet.\n // in all other cases we want the segment after the preload segment\n // which will be given by using media.segments.length because it is 1 based\n // rather than 0 based.\n\n if (nextPart > -1 || parts.length) {\n nextMSN--;\n }\n } // add _HLS_msn= in front of any _HLS_part query\n // eslint-disable-next-line\n\n parameters._HLS_msn = nextMSN;\n }\n if (media.serverControl && media.serverControl.canSkipUntil) {\n // add _HLS_skip= infront of all other queries.\n // eslint-disable-next-line\n parameters._HLS_skip = media.serverControl.canSkipDateranges ? 
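/* 'v2' also asks the server to skip old EXT-X-DATERANGE tags, while 'YES' skips media segments only; a blocking reload URI might look like media.m3u8?_HLS_msn=123&_HLS_part=2&_HLS_skip=YES (illustrative values) */ 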
'v2' : 'YES';\n }\n if (Object.keys(parameters).length) {\n const parsedUri = new window$1.URL(uri);\n ['_HLS_skip', '_HLS_msn', '_HLS_part'].forEach(function (name) {\n if (!parameters.hasOwnProperty(name)) {\n return;\n }\n parsedUri.searchParams.set(name, parameters[name]);\n });\n uri = parsedUri.toString();\n }\n return uri;\n};\n/**\n * Returns a new segment object with properties and\n * the parts array merged.\n *\n * @param {Object} a the old segment\n * @param {Object} b the new segment\n *\n * @return {Object} the merged segment\n */\n\nconst updateSegment = (a, b) => {\n if (!a) {\n return b;\n }\n const result = merge(a, b); // if only the old segment has preload hints\n // and the new one does not, remove preload hints.\n\n if (a.preloadHints && !b.preloadHints) {\n delete result.preloadHints;\n } // if only the old segment has parts\n // then the parts are no longer valid\n\n if (a.parts && !b.parts) {\n delete result.parts; // if both segments have parts\n // copy part propeties from the old segment\n // to the new one.\n } else if (a.parts && b.parts) {\n for (let i = 0; i < b.parts.length; i++) {\n if (a.parts && a.parts[i]) {\n result.parts[i] = merge(a.parts[i], b.parts[i]);\n }\n }\n } // set skipped to false for segments that have\n // have had information merged from the old segment.\n\n if (!a.skipped && b.skipped) {\n result.skipped = false;\n } // set preload to false for segments that have\n // had information added in the new segment.\n\n if (a.preload && !b.preload) {\n result.preload = false;\n }\n return result;\n};\n/**\n * Returns a new array of segments that is the result of merging\n * properties from an older list of segments onto an updated\n * list. No properties on the updated playlist will be ovewritten.\n *\n * @param {Array} original the outdated list of segments\n * @param {Array} update the updated list of segments\n * @param {number=} offset the index of the first update\n * segment in the original segment list. For non-live playlists,\n * this should always be zero and does not need to be\n * specified. 
For live playlists, it should be the difference\n * between the media sequence numbers in the original and updated\n * playlists.\n * @return {Array} a list of merged segment objects\n */\n\nconst updateSegments = (original, update, offset) => {\n const oldSegments = original.slice();\n const newSegments = update.slice();\n offset = offset || 0;\n const result = [];\n let currentMap;\n for (let newIndex = 0; newIndex < newSegments.length; newIndex++) {\n const oldSegment = oldSegments[newIndex + offset];\n const newSegment = newSegments[newIndex];\n if (oldSegment) {\n currentMap = oldSegment.map || currentMap;\n result.push(updateSegment(oldSegment, newSegment));\n } else {\n // carry over map to new segment if it is missing\n if (currentMap && !newSegment.map) {\n newSegment.map = currentMap;\n }\n result.push(newSegment);\n }\n }\n return result;\n};\nconst resolveSegmentUris = (segment, baseUri) => {\n // preloadSegment will not have a uri at all\n // as the segment isn't actually in the manifest yet, only parts\n if (!segment.resolvedUri && segment.uri) {\n segment.resolvedUri = resolveUrl(baseUri, segment.uri);\n }\n if (segment.key && !segment.key.resolvedUri) {\n segment.key.resolvedUri = resolveUrl(baseUri, segment.key.uri);\n }\n if (segment.map && !segment.map.resolvedUri) {\n segment.map.resolvedUri = resolveUrl(baseUri, segment.map.uri);\n }\n if (segment.map && segment.map.key && !segment.map.key.resolvedUri) {\n segment.map.key.resolvedUri = resolveUrl(baseUri, segment.map.key.uri);\n }\n if (segment.parts && segment.parts.length) {\n segment.parts.forEach(p => {\n if (p.resolvedUri) {\n return;\n }\n p.resolvedUri = resolveUrl(baseUri, p.uri);\n });\n }\n if (segment.preloadHints && segment.preloadHints.length) {\n segment.preloadHints.forEach(p => {\n if (p.resolvedUri) {\n return;\n }\n p.resolvedUri = resolveUrl(baseUri, p.uri);\n });\n }\n};\nconst getAllSegments = function (media) {\n const segments = media.segments || [];\n const preloadSegment = media.preloadSegment; // a preloadSegment with only preloadHints is not currently\n // a usable segment, only include a preloadSegment that has\n // parts.\n\n if (preloadSegment && preloadSegment.parts && preloadSegment.parts.length) {\n // if preloadHints has a MAP that means that the\n // init segment is going to change. We cannot use any of the parts\n // from this preload segment.\n if (preloadSegment.preloadHints) {\n for (let i = 0; i < preloadSegment.preloadHints.length; i++) {\n if (preloadSegment.preloadHints[i].type === 'MAP') {\n return segments;\n }\n }\n } // set the duration for our preload segment to target duration.\n\n preloadSegment.duration = media.targetDuration;\n preloadSegment.preload = true;\n segments.push(preloadSegment);\n }\n return segments;\n}; // consider the playlist unchanged if the playlist object is the same or\n// the number of segments is equal, the media sequence number is unchanged,\n// and this playlist hasn't become the end of the playlist\n\nconst isPlaylistUnchanged = (a, b) => a === b || a.segments && b.segments && a.segments.length === b.segments.length && a.endList === b.endList && a.mediaSequence === b.mediaSequence && a.preloadSegment === b.preloadSegment;\n/**\n * Returns a new main playlist that is the result of merging an\n * updated media playlist into the original version. 
If the\n * updated media playlist does not match any of the playlist\n * entries in the original main playlist, null is returned.\n *\n * @param {Object} main a parsed main M3U8 object\n * @param {Object} media a parsed media M3U8 object\n * @return {Object} a new object that represents the original\n * main playlist with the updated media playlist merged in, or\n * null if the merge produced no change.\n */\n\nconst updateMain$1 = (main, newMedia, unchangedCheck = isPlaylistUnchanged) => {\n const result = merge(main, {});\n const oldMedia = result.playlists[newMedia.id];\n if (!oldMedia) {\n return null;\n }\n if (unchangedCheck(oldMedia, newMedia)) {\n return null;\n }\n newMedia.segments = getAllSegments(newMedia);\n const mergedPlaylist = merge(oldMedia, newMedia); // always use the new media's preload segment\n\n if (mergedPlaylist.preloadSegment && !newMedia.preloadSegment) {\n delete mergedPlaylist.preloadSegment;\n } // if the update could overlap existing segment information, merge the two segment lists\n\n if (oldMedia.segments) {\n if (newMedia.skip) {\n newMedia.segments = newMedia.segments || []; // add back in objects for skipped segments, so that we merge\n // old properties into the new segments\n\n for (let i = 0; i < newMedia.skip.skippedSegments; i++) {\n newMedia.segments.unshift({\n skipped: true\n });\n }\n }\n mergedPlaylist.segments = updateSegments(oldMedia.segments, newMedia.segments, newMedia.mediaSequence - oldMedia.mediaSequence);\n } // resolve any segment URIs to prevent us from having to do it later\n\n mergedPlaylist.segments.forEach(segment => {\n resolveSegmentUris(segment, mergedPlaylist.resolvedUri);\n }); // TODO Right now in the playlists array there are two references to each playlist, one\n // that is referenced by index, and one by URI. 
The index reference may no longer be\n // necessary.\n\n for (let i = 0; i < result.playlists.length; i++) {\n if (result.playlists[i].id === newMedia.id) {\n result.playlists[i] = mergedPlaylist;\n }\n }\n result.playlists[newMedia.id] = mergedPlaylist; // URI reference added for backwards compatibility\n\n result.playlists[newMedia.uri] = mergedPlaylist; // update media group playlist references.\n\n forEachMediaGroup(main, (properties, mediaType, groupKey, labelKey) => {\n if (!properties.playlists) {\n return;\n }\n for (let i = 0; i < properties.playlists.length; i++) {\n if (newMedia.id === properties.playlists[i].id) {\n properties.playlists[i] = mergedPlaylist;\n }\n }\n });\n return result;\n};\n/**\n * Calculates the time to wait before refreshing a live playlist\n *\n * @param {Object} media\n * The current media\n * @param {boolean} update\n * True if there were any updates from the last refresh, false otherwise\n * @return {number}\n * The time in ms to wait before refreshing the live playlist\n */\n\nconst refreshDelay = (media, update) => {\n const segments = media.segments || [];\n const lastSegment = segments[segments.length - 1];\n const lastPart = lastSegment && lastSegment.parts && lastSegment.parts[lastSegment.parts.length - 1];\n const lastDuration = lastPart && lastPart.duration || lastSegment && lastSegment.duration;\n if (update && lastDuration) {\n return lastDuration * 1000;\n } // if the playlist is unchanged since the last reload or last segment duration\n // cannot be determined, try again after half the target duration\n\n return (media.partTargetDuration || media.targetDuration || 10) * 500;\n};\n/**\n * Load a playlist from a remote location\n *\n * @class PlaylistLoader\n * @extends Stream\n * @param {string|Object} src url or object of manifest\n * @param {boolean} withCredentials the withCredentials xhr option\n * @class\n */\n\nclass PlaylistLoader extends EventTarget$1 {\n constructor(src, vhs, options = {}) {\n super();\n if (!src) {\n throw new Error('A non-empty playlist URL or object is required');\n }\n this.logger_ = logger('PlaylistLoader');\n const {\n withCredentials = false\n } = options;\n this.src = src;\n this.vhs_ = vhs;\n this.withCredentials = withCredentials;\n this.addDateRangesToTextTrack_ = options.addDateRangesToTextTrack;\n const vhsOptions = vhs.options_;\n this.customTagParsers = vhsOptions && vhsOptions.customTagParsers || [];\n this.customTagMappers = vhsOptions && vhsOptions.customTagMappers || [];\n this.llhls = vhsOptions && vhsOptions.llhls;\n this.dateRangesStorage_ = new DateRangesStorage(); // initialize the loader state\n\n this.state = 'HAVE_NOTHING'; // live playlist staleness timeout\n\n this.handleMediaupdatetimeout_ = this.handleMediaupdatetimeout_.bind(this);\n this.on('mediaupdatetimeout', this.handleMediaupdatetimeout_);\n this.on('loadedplaylist', this.handleLoadedPlaylist_.bind(this));\n }\n handleLoadedPlaylist_() {\n const mediaPlaylist = this.media();\n if (!mediaPlaylist) {\n return;\n }\n this.dateRangesStorage_.setOffset(mediaPlaylist.segments);\n this.dateRangesStorage_.setPendingDateRanges(mediaPlaylist.dateRanges);\n const availableDateRanges = this.dateRangesStorage_.getDateRangesToProcess();\n if (!availableDateRanges.length || !this.addDateRangesToTextTrack_) {\n return;\n }\n this.addDateRangesToTextTrack_(availableDateRanges);\n }\n handleMediaupdatetimeout_() {\n if (this.state !== 'HAVE_METADATA') {\n // only refresh the media playlist if no other activity is going on\n return;\n }\n const 
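/* refresh the active live playlist; when LL-HLS is enabled the request URI also carries blocking-reload query directives */ 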
media = this.media();\n let uri = resolveUrl(this.main.uri, media.uri);\n if (this.llhls) {\n uri = addLLHLSQueryDirectives(uri, media);\n }\n this.state = 'HAVE_CURRENT_METADATA';\n this.request = this.vhs_.xhr({\n uri,\n withCredentials: this.withCredentials,\n requestType: 'hls-playlist'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n }\n if (error) {\n return this.playlistRequestError(this.request, this.media(), 'HAVE_METADATA');\n }\n this.haveMetadata({\n playlistString: this.request.responseText,\n url: this.media().uri,\n id: this.media().id\n });\n });\n }\n playlistRequestError(xhr, playlist, startingState) {\n const {\n uri,\n id\n } = playlist; // any in-flight request is now finished\n\n this.request = null;\n if (startingState) {\n this.state = startingState;\n }\n this.error = {\n playlist: this.main.playlists[id],\n status: xhr.status,\n message: `HLS playlist request error at URL: ${uri}.`,\n responseText: xhr.responseText,\n code: xhr.status >= 500 ? 4 : 2,\n metadata: {\n errorType: videojs.Error.HlsPlaylistRequestError\n }\n };\n this.trigger('error');\n }\n parseManifest_({\n url,\n manifestString\n }) {\n return parseManifest({\n onwarn: ({\n message\n }) => this.logger_(`m3u8-parser warn for ${url}: ${message}`),\n oninfo: ({\n message\n }) => this.logger_(`m3u8-parser info for ${url}: ${message}`),\n manifestString,\n customTagParsers: this.customTagParsers,\n customTagMappers: this.customTagMappers,\n llhls: this.llhls\n });\n }\n /**\n * Update the playlist loader's state in response to a new or updated playlist.\n *\n * @param {string} [playlistString]\n * Playlist string (if playlistObject is not provided)\n * @param {Object} [playlistObject]\n * Playlist object (if playlistString is not provided)\n * @param {string} url\n * URL of playlist\n * @param {string} id\n * ID to use for playlist\n */\n\n haveMetadata({\n playlistString,\n playlistObject,\n url,\n id\n }) {\n // any in-flight request is now finished\n this.request = null;\n this.state = 'HAVE_METADATA';\n const playlist = playlistObject || this.parseManifest_({\n url,\n manifestString: playlistString\n });\n playlist.lastRequest = Date.now();\n setupMediaPlaylist({\n playlist,\n uri: url,\n id\n }); // merge this playlist into the main manifest\n\n const update = updateMain$1(this.main, playlist);\n this.targetDuration = playlist.partTargetDuration || playlist.targetDuration;\n this.pendingMedia_ = null;\n if (update) {\n this.main = update;\n this.media_ = this.main.playlists[id];\n } else {\n this.trigger('playlistunchanged');\n }\n this.updateMediaUpdateTimeout_(refreshDelay(this.media(), !!update));\n this.trigger('loadedplaylist');\n }\n /**\n * Abort any outstanding work and clean up.\n */\n\n dispose() {\n this.trigger('dispose');\n this.stopRequest();\n window$1.clearTimeout(this.mediaUpdateTimeout);\n window$1.clearTimeout(this.finalRenditionTimeout);\n this.dateRangesStorage_ = new DateRangesStorage();\n this.off();\n }\n stopRequest() {\n if (this.request) {\n const oldRequest = this.request;\n this.request = null;\n oldRequest.onreadystatechange = null;\n oldRequest.abort();\n }\n }\n /**\n * When called without any arguments, returns the currently\n * active media playlist. When called with a single argument,\n * triggers the playlist loader to asynchronously switch to the\n * specified media playlist. 
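A playlist URI string may also be passed, in which case it is looked up in the loaded main playlist. 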
Calling this method while the\n * loader is in the HAVE_NOTHING causes an error to be emitted\n * but otherwise has no effect.\n *\n * @param {Object=} playlist the parsed media playlist\n * object to switch to\n * @param {boolean=} shouldDelay whether we should delay the request by half target duration\n *\n * @return {Playlist} the current loaded media\n */\n\n media(playlist, shouldDelay) {\n // getter\n if (!playlist) {\n return this.media_;\n } // setter\n\n if (this.state === 'HAVE_NOTHING') {\n throw new Error('Cannot switch media playlist from ' + this.state);\n } // find the playlist object if the target playlist has been\n // specified by URI\n\n if (typeof playlist === 'string') {\n if (!this.main.playlists[playlist]) {\n throw new Error('Unknown playlist URI: ' + playlist);\n }\n playlist = this.main.playlists[playlist];\n }\n window$1.clearTimeout(this.finalRenditionTimeout);\n if (shouldDelay) {\n const delay = (playlist.partTargetDuration || playlist.targetDuration) / 2 * 1000 || 5 * 1000;\n this.finalRenditionTimeout = window$1.setTimeout(this.media.bind(this, playlist, false), delay);\n return;\n }\n const startingState = this.state;\n const mediaChange = !this.media_ || playlist.id !== this.media_.id;\n const mainPlaylistRef = this.main.playlists[playlist.id]; // switch to fully loaded playlists immediately\n\n if (mainPlaylistRef && mainPlaylistRef.endList ||\n // handle the case of a playlist object (e.g., if using vhs-json with a resolved\n // media playlist or, for the case of demuxed audio, a resolved audio media group)\n playlist.endList && playlist.segments.length) {\n // abort outstanding playlist requests\n if (this.request) {\n this.request.onreadystatechange = null;\n this.request.abort();\n this.request = null;\n }\n this.state = 'HAVE_METADATA';\n this.media_ = playlist; // trigger media change if the active media has been updated\n\n if (mediaChange) {\n this.trigger('mediachanging');\n if (startingState === 'HAVE_MAIN_MANIFEST') {\n // The initial playlist was a main manifest, and the first media selected was\n // also provided (in the form of a resolved playlist object) as part of the\n // source object (rather than just a URL). Therefore, since the media playlist\n // doesn't need to be requested, loadedmetadata won't trigger as part of the\n // normal flow, and needs an explicit trigger here.\n this.trigger('loadedmetadata');\n } else {\n this.trigger('mediachange');\n }\n }\n return;\n } // We update/set the timeout here so that live playlists\n // that are not a media change will \"start\" the loader as expected.\n // We expect that this function will start the media update timeout\n // cycle again. 
This also prevents a playlist switch failure from\n // causing us to stall during live.\n\n this.updateMediaUpdateTimeout_(refreshDelay(playlist, true)); // switching to the active playlist is a no-op\n\n if (!mediaChange) {\n return;\n }\n this.state = 'SWITCHING_MEDIA'; // there is already an outstanding playlist request\n\n if (this.request) {\n if (playlist.resolvedUri === this.request.url) {\n // requesting to switch to the same playlist multiple times\n // has no effect after the first\n return;\n }\n this.request.onreadystatechange = null;\n this.request.abort();\n this.request = null;\n } // request the new playlist\n\n if (this.media_) {\n this.trigger('mediachanging');\n }\n this.pendingMedia_ = playlist;\n this.request = this.vhs_.xhr({\n uri: playlist.resolvedUri,\n withCredentials: this.withCredentials,\n requestType: 'hls-playlist'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n }\n playlist.lastRequest = Date.now();\n playlist.resolvedUri = resolveManifestRedirect(playlist.resolvedUri, req);\n if (error) {\n return this.playlistRequestError(this.request, playlist, startingState);\n }\n this.haveMetadata({\n playlistString: req.responseText,\n url: playlist.uri,\n id: playlist.id\n }); // fire loadedmetadata the first time a media playlist is loaded\n\n if (startingState === 'HAVE_MAIN_MANIFEST') {\n this.trigger('loadedmetadata');\n } else {\n this.trigger('mediachange');\n }\n });\n }\n /**\n * pause loading of the playlist\n */\n\n pause() {\n if (this.mediaUpdateTimeout) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n }\n this.stopRequest();\n if (this.state === 'HAVE_NOTHING') {\n // If we pause the loader before any data has been retrieved, its as if we never\n // started, so reset to an unstarted state.\n this.started = false;\n } // Need to restore state now that no activity is happening\n\n if (this.state === 'SWITCHING_MEDIA') {\n // if the loader was in the process of switching media, it should either return to\n // HAVE_MAIN_MANIFEST or HAVE_METADATA depending on if the loader has loaded a media\n // playlist yet. This is determined by the existence of loader.media_\n if (this.media_) {\n this.state = 'HAVE_METADATA';\n } else {\n this.state = 'HAVE_MAIN_MANIFEST';\n }\n } else if (this.state === 'HAVE_CURRENT_METADATA') {\n this.state = 'HAVE_METADATA';\n }\n }\n /**\n * start loading of the playlist\n */\n\n load(shouldDelay) {\n if (this.mediaUpdateTimeout) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n }\n const media = this.media();\n if (shouldDelay) {\n const delay = media ? 
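/* wait half the part/segment target duration, or five seconds if no media playlist has been loaded yet */ 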
(media.partTargetDuration || media.targetDuration) / 2 * 1000 : 5 * 1000;\n this.mediaUpdateTimeout = window$1.setTimeout(() => {\n this.mediaUpdateTimeout = null;\n this.load();\n }, delay);\n return;\n }\n if (!this.started) {\n this.start();\n return;\n }\n if (media && !media.endList) {\n this.trigger('mediaupdatetimeout');\n } else {\n this.trigger('loadedplaylist');\n }\n }\n updateMediaUpdateTimeout_(delay) {\n if (this.mediaUpdateTimeout) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n } // we only have use mediaupdatetimeout for live playlists.\n\n if (!this.media() || this.media().endList) {\n return;\n }\n this.mediaUpdateTimeout = window$1.setTimeout(() => {\n this.mediaUpdateTimeout = null;\n this.trigger('mediaupdatetimeout');\n this.updateMediaUpdateTimeout_(delay);\n }, delay);\n }\n /**\n * start loading of the playlist\n */\n\n start() {\n this.started = true;\n if (typeof this.src === 'object') {\n // in the case of an entirely constructed manifest object (meaning there's no actual\n // manifest on a server), default the uri to the page's href\n if (!this.src.uri) {\n this.src.uri = window$1.location.href;\n } // resolvedUri is added on internally after the initial request. Since there's no\n // request for pre-resolved manifests, add on resolvedUri here.\n\n this.src.resolvedUri = this.src.uri; // Since a manifest object was passed in as the source (instead of a URL), the first\n // request can be skipped (since the top level of the manifest, at a minimum, is\n // already available as a parsed manifest object). However, if the manifest object\n // represents a main playlist, some media playlists may need to be resolved before\n // the starting segment list is available. Therefore, go directly to setup of the\n // initial playlist, and let the normal flow continue from there.\n //\n // Note that the call to setup is asynchronous, as other sections of VHS may assume\n // that the first request is asynchronous.\n\n setTimeout(() => {\n this.setupInitialPlaylist(this.src);\n }, 0);\n return;\n } // request the specified URL\n\n this.request = this.vhs_.xhr({\n uri: this.src,\n withCredentials: this.withCredentials,\n requestType: 'hls-playlist'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n } // clear the loader's request reference\n\n this.request = null;\n if (error) {\n this.error = {\n status: req.status,\n message: `HLS playlist request error at URL: ${this.src}.`,\n responseText: req.responseText,\n // MEDIA_ERR_NETWORK\n code: 2,\n metadata: {\n errorType: videojs.Error.HlsPlaylistRequestError\n }\n };\n if (this.state === 'HAVE_NOTHING') {\n this.started = false;\n }\n return this.trigger('error');\n }\n this.src = resolveManifestRedirect(this.src, req);\n const manifest = this.parseManifest_({\n manifestString: req.responseText,\n url: this.src\n });\n this.setupInitialPlaylist(manifest);\n });\n }\n srcUri() {\n return typeof this.src === 'string' ? this.src : this.src.uri;\n }\n /**\n * Given a manifest object that's either a main or media playlist, trigger the proper\n * events and set the state of the playlist loader.\n *\n * If the manifest object represents a main playlist, `loadedplaylist` will be\n * triggered to allow listeners to select a playlist. 
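(Listeners typically select one by calling media() from their 'loadedplaylist' handler.) 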
If none is selected, the loader\n * will default to the first one in the playlists array.\n *\n * If the manifest object represents a media playlist, `loadedplaylist` will be\n * triggered followed by `loadedmetadata`, as the only available playlist is loaded.\n *\n * In the case of a media playlist, a main playlist object wrapper with one playlist\n * will be created so that all logic can handle playlists in the same fashion (as an\n * assumed manifest object schema).\n *\n * @param {Object} manifest\n * The parsed manifest object\n */\n\n setupInitialPlaylist(manifest) {\n this.state = 'HAVE_MAIN_MANIFEST';\n if (manifest.playlists) {\n this.main = manifest;\n addPropertiesToMain(this.main, this.srcUri()); // If the initial main playlist has playlists wtih segments already resolved,\n // then resolve URIs in advance, as they are usually done after a playlist request,\n // which may not happen if the playlist is resolved.\n\n manifest.playlists.forEach(playlist => {\n playlist.segments = getAllSegments(playlist);\n playlist.segments.forEach(segment => {\n resolveSegmentUris(segment, playlist.resolvedUri);\n });\n });\n this.trigger('loadedplaylist');\n if (!this.request) {\n // no media playlist was specifically selected so start\n // from the first listed one\n this.media(this.main.playlists[0]);\n }\n return;\n } // In order to support media playlists passed in as vhs-json, the case where the uri\n // is not provided as part of the manifest should be considered, and an appropriate\n // default used.\n\n const uri = this.srcUri() || window$1.location.href;\n this.main = mainForMedia(manifest, uri);\n this.haveMetadata({\n playlistObject: manifest,\n url: uri,\n id: this.main.playlists[0].id\n });\n this.trigger('loadedmetadata');\n }\n /**\n * Updates or deletes a preexisting pathway clone.\n * Ensures that all playlists related to the old pathway clone are\n * either updated or deleted.\n *\n * @param {Object} clone On update, the pathway clone object for the newly updated pathway clone.\n * On delete, the old pathway clone object to be deleted.\n * @param {boolean} isUpdate True if the pathway is to be updated,\n * false if it is meant to be deleted.\n */\n\n updateOrDeleteClone(clone, isUpdate) {\n const main = this.main;\n const pathway = clone.ID;\n let i = main.playlists.length; // Iterate backwards through the playlist so we can remove playlists if necessary.\n\n while (i--) {\n const p = main.playlists[i];\n if (p.attributes['PATHWAY-ID'] === pathway) {\n const oldPlaylistUri = p.resolvedUri;\n const oldPlaylistId = p.id; // update the indexed playlist and add new playlists by ID and URI\n\n if (isUpdate) {\n const newPlaylistUri = this.createCloneURI_(p.resolvedUri, clone);\n const newPlaylistId = createPlaylistID(pathway, newPlaylistUri);\n const attributes = this.createCloneAttributes_(pathway, p.attributes);\n const updatedPlaylist = this.createClonePlaylist_(p, newPlaylistId, clone, attributes);\n main.playlists[i] = updatedPlaylist;\n main.playlists[newPlaylistId] = updatedPlaylist;\n main.playlists[newPlaylistUri] = updatedPlaylist;\n } else {\n // Remove the indexed playlist.\n main.playlists.splice(i, 1);\n } // Remove playlists by the old ID and URI.\n\n delete main.playlists[oldPlaylistId];\n delete main.playlists[oldPlaylistUri];\n }\n }\n this.updateOrDeleteCloneMedia(clone, isUpdate);\n }\n /**\n * Updates or deletes media data based on the pathway clone object.\n * Due to the complexity of the media groups and playlists, in all cases\n * we remove all of the old 
media groups and playlists.\n * On updates, we then create new media groups and playlists based on the\n * new pathway clone object.\n *\n * @param {Object} clone The pathway clone object for the newly updated pathway clone.\n * @param {boolean} isUpdate True if the pathway is to be updated,\n * false if it is meant to be deleted.\n */\n\n updateOrDeleteCloneMedia(clone, isUpdate) {\n const main = this.main;\n const id = clone.ID;\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(mediaType => {\n if (!main.mediaGroups[mediaType] || !main.mediaGroups[mediaType][id]) {\n return;\n }\n for (const groupKey in main.mediaGroups[mediaType]) {\n // Remove all media playlists for the media group for this pathway clone.\n if (groupKey === id) {\n for (const labelKey in main.mediaGroups[mediaType][groupKey]) {\n const oldMedia = main.mediaGroups[mediaType][groupKey][labelKey];\n oldMedia.playlists.forEach((p, i) => {\n const oldMediaPlaylist = main.playlists[p.id];\n const oldPlaylistId = oldMediaPlaylist.id;\n const oldPlaylistUri = oldMediaPlaylist.resolvedUri;\n delete main.playlists[oldPlaylistId];\n delete main.playlists[oldPlaylistUri];\n });\n } // Delete the old media group.\n\n delete main.mediaGroups[mediaType][groupKey];\n }\n }\n }); // Create the new media groups and playlists if there is an update.\n\n if (isUpdate) {\n this.createClonedMediaGroups_(clone);\n }\n }\n /**\n * Given a pathway clone object, clones all necessary playlists.\n *\n * @param {Object} clone The pathway clone object.\n * @param {Object} basePlaylist The original playlist to clone from.\n */\n\n addClonePathway(clone, basePlaylist = {}) {\n const main = this.main;\n const index = main.playlists.length;\n const uri = this.createCloneURI_(basePlaylist.resolvedUri, clone);\n const playlistId = createPlaylistID(clone.ID, uri);\n const attributes = this.createCloneAttributes_(clone.ID, basePlaylist.attributes);\n const playlist = this.createClonePlaylist_(basePlaylist, playlistId, clone, attributes);\n main.playlists[index] = playlist; // add playlist by ID and URI\n\n main.playlists[playlistId] = playlist;\n main.playlists[uri] = playlist;\n this.createClonedMediaGroups_(clone);\n }\n /**\n * Given a pathway clone object we create clones of all media.\n * In this function, all necessary information and updated playlists\n * are added to the `mediaGroup` object.\n * Playlists are also added to the `playlists` array so the media groups\n * will be properly linked.\n *\n * @param {Object} clone The pathway clone object.\n */\n\n createClonedMediaGroups_(clone) {\n const id = clone.ID;\n const baseID = clone['BASE-ID'];\n const main = this.main;\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(mediaType => {\n // If the media type doesn't exist, or there is already a clone, skip\n // to the next media type.\n if (!main.mediaGroups[mediaType] || main.mediaGroups[mediaType][id]) {\n return;\n }\n for (const groupKey in main.mediaGroups[mediaType]) {\n if (groupKey === baseID) {\n // Create the group.\n main.mediaGroups[mediaType][id] = {};\n } else {\n // There is no need to iterate over label keys in this case.\n continue;\n }\n for (const labelKey in main.mediaGroups[mediaType][groupKey]) {\n const oldMedia = main.mediaGroups[mediaType][groupKey][labelKey];\n main.mediaGroups[mediaType][id][labelKey] = _extends({}, oldMedia);\n const newMedia = main.mediaGroups[mediaType][id][labelKey]; // update URIs on the media\n\n const newUri = this.createCloneURI_(oldMedia.resolvedUri, clone);\n newMedia.resolvedUri = newUri;\n 
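/*
  [Editor's aside — illustrative sketch, not part of this bundle.]
  addClonePathway and updateOrDeleteClone above rely on main.playlists being an Array
  that is *also* keyed by playlist id and by resolved URI, so the same object can be
  looked up three ways. A minimal standalone version of that registration pattern:

    const playlists = [];
    const registerPlaylist = (playlist) => {
      playlists[playlists.length] = playlist;     // by numeric index
      playlists[playlist.id] = playlist;          // by generated id
      playlists[playlist.resolvedUri] = playlist; // by resolved URI
    };

    registerPlaylist({ id: '0-main.m3u8', resolvedUri: 'https://example.com/main.m3u8' });
    // playlists[0] === playlists['0-main.m3u8'] === playlists['https://example.com/main.m3u8']
*/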
newMedia.uri = newUri; // Reset playlists in the new media group.\n\n newMedia.playlists = []; // Create new playlists in the newly cloned media group.\n\n oldMedia.playlists.forEach((p, i) => {\n const oldMediaPlaylist = main.playlists[p.id];\n const group = groupID(mediaType, id, labelKey);\n const newPlaylistID = createPlaylistID(id, group); // Check to see if it already exists\n\n if (oldMediaPlaylist && !main.playlists[newPlaylistID]) {\n const newMediaPlaylist = this.createClonePlaylist_(oldMediaPlaylist, newPlaylistID, clone);\n const newPlaylistUri = newMediaPlaylist.resolvedUri;\n main.playlists[newPlaylistID] = newMediaPlaylist;\n main.playlists[newPlaylistUri] = newMediaPlaylist;\n }\n newMedia.playlists[i] = this.createClonePlaylist_(p, newPlaylistID, clone);\n });\n }\n }\n });\n }\n /**\n * Using the original playlist to be cloned, and the pathway clone object\n * information, we create a new playlist.\n *\n * @param {Object} basePlaylist The original playlist to be cloned from.\n * @param {string} id The desired id of the newly cloned playlist.\n * @param {Object} clone The pathway clone object.\n * @param {Object} attributes An optional object to populate the `attributes` property in the playlist.\n *\n * @return {Object} The combined cloned playlist.\n */\n\n createClonePlaylist_(basePlaylist, id, clone, attributes) {\n const uri = this.createCloneURI_(basePlaylist.resolvedUri, clone);\n const newProps = {\n resolvedUri: uri,\n uri,\n id\n }; // Remove all segments from previous playlist in the clone.\n\n if (basePlaylist.segments) {\n newProps.segments = [];\n }\n if (attributes) {\n newProps.attributes = attributes;\n }\n return merge(basePlaylist, newProps);\n }\n /**\n * Generates an updated URI for a cloned pathway based on the original\n * pathway's URI and the paramaters from the pathway clone object in the\n * content steering server response.\n *\n * @param {string} baseUri URI to be updated in the cloned pathway.\n * @param {Object} clone The pathway clone object.\n *\n * @return {string} The updated URI for the cloned pathway.\n */\n\n createCloneURI_(baseURI, clone) {\n const uri = new URL(baseURI);\n uri.hostname = clone['URI-REPLACEMENT'].HOST;\n const params = clone['URI-REPLACEMENT'].PARAMS; // Add params to the cloned URL.\n\n for (const key of Object.keys(params)) {\n uri.searchParams.set(key, params[key]);\n }\n return uri.href;\n }\n /**\n * Helper function to create the attributes needed for the new clone.\n * This mainly adds the necessary media attributes.\n *\n * @param {string} id The pathway clone object ID.\n * @param {Object} oldAttributes The old attributes to compare to.\n * @return {Object} The new attributes to add to the playlist.\n */\n\n createCloneAttributes_(id, oldAttributes) {\n const attributes = {\n ['PATHWAY-ID']: id\n };\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(mediaType => {\n if (oldAttributes[mediaType]) {\n attributes[mediaType] = id;\n }\n });\n return attributes;\n }\n /**\n * Returns the key ID set from a playlist\n *\n * @param {playlist} playlist to fetch the key ID set from.\n * @return a Set of 32 digit hex strings that represent the unique keyIds for that playlist.\n */\n\n getKeyIdSet(playlist) {\n if (playlist.contentProtection) {\n const keyIds = new Set();\n for (const keysystem in playlist.contentProtection) {\n const keyId = playlist.contentProtection[keysystem].attributes.keyId;\n if (keyId) {\n keyIds.add(keyId.toLowerCase());\n }\n }\n return keyIds;\n }\n }\n}\n\n/**\n * @file xhr.js\n */\n\nconst 
callbackWrapper = function (request, error, response, callback) {\n const reqResponse = request.responseType === 'arraybuffer' ? request.response : request.responseText;\n if (!error && reqResponse) {\n request.responseTime = Date.now();\n request.roundTripTime = request.responseTime - request.requestTime;\n request.bytesReceived = reqResponse.byteLength || reqResponse.length;\n if (!request.bandwidth) {\n request.bandwidth = Math.floor(request.bytesReceived / request.roundTripTime * 8 * 1000);\n }\n }\n if (response.headers) {\n request.responseHeaders = response.headers;\n } // videojs.xhr now uses a specific code on the error\n // object to signal that a request has timed out instead\n // of setting a boolean on the request object\n\n if (error && error.code === 'ETIMEDOUT') {\n request.timedout = true;\n } // videojs.xhr no longer considers status codes outside of 200 and 0\n // (for file uris) to be errors, but the old XHR did, so emulate that\n // behavior. Status 206 may be used in response to byterange requests.\n\n if (!error && !request.aborted && response.statusCode !== 200 && response.statusCode !== 206 && response.statusCode !== 0) {\n error = new Error('XHR Failed with a response of: ' + (request && (reqResponse || request.responseText)));\n }\n callback(error, request);\n};\n/**\n * Iterates over the request hooks Set and calls them in order\n *\n * @param {Set} hooks the hook Set to iterate over\n * @param {Object} options the request options to pass to the xhr wrapper\n * @return the callback hook function return value, the modified or new options Object.\n */\n\nconst callAllRequestHooks = (requestSet, options) => {\n if (!requestSet || !requestSet.size) {\n return;\n }\n let newOptions = options;\n requestSet.forEach(requestCallback => {\n newOptions = requestCallback(newOptions);\n });\n return newOptions;\n};\n/**\n * Iterates over the response hooks Set and calls them in order.\n *\n * @param {Set} hooks the hook Set to iterate over\n * @param {Object} request the xhr request object\n * @param {Object} error the xhr error object\n * @param {Object} response the xhr response object\n */\n\nconst callAllResponseHooks = (responseSet, request, error, response) => {\n if (!responseSet || !responseSet.size) {\n return;\n }\n responseSet.forEach(responseCallback => {\n responseCallback(request, error, response);\n });\n};\nconst xhrFactory = function () {\n const xhr = function XhrFunction(options, callback) {\n // Add a default timeout\n options = merge({\n timeout: 45e3\n }, options); // Allow an optional user-specified function to modify the option\n // object before we construct the xhr request\n // TODO: Remove beforeRequest in the next major release.\n\n const beforeRequest = XhrFunction.beforeRequest || videojs.Vhs.xhr.beforeRequest; // onRequest and onResponse hooks as a Set, at either the player or global level.\n // TODO: new Set added here for beforeRequest alias. 
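// [Editor's aside — illustrative sketch, not part of this bundle.] Judging from
// callAllRequestHooks/callAllResponseHooks above, a request hook receives the xhr
// options object and must return the (possibly modified) options, while a response
// hook receives (request, error, response) and its return value is ignored. How the
// hook Sets are registered at the player or global level is outside this excerpt, so
// only the callback shapes below are taken from the code:
//
//   const addAuthHeader = (options) => Object.assign({}, options, {
//     headers: Object.assign({}, options.headers, { Authorization: 'Bearer <token>' })
//   });
//
//   const logResponse = (request, error, response) => {
//     console.log(request.uri, '->', response && response.statusCode, error || '');
//   };
//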
Remove this when beforeRequest is removed.\n\n const _requestCallbackSet = XhrFunction._requestCallbackSet || videojs.Vhs.xhr._requestCallbackSet || new Set();\n const _responseCallbackSet = XhrFunction._responseCallbackSet || videojs.Vhs.xhr._responseCallbackSet;\n if (beforeRequest && typeof beforeRequest === 'function') {\n videojs.log.warn('beforeRequest is deprecated, use onRequest instead.');\n _requestCallbackSet.add(beforeRequest);\n } // Use the standard videojs.xhr() method unless `videojs.Vhs.xhr` has been overriden\n // TODO: switch back to videojs.Vhs.xhr.name === 'XhrFunction' when we drop IE11\n\n const xhrMethod = videojs.Vhs.xhr.original === true ? videojs.xhr : videojs.Vhs.xhr; // call all registered onRequest hooks, assign new options.\n\n const beforeRequestOptions = callAllRequestHooks(_requestCallbackSet, options); // Remove the beforeRequest function from the hooks set so stale beforeRequest functions are not called.\n\n _requestCallbackSet.delete(beforeRequest); // xhrMethod will call XMLHttpRequest.open and XMLHttpRequest.send\n\n const request = xhrMethod(beforeRequestOptions || options, function (error, response) {\n // call all registered onResponse hooks\n callAllResponseHooks(_responseCallbackSet, request, error, response);\n return callbackWrapper(request, error, response, callback);\n });\n const originalAbort = request.abort;\n request.abort = function () {\n request.aborted = true;\n return originalAbort.apply(request, arguments);\n };\n request.uri = options.uri;\n request.requestTime = Date.now();\n return request;\n };\n xhr.original = true;\n return xhr;\n};\n/**\n * Turns segment byterange into a string suitable for use in\n * HTTP Range requests\n *\n * @param {Object} byterange - an object with two values defining the start and end\n * of a byte-range\n */\n\nconst byterangeStr = function (byterange) {\n // `byterangeEnd` is one less than `offset + length` because the HTTP range\n // header uses inclusive ranges\n let byterangeEnd;\n const byterangeStart = byterange.offset;\n if (typeof byterange.offset === 'bigint' || typeof byterange.length === 'bigint') {\n byterangeEnd = window$1.BigInt(byterange.offset) + window$1.BigInt(byterange.length) - window$1.BigInt(1);\n } else {\n byterangeEnd = byterange.offset + byterange.length - 1;\n }\n return 'bytes=' + byterangeStart + '-' + byterangeEnd;\n};\n/**\n * Defines headers for use in the xhr request for a particular segment.\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n */\n\nconst segmentXhrHeaders = function (segment) {\n const headers = {};\n if (segment.byterange) {\n headers.Range = byterangeStr(segment.byterange);\n }\n return headers;\n};\n\n/**\n * @file bin-utils.js\n */\n\n/**\n * convert a TimeRange to text\n *\n * @param {TimeRange} range the timerange to use for conversion\n * @param {number} i the iterator on the range to convert\n * @return {string} the range in string format\n */\n\nconst textRange = function (range, i) {\n return range.start(i) + '-' + range.end(i);\n};\n/**\n * format a number as hex string\n *\n * @param {number} e The number\n * @param {number} i the iterator\n * @return {string} the hex formatted number as a string\n */\n\nconst formatHexString = function (e, i) {\n const value = e.toString(16);\n return '00'.substring(0, 2 - value.length) + value + (i % 2 ? 
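/*
  [Editor's aside — illustrative example, not part of this bundle.]
  byterangeStr/segmentXhrHeaders above turn a { offset, length } byterange into an
  inclusive HTTP Range header:

    segmentXhrHeaders({ byterange: { offset: 0, length: 100 } });
    // -> { Range: 'bytes=0-99' }

    segmentXhrHeaders({ byterange: { offset: 500, length: 250 } });
    // -> { Range: 'bytes=500-749' }

    segmentXhrHeaders({});   // no byterange -> {}
*/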
' ' : '');\n};\nconst formatAsciiString = function (e) {\n if (e >= 0x20 && e < 0x7e) {\n return String.fromCharCode(e);\n }\n return '.';\n};\n/**\n * Creates an object for sending to a web worker modifying properties that are TypedArrays\n * into a new object with seperated properties for the buffer, byteOffset, and byteLength.\n *\n * @param {Object} message\n * Object of properties and values to send to the web worker\n * @return {Object}\n * Modified message with TypedArray values expanded\n * @function createTransferableMessage\n */\n\nconst createTransferableMessage = function (message) {\n const transferable = {};\n Object.keys(message).forEach(key => {\n const value = message[key];\n if (isArrayBufferView(value)) {\n transferable[key] = {\n bytes: value.buffer,\n byteOffset: value.byteOffset,\n byteLength: value.byteLength\n };\n } else {\n transferable[key] = value;\n }\n });\n return transferable;\n};\n/**\n * Returns a unique string identifier for a media initialization\n * segment.\n *\n * @param {Object} initSegment\n * the init segment object.\n *\n * @return {string} the generated init segment id\n */\n\nconst initSegmentId = function (initSegment) {\n const byterange = initSegment.byterange || {\n length: Infinity,\n offset: 0\n };\n return [byterange.length, byterange.offset, initSegment.resolvedUri].join(',');\n};\n/**\n * Returns a unique string identifier for a media segment key.\n *\n * @param {Object} key the encryption key\n * @return {string} the unique id for the media segment key.\n */\n\nconst segmentKeyId = function (key) {\n return key.resolvedUri;\n};\n/**\n * utils to help dump binary data to the console\n *\n * @param {Array|TypedArray} data\n * data to dump to a string\n *\n * @return {string} the data as a hex string.\n */\n\nconst hexDump = data => {\n const bytes = Array.prototype.slice.call(data);\n const step = 16;\n let result = '';\n let hex;\n let ascii;\n for (let j = 0; j < bytes.length / step; j++) {\n hex = bytes.slice(j * step, j * step + step).map(formatHexString).join('');\n ascii = bytes.slice(j * step, j * step + step).map(formatAsciiString).join('');\n result += hex + ' ' + ascii + '\\n';\n }\n return result;\n};\nconst tagDump = ({\n bytes\n}) => hexDump(bytes);\nconst textRanges = ranges => {\n let result = '';\n let i;\n for (i = 0; i < ranges.length; i++) {\n result += textRange(ranges, i) + ' ';\n }\n return result;\n};\nvar utils = /*#__PURE__*/Object.freeze({\n __proto__: null,\n createTransferableMessage: createTransferableMessage,\n initSegmentId: initSegmentId,\n segmentKeyId: segmentKeyId,\n hexDump: hexDump,\n tagDump: tagDump,\n textRanges: textRanges\n});\n\n// TODO handle fmp4 case where the timing info is accurate and doesn't involve transmux\n// 25% was arbitrarily chosen, and may need to be refined over time.\n\nconst SEGMENT_END_FUDGE_PERCENT = 0.25;\n/**\n * Converts a player time (any time that can be gotten/set from player.currentTime(),\n * e.g., any time within player.seekable().start(0) to player.seekable().end(0)) to a\n * program time (any time referencing the real world (e.g., EXT-X-PROGRAM-DATE-TIME)).\n *\n * The containing segment is required as the EXT-X-PROGRAM-DATE-TIME serves as an \"anchor\n * point\" (a point where we have a mapping from program time to player time, with player\n * time being the post transmux start of the segment).\n *\n * For more details, see [this doc](../../docs/program-time-from-player-time.md).\n *\n * @param {number} playerTime the player time\n * @param {Object} segment the 
segment which contains the player time\n * @return {Date} program time\n */\n\nconst playerTimeToProgramTime = (playerTime, segment) => {\n if (!segment.dateTimeObject) {\n // Can't convert without an \"anchor point\" for the program time (i.e., a time that can\n // be used to map the start of a segment with a real world time).\n return null;\n }\n const transmuxerPrependedSeconds = segment.videoTimingInfo.transmuxerPrependedSeconds;\n const transmuxedStart = segment.videoTimingInfo.transmuxedPresentationStart; // get the start of the content from before old content is prepended\n\n const startOfSegment = transmuxedStart + transmuxerPrependedSeconds;\n const offsetFromSegmentStart = playerTime - startOfSegment;\n return new Date(segment.dateTimeObject.getTime() + offsetFromSegmentStart * 1000);\n};\nconst originalSegmentVideoDuration = videoTimingInfo => {\n return videoTimingInfo.transmuxedPresentationEnd - videoTimingInfo.transmuxedPresentationStart - videoTimingInfo.transmuxerPrependedSeconds;\n};\n/**\n * Finds a segment that contains the time requested given as an ISO-8601 string. The\n * returned segment might be an estimate or an accurate match.\n *\n * @param {string} programTime The ISO-8601 programTime to find a match for\n * @param {Object} playlist A playlist object to search within\n */\n\nconst findSegmentForProgramTime = (programTime, playlist) => {\n // Assumptions:\n // - verifyProgramDateTimeTags has already been run\n // - live streams have been started\n let dateTimeObject;\n try {\n dateTimeObject = new Date(programTime);\n } catch (e) {\n return null;\n }\n if (!playlist || !playlist.segments || playlist.segments.length === 0) {\n return null;\n }\n let segment = playlist.segments[0];\n if (dateTimeObject < new Date(segment.dateTimeObject)) {\n // Requested time is before stream start.\n return null;\n }\n for (let i = 0; i < playlist.segments.length - 1; i++) {\n segment = playlist.segments[i];\n const nextSegmentStart = new Date(playlist.segments[i + 1].dateTimeObject);\n if (dateTimeObject < nextSegmentStart) {\n break;\n }\n }\n const lastSegment = playlist.segments[playlist.segments.length - 1];\n const lastSegmentStart = lastSegment.dateTimeObject;\n const lastSegmentDuration = lastSegment.videoTimingInfo ? originalSegmentVideoDuration(lastSegment.videoTimingInfo) : lastSegment.duration + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT;\n const lastSegmentEnd = new Date(lastSegmentStart.getTime() + lastSegmentDuration * 1000);\n if (dateTimeObject > lastSegmentEnd) {\n // Beyond the end of the stream, or our best guess of the end of the stream.\n return null;\n }\n if (dateTimeObject > new Date(lastSegmentStart)) {\n segment = lastSegment;\n }\n return {\n segment,\n estimatedStart: segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationStart : Playlist.duration(playlist, playlist.mediaSequence + playlist.segments.indexOf(segment)),\n // Although, given that all segments have accurate date time objects, the segment\n // selected should be accurate, unless the video has been transmuxed at some point\n // (determined by the presence of the videoTimingInfo object), the segment's \"player\n // time\" (the start time in the player) can't be considered accurate.\n type: segment.videoTimingInfo ? 
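/*
  [Editor's aside — worked example with made-up numbers, not part of this bundle.]
  For playerTimeToProgramTime above, given a segment with

    dateTimeObject                              = 2024-01-01T00:00:00.000Z
    videoTimingInfo.transmuxedPresentationStart = 10
    videoTimingInfo.transmuxerPrependedSeconds  = 0.5

  a playerTime of 12.5 maps to:

    startOfSegment = 10 + 0.5           = 10.5
    offset         = 12.5 - 10.5        = 2
    programTime    = 00:00:00.000Z + 2s = 2024-01-01T00:00:02.000Z
*/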
'accurate' : 'estimate'\n };\n};\n/**\n * Finds a segment that contains the given player time(in seconds).\n *\n * @param {number} time The player time to find a match for\n * @param {Object} playlist A playlist object to search within\n */\n\nconst findSegmentForPlayerTime = (time, playlist) => {\n // Assumptions:\n // - there will always be a segment.duration\n // - we can start from zero\n // - segments are in time order\n if (!playlist || !playlist.segments || playlist.segments.length === 0) {\n return null;\n }\n let segmentEnd = 0;\n let segment;\n for (let i = 0; i < playlist.segments.length; i++) {\n segment = playlist.segments[i]; // videoTimingInfo is set after the segment is downloaded and transmuxed, and\n // should contain the most accurate values we have for the segment's player times.\n //\n // Use the accurate transmuxedPresentationEnd value if it is available, otherwise fall\n // back to an estimate based on the manifest derived (inaccurate) segment.duration, to\n // calculate an end value.\n\n segmentEnd = segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationEnd : segmentEnd + segment.duration;\n if (time <= segmentEnd) {\n break;\n }\n }\n const lastSegment = playlist.segments[playlist.segments.length - 1];\n if (lastSegment.videoTimingInfo && lastSegment.videoTimingInfo.transmuxedPresentationEnd < time) {\n // The time requested is beyond the stream end.\n return null;\n }\n if (time > segmentEnd) {\n // The time is within or beyond the last segment.\n //\n // Check to see if the time is beyond a reasonable guess of the end of the stream.\n if (time > segmentEnd + lastSegment.duration * SEGMENT_END_FUDGE_PERCENT) {\n // Technically, because the duration value is only an estimate, the time may still\n // exist in the last segment, however, there isn't enough information to make even\n // a reasonable estimate.\n return null;\n }\n segment = lastSegment;\n }\n return {\n segment,\n estimatedStart: segment.videoTimingInfo ? segment.videoTimingInfo.transmuxedPresentationStart : segmentEnd - segment.duration,\n // Because videoTimingInfo is only set after transmux, it is the only way to get\n // accurate timing values.\n type: segment.videoTimingInfo ? 
'accurate' : 'estimate'\n };\n};\n/**\n * Gives the offset of the comparisonTimestamp from the programTime timestamp in seconds.\n * If the offset returned is positive, the programTime occurs after the\n * comparisonTimestamp.\n * If the offset is negative, the programTime occurs before the comparisonTimestamp.\n *\n * @param {string} comparisonTimeStamp An ISO-8601 timestamp to compare against\n * @param {string} programTime The programTime as an ISO-8601 string\n * @return {number} offset\n */\n\nconst getOffsetFromTimestamp = (comparisonTimeStamp, programTime) => {\n let segmentDateTime;\n let programDateTime;\n try {\n segmentDateTime = new Date(comparisonTimeStamp);\n programDateTime = new Date(programTime);\n } catch (e) {// TODO handle error\n }\n const segmentTimeEpoch = segmentDateTime.getTime();\n const programTimeEpoch = programDateTime.getTime();\n return (programTimeEpoch - segmentTimeEpoch) / 1000;\n};\n/**\n * Checks that all segments in this playlist have programDateTime tags.\n *\n * @param {Object} playlist A playlist object\n */\n\nconst verifyProgramDateTimeTags = playlist => {\n if (!playlist.segments || playlist.segments.length === 0) {\n return false;\n }\n for (let i = 0; i < playlist.segments.length; i++) {\n const segment = playlist.segments[i];\n if (!segment.dateTimeObject) {\n return false;\n }\n }\n return true;\n};\n/**\n * Returns the programTime of the media given a playlist and a playerTime.\n * The playlist must have programDateTime tags for a programDateTime tag to be returned.\n * If the segments containing the time requested have not been buffered yet, an estimate\n * may be returned to the callback.\n *\n * @param {Object} args\n * @param {Object} args.playlist A playlist object to search within\n * @param {number} time A playerTime in seconds\n * @param {Function} callback(err, programTime)\n * @return {string} err.message A detailed error message\n * @return {Object} programTime\n * @return {number} programTime.mediaSeconds The streamTime in seconds\n * @return {string} programTime.programDateTime The programTime as an ISO-8601 String\n */\n\nconst getProgramTime = ({\n playlist,\n time = undefined,\n callback\n}) => {\n if (!callback) {\n throw new Error('getProgramTime: callback must be provided');\n }\n if (!playlist || time === undefined) {\n return callback({\n message: 'getProgramTime: playlist and time must be provided'\n });\n }\n const matchedSegment = findSegmentForPlayerTime(time, playlist);\n if (!matchedSegment) {\n return callback({\n message: 'valid programTime was not found'\n });\n }\n if (matchedSegment.type === 'estimate') {\n return callback({\n message: 'Accurate programTime could not be determined.' + ' Please seek to e.seekTime and try again',\n seekTime: matchedSegment.estimatedStart\n });\n }\n const programTimeObject = {\n mediaSeconds: time\n };\n const programTime = playerTimeToProgramTime(time, matchedSegment.segment);\n if (programTime) {\n programTimeObject.programDateTime = programTime.toISOString();\n }\n return callback(null, programTimeObject);\n};\n/**\n * Seeks in the player to a time that matches the given programTime ISO-8601 string.\n *\n * @param {Object} args\n * @param {string} args.programTime A programTime to seek to as an ISO-8601 String\n * @param {Object} args.playlist A playlist to look within\n * @param {number} args.retryCount The number of times to try for an accurate seek. 
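 *
 * [Editor's aside — illustrative sketch, not part of this bundle; how the media playlist
 * is obtained from a player is an assumption here.] The getProgramTime counterpart
 * above is used callback-style:
 *
 *   getProgramTime({
 *     playlist: currentMediaPlaylist,   // e.g. from the active playlist loader
 *     time: player.currentTime(),
 *     callback: (err, programTime) => {
 *       if (err) {
 *         return console.warn(err.message, err.seekTime);
 *       }
 *       console.log(programTime.mediaSeconds, programTime.programDateTime);
 *     }
 *   });
 *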
Default is 2.\n * @param {Function} args.seekTo A method to perform a seek\n * @param {boolean} args.pauseAfterSeek Whether to end in a paused state after seeking. Default is true.\n * @param {Object} args.tech The tech to seek on\n * @param {Function} args.callback(err, newTime) A callback to return the new time to\n * @return {string} err.message A detailed error message\n * @return {number} newTime The exact time that was seeked to in seconds\n */\n\nconst seekToProgramTime = ({\n programTime,\n playlist,\n retryCount = 2,\n seekTo,\n pauseAfterSeek = true,\n tech,\n callback\n}) => {\n if (!callback) {\n throw new Error('seekToProgramTime: callback must be provided');\n }\n if (typeof programTime === 'undefined' || !playlist || !seekTo) {\n return callback({\n message: 'seekToProgramTime: programTime, seekTo and playlist must be provided'\n });\n }\n if (!playlist.endList && !tech.hasStarted_) {\n return callback({\n message: 'player must be playing a live stream to start buffering'\n });\n }\n if (!verifyProgramDateTimeTags(playlist)) {\n return callback({\n message: 'programDateTime tags must be provided in the manifest ' + playlist.resolvedUri\n });\n }\n const matchedSegment = findSegmentForProgramTime(programTime, playlist); // no match\n\n if (!matchedSegment) {\n return callback({\n message: `${programTime} was not found in the stream`\n });\n }\n const segment = matchedSegment.segment;\n const mediaOffset = getOffsetFromTimestamp(segment.dateTimeObject, programTime);\n if (matchedSegment.type === 'estimate') {\n // we've run out of retries\n if (retryCount === 0) {\n return callback({\n message: `${programTime} is not buffered yet. Try again`\n });\n }\n seekTo(matchedSegment.estimatedStart + mediaOffset);\n tech.one('seeked', () => {\n seekToProgramTime({\n programTime,\n playlist,\n retryCount: retryCount - 1,\n seekTo,\n pauseAfterSeek,\n tech,\n callback\n });\n });\n return;\n } // Since the segment.start value is determined from the buffered end or ending time\n // of the prior segment, the seekToTime doesn't need to account for any transmuxer\n // modifications.\n\n const seekToTime = segment.start + mediaOffset;\n const seekedCallback = () => {\n return callback(null, tech.currentTime());\n }; // listen for seeked event\n\n tech.one('seeked', seekedCallback); // pause before seeking as video.js will restore this state\n\n if (pauseAfterSeek) {\n tech.pause();\n }\n seekTo(seekToTime);\n};\n\n// which will only happen if the request is complete.\n\nconst callbackOnCompleted = (request, cb) => {\n if (request.readyState === 4) {\n return cb();\n }\n return;\n};\nconst containerRequest = (uri, xhr, cb) => {\n let bytes = [];\n let id3Offset;\n let finished = false;\n const endRequestAndCallback = function (err, req, type, _bytes) {\n req.abort();\n finished = true;\n return cb(err, req, type, _bytes);\n };\n const progressListener = function (error, request) {\n if (finished) {\n return;\n }\n if (error) {\n return endRequestAndCallback(error, request, '', bytes);\n } // grap the new part of content that was just downloaded\n\n const newPart = request.responseText.substring(bytes && bytes.byteLength || 0, request.responseText.length); // add that onto bytes\n\n bytes = concatTypedArrays(bytes, stringToBytes(newPart, true));\n id3Offset = id3Offset || getId3Offset(bytes); // we need at least 10 bytes to determine a type\n // or we need at least two bytes after an id3Offset\n\n if (bytes.length < 10 || id3Offset && bytes.length < id3Offset + 2) {\n return 
callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));\n }\n const type = detectContainerForBytes(bytes); // if this looks like a ts segment but we don't have enough data\n // to see the second sync byte, wait until we have enough data\n // before declaring it ts\n\n if (type === 'ts' && bytes.length < 188) {\n return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));\n } // this may be an unsynced ts segment\n // wait for 376 bytes before detecting no container\n\n if (!type && bytes.length < 376) {\n return callbackOnCompleted(request, () => endRequestAndCallback(error, request, '', bytes));\n }\n return endRequestAndCallback(null, request, type, bytes);\n };\n const options = {\n uri,\n beforeSend(request) {\n // this forces the browser to pass the bytes to us unprocessed\n request.overrideMimeType('text/plain; charset=x-user-defined');\n request.addEventListener('progress', function ({\n total,\n loaded\n }) {\n return callbackWrapper(request, null, {\n statusCode: request.status\n }, progressListener);\n });\n }\n };\n const request = xhr(options, function (error, response) {\n return callbackWrapper(request, error, response, progressListener);\n });\n return request;\n};\nconst {\n EventTarget\n} = videojs;\nconst dashPlaylistUnchanged = function (a, b) {\n if (!isPlaylistUnchanged(a, b)) {\n return false;\n } // for dash the above check will often return true in scenarios where\n // the playlist actually has changed because mediaSequence isn't a\n // dash thing, and we often set it to 1. So if the playlists have the same amount\n // of segments we return true.\n // So for dash we need to make sure that the underlying segments are different.\n // if sidx changed then the playlists are different.\n\n if (a.sidx && b.sidx && (a.sidx.offset !== b.sidx.offset || a.sidx.length !== b.sidx.length)) {\n return false;\n } else if (!a.sidx && b.sidx || a.sidx && !b.sidx) {\n return false;\n } // one or the other does not have segments\n // there was a change.\n\n if (a.segments && !b.segments || !a.segments && b.segments) {\n return false;\n } // neither has segments nothing changed\n\n if (!a.segments && !b.segments) {\n return true;\n } // check segments themselves\n\n for (let i = 0; i < a.segments.length; i++) {\n const aSegment = a.segments[i];\n const bSegment = b.segments[i]; // if uris are different between segments there was a change\n\n if (aSegment.uri !== bSegment.uri) {\n return false;\n } // neither segment has a byterange, there will be no byterange change.\n\n if (!aSegment.byterange && !bSegment.byterange) {\n continue;\n }\n const aByterange = aSegment.byterange;\n const bByterange = bSegment.byterange; // if byterange only exists on one of the segments, there was a change.\n\n if (aByterange && !bByterange || !aByterange && bByterange) {\n return false;\n } // if both segments have byterange with different offsets, there was a change.\n\n if (aByterange.offset !== bByterange.offset || aByterange.length !== bByterange.length) {\n return false;\n }\n } // if everything was the same with segments, this is the same playlist.\n\n return true;\n};\n/**\n * Use the representation IDs from the mpd object to create groupIDs, the NAME is set to mandatory representation\n * ID in the parser. This allows for continuous playout across periods with the same representation IDs\n * (continuous periods as defined in DASH-IF 3.2.12). This is assumed in the mpd-parser as well. 
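 *
 * [Editor's aside — illustrative example; the concrete type/group/label values are
 * made up, only the format string comes from the function below.]
 *
 *   // playlist whose representation id is exposed as attributes.NAME:
 *   dashGroupId('AUDIO', 'audio', 'en', { attributes: { NAME: 'audio=128000' } });
 *   // -> 'placeholder-uri-AUDIO-audio-audio=128000'
 *
 *   // non-compliant manifest without an ID: the label is used instead
 *   dashGroupId('AUDIO', 'audio', 'en', { attributes: {} });
 *   // -> 'placeholder-uri-AUDIO-audio-en'
 *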
If we want to support\n * periods without continuous playback this function may need modification as well as the parser.\n */\n\nconst dashGroupId = (type, group, label, playlist) => {\n // If the manifest somehow does not have an ID (non-dash compliant), use the label.\n const playlistId = playlist.attributes.NAME || label;\n return `placeholder-uri-${type}-${group}-${playlistId}`;\n};\n/**\n * Parses the main XML string and updates playlist URI references.\n *\n * @param {Object} config\n * Object of arguments\n * @param {string} config.mainXml\n * The mpd XML\n * @param {string} config.srcUrl\n * The mpd URL\n * @param {Date} config.clientOffset\n * A time difference between server and client\n * @param {Object} config.sidxMapping\n * SIDX mappings for moof/mdat URIs and byte ranges\n * @return {Object}\n * The parsed mpd manifest object\n */\n\nconst parseMainXml = ({\n mainXml,\n srcUrl,\n clientOffset,\n sidxMapping,\n previousManifest\n}) => {\n const manifest = parse(mainXml, {\n manifestUri: srcUrl,\n clientOffset,\n sidxMapping,\n previousManifest\n });\n addPropertiesToMain(manifest, srcUrl, dashGroupId);\n return manifest;\n};\n/**\n * Removes any mediaGroup labels that no longer exist in the newMain\n *\n * @param {Object} update\n * The previous mpd object being updated\n * @param {Object} newMain\n * The new mpd object\n */\n\nconst removeOldMediaGroupLabels = (update, newMain) => {\n forEachMediaGroup(update, (properties, type, group, label) => {\n if (!(label in newMain.mediaGroups[type][group])) {\n delete update.mediaGroups[type][group][label];\n }\n });\n};\n/**\n * Returns a new main manifest that is the result of merging an updated main manifest\n * into the original version.\n *\n * @param {Object} oldMain\n * The old parsed mpd object\n * @param {Object} newMain\n * The updated parsed mpd object\n * @return {Object}\n * A new object representing the original main manifest with the updated media\n * playlists merged in\n */\n\nconst updateMain = (oldMain, newMain, sidxMapping) => {\n let noChanges = true;\n let update = merge(oldMain, {\n // These are top level properties that can be updated\n duration: newMain.duration,\n minimumUpdatePeriod: newMain.minimumUpdatePeriod,\n timelineStarts: newMain.timelineStarts\n }); // First update the playlists in playlist list\n\n for (let i = 0; i < newMain.playlists.length; i++) {\n const playlist = newMain.playlists[i];\n if (playlist.sidx) {\n const sidxKey = generateSidxKey(playlist.sidx); // add sidx segments to the playlist if we have all the sidx info already\n\n if (sidxMapping && sidxMapping[sidxKey] && sidxMapping[sidxKey].sidx) {\n addSidxSegmentsToPlaylist(playlist, sidxMapping[sidxKey].sidx, playlist.sidx.resolvedUri);\n }\n }\n const playlistUpdate = updateMain$1(update, playlist, dashPlaylistUnchanged);\n if (playlistUpdate) {\n update = playlistUpdate;\n noChanges = false;\n }\n } // Then update media group playlists\n\n forEachMediaGroup(newMain, (properties, type, group, label) => {\n if (properties.playlists && properties.playlists.length) {\n const id = properties.playlists[0].id;\n const playlistUpdate = updateMain$1(update, properties.playlists[0], dashPlaylistUnchanged);\n if (playlistUpdate) {\n update = playlistUpdate; // add new mediaGroup label if it doesn't exist and assign the new mediaGroup.\n\n if (!(label in update.mediaGroups[type][group])) {\n update.mediaGroups[type][group][label] = properties;\n } // update the playlist reference within media groups\n\n 
update.mediaGroups[type][group][label].playlists[0] = update.playlists[id];\n noChanges = false;\n }\n }\n }); // remove mediaGroup labels and references that no longer exist in the newMain\n\n removeOldMediaGroupLabels(update, newMain);\n if (newMain.minimumUpdatePeriod !== oldMain.minimumUpdatePeriod) {\n noChanges = false;\n }\n if (noChanges) {\n return null;\n }\n return update;\n}; // SIDX should be equivalent if the URI and byteranges of the SIDX match.\n// If the SIDXs have maps, the two maps should match,\n// both `a` and `b` missing SIDXs is considered matching.\n// If `a` or `b` but not both have a map, they aren't matching.\n\nconst equivalentSidx = (a, b) => {\n const neitherMap = Boolean(!a.map && !b.map);\n const equivalentMap = neitherMap || Boolean(a.map && b.map && a.map.byterange.offset === b.map.byterange.offset && a.map.byterange.length === b.map.byterange.length);\n return equivalentMap && a.uri === b.uri && a.byterange.offset === b.byterange.offset && a.byterange.length === b.byterange.length;\n}; // exported for testing\n\nconst compareSidxEntry = (playlists, oldSidxMapping) => {\n const newSidxMapping = {};\n for (const id in playlists) {\n const playlist = playlists[id];\n const currentSidxInfo = playlist.sidx;\n if (currentSidxInfo) {\n const key = generateSidxKey(currentSidxInfo);\n if (!oldSidxMapping[key]) {\n break;\n }\n const savedSidxInfo = oldSidxMapping[key].sidxInfo;\n if (equivalentSidx(savedSidxInfo, currentSidxInfo)) {\n newSidxMapping[key] = oldSidxMapping[key];\n }\n }\n }\n return newSidxMapping;\n};\n/**\n * A function that filters out changed items as they need to be requested separately.\n *\n * The method is exported for testing\n *\n * @param {Object} main the parsed mpd XML returned via mpd-parser\n * @param {Object} oldSidxMapping the SIDX to compare against\n */\n\nconst filterChangedSidxMappings = (main, oldSidxMapping) => {\n const videoSidx = compareSidxEntry(main.playlists, oldSidxMapping);\n let mediaGroupSidx = videoSidx;\n forEachMediaGroup(main, (properties, mediaType, groupKey, labelKey) => {\n if (properties.playlists && properties.playlists.length) {\n const playlists = properties.playlists;\n mediaGroupSidx = merge(mediaGroupSidx, compareSidxEntry(playlists, oldSidxMapping));\n }\n });\n return mediaGroupSidx;\n};\nclass DashPlaylistLoader extends EventTarget {\n // DashPlaylistLoader must accept either a src url or a playlist because subsequent\n // playlist loader setups from media groups will expect to be able to pass a playlist\n // (since there aren't external URLs to media playlists with DASH)\n constructor(srcUrlOrPlaylist, vhs, options = {}, mainPlaylistLoader) {\n super();\n this.mainPlaylistLoader_ = mainPlaylistLoader || this;\n if (!mainPlaylistLoader) {\n this.isMain_ = true;\n }\n const {\n withCredentials = false\n } = options;\n this.vhs_ = vhs;\n this.withCredentials = withCredentials;\n this.addMetadataToTextTrack = options.addMetadataToTextTrack;\n if (!srcUrlOrPlaylist) {\n throw new Error('A non-empty playlist URL or object is required');\n } // event naming?\n\n this.on('minimumUpdatePeriod', () => {\n this.refreshXml_();\n }); // live playlist staleness timeout\n\n this.on('mediaupdatetimeout', () => {\n this.refreshMedia_(this.media().id);\n });\n this.state = 'HAVE_NOTHING';\n this.loadedPlaylists_ = {};\n this.logger_ = logger('DashPlaylistLoader'); // initialize the loader state\n // The mainPlaylistLoader will be created with a string\n\n if (this.isMain_) {\n this.mainPlaylistLoader_.srcUrl = 
srcUrlOrPlaylist; // TODO: reset sidxMapping between period changes\n // once multi-period is refactored\n\n this.mainPlaylistLoader_.sidxMapping_ = {};\n } else {\n this.childPlaylist_ = srcUrlOrPlaylist;\n }\n }\n requestErrored_(err, request, startingState) {\n // disposed\n if (!this.request) {\n return true;\n } // pending request is cleared\n\n this.request = null;\n if (err) {\n // use the provided error object or create one\n // based on the request/response\n this.error = typeof err === 'object' && !(err instanceof Error) ? err : {\n status: request.status,\n message: 'DASH request error at URL: ' + request.uri,\n response: request.response,\n // MEDIA_ERR_NETWORK\n code: 2,\n metadata: err.metadata\n };\n if (startingState) {\n this.state = startingState;\n }\n this.trigger('error');\n return true;\n }\n }\n /**\n * Verify that the container of the sidx segment can be parsed\n * and if it can, get and parse that segment.\n */\n\n addSidxSegments_(playlist, startingState, cb) {\n const sidxKey = playlist.sidx && generateSidxKey(playlist.sidx); // playlist lacks sidx or sidx segments were added to this playlist already.\n\n if (!playlist.sidx || !sidxKey || this.mainPlaylistLoader_.sidxMapping_[sidxKey]) {\n // keep this function async\n this.mediaRequest_ = window$1.setTimeout(() => cb(false), 0);\n return;\n } // resolve the segment URL relative to the playlist\n\n const uri = resolveManifestRedirect(playlist.sidx.resolvedUri);\n const fin = (err, request) => {\n // TODO: add error metdata here once we create an error type in video.js\n if (this.requestErrored_(err, request, startingState)) {\n return;\n }\n const sidxMapping = this.mainPlaylistLoader_.sidxMapping_;\n let sidx;\n try {\n sidx = parseSidx(toUint8(request.response).subarray(8));\n } catch (e) {\n e.metadata = {\n errorType: videojs.Error.DashManifestSidxParsingError\n }; // sidx parsing failed.\n\n this.requestErrored_(e, request, startingState);\n return;\n }\n sidxMapping[sidxKey] = {\n sidxInfo: playlist.sidx,\n sidx\n };\n addSidxSegmentsToPlaylist(playlist, sidx, playlist.sidx.resolvedUri);\n return cb(true);\n };\n this.request = containerRequest(uri, this.vhs_.xhr, (err, request, container, bytes) => {\n if (err) {\n return fin(err, request);\n }\n if (!container || container !== 'mp4') {\n const sidxContainer = container || 'unknown';\n return fin({\n status: request.status,\n message: `Unsupported ${sidxContainer} container type for sidx segment at URL: ${uri}`,\n // response is just bytes in this case\n // but we really don't want to return that.\n response: '',\n playlist,\n internal: true,\n playlistExclusionDuration: Infinity,\n // MEDIA_ERR_NETWORK\n code: 2,\n metadata: {\n errorType: videojs.Error.UnsupportedSidxContainer,\n sidxContainer\n }\n }, request);\n } // if we already downloaded the sidx bytes in the container request, use them\n\n const {\n offset,\n length\n } = playlist.sidx.byterange;\n if (bytes.length >= length + offset) {\n return fin(err, {\n response: bytes.subarray(offset, offset + length),\n status: request.status,\n uri: request.uri\n });\n } // otherwise request sidx bytes\n\n this.request = this.vhs_.xhr({\n uri,\n responseType: 'arraybuffer',\n headers: segmentXhrHeaders({\n byterange: playlist.sidx.byterange\n })\n }, fin);\n });\n }\n dispose() {\n this.trigger('dispose');\n this.stopRequest();\n this.loadedPlaylists_ = {};\n window$1.clearTimeout(this.minimumUpdatePeriodTimeout_);\n window$1.clearTimeout(this.mediaRequest_);\n 
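/*
  [Editor's aside — illustrative sketch, not part of this bundle.]
  The sidx handling in addSidxSegments_ above only issues a second, ranged request when
  the container probe stopped short of the sidx box; otherwise it reuses the probe's
  bytes:

    const haveSidxBytes = (bytes, byterange) =>
      bytes.length >= byterange.offset + byterange.length;

    // haveSidxBytes(new Uint8Array(2048), { offset: 1000, length: 500 }) === true
    // haveSidxBytes(new Uint8Array(1200), { offset: 1000, length: 500 }) === false
*/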
window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n this.mediaRequest_ = null;\n this.minimumUpdatePeriodTimeout_ = null;\n if (this.mainPlaylistLoader_.createMupOnMedia_) {\n this.off('loadedmetadata', this.mainPlaylistLoader_.createMupOnMedia_);\n this.mainPlaylistLoader_.createMupOnMedia_ = null;\n }\n this.off();\n }\n hasPendingRequest() {\n return this.request || this.mediaRequest_;\n }\n stopRequest() {\n if (this.request) {\n const oldRequest = this.request;\n this.request = null;\n oldRequest.onreadystatechange = null;\n oldRequest.abort();\n }\n }\n media(playlist) {\n // getter\n if (!playlist) {\n return this.media_;\n } // setter\n\n if (this.state === 'HAVE_NOTHING') {\n throw new Error('Cannot switch media playlist from ' + this.state);\n }\n const startingState = this.state; // find the playlist object if the target playlist has been specified by URI\n\n if (typeof playlist === 'string') {\n if (!this.mainPlaylistLoader_.main.playlists[playlist]) {\n throw new Error('Unknown playlist URI: ' + playlist);\n }\n playlist = this.mainPlaylistLoader_.main.playlists[playlist];\n }\n const mediaChange = !this.media_ || playlist.id !== this.media_.id; // switch to previously loaded playlists immediately\n\n if (mediaChange && this.loadedPlaylists_[playlist.id] && this.loadedPlaylists_[playlist.id].endList) {\n this.state = 'HAVE_METADATA';\n this.media_ = playlist; // trigger media change if the active media has been updated\n\n if (mediaChange) {\n this.trigger('mediachanging');\n this.trigger('mediachange');\n }\n return;\n } // switching to the active playlist is a no-op\n\n if (!mediaChange) {\n return;\n } // switching from an already loaded playlist\n\n if (this.media_) {\n this.trigger('mediachanging');\n }\n this.addSidxSegments_(playlist, startingState, sidxChanged => {\n // everything is ready just continue to haveMetadata\n this.haveMetadata({\n startingState,\n playlist\n });\n });\n }\n haveMetadata({\n startingState,\n playlist\n }) {\n this.state = 'HAVE_METADATA';\n this.loadedPlaylists_[playlist.id] = playlist;\n this.mediaRequest_ = null; // This will trigger loadedplaylist\n\n this.refreshMedia_(playlist.id); // fire loadedmetadata the first time a media playlist is loaded\n // to resolve setup of media groups\n\n if (startingState === 'HAVE_MAIN_MANIFEST') {\n this.trigger('loadedmetadata');\n } else {\n // trigger media change if the active media has been updated\n this.trigger('mediachange');\n }\n }\n pause() {\n if (this.mainPlaylistLoader_.createMupOnMedia_) {\n this.off('loadedmetadata', this.mainPlaylistLoader_.createMupOnMedia_);\n this.mainPlaylistLoader_.createMupOnMedia_ = null;\n }\n this.stopRequest();\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n if (this.isMain_) {\n window$1.clearTimeout(this.mainPlaylistLoader_.minimumUpdatePeriodTimeout_);\n this.mainPlaylistLoader_.minimumUpdatePeriodTimeout_ = null;\n }\n if (this.state === 'HAVE_NOTHING') {\n // If we pause the loader before any data has been retrieved, its as if we never\n // started, so reset to an unstarted state.\n this.started = false;\n }\n }\n load(isFinalRendition) {\n window$1.clearTimeout(this.mediaUpdateTimeout);\n this.mediaUpdateTimeout = null;\n const media = this.media();\n if (isFinalRendition) {\n const delay = media ? 
media.targetDuration / 2 * 1000 : 5 * 1000;\n this.mediaUpdateTimeout = window$1.setTimeout(() => this.load(), delay);\n return;\n } // because the playlists are internal to the manifest, load should either load the\n // main manifest, or do nothing but trigger an event\n\n if (!this.started) {\n this.start();\n return;\n }\n if (media && !media.endList) {\n // Check to see if this is the main loader and the MUP was cleared (this happens\n // when the loader was paused). `media` should be set at this point since one is always\n // set during `start()`.\n if (this.isMain_ && !this.minimumUpdatePeriodTimeout_) {\n // Trigger minimumUpdatePeriod to refresh the main manifest\n this.trigger('minimumUpdatePeriod'); // Since there was no prior minimumUpdatePeriodTimeout it should be recreated\n\n this.updateMinimumUpdatePeriodTimeout_();\n }\n this.trigger('mediaupdatetimeout');\n } else {\n this.trigger('loadedplaylist');\n }\n }\n start() {\n this.started = true; // We don't need to request the main manifest again\n // Call this asynchronously to match the xhr request behavior below\n\n if (!this.isMain_) {\n this.mediaRequest_ = window$1.setTimeout(() => this.haveMain_(), 0);\n return;\n }\n this.requestMain_((req, mainChanged) => {\n this.haveMain_();\n if (!this.hasPendingRequest() && !this.media_) {\n this.media(this.mainPlaylistLoader_.main.playlists[0]);\n }\n });\n }\n requestMain_(cb) {\n this.request = this.vhs_.xhr({\n uri: this.mainPlaylistLoader_.srcUrl,\n withCredentials: this.withCredentials,\n requestType: 'dash-manifest'\n }, (error, req) => {\n if (this.requestErrored_(error, req)) {\n if (this.state === 'HAVE_NOTHING') {\n this.started = false;\n }\n return;\n }\n const mainChanged = req.responseText !== this.mainPlaylistLoader_.mainXml_;\n this.mainPlaylistLoader_.mainXml_ = req.responseText;\n if (req.responseHeaders && req.responseHeaders.date) {\n this.mainLoaded_ = Date.parse(req.responseHeaders.date);\n } else {\n this.mainLoaded_ = Date.now();\n }\n this.mainPlaylistLoader_.srcUrl = resolveManifestRedirect(this.mainPlaylistLoader_.srcUrl, req);\n if (mainChanged) {\n this.handleMain_();\n this.syncClientServerClock_(() => {\n return cb(req, mainChanged);\n });\n return;\n }\n return cb(req, mainChanged);\n });\n }\n /**\n * Parses the main xml for UTCTiming node to sync the client clock to the server\n * clock. If the UTCTiming node requires a HEAD or GET request, that request is made.\n *\n * @param {Function} done\n * Function to call when clock sync has completed\n */\n\n syncClientServerClock_(done) {\n const utcTiming = parseUTCTiming(this.mainPlaylistLoader_.mainXml_); // No UTCTiming element found in the mpd. 
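// [Editor's aside — worked example with made-up timestamps, not part of this bundle.]
// Whichever branch below runs, clientOffset_ ends up as roughly "server clock minus
// client clock" in milliseconds; e.g. a DIRECT UTCTiming value of 12:00:10.000Z read
// while the client clock says 12:00:00.000Z gives:
//
//   clientOffset_ = Date.parse('2024-01-01T12:00:10.000Z') - Date.parse('2024-01-01T12:00:00.000Z')
//                 = 10000   // the server is 10s ahead of this client
//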
Use Date header from mpd request as the\n // server clock\n\n if (utcTiming === null) {\n this.mainPlaylistLoader_.clientOffset_ = this.mainLoaded_ - Date.now();\n return done();\n }\n if (utcTiming.method === 'DIRECT') {\n this.mainPlaylistLoader_.clientOffset_ = utcTiming.value - Date.now();\n return done();\n }\n this.request = this.vhs_.xhr({\n uri: resolveUrl(this.mainPlaylistLoader_.srcUrl, utcTiming.value),\n method: utcTiming.method,\n withCredentials: this.withCredentials,\n requestType: 'dash-clock-sync'\n }, (error, req) => {\n // disposed\n if (!this.request) {\n return;\n }\n if (error) {\n // sync request failed, fall back to using date header from mpd\n // TODO: log warning\n this.mainPlaylistLoader_.clientOffset_ = this.mainLoaded_ - Date.now();\n return done();\n }\n let serverTime;\n if (utcTiming.method === 'HEAD') {\n if (!req.responseHeaders || !req.responseHeaders.date) {\n // expected date header not preset, fall back to using date header from mpd\n // TODO: log warning\n serverTime = this.mainLoaded_;\n } else {\n serverTime = Date.parse(req.responseHeaders.date);\n }\n } else {\n serverTime = Date.parse(req.responseText);\n }\n this.mainPlaylistLoader_.clientOffset_ = serverTime - Date.now();\n done();\n });\n }\n haveMain_() {\n this.state = 'HAVE_MAIN_MANIFEST';\n if (this.isMain_) {\n // We have the main playlist at this point, so\n // trigger this to allow PlaylistController\n // to make an initial playlist selection\n this.trigger('loadedplaylist');\n } else if (!this.media_) {\n // no media playlist was specifically selected so select\n // the one the child playlist loader was created with\n this.media(this.childPlaylist_);\n }\n }\n handleMain_() {\n // clear media request\n this.mediaRequest_ = null;\n const oldMain = this.mainPlaylistLoader_.main;\n let newMain = parseMainXml({\n mainXml: this.mainPlaylistLoader_.mainXml_,\n srcUrl: this.mainPlaylistLoader_.srcUrl,\n clientOffset: this.mainPlaylistLoader_.clientOffset_,\n sidxMapping: this.mainPlaylistLoader_.sidxMapping_,\n previousManifest: oldMain\n }); // if we have an old main to compare the new main against\n\n if (oldMain) {\n newMain = updateMain(oldMain, newMain, this.mainPlaylistLoader_.sidxMapping_);\n } // only update main if we have a new main\n\n this.mainPlaylistLoader_.main = newMain ? newMain : oldMain;\n const location = this.mainPlaylistLoader_.main.locations && this.mainPlaylistLoader_.main.locations[0];\n if (location && location !== this.mainPlaylistLoader_.srcUrl) {\n this.mainPlaylistLoader_.srcUrl = location;\n }\n if (!oldMain || newMain && newMain.minimumUpdatePeriod !== oldMain.minimumUpdatePeriod) {\n this.updateMinimumUpdatePeriodTimeout_();\n }\n this.addEventStreamToMetadataTrack_(newMain);\n return Boolean(newMain);\n }\n updateMinimumUpdatePeriodTimeout_() {\n const mpl = this.mainPlaylistLoader_; // cancel any pending creation of mup on media\n // a new one will be added if needed.\n\n if (mpl.createMupOnMedia_) {\n mpl.off('loadedmetadata', mpl.createMupOnMedia_);\n mpl.createMupOnMedia_ = null;\n } // clear any pending timeouts\n\n if (mpl.minimumUpdatePeriodTimeout_) {\n window$1.clearTimeout(mpl.minimumUpdatePeriodTimeout_);\n mpl.minimumUpdatePeriodTimeout_ = null;\n }\n let mup = mpl.main && mpl.main.minimumUpdatePeriod; // If the minimumUpdatePeriod has a value of 0, that indicates that the current\n // MPD has no future validity, so a new one will need to be acquired when new\n // media segments are to be made available. 
Thus, we use the target duration\n // in this case\n\n if (mup === 0) {\n if (mpl.media()) {\n mup = mpl.media().targetDuration * 1000;\n } else {\n mpl.createMupOnMedia_ = mpl.updateMinimumUpdatePeriodTimeout_;\n mpl.one('loadedmetadata', mpl.createMupOnMedia_);\n }\n } // if minimumUpdatePeriod is invalid or <= zero, which\n // can happen when a live video becomes VOD. skip timeout\n // creation.\n\n if (typeof mup !== 'number' || mup <= 0) {\n if (mup < 0) {\n this.logger_(`found invalid minimumUpdatePeriod of ${mup}, not setting a timeout`);\n }\n return;\n }\n this.createMUPTimeout_(mup);\n }\n createMUPTimeout_(mup) {\n const mpl = this.mainPlaylistLoader_;\n mpl.minimumUpdatePeriodTimeout_ = window$1.setTimeout(() => {\n mpl.minimumUpdatePeriodTimeout_ = null;\n mpl.trigger('minimumUpdatePeriod');\n mpl.createMUPTimeout_(mup);\n }, mup);\n }\n /**\n * Sends request to refresh the main xml and updates the parsed main manifest\n */\n\n refreshXml_() {\n this.requestMain_((req, mainChanged) => {\n if (!mainChanged) {\n return;\n }\n if (this.media_) {\n this.media_ = this.mainPlaylistLoader_.main.playlists[this.media_.id];\n } // This will filter out updated sidx info from the mapping\n\n this.mainPlaylistLoader_.sidxMapping_ = filterChangedSidxMappings(this.mainPlaylistLoader_.main, this.mainPlaylistLoader_.sidxMapping_);\n this.addSidxSegments_(this.media(), this.state, sidxChanged => {\n // TODO: do we need to reload the current playlist?\n this.refreshMedia_(this.media().id);\n });\n });\n }\n /**\n * Refreshes the media playlist by re-parsing the main xml and updating playlist\n * references. If this is an alternate loader, the updated parsed manifest is retrieved\n * from the main loader.\n */\n\n refreshMedia_(mediaID) {\n if (!mediaID) {\n throw new Error('refreshMedia_ must take a media id');\n } // for main we have to reparse the main xml\n // to re-create segments based on current timing values\n // which may change media. 
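// [Editor's aside — illustrative sketch, not part of this bundle.] createMUPTimeout_
// above (and the media-update timeout created further below) use the same
// self-re-arming timer shape; stripped down, with hypothetical names:
//
//   const armRefreshTimer = (loader, eventName, periodMs) => {
//     loader._timerId = setTimeout(() => {
//       loader._timerId = null;
//       loader.trigger(eventName);
//       armRefreshTimer(loader, eventName, periodMs);
//     }, periodMs);
//   };
//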
We only skip updating the main manifest\n // if this is the first time this.media_ is being set.\n // as main was just parsed in that case.\n\n if (this.media_ && this.isMain_) {\n this.handleMain_();\n }\n const playlists = this.mainPlaylistLoader_.main.playlists;\n const mediaChanged = !this.media_ || this.media_ !== playlists[mediaID];\n if (mediaChanged) {\n this.media_ = playlists[mediaID];\n } else {\n this.trigger('playlistunchanged');\n }\n if (!this.mediaUpdateTimeout) {\n const createMediaUpdateTimeout = () => {\n if (this.media().endList) {\n return;\n }\n this.mediaUpdateTimeout = window$1.setTimeout(() => {\n this.trigger('mediaupdatetimeout');\n createMediaUpdateTimeout();\n }, refreshDelay(this.media(), Boolean(mediaChanged)));\n };\n createMediaUpdateTimeout();\n }\n this.trigger('loadedplaylist');\n }\n /**\n * Takes eventstream data from a parsed DASH manifest and adds it to the metadata text track.\n *\n * @param {manifest} newMain the newly parsed manifest\n */\n\n addEventStreamToMetadataTrack_(newMain) {\n // Only add new event stream metadata if we have a new manifest.\n if (newMain && this.mainPlaylistLoader_.main.eventStream) {\n // convert EventStream to ID3-like data.\n const metadataArray = this.mainPlaylistLoader_.main.eventStream.map(eventStreamNode => {\n return {\n cueTime: eventStreamNode.start,\n frames: [{\n data: eventStreamNode.messageData\n }]\n };\n });\n this.addMetadataToTextTrack('EventStream', metadataArray, this.mainPlaylistLoader_.main.duration);\n }\n }\n /**\n * Returns the key ID set from a playlist\n *\n * @param {playlist} playlist to fetch the key ID set from.\n * @return a Set of 32 digit hex strings that represent the unique keyIds for that playlist.\n */\n\n getKeyIdSet(playlist) {\n if (playlist.contentProtection) {\n const keyIds = new Set();\n for (const keysystem in playlist.contentProtection) {\n const defaultKID = playlist.contentProtection[keysystem].attributes['cenc:default_KID'];\n if (defaultKID) {\n // DASH keyIds are separated by dashes.\n keyIds.add(defaultKID.replace(/-/g, '').toLowerCase());\n }\n }\n return keyIds;\n }\n }\n}\nvar Config = {\n GOAL_BUFFER_LENGTH: 30,\n MAX_GOAL_BUFFER_LENGTH: 60,\n BACK_BUFFER_LENGTH: 30,\n GOAL_BUFFER_LENGTH_RATE: 1,\n // 0.5 MB/s\n INITIAL_BANDWIDTH: 4194304,\n // A fudge factor to apply to advertised playlist bitrates to account for\n // temporary flucations in client bandwidth\n BANDWIDTH_VARIANCE: 1.2,\n // How much of the buffer must be filled before we consider upswitching\n BUFFER_LOW_WATER_LINE: 0,\n MAX_BUFFER_LOW_WATER_LINE: 30,\n // TODO: Remove this when experimentalBufferBasedABR is removed\n EXPERIMENTAL_MAX_BUFFER_LOW_WATER_LINE: 16,\n BUFFER_LOW_WATER_LINE_RATE: 1,\n // If the buffer is greater than the high water line, we won't switch down\n BUFFER_HIGH_WATER_LINE: 30\n};\nconst stringToArrayBuffer = string => {\n const view = new Uint8Array(new ArrayBuffer(string.length));\n for (let i = 0; i < string.length; i++) {\n view[i] = string.charCodeAt(i);\n }\n return view.buffer;\n};\n\n/* global Blob, BlobBuilder, Worker */\n// unify worker interface\nconst browserWorkerPolyFill = function (workerObj) {\n // node only supports on/off\n workerObj.on = workerObj.addEventListener;\n workerObj.off = workerObj.removeEventListener;\n return workerObj;\n};\nconst createObjectURL = function (str) {\n try {\n return URL.createObjectURL(new Blob([str], {\n type: 'application/javascript'\n }));\n } catch (e) {\n const blob = new BlobBuilder();\n blob.append(str);\n return 
URL.createObjectURL(blob.getBlob());\n }\n};\nconst factory = function (code) {\n return function () {\n const objectUrl = createObjectURL(code);\n const worker = browserWorkerPolyFill(new Worker(objectUrl));\n worker.objURL = objectUrl;\n const terminate = worker.terminate;\n worker.on = worker.addEventListener;\n worker.off = worker.removeEventListener;\n worker.terminate = function () {\n URL.revokeObjectURL(objectUrl);\n return terminate.call(this);\n };\n return worker;\n };\n};\nconst transform = function (code) {\n return `var browserWorkerPolyFill = ${browserWorkerPolyFill.toString()};\\n` + 'browserWorkerPolyFill(self);\\n' + code;\n};\nconst getWorkerString = function (fn) {\n return fn.toString().replace(/^function.+?{/, '').slice(0, -1);\n};\n\n/* rollup-plugin-worker-factory start for worker!/home/runner/work/http-streaming/http-streaming/src/transmuxer-worker.js */\nconst workerCode$1 = transform(getWorkerString(function () {\n var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {};\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A lightweight readable stream implemention that handles event dispatching.\n * Objects that inherit from streams should call init in their constructors.\n */\n\n var Stream$8 = function () {\n this.init = function () {\n var listeners = {};\n /**\n * Add a listener for a specified event type.\n * @param type {string} the event name\n * @param listener {function} the callback to be invoked when an event of\n * the specified type occurs\n */\n\n this.on = function (type, listener) {\n if (!listeners[type]) {\n listeners[type] = [];\n }\n listeners[type] = listeners[type].concat(listener);\n };\n /**\n * Remove a listener for a specified event type.\n * @param type {string} the event name\n * @param listener {function} a function previously registered for this\n * type of event through `on`\n */\n\n this.off = function (type, listener) {\n var index;\n if (!listeners[type]) {\n return false;\n }\n index = listeners[type].indexOf(listener);\n listeners[type] = listeners[type].slice();\n listeners[type].splice(index, 1);\n return index > -1;\n };\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n * @param type {string} the event name\n */\n\n this.trigger = function (type) {\n var callbacks, i, length, args;\n callbacks = listeners[type];\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n if (arguments.length === 2) {\n length = callbacks.length;\n for (i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n args = [];\n i = arguments.length;\n for (i = 1; i < arguments.length; ++i) {\n args.push(arguments[i]);\n }\n length = callbacks.length;\n for (i = 0; i < length; ++i) {\n callbacks[i].apply(this, args);\n }\n }\n };\n /**\n * Destroys the stream and cleans up.\n */\n\n this.dispose = function () {\n listeners = {};\n };\n };\n };\n /**\n * Forwards all `data` events on this stream to the destination stream. 
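A minimal, illustrative pipeline using the Stream defined above (the variable names\n * exist only for this example):\n *\n *   var source = new Stream$8(); source.init();\n *   var sink = new Stream$8(); sink.init();\n *   sink.push = function (data) { console.log('received', data); };\n *   source.pipe(sink);\n *   source.push('hello'); // 'data' fires on source and sink.push logs 'received hello'\n *   source.flush();       // 'done' fires on source and sink.flush is invoked\n *\n * 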
The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n * @param destination {stream} the stream that will receive all `data` events\n * @param autoFlush {boolean} if false, we will not call `flush` on the destination\n * when the current stream emits a 'done' event\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */\n\n Stream$8.prototype.pipe = function (destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n this.on('done', function (flushSource) {\n destination.flush(flushSource);\n });\n this.on('partialdone', function (flushSource) {\n destination.partialFlush(flushSource);\n });\n this.on('endedtimeline', function (flushSource) {\n destination.endTimeline(flushSource);\n });\n this.on('reset', function (flushSource) {\n destination.reset(flushSource);\n });\n return destination;\n }; // Default stream functions that are expected to be overridden to perform\n // actual work. These are provided by the prototype as a sort of no-op\n // implementation so that we don't have to check for their existence in the\n // `pipe` function above.\n\n Stream$8.prototype.push = function (data) {\n this.trigger('data', data);\n };\n Stream$8.prototype.flush = function (flushSource) {\n this.trigger('done', flushSource);\n };\n Stream$8.prototype.partialFlush = function (flushSource) {\n this.trigger('partialdone', flushSource);\n };\n Stream$8.prototype.endTimeline = function (flushSource) {\n this.trigger('endedtimeline', flushSource);\n };\n Stream$8.prototype.reset = function (flushSource) {\n this.trigger('reset', flushSource);\n };\n var stream = Stream$8;\n var MAX_UINT32$1 = Math.pow(2, 32);\n var getUint64$3 = function (uint8) {\n var dv = new DataView(uint8.buffer, uint8.byteOffset, uint8.byteLength);\n var value;\n if (dv.getBigUint64) {\n value = dv.getBigUint64(0);\n if (value < Number.MAX_SAFE_INTEGER) {\n return Number(value);\n }\n return value;\n }\n return dv.getUint32(0) * MAX_UINT32$1 + dv.getUint32(4);\n };\n var numbers = {\n getUint64: getUint64$3,\n MAX_UINT32: MAX_UINT32$1\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Functions that generate fragmented MP4s suitable for use with Media\n * Source Extensions.\n */\n\n var MAX_UINT32 = numbers.MAX_UINT32;\n var box, dinf, esds, ftyp, mdat, mfhd, minf, moof, moov, mvex, mvhd, trak, tkhd, mdia, mdhd, hdlr, sdtp, stbl, stsd, traf, trex, trun$1, types, MAJOR_BRAND, MINOR_VERSION, AVC1_BRAND, VIDEO_HDLR, AUDIO_HDLR, HDLR_TYPES, VMHD, SMHD, DREF, STCO, STSC, STSZ, STTS; // pre-calculate constants\n\n (function () {\n var i;\n types = {\n avc1: [],\n // codingname\n avcC: [],\n btrt: [],\n dinf: [],\n dref: [],\n esds: [],\n ftyp: [],\n hdlr: [],\n mdat: [],\n mdhd: [],\n mdia: [],\n mfhd: [],\n minf: [],\n moof: [],\n moov: [],\n mp4a: [],\n // codingname\n mvex: [],\n mvhd: [],\n pasp: [],\n sdtp: [],\n smhd: [],\n stbl: [],\n stco: [],\n stsc: [],\n stsd: [],\n stsz: [],\n stts: [],\n styp: [],\n tfdt: [],\n tfhd: [],\n traf: [],\n trak: [],\n trun: [],\n trex: [],\n tkhd: [],\n vmhd: []\n }; // In environments where Uint8Array is undefined (e.g., IE8), skip set up so that we\n // don't throw an error\n\n if (typeof Uint8Array === 'undefined') {\n return;\n }\n for (i in types) {\n if (types.hasOwnProperty(i)) {\n types[i] = [i.charCodeAt(0), i.charCodeAt(1), i.charCodeAt(2), i.charCodeAt(3)];\n }\n }\n MAJOR_BRAND = new 
Uint8Array(['i'.charCodeAt(0), 's'.charCodeAt(0), 'o'.charCodeAt(0), 'm'.charCodeAt(0)]);\n AVC1_BRAND = new Uint8Array(['a'.charCodeAt(0), 'v'.charCodeAt(0), 'c'.charCodeAt(0), '1'.charCodeAt(0)]);\n MINOR_VERSION = new Uint8Array([0, 0, 0, 1]);\n VIDEO_HDLR = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0x76, 0x69, 0x64, 0x65,\n // handler_type: 'vide'\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x56, 0x69, 0x64, 0x65, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'VideoHandler'\n ]);\n\n AUDIO_HDLR = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0x73, 0x6f, 0x75, 0x6e,\n // handler_type: 'soun'\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x53, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x00 // name: 'SoundHandler'\n ]);\n\n HDLR_TYPES = {\n video: VIDEO_HDLR,\n audio: AUDIO_HDLR\n };\n DREF = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01,\n // entry_count\n 0x00, 0x00, 0x00, 0x0c,\n // entry_size\n 0x75, 0x72, 0x6c, 0x20,\n // 'url' type\n 0x00,\n // version 0\n 0x00, 0x00, 0x01 // entry_flags\n ]);\n\n SMHD = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00,\n // balance, 0 means centered\n 0x00, 0x00 // reserved\n ]);\n\n STCO = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00 // entry_count\n ]);\n\n STSC = STCO;\n STSZ = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // sample_size\n 0x00, 0x00, 0x00, 0x00 // sample_count\n ]);\n\n STTS = STCO;\n VMHD = new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x01,\n // flags\n 0x00, 0x00,\n // graphicsmode\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 // opcolor\n ]);\n })();\n\n box = function (type) {\n var payload = [],\n size = 0,\n i,\n result,\n view;\n for (i = 1; i < arguments.length; i++) {\n payload.push(arguments[i]);\n }\n i = payload.length; // calculate the total size we need to allocate\n\n while (i--) {\n size += payload[i].byteLength;\n }\n result = new Uint8Array(size + 8);\n view = new DataView(result.buffer, result.byteOffset, result.byteLength);\n view.setUint32(0, result.byteLength);\n result.set(type, 4); // copy the payload into the result\n\n for (i = 0, size = 8; i < payload.length; i++) {\n result.set(payload[i], size);\n size += payload[i].byteLength;\n }\n return result;\n };\n dinf = function () {\n return box(types.dinf, box(types.dref, DREF));\n };\n esds = function (track) {\n return box(types.esds, new Uint8Array([0x00,\n // version\n 0x00, 0x00, 0x00,\n // flags\n // ES_Descriptor\n 0x03,\n // tag, ES_DescrTag\n 0x19,\n // length\n 0x00, 0x00,\n // ES_ID\n 0x00,\n // streamDependenceFlag, URL_flag, reserved, streamPriority\n // DecoderConfigDescriptor\n 0x04,\n // tag, DecoderConfigDescrTag\n 0x11,\n // length\n 0x40,\n // object type\n 0x15,\n // streamType\n 0x00, 0x06, 0x00,\n // bufferSizeDB\n 0x00, 0x00, 0xda, 0xc0,\n // maxBitrate\n 0x00, 0x00, 0xda, 0xc0,\n // avgBitrate\n // DecoderSpecificInfo\n 0x05,\n // tag, DecoderSpecificInfoTag\n 0x02,\n // length\n // ISO/IEC 14496-3, AudioSpecificConfig\n // for samplingFrequencyIndex see ISO/IEC 13818-7:2006, 8.1.3.2.2, Table 35\n track.audioobjecttype << 3 | 
track.samplingfrequencyindex >>> 1, track.samplingfrequencyindex << 7 | track.channelcount << 3, 0x06, 0x01, 0x02 // GASpecificConfig\n ]));\n };\n\n ftyp = function () {\n return box(types.ftyp, MAJOR_BRAND, MINOR_VERSION, MAJOR_BRAND, AVC1_BRAND);\n };\n hdlr = function (type) {\n return box(types.hdlr, HDLR_TYPES[type]);\n };\n mdat = function (data) {\n return box(types.mdat, data);\n };\n mdhd = function (track) {\n var result = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x02,\n // creation_time\n 0x00, 0x00, 0x00, 0x03,\n // modification_time\n 0x00, 0x01, 0x5f, 0x90,\n // timescale, 90,000 \"ticks\" per second\n track.duration >>> 24 & 0xFF, track.duration >>> 16 & 0xFF, track.duration >>> 8 & 0xFF, track.duration & 0xFF,\n // duration\n 0x55, 0xc4,\n // 'und' language (undetermined)\n 0x00, 0x00]); // Use the sample rate from the track metadata, when it is\n // defined. The sample rate can be parsed out of an ADTS header, for\n // instance.\n\n if (track.samplerate) {\n result[12] = track.samplerate >>> 24 & 0xFF;\n result[13] = track.samplerate >>> 16 & 0xFF;\n result[14] = track.samplerate >>> 8 & 0xFF;\n result[15] = track.samplerate & 0xFF;\n }\n return box(types.mdhd, result);\n };\n mdia = function (track) {\n return box(types.mdia, mdhd(track), hdlr(track.type), minf(track));\n };\n mfhd = function (sequenceNumber) {\n return box(types.mfhd, new Uint8Array([0x00, 0x00, 0x00, 0x00,\n // flags\n (sequenceNumber & 0xFF000000) >> 24, (sequenceNumber & 0xFF0000) >> 16, (sequenceNumber & 0xFF00) >> 8, sequenceNumber & 0xFF // sequence_number\n ]));\n };\n\n minf = function (track) {\n return box(types.minf, track.type === 'video' ? box(types.vmhd, VMHD) : box(types.smhd, SMHD), dinf(), stbl(track));\n };\n moof = function (sequenceNumber, tracks) {\n var trackFragments = [],\n i = tracks.length; // build traf boxes for each track fragment\n\n while (i--) {\n trackFragments[i] = traf(tracks[i]);\n }\n return box.apply(null, [types.moof, mfhd(sequenceNumber)].concat(trackFragments));\n };\n /**\n * Returns a movie box.\n * @param tracks {array} the tracks associated with this movie\n * @see ISO/IEC 14496-12:2012(E), section 8.2.1\n */\n\n moov = function (tracks) {\n var i = tracks.length,\n boxes = [];\n while (i--) {\n boxes[i] = trak(tracks[i]);\n }\n return box.apply(null, [types.moov, mvhd(0xffffffff)].concat(boxes).concat(mvex(tracks)));\n };\n mvex = function (tracks) {\n var i = tracks.length,\n boxes = [];\n while (i--) {\n boxes[i] = trex(tracks[i]);\n }\n return box.apply(null, [types.mvex].concat(boxes));\n };\n mvhd = function (duration) {\n var bytes = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01,\n // creation_time\n 0x00, 0x00, 0x00, 0x02,\n // modification_time\n 0x00, 0x01, 0x5f, 0x90,\n // timescale, 90,000 \"ticks\" per second\n (duration & 0xFF000000) >> 24, (duration & 0xFF0000) >> 16, (duration & 0xFF00) >> 8, duration & 0xFF,\n // duration\n 0x00, 0x01, 0x00, 0x00,\n // 1.0 rate\n 0x01, 0x00,\n // 1.0 volume\n 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,\n // transformation: unity matrix\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n 0xff, 0xff, 0xff, 0xff // next_track_ID\n ]);\n\n return box(types.mvhd, bytes);\n };\n sdtp = function (track) {\n var samples = track.samples || [],\n bytes = new Uint8Array(4 + samples.length),\n flags,\n i; // leave the full box header (4 bytes) all zero\n // write the sample table\n\n for (i = 0; i < samples.length; i++) {\n flags = samples[i].flags;\n bytes[i + 4] = flags.dependsOn << 4 | flags.isDependedOn << 2 | flags.hasRedundancy;\n }\n return box(types.sdtp, bytes);\n };\n stbl = function (track) {\n return box(types.stbl, stsd(track), box(types.stts, STTS), box(types.stsc, STSC), box(types.stsz, STSZ), box(types.stco, STCO));\n };\n (function () {\n var videoSample, audioSample;\n stsd = function (track) {\n return box(types.stsd, new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n 0x00, 0x00, 0x00, 0x01]), track.type === 'video' ? videoSample(track) : audioSample(track));\n };\n videoSample = function (track) {\n var sps = track.sps || [],\n pps = track.pps || [],\n sequenceParameterSets = [],\n pictureParameterSets = [],\n i,\n avc1Box; // assemble the SPSs\n\n for (i = 0; i < sps.length; i++) {\n sequenceParameterSets.push((sps[i].byteLength & 0xFF00) >>> 8);\n sequenceParameterSets.push(sps[i].byteLength & 0xFF); // sequenceParameterSetLength\n\n sequenceParameterSets = sequenceParameterSets.concat(Array.prototype.slice.call(sps[i])); // SPS\n } // assemble the PPSs\n\n for (i = 0; i < pps.length; i++) {\n pictureParameterSets.push((pps[i].byteLength & 0xFF00) >>> 8);\n pictureParameterSets.push(pps[i].byteLength & 0xFF);\n pictureParameterSets = pictureParameterSets.concat(Array.prototype.slice.call(pps[i]));\n }\n avc1Box = [types.avc1, new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // data_reference_index\n 0x00, 0x00,\n // pre_defined\n 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // pre_defined\n (track.width & 0xff00) >> 8, track.width & 0xff,\n // width\n (track.height & 0xff00) >> 8, track.height & 0xff,\n // height\n 0x00, 0x48, 0x00, 0x00,\n // horizresolution\n 0x00, 0x48, 0x00, 0x00,\n // vertresolution\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // frame_count\n 0x13, 0x76, 0x69, 0x64, 0x65, 0x6f, 0x6a, 0x73, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x69, 0x62, 0x2d, 0x68, 0x6c, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // compressorname\n 0x00, 0x18,\n // depth = 24\n 0x11, 0x11 // pre_defined = -1\n ]), box(types.avcC, new Uint8Array([0x01,\n // configurationVersion\n track.profileIdc,\n // AVCProfileIndication\n track.profileCompatibility,\n // profile_compatibility\n track.levelIdc,\n // AVCLevelIndication\n 0xff // lengthSizeMinusOne, hard-coded to 4 bytes\n ].concat([sps.length],\n // numOfSequenceParameterSets\n sequenceParameterSets,\n // \"SPS\"\n [pps.length],\n // numOfPictureParameterSets\n pictureParameterSets // \"PPS\"\n ))), box(types.btrt, new Uint8Array([0x00, 0x1c, 0x9c, 0x80,\n // bufferSizeDB\n 0x00, 0x2d, 0xc6, 0xc0,\n // maxBitrate\n 0x00, 0x2d, 0xc6, 0xc0 // avgBitrate\n ]))];\n\n if (track.sarRatio) {\n var hSpacing = track.sarRatio[0],\n vSpacing = track.sarRatio[1];\n avc1Box.push(box(types.pasp, new Uint8Array([(hSpacing & 0xFF000000) >> 24, (hSpacing & 0xFF0000) >> 16, (hSpacing & 0xFF00) >> 8, hSpacing & 0xFF, (vSpacing & 0xFF000000) >> 24, (vSpacing & 0xFF0000) >> 16, (vSpacing & 0xFF00) >> 8, vSpacing & 0xFF])));\n 
}\n return box.apply(null, avc1Box);\n };\n audioSample = function (track) {\n return box(types.mp4a, new Uint8Array([\n // SampleEntry, ISO/IEC 14496-12\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x01,\n // data_reference_index\n // AudioSampleEntry, ISO/IEC 14496-12\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n (track.channelcount & 0xff00) >> 8, track.channelcount & 0xff,\n // channelcount\n (track.samplesize & 0xff00) >> 8, track.samplesize & 0xff,\n // samplesize\n 0x00, 0x00,\n // pre_defined\n 0x00, 0x00,\n // reserved\n (track.samplerate & 0xff00) >> 8, track.samplerate & 0xff, 0x00, 0x00 // samplerate, 16.16\n // MP4AudioSampleEntry, ISO/IEC 14496-14\n ]), esds(track));\n };\n })();\n tkhd = function (track) {\n var result = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x07,\n // flags\n 0x00, 0x00, 0x00, 0x00,\n // creation_time\n 0x00, 0x00, 0x00, 0x00,\n // modification_time\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF,\n // track_ID\n 0x00, 0x00, 0x00, 0x00,\n // reserved\n (track.duration & 0xFF000000) >> 24, (track.duration & 0xFF0000) >> 16, (track.duration & 0xFF00) >> 8, track.duration & 0xFF,\n // duration\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n // reserved\n 0x00, 0x00,\n // layer\n 0x00, 0x00,\n // alternate_group\n 0x01, 0x00,\n // non-audio track volume\n 0x00, 0x00,\n // reserved\n 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,\n // transformation: unity matrix\n (track.width & 0xFF00) >> 8, track.width & 0xFF, 0x00, 0x00,\n // width\n (track.height & 0xFF00) >> 8, track.height & 0xFF, 0x00, 0x00 // height\n ]);\n\n return box(types.tkhd, result);\n };\n /**\n * Generate a track fragment (traf) box. 
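As an illustration only (not part of the library), these generators compose into\n * a media segment for a single audio track like so:\n *\n *   var track = { id: 1, type: 'audio', baseMediaDecodeTime: 0, samples: [] };\n *   var fragment = moof(1, [track]);        // mfhd + traf (tfhd, tfdt, trun)\n *   var payload = mdat(new Uint8Array(0));  // would normally wrap the raw AAC frames\n *   // a complete fragmented-MP4 media segment is the byte concatenation fragment + payload\n *\n * 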
A traf box collects metadata\n * about tracks in a movie fragment (moof) box.\n */\n\n traf = function (track) {\n var trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable, dataOffset, upperWordBaseMediaDecodeTime, lowerWordBaseMediaDecodeTime;\n trackFragmentHeader = box(types.tfhd, new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x3a,\n // flags\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF,\n // track_ID\n 0x00, 0x00, 0x00, 0x01,\n // sample_description_index\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_duration\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_size\n 0x00, 0x00, 0x00, 0x00 // default_sample_flags\n ]));\n\n upperWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime / MAX_UINT32);\n lowerWordBaseMediaDecodeTime = Math.floor(track.baseMediaDecodeTime % MAX_UINT32);\n trackFragmentDecodeTime = box(types.tfdt, new Uint8Array([0x01,\n // version 1\n 0x00, 0x00, 0x00,\n // flags\n // baseMediaDecodeTime\n upperWordBaseMediaDecodeTime >>> 24 & 0xFF, upperWordBaseMediaDecodeTime >>> 16 & 0xFF, upperWordBaseMediaDecodeTime >>> 8 & 0xFF, upperWordBaseMediaDecodeTime & 0xFF, lowerWordBaseMediaDecodeTime >>> 24 & 0xFF, lowerWordBaseMediaDecodeTime >>> 16 & 0xFF, lowerWordBaseMediaDecodeTime >>> 8 & 0xFF, lowerWordBaseMediaDecodeTime & 0xFF])); // the data offset specifies the number of bytes from the start of\n // the containing moof to the first payload byte of the associated\n // mdat\n\n dataOffset = 32 +\n // tfhd\n 20 +\n // tfdt\n 8 +\n // traf header\n 16 +\n // mfhd\n 8 +\n // moof header\n 8; // mdat header\n // audio tracks require less metadata\n\n if (track.type === 'audio') {\n trackFragmentRun = trun$1(track, dataOffset);\n return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun);\n } // video tracks should contain an independent and disposable samples\n // box (sdtp)\n // generate one and adjust offsets to match\n\n sampleDependencyTable = sdtp(track);\n trackFragmentRun = trun$1(track, sampleDependencyTable.length + dataOffset);\n return box(types.traf, trackFragmentHeader, trackFragmentDecodeTime, trackFragmentRun, sampleDependencyTable);\n };\n /**\n * Generate a track box.\n * @param track {object} a track definition\n * @return {Uint8Array} the track box\n */\n\n trak = function (track) {\n track.duration = track.duration || 0xffffffff;\n return box(types.trak, tkhd(track), mdia(track));\n };\n trex = function (track) {\n var result = new Uint8Array([0x00,\n // version 0\n 0x00, 0x00, 0x00,\n // flags\n (track.id & 0xFF000000) >> 24, (track.id & 0xFF0000) >> 16, (track.id & 0xFF00) >> 8, track.id & 0xFF,\n // track_ID\n 0x00, 0x00, 0x00, 0x01,\n // default_sample_description_index\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_duration\n 0x00, 0x00, 0x00, 0x00,\n // default_sample_size\n 0x00, 0x01, 0x00, 0x01 // default_sample_flags\n ]); // the last two bytes of default_sample_flags is the sample\n // degradation priority, a hint about the importance of this sample\n // relative to others. Lower the degradation priority for all sample\n // types other than video.\n\n if (track.type !== 'video') {\n result[result.length - 1] = 0x00;\n }\n return box(types.trex, result);\n };\n (function () {\n var audioTrun, videoTrun, trunHeader; // This method assumes all samples are uniform. 
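(Worked example for illustration: when the first sample carries duration, size, flags\n // and compositionTimeOffset -- the usual case for video -- the trun header built below\n // gets tr_flags 0x000f01, i.e. 0x1 | 0x2 | 0x4 | 0x8 in the middle flag byte plus the\n // always-set 0x01 data-offset-present byte.)\n // 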
That is, if a\n // duration is present for the first sample, it will be present for\n // all subsequent samples.\n // see ISO/IEC 14496-12:2012, Section 8.8.8.1\n\n trunHeader = function (samples, offset) {\n var durationPresent = 0,\n sizePresent = 0,\n flagsPresent = 0,\n compositionTimeOffset = 0; // trun flag constants\n\n if (samples.length) {\n if (samples[0].duration !== undefined) {\n durationPresent = 0x1;\n }\n if (samples[0].size !== undefined) {\n sizePresent = 0x2;\n }\n if (samples[0].flags !== undefined) {\n flagsPresent = 0x4;\n }\n if (samples[0].compositionTimeOffset !== undefined) {\n compositionTimeOffset = 0x8;\n }\n }\n return [0x00,\n // version 0\n 0x00, durationPresent | sizePresent | flagsPresent | compositionTimeOffset, 0x01,\n // flags\n (samples.length & 0xFF000000) >>> 24, (samples.length & 0xFF0000) >>> 16, (samples.length & 0xFF00) >>> 8, samples.length & 0xFF,\n // sample_count\n (offset & 0xFF000000) >>> 24, (offset & 0xFF0000) >>> 16, (offset & 0xFF00) >>> 8, offset & 0xFF // data_offset\n ];\n };\n\n videoTrun = function (track, offset) {\n var bytesOffest, bytes, header, samples, sample, i;\n samples = track.samples || [];\n offset += 8 + 12 + 16 * samples.length;\n header = trunHeader(samples, offset);\n bytes = new Uint8Array(header.length + samples.length * 16);\n bytes.set(header);\n bytesOffest = header.length;\n for (i = 0; i < samples.length; i++) {\n sample = samples[i];\n bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration\n\n bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.size & 0xFF; // sample_size\n\n bytes[bytesOffest++] = sample.flags.isLeading << 2 | sample.flags.dependsOn;\n bytes[bytesOffest++] = sample.flags.isDependedOn << 6 | sample.flags.hasRedundancy << 4 | sample.flags.paddingValue << 1 | sample.flags.isNonSyncSample;\n bytes[bytesOffest++] = sample.flags.degradationPriority & 0xF0 << 8;\n bytes[bytesOffest++] = sample.flags.degradationPriority & 0x0F; // sample_flags\n\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.compositionTimeOffset & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.compositionTimeOffset & 0xFF; // sample_composition_time_offset\n }\n\n return box(types.trun, bytes);\n };\n audioTrun = function (track, offset) {\n var bytes, bytesOffest, header, samples, sample, i;\n samples = track.samples || [];\n offset += 8 + 12 + 8 * samples.length;\n header = trunHeader(samples, offset);\n bytes = new Uint8Array(header.length + samples.length * 8);\n bytes.set(header);\n bytesOffest = header.length;\n for (i = 0; i < samples.length; i++) {\n sample = samples[i];\n bytes[bytesOffest++] = (sample.duration & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.duration & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.duration & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.duration & 0xFF; // sample_duration\n\n bytes[bytesOffest++] = (sample.size & 0xFF000000) >>> 24;\n bytes[bytesOffest++] = (sample.size & 0xFF0000) >>> 16;\n bytes[bytesOffest++] = (sample.size & 0xFF00) >>> 8;\n bytes[bytesOffest++] = sample.size & 0xFF; // 
sample_size\n }\n\n return box(types.trun, bytes);\n };\n trun$1 = function (track, offset) {\n if (track.type === 'audio') {\n return audioTrun(track, offset);\n }\n return videoTrun(track, offset);\n };\n })();\n var mp4Generator = {\n ftyp: ftyp,\n mdat: mdat,\n moof: moof,\n moov: moov,\n initSegment: function (tracks) {\n var fileType = ftyp(),\n movie = moov(tracks),\n result;\n result = new Uint8Array(fileType.byteLength + movie.byteLength);\n result.set(fileType);\n result.set(movie, fileType.byteLength);\n return result;\n }\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n // Convert an array of nal units into an array of frames with each frame being\n // composed of the nal units that make up that frame\n // Also keep track of cumulative data about the frame from the nal units such\n // as the frame duration, starting pts, etc.\n\n var groupNalsIntoFrames = function (nalUnits) {\n var i,\n currentNal,\n currentFrame = [],\n frames = []; // TODO added for LHLS, make sure this is OK\n\n frames.byteLength = 0;\n frames.nalCount = 0;\n frames.duration = 0;\n currentFrame.byteLength = 0;\n for (i = 0; i < nalUnits.length; i++) {\n currentNal = nalUnits[i]; // Split on 'aud'-type nal units\n\n if (currentNal.nalUnitType === 'access_unit_delimiter_rbsp') {\n // Since the very first nal unit is expected to be an AUD\n // only push to the frames array when currentFrame is not empty\n if (currentFrame.length) {\n currentFrame.duration = currentNal.dts - currentFrame.dts; // TODO added for LHLS, make sure this is OK\n\n frames.byteLength += currentFrame.byteLength;\n frames.nalCount += currentFrame.length;\n frames.duration += currentFrame.duration;\n frames.push(currentFrame);\n }\n currentFrame = [currentNal];\n currentFrame.byteLength = currentNal.data.byteLength;\n currentFrame.pts = currentNal.pts;\n currentFrame.dts = currentNal.dts;\n } else {\n // Specifically flag key frames for ease of use later\n if (currentNal.nalUnitType === 'slice_layer_without_partitioning_rbsp_idr') {\n currentFrame.keyFrame = true;\n }\n currentFrame.duration = currentNal.dts - currentFrame.dts;\n currentFrame.byteLength += currentNal.data.byteLength;\n currentFrame.push(currentNal);\n }\n } // For the last frame, use the duration of the previous frame if we\n // have nothing better to go on\n\n if (frames.length && (!currentFrame.duration || currentFrame.duration <= 0)) {\n currentFrame.duration = frames[frames.length - 1].duration;\n } // Push the final frame\n // TODO added for LHLS, make sure this is OK\n\n frames.byteLength += currentFrame.byteLength;\n frames.nalCount += currentFrame.length;\n frames.duration += currentFrame.duration;\n frames.push(currentFrame);\n return frames;\n }; // Convert an array of frames into an array of Gop with each Gop being composed\n // of the frames that make up that Gop\n // Also keep track of cumulative data about the Gop from the frames such as the\n // Gop duration, starting pts, etc.\n\n var groupFramesIntoGops = function (frames) {\n var i,\n currentFrame,\n currentGop = [],\n gops = []; // We must pre-set some of the values on the Gop since we\n // keep running totals of these values\n\n currentGop.byteLength = 0;\n currentGop.nalCount = 0;\n currentGop.duration = 0;\n currentGop.pts = frames[0].pts;\n currentGop.dts = frames[0].dts; // store some metadata about all the Gops\n\n gops.byteLength = 0;\n gops.nalCount = 0;\n gops.duration = 0;\n gops.pts = frames[0].pts;\n gops.dts = frames[0].dts;\n for (i = 0; i < frames.length; i++) {\n currentFrame 
= frames[i];\n if (currentFrame.keyFrame) {\n // Since the very first frame is expected to be an keyframe\n // only push to the gops array when currentGop is not empty\n if (currentGop.length) {\n gops.push(currentGop);\n gops.byteLength += currentGop.byteLength;\n gops.nalCount += currentGop.nalCount;\n gops.duration += currentGop.duration;\n }\n currentGop = [currentFrame];\n currentGop.nalCount = currentFrame.length;\n currentGop.byteLength = currentFrame.byteLength;\n currentGop.pts = currentFrame.pts;\n currentGop.dts = currentFrame.dts;\n currentGop.duration = currentFrame.duration;\n } else {\n currentGop.duration += currentFrame.duration;\n currentGop.nalCount += currentFrame.length;\n currentGop.byteLength += currentFrame.byteLength;\n currentGop.push(currentFrame);\n }\n }\n if (gops.length && currentGop.duration <= 0) {\n currentGop.duration = gops[gops.length - 1].duration;\n }\n gops.byteLength += currentGop.byteLength;\n gops.nalCount += currentGop.nalCount;\n gops.duration += currentGop.duration; // push the final Gop\n\n gops.push(currentGop);\n return gops;\n };\n /*\n * Search for the first keyframe in the GOPs and throw away all frames\n * until that keyframe. Then extend the duration of the pulled keyframe\n * and pull the PTS and DTS of the keyframe so that it covers the time\n * range of the frames that were disposed.\n *\n * @param {Array} gops video GOPs\n * @returns {Array} modified video GOPs\n */\n\n var extendFirstKeyFrame = function (gops) {\n var currentGop;\n if (!gops[0][0].keyFrame && gops.length > 1) {\n // Remove the first GOP\n currentGop = gops.shift();\n gops.byteLength -= currentGop.byteLength;\n gops.nalCount -= currentGop.nalCount; // Extend the first frame of what is now the\n // first gop to cover the time period of the\n // frames we just removed\n\n gops[0][0].dts = currentGop.dts;\n gops[0][0].pts = currentGop.pts;\n gops[0][0].duration += currentGop.duration;\n }\n return gops;\n };\n /**\n * Default sample object\n * see ISO/IEC 14496-12:2012, section 8.6.4.3\n */\n\n var createDefaultSample = function () {\n return {\n size: 0,\n flags: {\n isLeading: 0,\n dependsOn: 1,\n isDependedOn: 0,\n hasRedundancy: 0,\n degradationPriority: 0,\n isNonSyncSample: 1\n }\n };\n };\n /*\n * Collates information from a video frame into an object for eventual\n * entry into an MP4 sample table.\n *\n * @param {Object} frame the video frame\n * @param {Number} dataOffset the byte offset to position the sample\n * @return {Object} object containing sample table info for a frame\n */\n\n var sampleForFrame = function (frame, dataOffset) {\n var sample = createDefaultSample();\n sample.dataOffset = dataOffset;\n sample.compositionTimeOffset = frame.pts - frame.dts;\n sample.duration = frame.duration;\n sample.size = 4 * frame.length; // Space for nal unit size\n\n sample.size += frame.byteLength;\n if (frame.keyFrame) {\n sample.flags.dependsOn = 2;\n sample.flags.isNonSyncSample = 0;\n }\n return sample;\n }; // generate the track's sample table from an array of gops\n\n var generateSampleTable$1 = function (gops, baseDataOffset) {\n var h,\n i,\n sample,\n currentGop,\n currentFrame,\n dataOffset = baseDataOffset || 0,\n samples = [];\n for (h = 0; h < gops.length; h++) {\n currentGop = gops[h];\n for (i = 0; i < currentGop.length; i++) {\n currentFrame = currentGop[i];\n sample = sampleForFrame(currentFrame, dataOffset);\n dataOffset += sample.size;\n samples.push(sample);\n }\n }\n return samples;\n }; // generate the track's raw mdat data from an array of 
gops\n\n var concatenateNalData = function (gops) {\n var h,\n i,\n j,\n currentGop,\n currentFrame,\n currentNal,\n dataOffset = 0,\n nalsByteLength = gops.byteLength,\n numberOfNals = gops.nalCount,\n totalByteLength = nalsByteLength + 4 * numberOfNals,\n data = new Uint8Array(totalByteLength),\n view = new DataView(data.buffer); // For each Gop..\n\n for (h = 0; h < gops.length; h++) {\n currentGop = gops[h]; // For each Frame..\n\n for (i = 0; i < currentGop.length; i++) {\n currentFrame = currentGop[i]; // For each NAL..\n\n for (j = 0; j < currentFrame.length; j++) {\n currentNal = currentFrame[j];\n view.setUint32(dataOffset, currentNal.data.byteLength);\n dataOffset += 4;\n data.set(currentNal.data, dataOffset);\n dataOffset += currentNal.data.byteLength;\n }\n }\n }\n return data;\n }; // generate the track's sample table from a frame\n\n var generateSampleTableForFrame = function (frame, baseDataOffset) {\n var sample,\n dataOffset = baseDataOffset || 0,\n samples = [];\n sample = sampleForFrame(frame, dataOffset);\n samples.push(sample);\n return samples;\n }; // generate the track's raw mdat data from a frame\n\n var concatenateNalDataForFrame = function (frame) {\n var i,\n currentNal,\n dataOffset = 0,\n nalsByteLength = frame.byteLength,\n numberOfNals = frame.length,\n totalByteLength = nalsByteLength + 4 * numberOfNals,\n data = new Uint8Array(totalByteLength),\n view = new DataView(data.buffer); // For each NAL..\n\n for (i = 0; i < frame.length; i++) {\n currentNal = frame[i];\n view.setUint32(dataOffset, currentNal.data.byteLength);\n dataOffset += 4;\n data.set(currentNal.data, dataOffset);\n dataOffset += currentNal.data.byteLength;\n }\n return data;\n };\n var frameUtils$1 = {\n groupNalsIntoFrames: groupNalsIntoFrames,\n groupFramesIntoGops: groupFramesIntoGops,\n extendFirstKeyFrame: extendFirstKeyFrame,\n generateSampleTable: generateSampleTable$1,\n concatenateNalData: concatenateNalData,\n generateSampleTableForFrame: generateSampleTableForFrame,\n concatenateNalDataForFrame: concatenateNalDataForFrame\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var highPrefix = [33, 16, 5, 32, 164, 27];\n var lowPrefix = [33, 65, 108, 84, 1, 2, 4, 8, 168, 2, 4, 8, 17, 191, 252];\n var zeroFill = function (count) {\n var a = [];\n while (count--) {\n a.push(0);\n }\n return a;\n };\n var makeTable = function (metaTable) {\n return Object.keys(metaTable).reduce(function (obj, key) {\n obj[key] = new Uint8Array(metaTable[key].reduce(function (arr, part) {\n return arr.concat(part);\n }, []));\n return obj;\n }, {});\n };\n var silence;\n var silence_1 = function () {\n if (!silence) {\n // Frames-of-silence to use for filling in missing AAC frames\n var coneOfSilence = {\n 96000: [highPrefix, [227, 64], zeroFill(154), [56]],\n 88200: [highPrefix, [231], zeroFill(170), [56]],\n 64000: [highPrefix, [248, 192], zeroFill(240), [56]],\n 48000: [highPrefix, [255, 192], zeroFill(268), [55, 148, 128], zeroFill(54), [112]],\n 44100: [highPrefix, [255, 192], zeroFill(268), [55, 163, 128], zeroFill(84), [112]],\n 32000: [highPrefix, [255, 192], zeroFill(268), [55, 234], zeroFill(226), [112]],\n 24000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 112], zeroFill(126), [224]],\n 16000: [highPrefix, [255, 192], zeroFill(268), [55, 255, 128], zeroFill(268), [111, 255], zeroFill(269), [223, 108], zeroFill(195), [1, 192]],\n 12000: [lowPrefix, zeroFill(268), [3, 127, 
248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 253, 128], zeroFill(259), [56]],\n 11025: [lowPrefix, zeroFill(268), [3, 127, 248], zeroFill(268), [6, 255, 240], zeroFill(268), [13, 255, 224], zeroFill(268), [27, 255, 192], zeroFill(268), [55, 175, 128], zeroFill(108), [112]],\n 8000: [lowPrefix, zeroFill(268), [3, 121, 16], zeroFill(47), [7]]\n };\n silence = makeTable(coneOfSilence);\n }\n return silence;\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ONE_SECOND_IN_TS$4 = 90000,\n // 90kHz clock\n secondsToVideoTs,\n secondsToAudioTs,\n videoTsToSeconds,\n audioTsToSeconds,\n audioTsToVideoTs,\n videoTsToAudioTs,\n metadataTsToSeconds;\n secondsToVideoTs = function (seconds) {\n return seconds * ONE_SECOND_IN_TS$4;\n };\n secondsToAudioTs = function (seconds, sampleRate) {\n return seconds * sampleRate;\n };\n videoTsToSeconds = function (timestamp) {\n return timestamp / ONE_SECOND_IN_TS$4;\n };\n audioTsToSeconds = function (timestamp, sampleRate) {\n return timestamp / sampleRate;\n };\n audioTsToVideoTs = function (timestamp, sampleRate) {\n return secondsToVideoTs(audioTsToSeconds(timestamp, sampleRate));\n };\n videoTsToAudioTs = function (timestamp, sampleRate) {\n return secondsToAudioTs(videoTsToSeconds(timestamp), sampleRate);\n };\n /**\n * Adjust ID3 tag or caption timing information by the timeline pts values\n * (if keepOriginalTimestamps is false) and convert to seconds\n */\n\n metadataTsToSeconds = function (timestamp, timelineStartPts, keepOriginalTimestamps) {\n return videoTsToSeconds(keepOriginalTimestamps ? timestamp : timestamp - timelineStartPts);\n };\n var clock$2 = {\n ONE_SECOND_IN_TS: ONE_SECOND_IN_TS$4,\n secondsToVideoTs: secondsToVideoTs,\n secondsToAudioTs: secondsToAudioTs,\n videoTsToSeconds: videoTsToSeconds,\n audioTsToSeconds: audioTsToSeconds,\n audioTsToVideoTs: audioTsToVideoTs,\n videoTsToAudioTs: videoTsToAudioTs,\n metadataTsToSeconds: metadataTsToSeconds\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var coneOfSilence = silence_1;\n var clock$1 = clock$2;\n /**\n * Sum the `byteLength` properties of the data in each AAC frame\n */\n\n var sumFrameByteLengths = function (array) {\n var i,\n currentObj,\n sum = 0; // sum the byteLength's all each nal unit in the frame\n\n for (i = 0; i < array.length; i++) {\n currentObj = array[i];\n sum += currentObj.data.byteLength;\n }\n return sum;\n }; // Possibly pad (prefix) the audio track with silence if appending this track\n // would lead to the introduction of a gap in the audio buffer\n\n var prefixWithSilence = function (track, frames, audioAppendStartTs, videoBaseMediaDecodeTime) {\n var baseMediaDecodeTimeTs,\n frameDuration = 0,\n audioGapDuration = 0,\n audioFillFrameCount = 0,\n audioFillDuration = 0,\n silentFrame,\n i,\n firstFrame;\n if (!frames.length) {\n return;\n }\n baseMediaDecodeTimeTs = clock$1.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate); // determine frame clock duration based on sample rate, round up to avoid overfills\n\n frameDuration = Math.ceil(clock$1.ONE_SECOND_IN_TS / (track.samplerate / 1024));\n if (audioAppendStartTs && videoBaseMediaDecodeTime) {\n // insert the shortest possible amount (audio gap or audio to video gap)\n audioGapDuration = baseMediaDecodeTimeTs - Math.max(audioAppendStartTs, videoBaseMediaDecodeTime); // 
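(illustrative arithmetic, not from the source: at a 44100 Hz sample rate each AAC frame\n // spans Math.ceil(90000 / (44100 / 1024)) = 2090 clock ticks, roughly 23 ms, and per the\n // checks below a gap is only filled when it is at least one such frame long and no more\n // than half a second, i.e. 45000 ticks)\n // 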
number of full frames in the audio gap\n\n audioFillFrameCount = Math.floor(audioGapDuration / frameDuration);\n audioFillDuration = audioFillFrameCount * frameDuration;\n } // don't attempt to fill gaps smaller than a single frame or larger\n // than a half second\n\n if (audioFillFrameCount < 1 || audioFillDuration > clock$1.ONE_SECOND_IN_TS / 2) {\n return;\n }\n silentFrame = coneOfSilence()[track.samplerate];\n if (!silentFrame) {\n // we don't have a silent frame pregenerated for the sample rate, so use a frame\n // from the content instead\n silentFrame = frames[0].data;\n }\n for (i = 0; i < audioFillFrameCount; i++) {\n firstFrame = frames[0];\n frames.splice(0, 0, {\n data: silentFrame,\n dts: firstFrame.dts - frameDuration,\n pts: firstFrame.pts - frameDuration\n });\n }\n track.baseMediaDecodeTime -= Math.floor(clock$1.videoTsToAudioTs(audioFillDuration, track.samplerate));\n return audioFillDuration;\n }; // If the audio segment extends before the earliest allowed dts\n // value, remove AAC frames until it starts at or after the earliest\n // allowed DTS so that we don't end up with a negative baseMedia-\n // DecodeTime for the audio track\n\n var trimAdtsFramesByEarliestDts = function (adtsFrames, track, earliestAllowedDts) {\n if (track.minSegmentDts >= earliestAllowedDts) {\n return adtsFrames;\n } // We will need to recalculate the earliest segment Dts\n\n track.minSegmentDts = Infinity;\n return adtsFrames.filter(function (currentFrame) {\n // If this is an allowed frame, keep it and record its Dts\n if (currentFrame.dts >= earliestAllowedDts) {\n track.minSegmentDts = Math.min(track.minSegmentDts, currentFrame.dts);\n track.minSegmentPts = track.minSegmentDts;\n return true;\n } // Otherwise, discard it\n\n return false;\n });\n }; // generate the track's sample table from an array of frames\n\n var generateSampleTable = function (frames) {\n var i,\n currentFrame,\n samples = [];\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n samples.push({\n size: currentFrame.data.byteLength,\n duration: 1024 // For AAC audio, all samples contain 1024 samples\n });\n }\n\n return samples;\n }; // generate the track's raw mdat data from an array of frames\n\n var concatenateFrameData = function (frames) {\n var i,\n currentFrame,\n dataOffset = 0,\n data = new Uint8Array(sumFrameByteLengths(frames));\n for (i = 0; i < frames.length; i++) {\n currentFrame = frames[i];\n data.set(currentFrame.data, dataOffset);\n dataOffset += currentFrame.data.byteLength;\n }\n return data;\n };\n var audioFrameUtils$1 = {\n prefixWithSilence: prefixWithSilence,\n trimAdtsFramesByEarliestDts: trimAdtsFramesByEarliestDts,\n generateSampleTable: generateSampleTable,\n concatenateFrameData: concatenateFrameData\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ONE_SECOND_IN_TS$3 = clock$2.ONE_SECOND_IN_TS;\n /**\n * Store information about the start and end of the track and the\n * duration for each frame/sample we process in order to calculate\n * the baseMediaDecodeTime\n */\n\n var collectDtsInfo = function (track, data) {\n if (typeof data.pts === 'number') {\n if (track.timelineStartInfo.pts === undefined) {\n track.timelineStartInfo.pts = data.pts;\n }\n if (track.minSegmentPts === undefined) {\n track.minSegmentPts = data.pts;\n } else {\n track.minSegmentPts = Math.min(track.minSegmentPts, data.pts);\n }\n if (track.maxSegmentPts === undefined) {\n track.maxSegmentPts = data.pts;\n } 
else {\n track.maxSegmentPts = Math.max(track.maxSegmentPts, data.pts);\n }\n }\n if (typeof data.dts === 'number') {\n if (track.timelineStartInfo.dts === undefined) {\n track.timelineStartInfo.dts = data.dts;\n }\n if (track.minSegmentDts === undefined) {\n track.minSegmentDts = data.dts;\n } else {\n track.minSegmentDts = Math.min(track.minSegmentDts, data.dts);\n }\n if (track.maxSegmentDts === undefined) {\n track.maxSegmentDts = data.dts;\n } else {\n track.maxSegmentDts = Math.max(track.maxSegmentDts, data.dts);\n }\n }\n };\n /**\n * Clear values used to calculate the baseMediaDecodeTime between\n * tracks\n */\n\n var clearDtsInfo = function (track) {\n delete track.minSegmentDts;\n delete track.maxSegmentDts;\n delete track.minSegmentPts;\n delete track.maxSegmentPts;\n };\n /**\n * Calculate the track's baseMediaDecodeTime based on the earliest\n * DTS the transmuxer has ever seen and the minimum DTS for the\n * current track\n * @param track {object} track metadata configuration\n * @param keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n var calculateTrackBaseMediaDecodeTime = function (track, keepOriginalTimestamps) {\n var baseMediaDecodeTime,\n scale,\n minSegmentDts = track.minSegmentDts; // Optionally adjust the time so the first segment starts at zero.\n\n if (!keepOriginalTimestamps) {\n minSegmentDts -= track.timelineStartInfo.dts;\n } // track.timelineStartInfo.baseMediaDecodeTime is the location, in time, where\n // we want the start of the first segment to be placed\n\n baseMediaDecodeTime = track.timelineStartInfo.baseMediaDecodeTime; // Add to that the distance this segment is from the very first\n\n baseMediaDecodeTime += minSegmentDts; // baseMediaDecodeTime must not become negative\n\n baseMediaDecodeTime = Math.max(0, baseMediaDecodeTime);\n if (track.type === 'audio') {\n // Audio has a different clock equal to the sampling_rate so we need to\n // scale the PTS values into the clock rate of the track\n scale = track.samplerate / ONE_SECOND_IN_TS$3;\n baseMediaDecodeTime *= scale;\n baseMediaDecodeTime = Math.floor(baseMediaDecodeTime);\n }\n return baseMediaDecodeTime;\n };\n var trackDecodeInfo$1 = {\n clearDtsInfo: clearDtsInfo,\n calculateTrackBaseMediaDecodeTime: calculateTrackBaseMediaDecodeTime,\n collectDtsInfo: collectDtsInfo\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Reads in-band caption information from a video elementary\n * stream. Captions must follow the CEA-708 standard for injection\n * into an MPEG-2 transport stream.\n * @see https://en.wikipedia.org/wiki/CEA-708\n * @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf\n */\n // Supplemental enhancement information (SEI) NAL units have a\n // payload type field to indicate how they are to be\n // interpreted. CEA-708 caption content is always transmitted with\n // payload type 0x04.\n\n var USER_DATA_REGISTERED_ITU_T_T35 = 4,\n RBSP_TRAILING_BITS = 128;\n /**\n * Parse a supplemental enhancement information (SEI) NAL unit.\n * Stops parsing once a message of type ITU T T35 has been found.\n *\n * @param bytes {Uint8Array} the bytes of a SEI NAL unit\n * @return {object} the parsed SEI payload\n * @see Rec. 
ITU-T H.264, 7.3.2.3.1\n */\n\n var parseSei = function (bytes) {\n var i = 0,\n result = {\n payloadType: -1,\n payloadSize: 0\n },\n payloadType = 0,\n payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message\n\n while (i < bytes.byteLength) {\n // stop once we have hit the end of the sei_rbsp\n if (bytes[i] === RBSP_TRAILING_BITS) {\n break;\n } // Parse payload type\n\n while (bytes[i] === 0xFF) {\n payloadType += 255;\n i++;\n }\n payloadType += bytes[i++]; // Parse payload size\n\n while (bytes[i] === 0xFF) {\n payloadSize += 255;\n i++;\n }\n payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break\n // there can only ever be one caption message in a frame's sei\n\n if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) {\n var userIdentifier = String.fromCharCode(bytes[i + 3], bytes[i + 4], bytes[i + 5], bytes[i + 6]);\n if (userIdentifier === 'GA94') {\n result.payloadType = payloadType;\n result.payloadSize = payloadSize;\n result.payload = bytes.subarray(i, i + payloadSize);\n break;\n } else {\n result.payload = void 0;\n }\n } // skip the payload and parse the next message\n\n i += payloadSize;\n payloadType = 0;\n payloadSize = 0;\n }\n return result;\n }; // see ANSI/SCTE 128-1 (2013), section 8.1\n\n var parseUserData = function (sei) {\n // itu_t_t35_contry_code must be 181 (United States) for\n // captions\n if (sei.payload[0] !== 181) {\n return null;\n } // itu_t_t35_provider_code should be 49 (ATSC) for captions\n\n if ((sei.payload[1] << 8 | sei.payload[2]) !== 49) {\n return null;\n } // the user_identifier should be \"GA94\" to indicate ATSC1 data\n\n if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') {\n return null;\n } // finally, user_data_type_code should be 0x03 for caption data\n\n if (sei.payload[7] !== 0x03) {\n return null;\n } // return the user_data_type_structure and strip the trailing\n // marker bits\n\n return sei.payload.subarray(8, sei.payload.length - 1);\n }; // see CEA-708-D, section 4.4\n\n var parseCaptionPackets = function (pts, userData) {\n var results = [],\n i,\n count,\n offset,\n data; // if this is just filler, return immediately\n\n if (!(userData[0] & 0x40)) {\n return results;\n } // parse out the cc_data_1 and cc_data_2 fields\n\n count = userData[0] & 0x1f;\n for (i = 0; i < count; i++) {\n offset = i * 3;\n data = {\n type: userData[offset + 2] & 0x03,\n pts: pts\n }; // capture cc data when cc_valid is 1\n\n if (userData[offset + 2] & 0x04) {\n data.ccData = userData[offset + 3] << 8 | userData[offset + 4];\n results.push(data);\n }\n }\n return results;\n };\n var discardEmulationPreventionBytes$1 = function (data) {\n var length = data.byteLength,\n emulationPreventionBytesPositions = [],\n i = 1,\n newLength,\n newData; // Find all `Emulation Prevention Bytes`\n\n while (i < length - 2) {\n if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {\n emulationPreventionBytesPositions.push(i + 2);\n i += 2;\n } else {\n i++;\n }\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n if (emulationPreventionBytesPositions.length === 0) {\n return data;\n } // Create a new array to hold the NAL unit data\n\n newLength = length - emulationPreventionBytesPositions.length;\n newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === emulationPreventionBytesPositions[0]) {\n // Skip this byte\n 
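// (For illustration: an escaped RBSP sequence 0x00 0x00 0x03 0x01 was written by the\n // encoder to avoid emulating a start code; dropping the 0x03 here restores the\n // original payload bytes 0x00 0x00 0x01.)\n 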
sourceIndex++; // Remove this position index\n\n emulationPreventionBytesPositions.shift();\n }\n newData[i] = data[sourceIndex];\n }\n return newData;\n }; // exports\n\n var captionPacketParser = {\n parseSei: parseSei,\n parseUserData: parseUserData,\n parseCaptionPackets: parseCaptionPackets,\n discardEmulationPreventionBytes: discardEmulationPreventionBytes$1,\n USER_DATA_REGISTERED_ITU_T_T35: USER_DATA_REGISTERED_ITU_T_T35\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Reads in-band caption information from a video elementary\n * stream. Captions must follow the CEA-708 standard for injection\n * into an MPEG-2 transport streams.\n * @see https://en.wikipedia.org/wiki/CEA-708\n * @see https://www.gpo.gov/fdsys/pkg/CFR-2007-title47-vol1/pdf/CFR-2007-title47-vol1-sec15-119.pdf\n */\n // Link To Transport\n // -----------------\n\n var Stream$7 = stream;\n var cea708Parser = captionPacketParser;\n var CaptionStream$2 = function (options) {\n options = options || {};\n CaptionStream$2.prototype.init.call(this); // parse708captions flag, default to true\n\n this.parse708captions_ = typeof options.parse708captions === 'boolean' ? options.parse708captions : true;\n this.captionPackets_ = [];\n this.ccStreams_ = [new Cea608Stream(0, 0),\n // eslint-disable-line no-use-before-define\n new Cea608Stream(0, 1),\n // eslint-disable-line no-use-before-define\n new Cea608Stream(1, 0),\n // eslint-disable-line no-use-before-define\n new Cea608Stream(1, 1) // eslint-disable-line no-use-before-define\n ];\n\n if (this.parse708captions_) {\n this.cc708Stream_ = new Cea708Stream({\n captionServices: options.captionServices\n }); // eslint-disable-line no-use-before-define\n }\n\n this.reset(); // forward data and done events from CCs to this CaptionStream\n\n this.ccStreams_.forEach(function (cc) {\n cc.on('data', this.trigger.bind(this, 'data'));\n cc.on('partialdone', this.trigger.bind(this, 'partialdone'));\n cc.on('done', this.trigger.bind(this, 'done'));\n }, this);\n if (this.parse708captions_) {\n this.cc708Stream_.on('data', this.trigger.bind(this, 'data'));\n this.cc708Stream_.on('partialdone', this.trigger.bind(this, 'partialdone'));\n this.cc708Stream_.on('done', this.trigger.bind(this, 'done'));\n }\n };\n CaptionStream$2.prototype = new Stream$7();\n CaptionStream$2.prototype.push = function (event) {\n var sei, userData, newCaptionPackets; // only examine SEI NALs\n\n if (event.nalUnitType !== 'sei_rbsp') {\n return;\n } // parse the sei\n\n sei = cea708Parser.parseSei(event.escapedRBSP); // no payload data, skip\n\n if (!sei.payload) {\n return;\n } // ignore everything but user_data_registered_itu_t_t35\n\n if (sei.payloadType !== cea708Parser.USER_DATA_REGISTERED_ITU_T_T35) {\n return;\n } // parse out the user data payload\n\n userData = cea708Parser.parseUserData(sei); // ignore unrecognized userData\n\n if (!userData) {\n return;\n } // Sometimes, the same segment # will be downloaded twice. To stop the\n // caption data from being processed twice, we track the latest dts we've\n // received and ignore everything with a dts before that. However, since\n // data for a specific dts can be split across packets on either side of\n // a segment boundary, we need to make sure we *don't* ignore the packets\n // from the *next* segment that have dts === this.latestDts_. 
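(Illustrative walk-through: if the tail of a segment delivered three packets at the\n // boundary dts, numSameDts_ ends at 3; when that same segment is re-downloaded, the\n // first packet with an older dts sets ignoreNextEqualDts_, the next three equal-dts\n // packets are dropped, and processing then resumes normally.)\n // 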
By constantly\n // tracking the number of packets received with dts === this.latestDts_, we\n // know how many should be ignored once we start receiving duplicates.\n\n if (event.dts < this.latestDts_) {\n // We've started getting older data, so set the flag.\n this.ignoreNextEqualDts_ = true;\n return;\n } else if (event.dts === this.latestDts_ && this.ignoreNextEqualDts_) {\n this.numSameDts_--;\n if (!this.numSameDts_) {\n // We've received the last duplicate packet, time to start processing again\n this.ignoreNextEqualDts_ = false;\n }\n return;\n } // parse out CC data packets and save them for later\n\n newCaptionPackets = cea708Parser.parseCaptionPackets(event.pts, userData);\n this.captionPackets_ = this.captionPackets_.concat(newCaptionPackets);\n if (this.latestDts_ !== event.dts) {\n this.numSameDts_ = 0;\n }\n this.numSameDts_++;\n this.latestDts_ = event.dts;\n };\n CaptionStream$2.prototype.flushCCStreams = function (flushType) {\n this.ccStreams_.forEach(function (cc) {\n return flushType === 'flush' ? cc.flush() : cc.partialFlush();\n }, this);\n };\n CaptionStream$2.prototype.flushStream = function (flushType) {\n // make sure we actually parsed captions before proceeding\n if (!this.captionPackets_.length) {\n this.flushCCStreams(flushType);\n return;\n } // In Chrome, the Array#sort function is not stable so add a\n // presortIndex that we can use to ensure we get a stable-sort\n\n this.captionPackets_.forEach(function (elem, idx) {\n elem.presortIndex = idx;\n }); // sort caption byte-pairs based on their PTS values\n\n this.captionPackets_.sort(function (a, b) {\n if (a.pts === b.pts) {\n return a.presortIndex - b.presortIndex;\n }\n return a.pts - b.pts;\n });\n this.captionPackets_.forEach(function (packet) {\n if (packet.type < 2) {\n // Dispatch packet to the right Cea608Stream\n this.dispatchCea608Packet(packet);\n } else {\n // Dispatch packet to the Cea708Stream\n this.dispatchCea708Packet(packet);\n }\n }, this);\n this.captionPackets_.length = 0;\n this.flushCCStreams(flushType);\n };\n CaptionStream$2.prototype.flush = function () {\n return this.flushStream('flush');\n }; // Only called if handling partial data\n\n CaptionStream$2.prototype.partialFlush = function () {\n return this.flushStream('partialFlush');\n };\n CaptionStream$2.prototype.reset = function () {\n this.latestDts_ = null;\n this.ignoreNextEqualDts_ = false;\n this.numSameDts_ = 0;\n this.activeCea608Channel_ = [null, null];\n this.ccStreams_.forEach(function (ccStream) {\n ccStream.reset();\n });\n }; // From the CEA-608 spec:\n\n /*\n * When XDS sub-packets are interleaved with other services, the end of each sub-packet shall be followed\n * by a control pair to change to a different service. When any of the control codes from 0x10 to 0x1F is\n * used to begin a control code pair, it indicates the return to captioning or Text data. The control code pair\n * and subsequent data should then be processed according to the FCC rules. It may be necessary for the\n * line 21 data encoder to automatically insert a control code pair (i.e. 
RCL, RU2, RU3, RU4, RDC, or RTD)\n * to switch to captioning or Text.\n */\n // With that in mind, we ignore any data between an XDS control code and a\n // subsequent closed-captioning control code.\n\n CaptionStream$2.prototype.dispatchCea608Packet = function (packet) {\n // NOTE: packet.type is the CEA608 field\n if (this.setsTextOrXDSActive(packet)) {\n this.activeCea608Channel_[packet.type] = null;\n } else if (this.setsChannel1Active(packet)) {\n this.activeCea608Channel_[packet.type] = 0;\n } else if (this.setsChannel2Active(packet)) {\n this.activeCea608Channel_[packet.type] = 1;\n }\n if (this.activeCea608Channel_[packet.type] === null) {\n // If we haven't received anything to set the active channel, or the\n // packets are Text/XDS data, discard the data; we don't want jumbled\n // captions\n return;\n }\n this.ccStreams_[(packet.type << 1) + this.activeCea608Channel_[packet.type]].push(packet);\n };\n CaptionStream$2.prototype.setsChannel1Active = function (packet) {\n return (packet.ccData & 0x7800) === 0x1000;\n };\n CaptionStream$2.prototype.setsChannel2Active = function (packet) {\n return (packet.ccData & 0x7800) === 0x1800;\n };\n CaptionStream$2.prototype.setsTextOrXDSActive = function (packet) {\n return (packet.ccData & 0x7100) === 0x0100 || (packet.ccData & 0x78fe) === 0x102a || (packet.ccData & 0x78fe) === 0x182a;\n };\n CaptionStream$2.prototype.dispatchCea708Packet = function (packet) {\n if (this.parse708captions_) {\n this.cc708Stream_.push(packet);\n }\n }; // ----------------------\n // Session to Application\n // ----------------------\n // This hash maps special and extended character codes to their\n // proper Unicode equivalent. The first one-byte key is just a\n // non-standard character code. The two-byte keys that follow are\n // the extended CEA708 character codes, along with the preceding\n // 0x10 extended character byte to distinguish these codes from\n // non-extended character codes. 
Every CEA708 character code that\n // is not in this object maps directly to a standard unicode\n // character code.\n // The transparent space and non-breaking transparent space are\n // technically not fully supported since there is no code to\n // make them transparent, so they have normal non-transparent\n // stand-ins.\n // The special closed caption (CC) character isn't a standard\n // unicode character, so a fairly similar unicode character was\n // chosen in its place.\n\n var CHARACTER_TRANSLATION_708 = {\n 0x7f: 0x266a,\n // ♪\n 0x1020: 0x20,\n // Transparent Space\n 0x1021: 0xa0,\n // Non-breaking Transparent Space\n 0x1025: 0x2026,\n // …\n 0x102a: 0x0160,\n // Š\n 0x102c: 0x0152,\n // Œ\n 0x1030: 0x2588,\n // █\n 0x1031: 0x2018,\n // ‘\n 0x1032: 0x2019,\n // ’\n 0x1033: 0x201c,\n // “\n 0x1034: 0x201d,\n // ”\n 0x1035: 0x2022,\n // •\n 0x1039: 0x2122,\n // ™\n 0x103a: 0x0161,\n // š\n 0x103c: 0x0153,\n // œ\n 0x103d: 0x2120,\n // ℠\n 0x103f: 0x0178,\n // Ÿ\n 0x1076: 0x215b,\n // ⅛\n 0x1077: 0x215c,\n // ⅜\n 0x1078: 0x215d,\n // ⅝\n 0x1079: 0x215e,\n // ⅞\n 0x107a: 0x23d0,\n // ⏐\n 0x107b: 0x23a4,\n // ⎤\n 0x107c: 0x23a3,\n // ⎣\n 0x107d: 0x23af,\n // ⎯\n 0x107e: 0x23a6,\n // ⎦\n 0x107f: 0x23a1,\n // ⎡\n 0x10a0: 0x3138 // ㄸ (CC char)\n };\n\n var get708CharFromCode = function (code) {\n var newCode = CHARACTER_TRANSLATION_708[code] || code;\n if (code & 0x1000 && code === newCode) {\n // Invalid extended code\n return '';\n }\n return String.fromCharCode(newCode);\n };\n var within708TextBlock = function (b) {\n return 0x20 <= b && b <= 0x7f || 0xa0 <= b && b <= 0xff;\n };\n var Cea708Window = function (windowNum) {\n this.windowNum = windowNum;\n this.reset();\n };\n Cea708Window.prototype.reset = function () {\n this.clearText();\n this.pendingNewLine = false;\n this.winAttr = {};\n this.penAttr = {};\n this.penLoc = {};\n this.penColor = {}; // These default values are arbitrary,\n // defineWindow will usually override them\n\n this.visible = 0;\n this.rowLock = 0;\n this.columnLock = 0;\n this.priority = 0;\n this.relativePositioning = 0;\n this.anchorVertical = 0;\n this.anchorHorizontal = 0;\n this.anchorPoint = 0;\n this.rowCount = 1;\n this.virtualRowCount = this.rowCount + 1;\n this.columnCount = 41;\n this.windowStyle = 0;\n this.penStyle = 0;\n };\n Cea708Window.prototype.getText = function () {\n return this.rows.join('\\n');\n };\n Cea708Window.prototype.clearText = function () {\n this.rows = [''];\n this.rowIdx = 0;\n };\n Cea708Window.prototype.newLine = function (pts) {\n if (this.rows.length >= this.virtualRowCount && typeof this.beforeRowOverflow === 'function') {\n this.beforeRowOverflow(pts);\n }\n if (this.rows.length > 0) {\n this.rows.push('');\n this.rowIdx++;\n } // Show all virtual rows since there's no visible scrolling\n\n while (this.rows.length > this.virtualRowCount) {\n this.rows.shift();\n this.rowIdx--;\n }\n };\n Cea708Window.prototype.isEmpty = function () {\n if (this.rows.length === 0) {\n return true;\n } else if (this.rows.length === 1) {\n return this.rows[0] === '';\n }\n return false;\n };\n Cea708Window.prototype.addText = function (text) {\n this.rows[this.rowIdx] += text;\n };\n Cea708Window.prototype.backspace = function () {\n if (!this.isEmpty()) {\n var row = this.rows[this.rowIdx];\n this.rows[this.rowIdx] = row.substr(0, row.length - 1);\n }\n };\n var Cea708Service = function (serviceNum, encoding, stream) {\n this.serviceNum = serviceNum;\n this.text = '';\n this.currentWindow = new Cea708Window(-1);\n this.windows = [];\n 
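// Note: window number -1 is only a placeholder; init() creates the service's eight real windows (0-7)\n // and setCurrentWindow() selects which of them subsequent commands act on.\n 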
this.stream = stream; // Try to setup a TextDecoder if an `encoding` value was provided\n\n if (typeof encoding === 'string') {\n this.createTextDecoder(encoding);\n }\n };\n /**\n * Initialize service windows\n * Must be run before service use\n *\n * @param {Integer} pts PTS value\n * @param {Function} beforeRowOverflow Function to execute before row overflow of a window\n */\n\n Cea708Service.prototype.init = function (pts, beforeRowOverflow) {\n this.startPts = pts;\n for (var win = 0; win < 8; win++) {\n this.windows[win] = new Cea708Window(win);\n if (typeof beforeRowOverflow === 'function') {\n this.windows[win].beforeRowOverflow = beforeRowOverflow;\n }\n }\n };\n /**\n * Set current window of service to be affected by commands\n *\n * @param {Integer} windowNum Window number\n */\n\n Cea708Service.prototype.setCurrentWindow = function (windowNum) {\n this.currentWindow = this.windows[windowNum];\n };\n /**\n * Try to create a TextDecoder if it is natively supported\n */\n\n Cea708Service.prototype.createTextDecoder = function (encoding) {\n if (typeof TextDecoder === 'undefined') {\n this.stream.trigger('log', {\n level: 'warn',\n message: 'The `encoding` option is unsupported without TextDecoder support'\n });\n } else {\n try {\n this.textDecoder_ = new TextDecoder(encoding);\n } catch (error) {\n this.stream.trigger('log', {\n level: 'warn',\n message: 'TextDecoder could not be created with ' + encoding + ' encoding. ' + error\n });\n }\n }\n };\n var Cea708Stream = function (options) {\n options = options || {};\n Cea708Stream.prototype.init.call(this);\n var self = this;\n var captionServices = options.captionServices || {};\n var captionServiceEncodings = {};\n var serviceProps; // Get service encodings from captionServices option block\n\n Object.keys(captionServices).forEach(serviceName => {\n serviceProps = captionServices[serviceName];\n if (/^SERVICE/.test(serviceName)) {\n captionServiceEncodings[serviceName] = serviceProps.encoding;\n }\n });\n this.serviceEncodings = captionServiceEncodings;\n this.current708Packet = null;\n this.services = {};\n this.push = function (packet) {\n if (packet.type === 3) {\n // 708 packet start\n self.new708Packet();\n self.add708Bytes(packet);\n } else {\n if (self.current708Packet === null) {\n // This should only happen at the start of a file if there's no packet start.\n self.new708Packet();\n }\n self.add708Bytes(packet);\n }\n };\n };\n Cea708Stream.prototype = new Stream$7();\n /**\n * Push current 708 packet, create new 708 packet.\n */\n\n Cea708Stream.prototype.new708Packet = function () {\n if (this.current708Packet !== null) {\n this.push708Packet();\n }\n this.current708Packet = {\n data: [],\n ptsVals: []\n };\n };\n /**\n * Add pts and both bytes from packet into current 708 packet.\n */\n\n Cea708Stream.prototype.add708Bytes = function (packet) {\n var data = packet.ccData;\n var byte0 = data >>> 8;\n var byte1 = data & 0xff; // I would just keep a list of packets instead of bytes, but it isn't clear in the spec\n // that service blocks will always line up with byte pairs.\n\n this.current708Packet.ptsVals.push(packet.pts);\n this.current708Packet.data.push(byte0);\n this.current708Packet.data.push(byte1);\n };\n /**\n * Parse completed 708 packet into service blocks and push each service block.\n */\n\n Cea708Stream.prototype.push708Packet = function () {\n var packet708 = this.current708Packet;\n var packetData = packet708.data;\n var serviceNum = null;\n var blockSize = null;\n var i = 0;\n var b = packetData[i++];\n 
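// The first byte of a DTVCC caption channel packet is a header byte: the top two bits carry the\n // sequence number and the low six bits carry the packet size code (see CEA-708), parsed just below.\n 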
packet708.seq = b >> 6;\n packet708.sizeCode = b & 0x3f; // 0b00111111;\n\n for (; i < packetData.length; i++) {\n b = packetData[i++];\n serviceNum = b >> 5;\n blockSize = b & 0x1f; // 0b00011111\n\n if (serviceNum === 7 && blockSize > 0) {\n // Extended service num\n b = packetData[i++];\n serviceNum = b;\n }\n this.pushServiceBlock(serviceNum, i, blockSize);\n if (blockSize > 0) {\n i += blockSize - 1;\n }\n }\n };\n /**\n * Parse service block, execute commands, read text.\n *\n * Note: While many of these commands serve important purposes,\n * many others just parse out the parameters or attributes, but\n * nothing is done with them because this is not a full and complete\n * implementation of the entire 708 spec.\n *\n * @param {Integer} serviceNum Service number\n * @param {Integer} start Start index of the 708 packet data\n * @param {Integer} size Block size\n */\n\n Cea708Stream.prototype.pushServiceBlock = function (serviceNum, start, size) {\n var b;\n var i = start;\n var packetData = this.current708Packet.data;\n var service = this.services[serviceNum];\n if (!service) {\n service = this.initService(serviceNum, i);\n }\n for (; i < start + size && i < packetData.length; i++) {\n b = packetData[i];\n if (within708TextBlock(b)) {\n i = this.handleText(i, service);\n } else if (b === 0x18) {\n i = this.multiByteCharacter(i, service);\n } else if (b === 0x10) {\n i = this.extendedCommands(i, service);\n } else if (0x80 <= b && b <= 0x87) {\n i = this.setCurrentWindow(i, service);\n } else if (0x98 <= b && b <= 0x9f) {\n i = this.defineWindow(i, service);\n } else if (b === 0x88) {\n i = this.clearWindows(i, service);\n } else if (b === 0x8c) {\n i = this.deleteWindows(i, service);\n } else if (b === 0x89) {\n i = this.displayWindows(i, service);\n } else if (b === 0x8a) {\n i = this.hideWindows(i, service);\n } else if (b === 0x8b) {\n i = this.toggleWindows(i, service);\n } else if (b === 0x97) {\n i = this.setWindowAttributes(i, service);\n } else if (b === 0x90) {\n i = this.setPenAttributes(i, service);\n } else if (b === 0x91) {\n i = this.setPenColor(i, service);\n } else if (b === 0x92) {\n i = this.setPenLocation(i, service);\n } else if (b === 0x8f) {\n service = this.reset(i, service);\n } else if (b === 0x08) {\n // BS: Backspace\n service.currentWindow.backspace();\n } else if (b === 0x0c) {\n // FF: Form feed\n service.currentWindow.clearText();\n } else if (b === 0x0d) {\n // CR: Carriage return\n service.currentWindow.pendingNewLine = true;\n } else if (b === 0x0e) {\n // HCR: Horizontal carriage return\n service.currentWindow.clearText();\n } else if (b === 0x8d) {\n // DLY: Delay, nothing to do\n i++;\n } else ;\n }\n };\n /**\n * Execute an extended command\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.extendedCommands = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n if (within708TextBlock(b)) {\n i = this.handleText(i, service, {\n isExtended: true\n });\n }\n return i;\n };\n /**\n * Get PTS value of a given byte index\n *\n * @param {Integer} byteIndex Index of the byte\n * @return {Integer} PTS\n */\n\n Cea708Stream.prototype.getPts = function (byteIndex) {\n // There's 1 pts value per 2 bytes\n return this.current708Packet.ptsVals[Math.floor(byteIndex / 2)];\n };\n /**\n * Initializes a service\n *\n * @param {Integer} serviceNum Service number\n * 
@return {Service} Initialized service object\n */\n\n Cea708Stream.prototype.initService = function (serviceNum, i) {\n var serviceName = 'SERVICE' + serviceNum;\n var self = this;\n var serviceName;\n var encoding;\n if (serviceName in this.serviceEncodings) {\n encoding = this.serviceEncodings[serviceName];\n }\n this.services[serviceNum] = new Cea708Service(serviceNum, encoding, self);\n this.services[serviceNum].init(this.getPts(i), function (pts) {\n self.flushDisplayed(pts, self.services[serviceNum]);\n });\n return this.services[serviceNum];\n };\n /**\n * Execute text writing to current window\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.handleText = function (i, service, options) {\n var isExtended = options && options.isExtended;\n var isMultiByte = options && options.isMultiByte;\n var packetData = this.current708Packet.data;\n var extended = isExtended ? 0x1000 : 0x0000;\n var currentByte = packetData[i];\n var nextByte = packetData[i + 1];\n var win = service.currentWindow;\n var char;\n var charCodeArray; // Converts an array of bytes to a unicode hex string.\n\n function toHexString(byteArray) {\n return byteArray.map(byte => {\n return ('0' + (byte & 0xFF).toString(16)).slice(-2);\n }).join('');\n }\n if (isMultiByte) {\n charCodeArray = [currentByte, nextByte];\n i++;\n } else {\n charCodeArray = [currentByte];\n } // Use the TextDecoder if one was created for this service\n\n if (service.textDecoder_ && !isExtended) {\n char = service.textDecoder_.decode(new Uint8Array(charCodeArray));\n } else {\n // We assume any multi-byte char without a decoder is unicode.\n if (isMultiByte) {\n const unicode = toHexString(charCodeArray); // Takes a unicode hex string and creates a single character.\n\n char = String.fromCharCode(parseInt(unicode, 16));\n } else {\n char = get708CharFromCode(extended | currentByte);\n }\n }\n if (win.pendingNewLine && !win.isEmpty()) {\n win.newLine(this.getPts(i));\n }\n win.pendingNewLine = false;\n win.addText(char);\n return i;\n };\n /**\n * Handle decoding of multibyte character\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.multiByteCharacter = function (i, service) {\n var packetData = this.current708Packet.data;\n var firstByte = packetData[i + 1];\n var secondByte = packetData[i + 2];\n if (within708TextBlock(firstByte) && within708TextBlock(secondByte)) {\n i = this.handleText(++i, service, {\n isMultiByte: true\n });\n }\n return i;\n };\n /**\n * Parse and execute the CW# command.\n *\n * Set the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setCurrentWindow = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var windowNum = b & 0x07;\n service.setCurrentWindow(windowNum);\n return i;\n };\n /**\n * Parse and execute the DF# command.\n *\n * Define a window and set it as the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.defineWindow = function (i, service) 
{\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var windowNum = b & 0x07;\n service.setCurrentWindow(windowNum);\n var win = service.currentWindow;\n b = packetData[++i];\n win.visible = (b & 0x20) >> 5; // v\n\n win.rowLock = (b & 0x10) >> 4; // rl\n\n win.columnLock = (b & 0x08) >> 3; // cl\n\n win.priority = b & 0x07; // p\n\n b = packetData[++i];\n win.relativePositioning = (b & 0x80) >> 7; // rp\n\n win.anchorVertical = b & 0x7f; // av\n\n b = packetData[++i];\n win.anchorHorizontal = b; // ah\n\n b = packetData[++i];\n win.anchorPoint = (b & 0xf0) >> 4; // ap\n\n win.rowCount = b & 0x0f; // rc\n\n b = packetData[++i];\n win.columnCount = b & 0x3f; // cc\n\n b = packetData[++i];\n win.windowStyle = (b & 0x38) >> 3; // ws\n\n win.penStyle = b & 0x07; // ps\n // The spec says there are (rowCount+1) \"virtual rows\"\n\n win.virtualRowCount = win.rowCount + 1;\n return i;\n };\n /**\n * Parse and execute the SWA command.\n *\n * Set attributes of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setWindowAttributes = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var winAttr = service.currentWindow.winAttr;\n b = packetData[++i];\n winAttr.fillOpacity = (b & 0xc0) >> 6; // fo\n\n winAttr.fillRed = (b & 0x30) >> 4; // fr\n\n winAttr.fillGreen = (b & 0x0c) >> 2; // fg\n\n winAttr.fillBlue = b & 0x03; // fb\n\n b = packetData[++i];\n winAttr.borderType = (b & 0xc0) >> 6; // bt\n\n winAttr.borderRed = (b & 0x30) >> 4; // br\n\n winAttr.borderGreen = (b & 0x0c) >> 2; // bg\n\n winAttr.borderBlue = b & 0x03; // bb\n\n b = packetData[++i];\n winAttr.borderType += (b & 0x80) >> 5; // bt\n\n winAttr.wordWrap = (b & 0x40) >> 6; // ww\n\n winAttr.printDirection = (b & 0x30) >> 4; // pd\n\n winAttr.scrollDirection = (b & 0x0c) >> 2; // sd\n\n winAttr.justify = b & 0x03; // j\n\n b = packetData[++i];\n winAttr.effectSpeed = (b & 0xf0) >> 4; // es\n\n winAttr.effectDirection = (b & 0x0c) >> 2; // ed\n\n winAttr.displayEffect = b & 0x03; // de\n\n return i;\n };\n /**\n * Gather text from all displayed windows and push a caption to output.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n */\n\n Cea708Stream.prototype.flushDisplayed = function (pts, service) {\n var displayedText = []; // TODO: Positioning not supported, displaying multiple windows will not necessarily\n // display text in the correct order, but sample files so far have not shown any issue.\n\n for (var winId = 0; winId < 8; winId++) {\n if (service.windows[winId].visible && !service.windows[winId].isEmpty()) {\n displayedText.push(service.windows[winId].getText());\n }\n }\n service.endPts = pts;\n service.text = displayedText.join('\\n\\n');\n this.pushCaption(service);\n service.startPts = pts;\n };\n /**\n * Push a caption to output if the caption contains text.\n *\n * @param {Service} service The service object to be affected\n */\n\n Cea708Stream.prototype.pushCaption = function (service) {\n if (service.text !== '') {\n this.trigger('data', {\n startPts: service.startPts,\n endPts: service.endPts,\n text: service.text,\n stream: 'cc708_' + service.serviceNum\n });\n service.text = '';\n service.startPts = service.endPts;\n }\n };\n /**\n * Parse and execute the DSW command.\n *\n * Set visible property of 
windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.displayWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible = 1;\n }\n }\n return i;\n };\n /**\n * Parse and execute the HDW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.hideWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible = 0;\n }\n }\n return i;\n };\n /**\n * Parse and execute the TGW command.\n *\n * Set visible property of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.toggleWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].visible ^= 1;\n }\n }\n return i;\n };\n /**\n * Parse and execute the CLW command.\n *\n * Clear text of windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.clearWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].clearText();\n }\n }\n return i;\n };\n /**\n * Parse and execute the DLW command.\n *\n * Re-initialize windows based on the parsed bitmask.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.deleteWindows = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[++i];\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n for (var winId = 0; winId < 8; winId++) {\n if (b & 0x01 << winId) {\n service.windows[winId].reset();\n }\n }\n return i;\n };\n /**\n * Parse and execute the SPA command.\n *\n * Set pen attributes of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setPenAttributes = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penAttr = service.currentWindow.penAttr;\n b = packetData[++i];\n penAttr.textTag = (b & 0xf0) >> 4; // 
tt\n\n penAttr.offset = (b & 0x0c) >> 2; // o\n\n penAttr.penSize = b & 0x03; // s\n\n b = packetData[++i];\n penAttr.italics = (b & 0x80) >> 7; // i\n\n penAttr.underline = (b & 0x40) >> 6; // u\n\n penAttr.edgeType = (b & 0x38) >> 3; // et\n\n penAttr.fontStyle = b & 0x07; // fs\n\n return i;\n };\n /**\n * Parse and execute the SPC command.\n *\n * Set pen color of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setPenColor = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penColor = service.currentWindow.penColor;\n b = packetData[++i];\n penColor.fgOpacity = (b & 0xc0) >> 6; // fo\n\n penColor.fgRed = (b & 0x30) >> 4; // fr\n\n penColor.fgGreen = (b & 0x0c) >> 2; // fg\n\n penColor.fgBlue = b & 0x03; // fb\n\n b = packetData[++i];\n penColor.bgOpacity = (b & 0xc0) >> 6; // bo\n\n penColor.bgRed = (b & 0x30) >> 4; // br\n\n penColor.bgGreen = (b & 0x0c) >> 2; // bg\n\n penColor.bgBlue = b & 0x03; // bb\n\n b = packetData[++i];\n penColor.edgeRed = (b & 0x30) >> 4; // er\n\n penColor.edgeGreen = (b & 0x0c) >> 2; // eg\n\n penColor.edgeBlue = b & 0x03; // eb\n\n return i;\n };\n /**\n * Parse and execute the SPL command.\n *\n * Set pen location of the current window.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Integer} New index after parsing\n */\n\n Cea708Stream.prototype.setPenLocation = function (i, service) {\n var packetData = this.current708Packet.data;\n var b = packetData[i];\n var penLoc = service.currentWindow.penLoc; // Positioning isn't really supported at the moment, so this essentially just inserts a linebreak\n\n service.currentWindow.pendingNewLine = true;\n b = packetData[++i];\n penLoc.row = b & 0x0f; // r\n\n b = packetData[++i];\n penLoc.column = b & 0x3f; // c\n\n return i;\n };\n /**\n * Execute the RST command.\n *\n * Reset service to a clean slate. Re-initialize.\n *\n * @param {Integer} i Current index in the 708 packet\n * @param {Service} service The service object to be affected\n * @return {Service} Re-initialized service\n */\n\n Cea708Stream.prototype.reset = function (i, service) {\n var pts = this.getPts(i);\n this.flushDisplayed(pts, service);\n return this.initService(service.serviceNum, i);\n }; // This hash maps non-ASCII, special, and extended character codes to their\n // proper Unicode equivalent. The first keys that are only a single byte\n // are the non-standard ASCII characters, which simply map the CEA608 byte\n // to the standard ASCII/Unicode. 
The two-byte keys that follow are the CEA608\n // character codes, but have their MSB bitmasked with 0x03 so that a lookup\n // can be performed regardless of the field and data channel on which the\n // character code was received.\n\n var CHARACTER_TRANSLATION = {\n 0x2a: 0xe1,\n // á\n 0x5c: 0xe9,\n // é\n 0x5e: 0xed,\n // í\n 0x5f: 0xf3,\n // ó\n 0x60: 0xfa,\n // ú\n 0x7b: 0xe7,\n // ç\n 0x7c: 0xf7,\n // ÷\n 0x7d: 0xd1,\n // Ñ\n 0x7e: 0xf1,\n // ñ\n 0x7f: 0x2588,\n // █\n 0x0130: 0xae,\n // ®\n 0x0131: 0xb0,\n // °\n 0x0132: 0xbd,\n // ½\n 0x0133: 0xbf,\n // ¿\n 0x0134: 0x2122,\n // ™\n 0x0135: 0xa2,\n // ¢\n 0x0136: 0xa3,\n // £\n 0x0137: 0x266a,\n // ♪\n 0x0138: 0xe0,\n // à\n 0x0139: 0xa0,\n //\n 0x013a: 0xe8,\n // è\n 0x013b: 0xe2,\n // â\n 0x013c: 0xea,\n // ê\n 0x013d: 0xee,\n // î\n 0x013e: 0xf4,\n // ô\n 0x013f: 0xfb,\n // û\n 0x0220: 0xc1,\n // Á\n 0x0221: 0xc9,\n // É\n 0x0222: 0xd3,\n // Ó\n 0x0223: 0xda,\n // Ú\n 0x0224: 0xdc,\n // Ü\n 0x0225: 0xfc,\n // ü\n 0x0226: 0x2018,\n // ‘\n 0x0227: 0xa1,\n // ¡\n 0x0228: 0x2a,\n // *\n 0x0229: 0x27,\n // '\n 0x022a: 0x2014,\n // —\n 0x022b: 0xa9,\n // ©\n 0x022c: 0x2120,\n // ℠\n 0x022d: 0x2022,\n // •\n 0x022e: 0x201c,\n // “\n 0x022f: 0x201d,\n // ”\n 0x0230: 0xc0,\n // À\n 0x0231: 0xc2,\n // Â\n 0x0232: 0xc7,\n // Ç\n 0x0233: 0xc8,\n // È\n 0x0234: 0xca,\n // Ê\n 0x0235: 0xcb,\n // Ë\n 0x0236: 0xeb,\n // ë\n 0x0237: 0xce,\n // Î\n 0x0238: 0xcf,\n // Ï\n 0x0239: 0xef,\n // ï\n 0x023a: 0xd4,\n // Ô\n 0x023b: 0xd9,\n // Ù\n 0x023c: 0xf9,\n // ù\n 0x023d: 0xdb,\n // Û\n 0x023e: 0xab,\n // «\n 0x023f: 0xbb,\n // »\n 0x0320: 0xc3,\n // Ã\n 0x0321: 0xe3,\n // ã\n 0x0322: 0xcd,\n // Í\n 0x0323: 0xcc,\n // Ì\n 0x0324: 0xec,\n // ì\n 0x0325: 0xd2,\n // Ò\n 0x0326: 0xf2,\n // ò\n 0x0327: 0xd5,\n // Õ\n 0x0328: 0xf5,\n // õ\n 0x0329: 0x7b,\n // {\n 0x032a: 0x7d,\n // }\n 0x032b: 0x5c,\n // \\\n 0x032c: 0x5e,\n // ^\n 0x032d: 0x5f,\n // _\n 0x032e: 0x7c,\n // |\n 0x032f: 0x7e,\n // ~\n 0x0330: 0xc4,\n // Ä\n 0x0331: 0xe4,\n // ä\n 0x0332: 0xd6,\n // Ö\n 0x0333: 0xf6,\n // ö\n 0x0334: 0xdf,\n // ß\n 0x0335: 0xa5,\n // ¥\n 0x0336: 0xa4,\n // ¤\n 0x0337: 0x2502,\n // │\n 0x0338: 0xc5,\n // Å\n 0x0339: 0xe5,\n // å\n 0x033a: 0xd8,\n // Ø\n 0x033b: 0xf8,\n // ø\n 0x033c: 0x250c,\n // ┌\n 0x033d: 0x2510,\n // ┐\n 0x033e: 0x2514,\n // └\n 0x033f: 0x2518 // ┘\n };\n\n var getCharFromCode = function (code) {\n if (code === null) {\n return '';\n }\n code = CHARACTER_TRANSLATION[code] || code;\n return String.fromCharCode(code);\n }; // the index of the last row in a CEA-608 display buffer\n\n var BOTTOM_ROW = 14; // This array is used for mapping PACs -> row #, since there's no way of\n // getting it through bit logic.\n\n var ROWS = [0x1100, 0x1120, 0x1200, 0x1220, 0x1500, 0x1520, 0x1600, 0x1620, 0x1700, 0x1720, 0x1000, 0x1300, 0x1320, 0x1400, 0x1420]; // CEA-608 captions are rendered onto a 34x15 matrix of character\n // cells. 
The \"bottom\" row is the last element in the outer array.\n // We keep track of positioning information as we go by storing the\n // number of indentations and the tab offset in this buffer.\n\n var createDisplayBuffer = function () {\n var result = [],\n i = BOTTOM_ROW + 1;\n while (i--) {\n result.push({\n text: '',\n indent: 0,\n offset: 0\n });\n }\n return result;\n };\n var Cea608Stream = function (field, dataChannel) {\n Cea608Stream.prototype.init.call(this);\n this.field_ = field || 0;\n this.dataChannel_ = dataChannel || 0;\n this.name_ = 'CC' + ((this.field_ << 1 | this.dataChannel_) + 1);\n this.setConstants();\n this.reset();\n this.push = function (packet) {\n var data, swap, char0, char1, text; // remove the parity bits\n\n data = packet.ccData & 0x7f7f; // ignore duplicate control codes; the spec demands they're sent twice\n\n if (data === this.lastControlCode_) {\n this.lastControlCode_ = null;\n return;\n } // Store control codes\n\n if ((data & 0xf000) === 0x1000) {\n this.lastControlCode_ = data;\n } else if (data !== this.PADDING_) {\n this.lastControlCode_ = null;\n }\n char0 = data >>> 8;\n char1 = data & 0xff;\n if (data === this.PADDING_) {\n return;\n } else if (data === this.RESUME_CAPTION_LOADING_) {\n this.mode_ = 'popOn';\n } else if (data === this.END_OF_CAPTION_) {\n // If an EOC is received while in paint-on mode, the displayed caption\n // text should be swapped to non-displayed memory as if it was a pop-on\n // caption. Because of that, we should explicitly switch back to pop-on\n // mode\n this.mode_ = 'popOn';\n this.clearFormatting(packet.pts); // if a caption was being displayed, it's gone now\n\n this.flushDisplayed(packet.pts); // flip memory\n\n swap = this.displayed_;\n this.displayed_ = this.nonDisplayed_;\n this.nonDisplayed_ = swap; // start measuring the time to display the caption\n\n this.startPts_ = packet.pts;\n } else if (data === this.ROLL_UP_2_ROWS_) {\n this.rollUpRows_ = 2;\n this.setRollUp(packet.pts);\n } else if (data === this.ROLL_UP_3_ROWS_) {\n this.rollUpRows_ = 3;\n this.setRollUp(packet.pts);\n } else if (data === this.ROLL_UP_4_ROWS_) {\n this.rollUpRows_ = 4;\n this.setRollUp(packet.pts);\n } else if (data === this.CARRIAGE_RETURN_) {\n this.clearFormatting(packet.pts);\n this.flushDisplayed(packet.pts);\n this.shiftRowsUp_();\n this.startPts_ = packet.pts;\n } else if (data === this.BACKSPACE_) {\n if (this.mode_ === 'popOn') {\n this.nonDisplayed_[this.row_].text = this.nonDisplayed_[this.row_].text.slice(0, -1);\n } else {\n this.displayed_[this.row_].text = this.displayed_[this.row_].text.slice(0, -1);\n }\n } else if (data === this.ERASE_DISPLAYED_MEMORY_) {\n this.flushDisplayed(packet.pts);\n this.displayed_ = createDisplayBuffer();\n } else if (data === this.ERASE_NON_DISPLAYED_MEMORY_) {\n this.nonDisplayed_ = createDisplayBuffer();\n } else if (data === this.RESUME_DIRECT_CAPTIONING_) {\n if (this.mode_ !== 'paintOn') {\n // NOTE: This should be removed when proper caption positioning is\n // implemented\n this.flushDisplayed(packet.pts);\n this.displayed_ = createDisplayBuffer();\n }\n this.mode_ = 'paintOn';\n this.startPts_ = packet.pts; // Append special characters to caption text\n } else if (this.isSpecialCharacter(char0, char1)) {\n // Bitmask char0 so that we can apply character transformations\n // regardless of field and data channel.\n // Then byte-shift to the left and OR with char1 so we can pass the\n // entire character code to `getCharFromCode`.\n char0 = (char0 & 0x03) << 8;\n text = 
getCharFromCode(char0 | char1);\n this[this.mode_](packet.pts, text);\n this.column_++; // Append extended characters to caption text\n } else if (this.isExtCharacter(char0, char1)) {\n // Extended characters always follow their \"non-extended\" equivalents.\n // IE if a \"è\" is desired, you'll always receive \"eè\"; non-compliant\n // decoders are supposed to drop the \"è\", while compliant decoders\n // backspace the \"e\" and insert \"è\".\n // Delete the previous character\n if (this.mode_ === 'popOn') {\n this.nonDisplayed_[this.row_].text = this.nonDisplayed_[this.row_].text.slice(0, -1);\n } else {\n this.displayed_[this.row_].text = this.displayed_[this.row_].text.slice(0, -1);\n } // Bitmask char0 so that we can apply character transformations\n // regardless of field and data channel.\n // Then byte-shift to the left and OR with char1 so we can pass the\n // entire character code to `getCharFromCode`.\n\n char0 = (char0 & 0x03) << 8;\n text = getCharFromCode(char0 | char1);\n this[this.mode_](packet.pts, text);\n this.column_++; // Process mid-row codes\n } else if (this.isMidRowCode(char0, char1)) {\n // Attributes are not additive, so clear all formatting\n this.clearFormatting(packet.pts); // According to the standard, mid-row codes\n // should be replaced with spaces, so add one now\n\n this[this.mode_](packet.pts, ' ');\n this.column_++;\n if ((char1 & 0xe) === 0xe) {\n this.addFormatting(packet.pts, ['i']);\n }\n if ((char1 & 0x1) === 0x1) {\n this.addFormatting(packet.pts, ['u']);\n } // Detect offset control codes and adjust cursor\n } else if (this.isOffsetControlCode(char0, char1)) {\n // Cursor position is set by indent PAC (see below) in 4-column\n // increments, with an additional offset code of 1-3 to reach any\n // of the 32 columns specified by CEA-608. So all we need to do\n // here is increment the column cursor by the given offset.\n const offset = char1 & 0x03; // For an offest value 1-3, set the offset for that caption\n // in the non-displayed array.\n\n this.nonDisplayed_[this.row_].offset = offset;\n this.column_ += offset; // Detect PACs (Preamble Address Codes)\n } else if (this.isPAC(char0, char1)) {\n // There's no logic for PAC -> row mapping, so we have to just\n // find the row code in an array and use its index :(\n var row = ROWS.indexOf(data & 0x1f20); // Configure the caption window if we're in roll-up mode\n\n if (this.mode_ === 'rollUp') {\n // This implies that the base row is incorrectly set.\n // As per the recommendation in CEA-608(Base Row Implementation), defer to the number\n // of roll-up rows set.\n if (row - this.rollUpRows_ + 1 < 0) {\n row = this.rollUpRows_ - 1;\n }\n this.setRollUp(packet.pts, row);\n } // Ensure the row is between 0 and 14, otherwise use the most\n // recent or default row.\n\n if (row !== this.row_ && row >= 0 && row <= 14) {\n // formatting is only persistent for current row\n this.clearFormatting(packet.pts);\n this.row_ = row;\n } // All PACs can apply underline, so detect and apply\n // (All odd-numbered second bytes set underline)\n\n if (char1 & 0x1 && this.formatting_.indexOf('u') === -1) {\n this.addFormatting(packet.pts, ['u']);\n }\n if ((data & 0x10) === 0x10) {\n // We've got an indent level code. 
Each successive even number\n // increments the column cursor by 4, so we can get the desired\n // column position by bit-shifting to the right (to get n/2)\n // and multiplying by 4.\n const indentations = (data & 0xe) >> 1;\n this.column_ = indentations * 4; // add to the number of indentations for positioning\n\n this.nonDisplayed_[this.row_].indent += indentations;\n }\n if (this.isColorPAC(char1)) {\n // it's a color code, though we only support white, which\n // can be either normal or italicized. white italics can be\n // either 0x4e or 0x6e depending on the row, so we just\n // bitwise-and with 0xe to see if italics should be turned on\n if ((char1 & 0xe) === 0xe) {\n this.addFormatting(packet.pts, ['i']);\n }\n } // We have a normal character in char0, and possibly one in char1\n } else if (this.isNormalChar(char0)) {\n if (char1 === 0x00) {\n char1 = null;\n }\n text = getCharFromCode(char0);\n text += getCharFromCode(char1);\n this[this.mode_](packet.pts, text);\n this.column_ += text.length;\n } // finish data processing\n };\n };\n\n Cea608Stream.prototype = new Stream$7(); // Trigger a cue point that captures the current state of the\n // display buffer\n\n Cea608Stream.prototype.flushDisplayed = function (pts) {\n const logWarning = index => {\n this.trigger('log', {\n level: 'warn',\n message: 'Skipping a malformed 608 caption at index ' + index + '.'\n });\n };\n const content = [];\n this.displayed_.forEach((row, i) => {\n if (row && row.text && row.text.length) {\n try {\n // remove spaces from the start and end of the string\n row.text = row.text.trim();\n } catch (e) {\n // Ordinarily, this shouldn't happen. However, caption\n // parsing errors should not throw exceptions and\n // break playback.\n logWarning(i);\n } // See the below link for more details on the following fields:\n // https://dvcs.w3.org/hg/text-tracks/raw-file/default/608toVTT/608toVTT.html#positioning-in-cea-608\n\n if (row.text.length) {\n content.push({\n // The text to be displayed in the caption from this specific row, with whitespace removed.\n text: row.text,\n // Value between 1 and 15 representing the PAC row used to calculate line height.\n line: i + 1,\n // A number representing the indent position by percentage (CEA-608 PAC indent code).\n // The value will be a number between 10 and 80. Offset is used to add an aditional\n // value to the position if necessary.\n position: 10 + Math.min(70, row.indent * 10) + row.offset * 2.5\n });\n }\n } else if (row === undefined || row === null) {\n logWarning(i);\n }\n });\n if (content.length) {\n this.trigger('data', {\n startPts: this.startPts_,\n endPts: pts,\n content,\n stream: this.name_\n });\n }\n };\n /**\n * Zero out the data, used for startup and on seek\n */\n\n Cea608Stream.prototype.reset = function () {\n this.mode_ = 'popOn'; // When in roll-up mode, the index of the last row that will\n // actually display captions. 
If a caption is shifted to a row\n // with a lower index than this, it is cleared from the display\n // buffer\n\n this.topRow_ = 0;\n this.startPts_ = 0;\n this.displayed_ = createDisplayBuffer();\n this.nonDisplayed_ = createDisplayBuffer();\n this.lastControlCode_ = null; // Track row and column for proper line-breaking and spacing\n\n this.column_ = 0;\n this.row_ = BOTTOM_ROW;\n this.rollUpRows_ = 2; // This variable holds currently-applied formatting\n\n this.formatting_ = [];\n };\n /**\n * Sets up control code and related constants for this instance\n */\n\n Cea608Stream.prototype.setConstants = function () {\n // The following attributes have these uses:\n // ext_ : char0 for mid-row codes, and the base for extended\n // chars (ext_+0, ext_+1, and ext_+2 are char0s for\n // extended codes)\n // control_: char0 for control codes, except byte-shifted to the\n // left so that we can do this.control_ | CONTROL_CODE\n // offset_: char0 for tab offset codes\n //\n // It's also worth noting that control codes, and _only_ control codes,\n // differ between field 1 and field2. Field 2 control codes are always\n // their field 1 value plus 1. That's why there's the \"| field\" on the\n // control value.\n if (this.dataChannel_ === 0) {\n this.BASE_ = 0x10;\n this.EXT_ = 0x11;\n this.CONTROL_ = (0x14 | this.field_) << 8;\n this.OFFSET_ = 0x17;\n } else if (this.dataChannel_ === 1) {\n this.BASE_ = 0x18;\n this.EXT_ = 0x19;\n this.CONTROL_ = (0x1c | this.field_) << 8;\n this.OFFSET_ = 0x1f;\n } // Constants for the LSByte command codes recognized by Cea608Stream. This\n // list is not exhaustive. For a more comprehensive listing and semantics see\n // http://www.gpo.gov/fdsys/pkg/CFR-2010-title47-vol1/pdf/CFR-2010-title47-vol1-sec15-119.pdf\n // Padding\n\n this.PADDING_ = 0x0000; // Pop-on Mode\n\n this.RESUME_CAPTION_LOADING_ = this.CONTROL_ | 0x20;\n this.END_OF_CAPTION_ = this.CONTROL_ | 0x2f; // Roll-up Mode\n\n this.ROLL_UP_2_ROWS_ = this.CONTROL_ | 0x25;\n this.ROLL_UP_3_ROWS_ = this.CONTROL_ | 0x26;\n this.ROLL_UP_4_ROWS_ = this.CONTROL_ | 0x27;\n this.CARRIAGE_RETURN_ = this.CONTROL_ | 0x2d; // paint-on mode\n\n this.RESUME_DIRECT_CAPTIONING_ = this.CONTROL_ | 0x29; // Erasure\n\n this.BACKSPACE_ = this.CONTROL_ | 0x21;\n this.ERASE_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2c;\n this.ERASE_NON_DISPLAYED_MEMORY_ = this.CONTROL_ | 0x2e;\n };\n /**\n * Detects if the 2-byte packet data is a special character\n *\n * Special characters have a second byte in the range 0x30 to 0x3f,\n * with the first byte being 0x11 (for data channel 1) or 0x19 (for\n * data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an special character\n */\n\n Cea608Stream.prototype.isSpecialCharacter = function (char0, char1) {\n return char0 === this.EXT_ && char1 >= 0x30 && char1 <= 0x3f;\n };\n /**\n * Detects if the 2-byte packet data is an extended character\n *\n * Extended characters have a second byte in the range 0x20 to 0x3f,\n * with the first byte being 0x12 or 0x13 (for data channel 1) or\n * 0x1a or 0x1b (for data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an extended character\n */\n\n Cea608Stream.prototype.isExtCharacter = function (char0, char1) {\n return (char0 === this.EXT_ + 1 || char0 === this.EXT_ + 2) && char1 >= 0x20 && char1 <= 0x3f;\n };\n /**\n * Detects if the 2-byte packet is a mid-row 
code\n *\n * Mid-row codes have a second byte in the range 0x20 to 0x2f, with\n * the first byte being 0x11 (for data channel 1) or 0x19 (for data\n * channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are a mid-row code\n */\n\n Cea608Stream.prototype.isMidRowCode = function (char0, char1) {\n return char0 === this.EXT_ && char1 >= 0x20 && char1 <= 0x2f;\n };\n /**\n * Detects if the 2-byte packet is an offset control code\n *\n * Offset control codes have a second byte in the range 0x21 to 0x23,\n * with the first byte being 0x17 (for data channel 1) or 0x1f (for\n * data channel 2).\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are an offset control code\n */\n\n Cea608Stream.prototype.isOffsetControlCode = function (char0, char1) {\n return char0 === this.OFFSET_ && char1 >= 0x21 && char1 <= 0x23;\n };\n /**\n * Detects if the 2-byte packet is a Preamble Address Code\n *\n * PACs have a first byte in the range 0x10 to 0x17 (for data channel 1)\n * or 0x18 to 0x1f (for data channel 2), with the second byte in the\n * range 0x40 to 0x7f.\n *\n * @param {Integer} char0 The first byte\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the 2 bytes are a PAC\n */\n\n Cea608Stream.prototype.isPAC = function (char0, char1) {\n return char0 >= this.BASE_ && char0 < this.BASE_ + 8 && char1 >= 0x40 && char1 <= 0x7f;\n };\n /**\n * Detects if a packet's second byte is in the range of a PAC color code\n *\n * PAC color codes have the second byte be in the range 0x40 to 0x4f, or\n * 0x60 to 0x6f.\n *\n * @param {Integer} char1 The second byte\n * @return {Boolean} Whether the byte is a color PAC\n */\n\n Cea608Stream.prototype.isColorPAC = function (char1) {\n return char1 >= 0x40 && char1 <= 0x4f || char1 >= 0x60 && char1 <= 0x7f;\n };\n /**\n * Detects if a single byte is in the range of a normal character\n *\n * Normal text bytes are in the range 0x20 to 0x7f.\n *\n * @param {Integer} char The byte\n * @return {Boolean} Whether the byte is a normal character\n */\n\n Cea608Stream.prototype.isNormalChar = function (char) {\n return char >= 0x20 && char <= 0x7f;\n };\n /**\n * Configures roll-up\n *\n * @param {Integer} pts Current PTS\n * @param {Integer} newBaseRow Used by PACs to slide the current window to\n * a new position\n */\n\n Cea608Stream.prototype.setRollUp = function (pts, newBaseRow) {\n // Reset the base row to the bottom row when switching modes\n if (this.mode_ !== 'rollUp') {\n this.row_ = BOTTOM_ROW;\n this.mode_ = 'rollUp'; // Spec says to wipe memories when switching to roll-up\n\n this.flushDisplayed(pts);\n this.nonDisplayed_ = createDisplayBuffer();\n this.displayed_ = createDisplayBuffer();\n }\n if (newBaseRow !== undefined && newBaseRow !== this.row_) {\n // move currently displayed captions (up or down) to the new base row\n for (var i = 0; i < this.rollUpRows_; i++) {\n this.displayed_[newBaseRow - i] = this.displayed_[this.row_ - i];\n this.displayed_[this.row_ - i] = {\n text: '',\n indent: 0,\n offset: 0\n };\n }\n }\n if (newBaseRow === undefined) {\n newBaseRow = this.row_;\n }\n this.topRow_ = newBaseRow - this.rollUpRows_ + 1;\n }; // Adds the opening HTML tag for the passed character to the caption text,\n // and keeps track of it for later closing\n\n Cea608Stream.prototype.addFormatting = function (pts, format) {\n this.formatting_ = 
this.formatting_.concat(format);\n var text = format.reduce(function (text, format) {\n return text + '<' + format + '>';\n }, '');\n this[this.mode_](pts, text);\n }; // Adds HTML closing tags for current formatting to caption text and\n // clears remembered formatting\n\n Cea608Stream.prototype.clearFormatting = function (pts) {\n if (!this.formatting_.length) {\n return;\n }\n var text = this.formatting_.reverse().reduce(function (text, format) {\n return text + '</' + format + '>';\n }, '');\n this.formatting_ = [];\n this[this.mode_](pts, text);\n }; // Mode Implementations\n\n Cea608Stream.prototype.popOn = function (pts, text) {\n var baseRow = this.nonDisplayed_[this.row_].text; // buffer characters\n\n baseRow += text;\n this.nonDisplayed_[this.row_].text = baseRow;\n };\n Cea608Stream.prototype.rollUp = function (pts, text) {\n var baseRow = this.displayed_[this.row_].text;\n baseRow += text;\n this.displayed_[this.row_].text = baseRow;\n };\n Cea608Stream.prototype.shiftRowsUp_ = function () {\n var i; // clear out inactive rows\n\n for (i = 0; i < this.topRow_; i++) {\n this.displayed_[i] = {\n text: '',\n indent: 0,\n offset: 0\n };\n }\n for (i = this.row_ + 1; i < BOTTOM_ROW + 1; i++) {\n this.displayed_[i] = {\n text: '',\n indent: 0,\n offset: 0\n };\n } // shift displayed rows up\n\n for (i = this.topRow_; i < this.row_; i++) {\n this.displayed_[i] = this.displayed_[i + 1];\n } // clear out the bottom row\n\n this.displayed_[this.row_] = {\n text: '',\n indent: 0,\n offset: 0\n };\n };\n Cea608Stream.prototype.paintOn = function (pts, text) {\n var baseRow = this.displayed_[this.row_].text;\n baseRow += text;\n this.displayed_[this.row_].text = baseRow;\n }; // exports\n\n var captionStream = {\n CaptionStream: CaptionStream$2,\n Cea608Stream: Cea608Stream,\n Cea708Stream: Cea708Stream\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var streamTypes = {\n H264_STREAM_TYPE: 0x1B,\n ADTS_STREAM_TYPE: 0x0F,\n METADATA_STREAM_TYPE: 0x15\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Accepts program elementary stream (PES) data events and corrects\n * decode and presentation time stamps to account for a rollover\n * of the 33 bit value.\n */\n\n var Stream$6 = stream;\n var MAX_TS = 8589934592;\n var RO_THRESH = 4294967296;\n var TYPE_SHARED = 'shared';\n var handleRollover$1 = function (value, reference) {\n var direction = 1;\n if (value > reference) {\n // If the current timestamp value is greater than our reference timestamp and we detect a\n // timestamp rollover, this means the roll over is happening in the opposite direction.\n // Example scenario: Enter a long stream/video just after a rollover occurred. The reference\n // point will be set to a small number, e.g. 1. The user then seeks backwards over the\n // rollover point. In loading this segment, the timestamp values will be very large,\n // e.g. 2^33 - 1. 
Since this comes before the data we loaded previously, we want to adjust\n // the time stamp to be `value - 2^33`.\n direction = -1;\n } // Note: A seek forwards or back that is greater than the RO_THRESH (2^32, ~13 hours) will\n // cause an incorrect adjustment.\n\n while (Math.abs(reference - value) > RO_THRESH) {\n value += direction * MAX_TS;\n }\n return value;\n };\n var TimestampRolloverStream$1 = function (type) {\n var lastDTS, referenceDTS;\n TimestampRolloverStream$1.prototype.init.call(this); // The \"shared\" type is used in cases where a stream will contain muxed\n // video and audio. We could use `undefined` here, but having a string\n // makes debugging a little clearer.\n\n this.type_ = type || TYPE_SHARED;\n this.push = function (data) {\n /**\n * Rollover stream expects data from elementary stream.\n * Elementary stream can push forward 2 types of data\n * - Parsed Video/Audio/Timed-metadata PES (packetized elementary stream) packets\n * - Tracks metadata from PMT (Program Map Table)\n * Rollover stream expects pts/dts info to be available, since it stores lastDTS\n * We should ignore non-PES packets since they may override lastDTS to undefined.\n * lastDTS is important to signal the next segments\n * about rollover from the previous segments.\n */\n if (data.type === 'metadata') {\n this.trigger('data', data);\n return;\n } // Any \"shared\" rollover streams will accept _all_ data. Otherwise,\n // streams will only accept data that matches their type.\n\n if (this.type_ !== TYPE_SHARED && data.type !== this.type_) {\n return;\n }\n if (referenceDTS === undefined) {\n referenceDTS = data.dts;\n }\n data.dts = handleRollover$1(data.dts, referenceDTS);\n data.pts = handleRollover$1(data.pts, referenceDTS);\n lastDTS = data.dts;\n this.trigger('data', data);\n };\n this.flush = function () {\n referenceDTS = lastDTS;\n this.trigger('done');\n };\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n this.discontinuity = function () {\n referenceDTS = void 0;\n lastDTS = void 0;\n };\n this.reset = function () {\n this.discontinuity();\n this.trigger('reset');\n };\n };\n TimestampRolloverStream$1.prototype = new Stream$6();\n var timestampRolloverStream = {\n TimestampRolloverStream: TimestampRolloverStream$1,\n handleRollover: handleRollover$1\n }; // Once IE11 support is dropped, this function should be removed.\n\n var typedArrayIndexOf$1 = (typedArray, element, fromIndex) => {\n if (!typedArray) {\n return -1;\n }\n var currentIndex = fromIndex;\n for (; currentIndex < typedArray.length; currentIndex++) {\n if (typedArray[currentIndex] === element) {\n return currentIndex;\n }\n }\n return -1;\n };\n var typedArray = {\n typedArrayIndexOf: typedArrayIndexOf$1\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Tools for parsing ID3 frame data\n * @see http://id3.org/id3v2.3.0\n */\n\n var typedArrayIndexOf = typedArray.typedArrayIndexOf,\n // Frames that allow different types of text encoding contain a text\n // encoding description byte [ID3v2.4.0 section 4.]\n textEncodingDescriptionByte = {\n Iso88591: 0x00,\n // ISO-8859-1, terminated with \\0.\n Utf16: 0x01,\n // UTF-16 encoded Unicode BOM, terminated with \\0\\0\n Utf16be: 0x02,\n // UTF-16BE encoded Unicode, without BOM, terminated with \\0\\0\n Utf8: 0x03 // UTF-8 encoded Unicode, terminated with \\0\n },\n // return a percent-encoded representation of the specified byte range\n // @see 
http://en.wikipedia.org/wiki/Percent-encoding \n percentEncode$1 = function (bytes, start, end) {\n var i,\n result = '';\n for (i = start; i < end; i++) {\n result += '%' + ('00' + bytes[i].toString(16)).slice(-2);\n }\n return result;\n },\n // return the string representation of the specified byte range,\n // interpreted as UTf-8.\n parseUtf8 = function (bytes, start, end) {\n return decodeURIComponent(percentEncode$1(bytes, start, end));\n },\n // return the string representation of the specified byte range,\n // interpreted as ISO-8859-1.\n parseIso88591$1 = function (bytes, start, end) {\n return unescape(percentEncode$1(bytes, start, end)); // jshint ignore:line\n },\n parseSyncSafeInteger$1 = function (data) {\n return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];\n },\n frameParsers = {\n 'APIC': function (frame) {\n var i = 1,\n mimeTypeEndIndex,\n descriptionEndIndex,\n LINK_MIME_TYPE = '-->';\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n } // parsing fields [ID3v2.4.0 section 4.14.]\n\n mimeTypeEndIndex = typedArrayIndexOf(frame.data, 0, i);\n if (mimeTypeEndIndex < 0) {\n // malformed frame\n return;\n } // parsing Mime type field (terminated with \\0)\n\n frame.mimeType = parseIso88591$1(frame.data, i, mimeTypeEndIndex);\n i = mimeTypeEndIndex + 1; // parsing 1-byte Picture Type field\n\n frame.pictureType = frame.data[i];\n i++;\n descriptionEndIndex = typedArrayIndexOf(frame.data, 0, i);\n if (descriptionEndIndex < 0) {\n // malformed frame\n return;\n } // parsing Description field (terminated with \\0)\n\n frame.description = parseUtf8(frame.data, i, descriptionEndIndex);\n i = descriptionEndIndex + 1;\n if (frame.mimeType === LINK_MIME_TYPE) {\n // parsing Picture Data field as URL (always represented as ISO-8859-1 [ID3v2.4.0 section 4.])\n frame.url = parseIso88591$1(frame.data, i, frame.data.length);\n } else {\n // parsing Picture Data field as binary data\n frame.pictureData = frame.data.subarray(i, frame.data.length);\n }\n },\n 'T*': function (frame) {\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n } // parse text field, do not include null terminator in the frame value\n // frames that allow different types of encoding contain terminated text [ID3v2.4.0 section 4.]\n\n frame.value = parseUtf8(frame.data, 1, frame.data.length).replace(/\\0*$/, ''); // text information frames supports multiple strings, stored as a terminator separated list [ID3v2.4.0 section 4.2.]\n\n frame.values = frame.value.split('\\0');\n },\n 'TXXX': function (frame) {\n var descriptionEndIndex;\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n }\n descriptionEndIndex = typedArrayIndexOf(frame.data, 0, 1);\n if (descriptionEndIndex === -1) {\n return;\n } // parse the text fields\n\n frame.description = parseUtf8(frame.data, 1, descriptionEndIndex); // do not include the null terminator in the tag value\n // frames that allow different types of encoding contain terminated text\n // [ID3v2.4.0 section 4.]\n\n frame.value = parseUtf8(frame.data, descriptionEndIndex + 1, frame.data.length).replace(/\\0*$/, '');\n frame.data = frame.value;\n },\n 'W*': function (frame) {\n // parse URL field; URL fields are always represented as ISO-8859-1 [ID3v2.4.0 section 4.]\n // if the value is followed by a string termination all the following information 
should be ignored [ID3v2.4.0 section 4.3]\n frame.url = parseIso88591$1(frame.data, 0, frame.data.length).replace(/\\0.*$/, '');\n },\n 'WXXX': function (frame) {\n var descriptionEndIndex;\n if (frame.data[0] !== textEncodingDescriptionByte.Utf8) {\n // ignore frames with unrecognized character encodings\n return;\n }\n descriptionEndIndex = typedArrayIndexOf(frame.data, 0, 1);\n if (descriptionEndIndex === -1) {\n return;\n } // parse the description and URL fields\n\n frame.description = parseUtf8(frame.data, 1, descriptionEndIndex); // URL fields are always represented as ISO-8859-1 [ID3v2.4.0 section 4.]\n // if the value is followed by a string termination all the following information\n // should be ignored [ID3v2.4.0 section 4.3]\n\n frame.url = parseIso88591$1(frame.data, descriptionEndIndex + 1, frame.data.length).replace(/\\0.*$/, '');\n },\n 'PRIV': function (frame) {\n var i;\n for (i = 0; i < frame.data.length; i++) {\n if (frame.data[i] === 0) {\n // parse the description and URL fields\n frame.owner = parseIso88591$1(frame.data, 0, i);\n break;\n }\n }\n frame.privateData = frame.data.subarray(i + 1);\n frame.data = frame.privateData;\n }\n };\n var parseId3Frames$1 = function (data) {\n var frameSize,\n frameHeader,\n frameStart = 10,\n tagSize = 0,\n frames = []; // If we don't have enough data for a header, 10 bytes, \n // or 'ID3' in the first 3 bytes this is not a valid ID3 tag.\n\n if (data.length < 10 || data[0] !== 'I'.charCodeAt(0) || data[1] !== 'D'.charCodeAt(0) || data[2] !== '3'.charCodeAt(0)) {\n return;\n } // the frame size is transmitted as a 28-bit integer in the\n // last four bytes of the ID3 header.\n // The most significant bit of each byte is dropped and the\n // results concatenated to recover the actual value.\n\n tagSize = parseSyncSafeInteger$1(data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more\n // convenient for our comparisons to include it\n\n tagSize += 10; // check bit 6 of byte 5 for the extended header flag.\n\n var hasExtendedHeader = data[5] & 0x40;\n if (hasExtendedHeader) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += parseSyncSafeInteger$1(data.subarray(10, 14));\n tagSize -= parseSyncSafeInteger$1(data.subarray(16, 20)); // clip any padding off the end\n } // parse one or more ID3 frames\n // http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n do {\n // determine the number of bytes in this frame\n frameSize = parseSyncSafeInteger$1(data.subarray(frameStart + 4, frameStart + 8));\n if (frameSize < 1) {\n break;\n }\n frameHeader = String.fromCharCode(data[frameStart], data[frameStart + 1], data[frameStart + 2], data[frameStart + 3]);\n var frame = {\n id: frameHeader,\n data: data.subarray(frameStart + 10, frameStart + frameSize + 10)\n };\n frame.key = frame.id; // parse frame values\n\n if (frameParsers[frame.id]) {\n // use frame specific parser\n frameParsers[frame.id](frame);\n } else if (frame.id[0] === 'T') {\n // use text frame generic parser\n frameParsers['T*'](frame);\n } else if (frame.id[0] === 'W') {\n // use URL link frame generic parser\n frameParsers['W*'](frame);\n }\n frames.push(frame);\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < tagSize);\n return frames;\n };\n var parseId3 = {\n parseId3Frames: parseId3Frames$1,\n parseSyncSafeInteger: parseSyncSafeInteger$1,\n frameParsers: frameParsers\n };\n /**\n * mux.js\n *\n * 
Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Accepts program elementary stream (PES) data events and parses out\n * ID3 metadata from them, if present.\n * @see http://id3.org/id3v2.3.0\n */\n\n var Stream$5 = stream,\n StreamTypes$3 = streamTypes,\n id3 = parseId3,\n MetadataStream;\n MetadataStream = function (options) {\n var settings = {\n // the bytes of the program-level descriptor field in MP2T\n // see ISO/IEC 13818-1:2013 (E), section 2.6 \"Program and\n // program element descriptors\"\n descriptor: options && options.descriptor\n },\n // the total size in bytes of the ID3 tag being parsed\n tagSize = 0,\n // tag data that is not complete enough to be parsed\n buffer = [],\n // the total number of bytes currently in the buffer\n bufferSize = 0,\n i;\n MetadataStream.prototype.init.call(this); // calculate the text track in-band metadata track dispatch type\n // https://html.spec.whatwg.org/multipage/embedded-content.html#steps-to-expose-a-media-resource-specific-text-track\n\n this.dispatchType = StreamTypes$3.METADATA_STREAM_TYPE.toString(16);\n if (settings.descriptor) {\n for (i = 0; i < settings.descriptor.length; i++) {\n this.dispatchType += ('00' + settings.descriptor[i].toString(16)).slice(-2);\n }\n }\n this.push = function (chunk) {\n var tag, frameStart, frameSize, frame, i, frameHeader;\n if (chunk.type !== 'timed-metadata') {\n return;\n } // if data_alignment_indicator is set in the PES header,\n // we must have the start of a new ID3 tag. Assume anything\n // remaining in the buffer was malformed and throw it out\n\n if (chunk.dataAlignmentIndicator) {\n bufferSize = 0;\n buffer.length = 0;\n } // ignore events that don't look like ID3 data\n\n if (buffer.length === 0 && (chunk.data.length < 10 || chunk.data[0] !== 'I'.charCodeAt(0) || chunk.data[1] !== 'D'.charCodeAt(0) || chunk.data[2] !== '3'.charCodeAt(0))) {\n this.trigger('log', {\n level: 'warn',\n message: 'Skipping unrecognized metadata packet'\n });\n return;\n } // add this chunk to the data we've collected so far\n\n buffer.push(chunk);\n bufferSize += chunk.data.byteLength; // grab the size of the entire frame from the ID3 header\n\n if (buffer.length === 1) {\n // the frame size is transmitted as a 28-bit integer in the\n // last four bytes of the ID3 header.\n // The most significant bit of each byte is dropped and the\n // results concatenated to recover the actual value.\n tagSize = id3.parseSyncSafeInteger(chunk.data.subarray(6, 10)); // ID3 reports the tag size excluding the header but it's more\n // convenient for our comparisons to include it\n\n tagSize += 10;\n } // if the entire frame has not arrived, wait for more data\n\n if (bufferSize < tagSize) {\n return;\n } // collect the entire frame so it can be parsed\n\n tag = {\n data: new Uint8Array(tagSize),\n frames: [],\n pts: buffer[0].pts,\n dts: buffer[0].dts\n };\n for (i = 0; i < tagSize;) {\n tag.data.set(buffer[0].data.subarray(0, tagSize - i), i);\n i += buffer[0].data.byteLength;\n bufferSize -= buffer[0].data.byteLength;\n buffer.shift();\n } // find the start of the first frame and the end of the tag\n\n frameStart = 10;\n if (tag.data[5] & 0x40) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += id3.parseSyncSafeInteger(tag.data.subarray(10, 14)); // clip any padding off the end\n\n tagSize -= id3.parseSyncSafeInteger(tag.data.subarray(16, 20));\n } // parse one or more ID3 frames\n // 
http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n do {\n // determine the number of bytes in this frame\n frameSize = id3.parseSyncSafeInteger(tag.data.subarray(frameStart + 4, frameStart + 8));\n if (frameSize < 1) {\n this.trigger('log', {\n level: 'warn',\n message: 'Malformed ID3 frame encountered. Skipping remaining metadata parsing.'\n }); // If the frame is malformed, don't parse any further frames but allow previous valid parsed frames\n // to be sent along.\n\n break;\n }\n frameHeader = String.fromCharCode(tag.data[frameStart], tag.data[frameStart + 1], tag.data[frameStart + 2], tag.data[frameStart + 3]);\n frame = {\n id: frameHeader,\n data: tag.data.subarray(frameStart + 10, frameStart + frameSize + 10)\n };\n frame.key = frame.id; // parse frame values\n\n if (id3.frameParsers[frame.id]) {\n // use frame specific parser\n id3.frameParsers[frame.id](frame);\n } else if (frame.id[0] === 'T') {\n // use text frame generic parser\n id3.frameParsers['T*'](frame);\n } else if (frame.id[0] === 'W') {\n // use URL link frame generic parser\n id3.frameParsers['W*'](frame);\n } // handle the special PRIV frame used to indicate the start\n // time for raw AAC data\n\n if (frame.owner === 'com.apple.streaming.transportStreamTimestamp') {\n var d = frame.data,\n size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;\n size *= 4;\n size += d[7] & 0x03;\n frame.timeStamp = size; // in raw AAC, all subsequent data will be timestamped based\n // on the value of this frame\n // we couldn't have known the appropriate pts and dts before\n // parsing this ID3 tag so set those values now\n\n if (tag.pts === undefined && tag.dts === undefined) {\n tag.pts = frame.timeStamp;\n tag.dts = frame.timeStamp;\n }\n this.trigger('timestamp', frame);\n }\n tag.frames.push(frame);\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < tagSize);\n this.trigger('data', tag);\n };\n };\n MetadataStream.prototype = new Stream$5();\n var metadataStream = MetadataStream;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A stream-based mp2t to mp4 converter. 
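// Illustrative sketch (not part of mux.js): the two bits of ID3 arithmetic used above.
// parseSyncSafeInteger reads a 28-bit "syncsafe" size where the high bit of every byte is
// always zero, and the PRIV frame owned by 'com.apple.streaming.transportStreamTimestamp'
// carries a 33-bit 90 kHz timestamp in an 8-byte big-endian field; building the top 31 bits
// with bitwise operators and then multiplying by 4 keeps the math inside JavaScript's
// 32-bit bitwise range. The byte values in the examples are assumed, not from a real stream.
var syncSafeIntegerSketch = function (b) {
  return b[0] << 21 | b[1] << 14 | b[2] << 7 | b[3];
};
var appleTimestampSketch = function (d) {
  // d is the 8-byte payload that follows the null-terminated owner string
  var ts = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 | d[6] << 6 | d[7] >>> 2;
  return ts * 4 + (d[7] & 0x03);
};
// syncSafeIntegerSketch([0x00, 0x00, 0x02, 0x01]) === 257
// appleTimestampSketch([0, 0, 0, 0, 0x00, 0x0d, 0xbb, 0xa0]) === 900000, i.e. 10 seconds at 90 kHz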
This utility can be used to\n * deliver mp4s to a SourceBuffer on platforms that support native\n * Media Source Extensions.\n */\n\n var Stream$4 = stream,\n CaptionStream$1 = captionStream,\n StreamTypes$2 = streamTypes,\n TimestampRolloverStream = timestampRolloverStream.TimestampRolloverStream; // object types\n\n var TransportPacketStream, TransportParseStream, ElementaryStream; // constants\n\n var MP2T_PACKET_LENGTH$1 = 188,\n // bytes\n SYNC_BYTE$1 = 0x47;\n /**\n * Splits an incoming stream of binary data into MPEG-2 Transport\n * Stream packets.\n */\n\n TransportPacketStream = function () {\n var buffer = new Uint8Array(MP2T_PACKET_LENGTH$1),\n bytesInBuffer = 0;\n TransportPacketStream.prototype.init.call(this); // Deliver new bytes to the stream.\n\n /**\n * Split a stream of data into M2TS packets\n **/\n\n this.push = function (bytes) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH$1,\n everything; // If there are bytes remaining from the last segment, prepend them to the\n // bytes that were pushed in\n\n if (bytesInBuffer) {\n everything = new Uint8Array(bytes.byteLength + bytesInBuffer);\n everything.set(buffer.subarray(0, bytesInBuffer));\n everything.set(bytes, bytesInBuffer);\n bytesInBuffer = 0;\n } else {\n everything = bytes;\n } // While we have enough data for a packet\n\n while (endIndex < everything.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (everything[startIndex] === SYNC_BYTE$1 && everything[endIndex] === SYNC_BYTE$1) {\n // We found a packet so emit it and jump one whole packet forward in\n // the stream\n this.trigger('data', everything.subarray(startIndex, endIndex));\n startIndex += MP2T_PACKET_LENGTH$1;\n endIndex += MP2T_PACKET_LENGTH$1;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n } // If there was some data left over at the end of the segment that couldn't\n // possibly be a whole packet, keep it because it might be the start of a packet\n // that continues in the next segment\n\n if (startIndex < everything.byteLength) {\n buffer.set(everything.subarray(startIndex), 0);\n bytesInBuffer = everything.byteLength - startIndex;\n }\n };\n /**\n * Passes identified M2TS packets to the TransportParseStream to be parsed\n **/\n\n this.flush = function () {\n // If the buffer contains a whole packet when we are being flushed, emit it\n // and empty the buffer. 
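// Illustrative sketch (not part of mux.js): the resynchronization rule TransportPacketStream
// applies above. A byte offset is only accepted as a packet start when both it and the offset
// one packet length (188 bytes) later hold the 0x47 sync byte; otherwise the scan slides
// forward one byte at a time until alignment is recovered. Leftover bytes at the end are the
// tail the real stream keeps in its buffer for the next push().
var splitTsPacketsSketch = function (bytes) {
  var MP2T_PACKET_LENGTH = 188;
  var SYNC_BYTE = 0x47;
  var packets = [];
  var start = 0;
  var end = MP2T_PACKET_LENGTH;
  while (end < bytes.byteLength) {
    if (bytes[start] === SYNC_BYTE && bytes[end] === SYNC_BYTE) {
      packets.push(bytes.subarray(start, end));
      start += MP2T_PACKET_LENGTH;
      end += MP2T_PACKET_LENGTH;
    } else {
      start++;
      end++;
    }
  }
  return { packets: packets, remainder: bytes.subarray(start) };
};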
Otherwise hold onto the data because it may be\n // important for decoding the next segment\n if (bytesInBuffer === MP2T_PACKET_LENGTH$1 && buffer[0] === SYNC_BYTE$1) {\n this.trigger('data', buffer);\n bytesInBuffer = 0;\n }\n this.trigger('done');\n };\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n this.reset = function () {\n bytesInBuffer = 0;\n this.trigger('reset');\n };\n };\n TransportPacketStream.prototype = new Stream$4();\n /**\n * Accepts an MP2T TransportPacketStream and emits data events with parsed\n * forms of the individual transport stream packets.\n */\n\n TransportParseStream = function () {\n var parsePsi, parsePat, parsePmt, self;\n TransportParseStream.prototype.init.call(this);\n self = this;\n this.packetsWaitingForPmt = [];\n this.programMapTable = undefined;\n parsePsi = function (payload, psi) {\n var offset = 0; // PSI packets may be split into multiple sections and those\n // sections may be split into multiple packets. If a PSI\n // section starts in this packet, the payload_unit_start_indicator\n // will be true and the first byte of the payload will indicate\n // the offset from the current position to the start of the\n // section.\n\n if (psi.payloadUnitStartIndicator) {\n offset += payload[offset] + 1;\n }\n if (psi.type === 'pat') {\n parsePat(payload.subarray(offset), psi);\n } else {\n parsePmt(payload.subarray(offset), psi);\n }\n };\n parsePat = function (payload, pat) {\n pat.section_number = payload[7]; // eslint-disable-line camelcase\n\n pat.last_section_number = payload[8]; // eslint-disable-line camelcase\n // skip the PSI header and parse the first PMT entry\n\n self.pmtPid = (payload[10] & 0x1F) << 8 | payload[11];\n pat.pmtPid = self.pmtPid;\n };\n /**\n * Parse out the relevant fields of a Program Map Table (PMT).\n * @param payload {Uint8Array} the PMT-specific portion of an MP2T\n * packet. The first byte in this array should be the table_id\n * field.\n * @param pmt {object} the object that should be decorated with\n * fields parsed from the PMT.\n */\n\n parsePmt = function (payload, pmt) {\n var sectionLength, tableEnd, programInfoLength, offset; // PMTs can be sent ahead of the time when they should actually\n // take effect. We don't believe this should ever be the case\n // for HLS but we'll ignore \"forward\" PMT declarations if we see\n // them. Future PMT declarations have the current_next_indicator\n // set to zero.\n\n if (!(payload[5] & 0x01)) {\n return;\n } // overwrite any existing program map table\n\n self.programMapTable = {\n video: null,\n audio: null,\n 'timed-metadata': {}\n }; // the mapping table ends at the end of the current section\n\n sectionLength = (payload[1] & 0x0f) << 8 | payload[2];\n tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n\n programInfoLength = (payload[10] & 0x0f) << 8 | payload[11]; // advance the offset to the first entry in the mapping table\n\n offset = 12 + programInfoLength;\n while (offset < tableEnd) {\n var streamType = payload[offset];\n var pid = (payload[offset + 1] & 0x1F) << 8 | payload[offset + 2]; // only map a single elementary_pid for audio and video stream types\n // TODO: should this be done for metadata too? 
for now maintain behavior of\n // multiple metadata streams\n\n if (streamType === StreamTypes$2.H264_STREAM_TYPE && self.programMapTable.video === null) {\n self.programMapTable.video = pid;\n } else if (streamType === StreamTypes$2.ADTS_STREAM_TYPE && self.programMapTable.audio === null) {\n self.programMapTable.audio = pid;\n } else if (streamType === StreamTypes$2.METADATA_STREAM_TYPE) {\n // map pid to stream type for metadata streams\n self.programMapTable['timed-metadata'][pid] = streamType;\n } // move to the next table entry\n // skip past the elementary stream descriptors, if present\n\n offset += ((payload[offset + 3] & 0x0F) << 8 | payload[offset + 4]) + 5;\n } // record the map on the packet as well\n\n pmt.programMapTable = self.programMapTable;\n };\n /**\n * Deliver a new MP2T packet to the next stream in the pipeline.\n */\n\n this.push = function (packet) {\n var result = {},\n offset = 4;\n result.payloadUnitStartIndicator = !!(packet[1] & 0x40); // pid is a 13-bit field starting at the last bit of packet[1]\n\n result.pid = packet[1] & 0x1f;\n result.pid <<= 8;\n result.pid |= packet[2]; // if an adaption field is present, its length is specified by the\n // fifth byte of the TS packet header. The adaptation field is\n // used to add stuffing to PES packets that don't fill a complete\n // TS packet, and to specify some forms of timing and control data\n // that we do not currently use.\n\n if ((packet[3] & 0x30) >>> 4 > 0x01) {\n offset += packet[offset] + 1;\n } // parse the rest of the packet based on the type\n\n if (result.pid === 0) {\n result.type = 'pat';\n parsePsi(packet.subarray(offset), result);\n this.trigger('data', result);\n } else if (result.pid === this.pmtPid) {\n result.type = 'pmt';\n parsePsi(packet.subarray(offset), result);\n this.trigger('data', result); // if there are any packets waiting for a PMT to be found, process them now\n\n while (this.packetsWaitingForPmt.length) {\n this.processPes_.apply(this, this.packetsWaitingForPmt.shift());\n }\n } else if (this.programMapTable === undefined) {\n // When we have not seen a PMT yet, defer further processing of\n // PES packets until one has been parsed\n this.packetsWaitingForPmt.push([packet, offset, result]);\n } else {\n this.processPes_(packet, offset, result);\n }\n };\n this.processPes_ = function (packet, offset, result) {\n // set the appropriate stream type\n if (result.pid === this.programMapTable.video) {\n result.streamType = StreamTypes$2.H264_STREAM_TYPE;\n } else if (result.pid === this.programMapTable.audio) {\n result.streamType = StreamTypes$2.ADTS_STREAM_TYPE;\n } else {\n // if not video or audio, it is timed-metadata or unknown\n // if unknown, streamType will be undefined\n result.streamType = this.programMapTable['timed-metadata'][result.pid];\n }\n result.type = 'pes';\n result.data = packet.subarray(offset);\n this.trigger('data', result);\n };\n };\n TransportParseStream.prototype = new Stream$4();\n TransportParseStream.STREAM_TYPES = {\n h264: 0x1b,\n adts: 0x0f\n };\n /**\n * Reconsistutes program elementary stream (PES) packets from parsed\n * transport stream packets. 
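// Illustrative sketch (not part of mux.js): the transport packet header fields read by
// TransportParseStream.push above. The 13-bit PID spans bytes 1-2, payload_unit_start_indicator
// is bit 0x40 of byte 1, and adaptation_field_control values 2 and 3 mean an adaptation field
// (whose length sits at byte 4) has to be skipped before the payload begins.
var parseTsHeaderSketch = function (packet) {
  var offset = 4;
  var payloadUnitStartIndicator = !!(packet[1] & 0x40);
  var pid = (packet[1] & 0x1f) << 8 | packet[2];
  if ((packet[3] & 0x30) >>> 4 > 0x01) {
    offset += packet[offset] + 1; // adaptation_field_length plus the length byte itself
  }
  return { pid: pid, payloadUnitStartIndicator: payloadUnitStartIndicator, payloadOffset: offset };
};
// For a PAT packet beginning 0x47 0x40 0x00 0x10 this returns
// { pid: 0, payloadUnitStartIndicator: true, payloadOffset: 4 }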
That is, if you pipe an\n * mp2t.TransportParseStream into a mp2t.ElementaryStream, the output\n * events will be events which capture the bytes for individual PES\n * packets plus relevant metadata that has been extracted from the\n * container.\n */\n\n ElementaryStream = function () {\n var self = this,\n segmentHadPmt = false,\n // PES packet fragments\n video = {\n data: [],\n size: 0\n },\n audio = {\n data: [],\n size: 0\n },\n timedMetadata = {\n data: [],\n size: 0\n },\n programMapTable,\n parsePes = function (payload, pes) {\n var ptsDtsFlags;\n const startPrefix = payload[0] << 16 | payload[1] << 8 | payload[2]; // default to an empty array\n\n pes.data = new Uint8Array(); // In certain live streams, the start of a TS fragment has ts packets\n // that are frame data that is continuing from the previous fragment. This\n // is to check that the pes data is the start of a new pes payload\n\n if (startPrefix !== 1) {\n return;\n } // get the packet length, this will be 0 for video\n\n pes.packetLength = 6 + (payload[4] << 8 | payload[5]); // find out if this packets starts a new keyframe\n\n pes.dataAlignmentIndicator = (payload[6] & 0x04) !== 0; // PES packets may be annotated with a PTS value, or a PTS value\n // and a DTS value. Determine what combination of values is\n // available to work with.\n\n ptsDtsFlags = payload[7]; // PTS and DTS are normally stored as a 33-bit number. Javascript\n // performs all bitwise operations on 32-bit integers but javascript\n // supports a much greater range (52-bits) of integer using standard\n // mathematical operations.\n // We construct a 31-bit value using bitwise operators over the 31\n // most significant bits and then multiply by 4 (equal to a left-shift\n // of 2) before we add the final 2 least significant bits of the\n // timestamp (equal to an OR.)\n\n if (ptsDtsFlags & 0xC0) {\n // the PTS and DTS are not written out directly. 
For information\n // on how they are encoded, see\n // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n pes.pts = (payload[9] & 0x0E) << 27 | (payload[10] & 0xFF) << 20 | (payload[11] & 0xFE) << 12 | (payload[12] & 0xFF) << 5 | (payload[13] & 0xFE) >>> 3;\n pes.pts *= 4; // Left shift by 2\n\n pes.pts += (payload[13] & 0x06) >>> 1; // OR by the two LSBs\n\n pes.dts = pes.pts;\n if (ptsDtsFlags & 0x40) {\n pes.dts = (payload[14] & 0x0E) << 27 | (payload[15] & 0xFF) << 20 | (payload[16] & 0xFE) << 12 | (payload[17] & 0xFF) << 5 | (payload[18] & 0xFE) >>> 3;\n pes.dts *= 4; // Left shift by 2\n\n pes.dts += (payload[18] & 0x06) >>> 1; // OR by the two LSBs\n }\n } // the data section starts immediately after the PES header.\n // pes_header_data_length specifies the number of header bytes\n // that follow the last byte of the field.\n\n pes.data = payload.subarray(9 + payload[8]);\n },\n /**\n * Pass completely parsed PES packets to the next stream in the pipeline\n **/\n flushStream = function (stream, type, forceFlush) {\n var packetData = new Uint8Array(stream.size),\n event = {\n type: type\n },\n i = 0,\n offset = 0,\n packetFlushable = false,\n fragment; // do nothing if there is not enough buffered data for a complete\n // PES header\n\n if (!stream.data.length || stream.size < 9) {\n return;\n }\n event.trackId = stream.data[0].pid; // reassemble the packet\n\n for (i = 0; i < stream.data.length; i++) {\n fragment = stream.data[i];\n packetData.set(fragment.data, offset);\n offset += fragment.data.byteLength;\n } // parse assembled packet's PES header\n\n parsePes(packetData, event); // non-video PES packets MUST have a non-zero PES_packet_length\n // check that there is enough stream data to fill the packet\n\n packetFlushable = type === 'video' || event.packetLength <= stream.size; // flush pending packets if the conditions are right\n\n if (forceFlush || packetFlushable) {\n stream.size = 0;\n stream.data.length = 0;\n } // only emit packets that are complete. 
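// Illustrative sketch (not part of mux.js): the 33-bit PES timestamp reconstruction used by
// parsePes above, taking just the five timestamp bytes. The top 31 bits are assembled with
// bitwise operators, then the value is multiplied by 4 and the two lowest bits are added back,
// which keeps the full 33-bit number exact without overflowing 32-bit bitwise math. The example
// bytes are assumed, not from a real stream.
var parsePesTimestampSketch = function (b) {
  var ts = (b[0] & 0x0e) << 27 | (b[1] & 0xff) << 20 | (b[2] & 0xfe) << 12 | (b[3] & 0xff) << 5 | (b[4] & 0xfe) >>> 3;
  ts *= 4; // left shift by 2
  ts += (b[4] & 0x06) >>> 1; // OR in the two least significant bits
  return ts;
};
// parsePesTimestampSketch([0x21, 0x00, 0x37, 0x77, 0x41]) === 900000, i.e. 10 seconds at 90 kHz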
this is to avoid assembling\n // incomplete PES packets due to poor segmentation\n\n if (packetFlushable) {\n self.trigger('data', event);\n }\n };\n ElementaryStream.prototype.init.call(this);\n /**\n * Identifies M2TS packet types and parses PES packets using metadata\n * parsed from the PMT\n **/\n\n this.push = function (data) {\n ({\n pat: function () {// we have to wait for the PMT to arrive as well before we\n // have any meaningful metadata\n },\n pes: function () {\n var stream, streamType;\n switch (data.streamType) {\n case StreamTypes$2.H264_STREAM_TYPE:\n stream = video;\n streamType = 'video';\n break;\n case StreamTypes$2.ADTS_STREAM_TYPE:\n stream = audio;\n streamType = 'audio';\n break;\n case StreamTypes$2.METADATA_STREAM_TYPE:\n stream = timedMetadata;\n streamType = 'timed-metadata';\n break;\n default:\n // ignore unknown stream types\n return;\n } // if a new packet is starting, we can flush the completed\n // packet\n\n if (data.payloadUnitStartIndicator) {\n flushStream(stream, streamType, true);\n } // buffer this fragment until we are sure we've received the\n // complete payload\n\n stream.data.push(data);\n stream.size += data.data.byteLength;\n },\n pmt: function () {\n var event = {\n type: 'metadata',\n tracks: []\n };\n programMapTable = data.programMapTable; // translate audio and video streams to tracks\n\n if (programMapTable.video !== null) {\n event.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.video,\n codec: 'avc',\n type: 'video'\n });\n }\n if (programMapTable.audio !== null) {\n event.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.audio,\n codec: 'adts',\n type: 'audio'\n });\n }\n segmentHadPmt = true;\n self.trigger('data', event);\n }\n })[data.type]();\n };\n this.reset = function () {\n video.size = 0;\n video.data.length = 0;\n audio.size = 0;\n audio.data.length = 0;\n this.trigger('reset');\n };\n /**\n * Flush any remaining input. Video PES packets may be of variable\n * length. Normally, the start of a new video packet can trigger the\n * finalization of the previous packet. That is not possible if no\n * more video is forthcoming, however. In that case, some other\n * mechanism (like the end of the file) has to be employed. When it is\n * clear that no additional data is forthcoming, calling this method\n * will flush the buffered packets.\n */\n\n this.flushStreams_ = function () {\n // !!THIS ORDER IS IMPORTANT!!\n // video first then audio\n flushStream(video, 'video');\n flushStream(audio, 'audio');\n flushStream(timedMetadata, 'timed-metadata');\n };\n this.flush = function () {\n // if on flush we haven't had a pmt emitted\n // and we have a pmt to emit. 
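// Illustrative sketch (not part of mux.js): the translation from a parsed program map table to
// the track descriptors carried by the 'metadata' events built in the pmt handler above and
// again in flush() here. The PIDs in the usage note are assumed example values.
var tracksFromProgramMapTableSketch = function (programMapTable) {
  var tracks = [];
  if (programMapTable.video !== null) {
    tracks.push({
      timelineStartInfo: { baseMediaDecodeTime: 0 },
      id: +programMapTable.video,
      codec: 'avc',
      type: 'video'
    });
  }
  if (programMapTable.audio !== null) {
    tracks.push({
      timelineStartInfo: { baseMediaDecodeTime: 0 },
      id: +programMapTable.audio,
      codec: 'adts',
      type: 'audio'
    });
  }
  return tracks;
};
// tracksFromProgramMapTableSketch({ video: 256, audio: 257, 'timed-metadata': {} }) yields a
// two-entry array with ids 256 and 257.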
emit the pmt\n // so that we trigger a trackinfo downstream.\n if (!segmentHadPmt && programMapTable) {\n var pmt = {\n type: 'metadata',\n tracks: []\n }; // translate audio and video streams to tracks\n\n if (programMapTable.video !== null) {\n pmt.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.video,\n codec: 'avc',\n type: 'video'\n });\n }\n if (programMapTable.audio !== null) {\n pmt.tracks.push({\n timelineStartInfo: {\n baseMediaDecodeTime: 0\n },\n id: +programMapTable.audio,\n codec: 'adts',\n type: 'audio'\n });\n }\n self.trigger('data', pmt);\n }\n segmentHadPmt = false;\n this.flushStreams_();\n this.trigger('done');\n };\n };\n ElementaryStream.prototype = new Stream$4();\n var m2ts$1 = {\n PAT_PID: 0x0000,\n MP2T_PACKET_LENGTH: MP2T_PACKET_LENGTH$1,\n TransportPacketStream: TransportPacketStream,\n TransportParseStream: TransportParseStream,\n ElementaryStream: ElementaryStream,\n TimestampRolloverStream: TimestampRolloverStream,\n CaptionStream: CaptionStream$1.CaptionStream,\n Cea608Stream: CaptionStream$1.Cea608Stream,\n Cea708Stream: CaptionStream$1.Cea708Stream,\n MetadataStream: metadataStream\n };\n for (var type in StreamTypes$2) {\n if (StreamTypes$2.hasOwnProperty(type)) {\n m2ts$1[type] = StreamTypes$2[type];\n }\n }\n var m2ts_1 = m2ts$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var Stream$3 = stream;\n var ONE_SECOND_IN_TS$2 = clock$2.ONE_SECOND_IN_TS;\n var AdtsStream$1;\n var ADTS_SAMPLING_FREQUENCIES$1 = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n /*\n * Accepts a ElementaryStream and emits data events with parsed\n * AAC Audio Frames of the individual packets. 
Input audio in ADTS\n * format is unpacked and re-emitted as AAC frames.\n *\n * @see http://wiki.multimedia.cx/index.php?title=ADTS\n * @see http://wiki.multimedia.cx/?title=Understanding_AAC\n */\n\n AdtsStream$1 = function (handlePartialSegments) {\n var buffer,\n frameNum = 0;\n AdtsStream$1.prototype.init.call(this);\n this.skipWarn_ = function (start, end) {\n this.trigger('log', {\n level: 'warn',\n message: `adts skiping bytes ${start} to ${end} in frame ${frameNum} outside syncword`\n });\n };\n this.push = function (packet) {\n var i = 0,\n frameLength,\n protectionSkipBytes,\n oldBuffer,\n sampleCount,\n adtsFrameDuration;\n if (!handlePartialSegments) {\n frameNum = 0;\n }\n if (packet.type !== 'audio') {\n // ignore non-audio data\n return;\n } // Prepend any data in the buffer to the input data so that we can parse\n // aac frames the cross a PES packet boundary\n\n if (buffer && buffer.length) {\n oldBuffer = buffer;\n buffer = new Uint8Array(oldBuffer.byteLength + packet.data.byteLength);\n buffer.set(oldBuffer);\n buffer.set(packet.data, oldBuffer.byteLength);\n } else {\n buffer = packet.data;\n } // unpack any ADTS frames which have been fully received\n // for details on the ADTS header, see http://wiki.multimedia.cx/index.php?title=ADTS\n\n var skip; // We use i + 7 here because we want to be able to parse the entire header.\n // If we don't have enough bytes to do that, then we definitely won't have a full frame.\n\n while (i + 7 < buffer.length) {\n // Look for the start of an ADTS header..\n if (buffer[i] !== 0xFF || (buffer[i + 1] & 0xF6) !== 0xF0) {\n if (typeof skip !== 'number') {\n skip = i;\n } // If a valid header was not found, jump one forward and attempt to\n // find a valid ADTS header starting at the next byte\n\n i++;\n continue;\n }\n if (typeof skip === 'number') {\n this.skipWarn_(skip, i);\n skip = null;\n } // The protection skip bit tells us if we have 2 bytes of CRC data at the\n // end of the ADTS header\n\n protectionSkipBytes = (~buffer[i + 1] & 0x01) * 2; // Frame length is a 13 bit integer starting 16 bits from the\n // end of the sync sequence\n // NOTE: frame length includes the size of the header\n\n frameLength = (buffer[i + 3] & 0x03) << 11 | buffer[i + 4] << 3 | (buffer[i + 5] & 0xe0) >> 5;\n sampleCount = ((buffer[i + 6] & 0x03) + 1) * 1024;\n adtsFrameDuration = sampleCount * ONE_SECOND_IN_TS$2 / ADTS_SAMPLING_FREQUENCIES$1[(buffer[i + 2] & 0x3c) >>> 2]; // If we don't have enough data to actually finish this ADTS frame,\n // then we have to wait for more data\n\n if (buffer.byteLength - i < frameLength) {\n break;\n } // Otherwise, deliver the complete AAC frame\n\n this.trigger('data', {\n pts: packet.pts + frameNum * adtsFrameDuration,\n dts: packet.dts + frameNum * adtsFrameDuration,\n sampleCount: sampleCount,\n audioobjecttype: (buffer[i + 2] >>> 6 & 0x03) + 1,\n channelcount: (buffer[i + 2] & 1) << 2 | (buffer[i + 3] & 0xc0) >>> 6,\n samplerate: ADTS_SAMPLING_FREQUENCIES$1[(buffer[i + 2] & 0x3c) >>> 2],\n samplingfrequencyindex: (buffer[i + 2] & 0x3c) >>> 2,\n // assume ISO/IEC 14496-12 AudioSampleEntry default of 16\n samplesize: 16,\n // data is the frame without it's header\n data: buffer.subarray(i + 7 + protectionSkipBytes, i + frameLength)\n });\n frameNum++;\n i += frameLength;\n }\n if (typeof skip === 'number') {\n this.skipWarn_(skip, i);\n skip = null;\n } // remove processed bytes from the buffer.\n\n buffer = buffer.subarray(i);\n };\n this.flush = function () {\n frameNum = 0;\n this.trigger('done');\n };\n 
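// Illustrative sketch (not part of mux.js): the ADTS header fields decoded by the loop above,
// pulled out as a standalone helper. The header is 7 bytes (9 when a CRC is present), the
// 13-bit frame length includes the header itself, and each raw data block carries 1024 samples.
// The example bytes are assumed, not taken from a real stream.
var parseAdtsHeaderSketch = function (b, i) {
  var frequencies = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];
  return {
    crcPresent: (b[i + 1] & 0x01) === 0, // protection_absent === 0 means 2 CRC bytes follow the header
    audioobjecttype: (b[i + 2] >>> 6 & 0x03) + 1,
    samplerate: frequencies[(b[i + 2] & 0x3c) >>> 2],
    channelcount: (b[i + 2] & 0x01) << 2 | (b[i + 3] & 0xc0) >>> 6,
    frameLength: (b[i + 3] & 0x03) << 11 | b[i + 4] << 3 | (b[i + 5] & 0xe0) >> 5,
    sampleCount: ((b[i + 6] & 0x03) + 1) * 1024
  };
};
// parseAdtsHeaderSketch([0xff, 0xf1, 0x50, 0x80, 0x2e, 0x60, 0x00], 0) gives
// { crcPresent: false, audioobjecttype: 2, samplerate: 44100, channelcount: 2,
//   frameLength: 371, sampleCount: 1024 }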
this.reset = function () {\n buffer = void 0;\n this.trigger('reset');\n };\n this.endTimeline = function () {\n buffer = void 0;\n this.trigger('endedtimeline');\n };\n };\n AdtsStream$1.prototype = new Stream$3();\n var adts = AdtsStream$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var ExpGolomb$1;\n /**\n * Parser for exponential Golomb codes, a variable-bitwidth number encoding\n * scheme used by h264.\n */\n\n ExpGolomb$1 = function (workingData) {\n var\n // the number of bytes left to examine in workingData\n workingBytesAvailable = workingData.byteLength,\n // the current word being examined\n workingWord = 0,\n // :uint\n // the number of bits left to examine in the current word\n workingBitsAvailable = 0; // :uint;\n // ():uint\n\n this.length = function () {\n return 8 * workingBytesAvailable;\n }; // ():uint\n\n this.bitsAvailable = function () {\n return 8 * workingBytesAvailable + workingBitsAvailable;\n }; // ():void\n\n this.loadWord = function () {\n var position = workingData.byteLength - workingBytesAvailable,\n workingBytes = new Uint8Array(4),\n availableBytes = Math.min(4, workingBytesAvailable);\n if (availableBytes === 0) {\n throw new Error('no bytes available');\n }\n workingBytes.set(workingData.subarray(position, position + availableBytes));\n workingWord = new DataView(workingBytes.buffer).getUint32(0); // track the amount of workingData that has been processed\n\n workingBitsAvailable = availableBytes * 8;\n workingBytesAvailable -= availableBytes;\n }; // (count:int):void\n\n this.skipBits = function (count) {\n var skipBytes; // :int\n\n if (workingBitsAvailable > count) {\n workingWord <<= count;\n workingBitsAvailable -= count;\n } else {\n count -= workingBitsAvailable;\n skipBytes = Math.floor(count / 8);\n count -= skipBytes * 8;\n workingBytesAvailable -= skipBytes;\n this.loadWord();\n workingWord <<= count;\n workingBitsAvailable -= count;\n }\n }; // (size:int):uint\n\n this.readBits = function (size) {\n var bits = Math.min(workingBitsAvailable, size),\n // :uint\n valu = workingWord >>> 32 - bits; // :uint\n // if size > 31, handle error\n\n workingBitsAvailable -= bits;\n if (workingBitsAvailable > 0) {\n workingWord <<= bits;\n } else if (workingBytesAvailable > 0) {\n this.loadWord();\n }\n bits = size - bits;\n if (bits > 0) {\n return valu << bits | this.readBits(bits);\n }\n return valu;\n }; // ():uint\n\n this.skipLeadingZeros = function () {\n var leadingZeroCount; // :uint\n\n for (leadingZeroCount = 0; leadingZeroCount < workingBitsAvailable; ++leadingZeroCount) {\n if ((workingWord & 0x80000000 >>> leadingZeroCount) !== 0) {\n // the first bit of working word is 1\n workingWord <<= leadingZeroCount;\n workingBitsAvailable -= leadingZeroCount;\n return leadingZeroCount;\n }\n } // we exhausted workingWord and still have not found a 1\n\n this.loadWord();\n return leadingZeroCount + this.skipLeadingZeros();\n }; // ():void\n\n this.skipUnsignedExpGolomb = function () {\n this.skipBits(1 + this.skipLeadingZeros());\n }; // ():void\n\n this.skipExpGolomb = function () {\n this.skipBits(1 + this.skipLeadingZeros());\n }; // ():uint\n\n this.readUnsignedExpGolomb = function () {\n var clz = this.skipLeadingZeros(); // :uint\n\n return this.readBits(clz + 1) - 1;\n }; // ():int\n\n this.readExpGolomb = function () {\n var valu = this.readUnsignedExpGolomb(); // :int\n\n if (0x01 & valu) {\n // the number is odd if the low order bit is set\n return 1 
+ valu >>> 1; // add 1 to make it even, and divide by 2\n }\n\n return -1 * (valu >>> 1); // divide by two then make it negative\n }; // Some convenience functions\n // :Boolean\n\n this.readBoolean = function () {\n return this.readBits(1) === 1;\n }; // ():int\n\n this.readUnsignedByte = function () {\n return this.readBits(8);\n };\n this.loadWord();\n };\n var expGolomb = ExpGolomb$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var Stream$2 = stream;\n var ExpGolomb = expGolomb;\n var H264Stream$1, NalByteStream;\n var PROFILES_WITH_OPTIONAL_SPS_DATA;\n /**\n * Accepts a NAL unit byte stream and unpacks the embedded NAL units.\n */\n\n NalByteStream = function () {\n var syncPoint = 0,\n i,\n buffer;\n NalByteStream.prototype.init.call(this);\n /*\n * Scans a byte stream and triggers a data event with the NAL units found.\n * @param {Object} data Event received from H264Stream\n * @param {Uint8Array} data.data The h264 byte stream to be scanned\n *\n * @see H264Stream.push\n */\n\n this.push = function (data) {\n var swapBuffer;\n if (!buffer) {\n buffer = data.data;\n } else {\n swapBuffer = new Uint8Array(buffer.byteLength + data.data.byteLength);\n swapBuffer.set(buffer);\n swapBuffer.set(data.data, buffer.byteLength);\n buffer = swapBuffer;\n }\n var len = buffer.byteLength; // Rec. ITU-T H.264, Annex B\n // scan for NAL unit boundaries\n // a match looks like this:\n // 0 0 1 .. NAL .. 0 0 1\n // ^ sync point ^ i\n // or this:\n // 0 0 1 .. NAL .. 0 0 0\n // ^ sync point ^ i\n // advance the sync point to a NAL start, if necessary\n\n for (; syncPoint < len - 3; syncPoint++) {\n if (buffer[syncPoint + 2] === 1) {\n // the sync point is properly aligned\n i = syncPoint + 5;\n break;\n }\n }\n while (i < len) {\n // look at the current byte to determine if we've hit the end of\n // a NAL unit boundary\n switch (buffer[i]) {\n case 0:\n // skip past non-sync sequences\n if (buffer[i - 1] !== 0) {\n i += 2;\n break;\n } else if (buffer[i - 2] !== 0) {\n i++;\n break;\n } // deliver the NAL unit if it isn't empty\n\n if (syncPoint + 3 !== i - 2) {\n this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));\n } // drop trailing zeroes\n\n do {\n i++;\n } while (buffer[i] !== 1 && i < len);\n syncPoint = i - 2;\n i += 3;\n break;\n case 1:\n // skip past non-sync sequences\n if (buffer[i - 1] !== 0 || buffer[i - 2] !== 0) {\n i += 3;\n break;\n } // deliver the NAL unit\n\n this.trigger('data', buffer.subarray(syncPoint + 3, i - 2));\n syncPoint = i - 2;\n i += 3;\n break;\n default:\n // the current byte isn't a one or zero, so it cannot be part\n // of a sync sequence\n i += 3;\n break;\n }\n } // filter out the NAL units that were delivered\n\n buffer = buffer.subarray(syncPoint);\n i -= syncPoint;\n syncPoint = 0;\n };\n this.reset = function () {\n buffer = null;\n syncPoint = 0;\n this.trigger('reset');\n };\n this.flush = function () {\n // deliver the last buffered NAL unit\n if (buffer && buffer.byteLength > 3) {\n this.trigger('data', buffer.subarray(syncPoint + 3));\n } // reset the stream state\n\n buffer = null;\n syncPoint = 0;\n this.trigger('done');\n };\n this.endTimeline = function () {\n this.flush();\n this.trigger('endedtimeline');\n };\n };\n NalByteStream.prototype = new Stream$2(); // values of profile_idc that indicate additional fields are included in the SPS\n // see Recommendation ITU-T H.264 (4/2013),\n // 7.3.2.1.1 Sequence parameter set data syntax\n\n 
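// Illustrative sketch (not part of mux.js): how the ExpGolomb reader above decodes values.
// An unsigned Exp-Golomb code is "count the leading zero bits, read that many bits plus the
// terminating one, subtract one"; the signed mapping sends 1, 2, 3, 4, ... to +1, -1, +2, -2.
// The tiny bit reader below works on a plain byte array and only exists to make the
// arithmetic concrete.
var expGolombSketch = function (bytes) {
  var bitPos = 0;
  var readBit = function () {
    var bit = bytes[bitPos >> 3] >> (7 - (bitPos & 7)) & 1;
    bitPos++;
    return bit;
  };
  var readUnsigned = function () {
    var zeros = 0;
    while (readBit() === 0) {
      zeros++;
    }
    var value = 1;
    for (var i = 0; i < zeros; i++) {
      value = value << 1 | readBit();
    }
    return value - 1;
  };
  var readSigned = function () {
    var value = readUnsigned();
    return value & 0x01 ? value + 1 >>> 1 : -1 * (value >>> 1);
  };
  return { readUnsigned: readUnsigned, readSigned: readSigned };
};
// With bytes [0x25, 0x00] (bit string 00100 1 010 ...) successive readUnsigned() calls
// return 3, 0 and 1; the same codes read with readSigned() would give 2, 0 and 1.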
PROFILES_WITH_OPTIONAL_SPS_DATA = {\n 100: true,\n 110: true,\n 122: true,\n 244: true,\n 44: true,\n 83: true,\n 86: true,\n 118: true,\n 128: true,\n // TODO: the three profiles below don't\n // appear to have sps data in the specificiation anymore?\n 138: true,\n 139: true,\n 134: true\n };\n /**\n * Accepts input from a ElementaryStream and produces H.264 NAL unit data\n * events.\n */\n\n H264Stream$1 = function () {\n var nalByteStream = new NalByteStream(),\n self,\n trackId,\n currentPts,\n currentDts,\n discardEmulationPreventionBytes,\n readSequenceParameterSet,\n skipScalingList;\n H264Stream$1.prototype.init.call(this);\n self = this;\n /*\n * Pushes a packet from a stream onto the NalByteStream\n *\n * @param {Object} packet - A packet received from a stream\n * @param {Uint8Array} packet.data - The raw bytes of the packet\n * @param {Number} packet.dts - Decode timestamp of the packet\n * @param {Number} packet.pts - Presentation timestamp of the packet\n * @param {Number} packet.trackId - The id of the h264 track this packet came from\n * @param {('video'|'audio')} packet.type - The type of packet\n *\n */\n\n this.push = function (packet) {\n if (packet.type !== 'video') {\n return;\n }\n trackId = packet.trackId;\n currentPts = packet.pts;\n currentDts = packet.dts;\n nalByteStream.push(packet);\n };\n /*\n * Identify NAL unit types and pass on the NALU, trackId, presentation and decode timestamps\n * for the NALUs to the next stream component.\n * Also, preprocess caption and sequence parameter NALUs.\n *\n * @param {Uint8Array} data - A NAL unit identified by `NalByteStream.push`\n * @see NalByteStream.push\n */\n\n nalByteStream.on('data', function (data) {\n var event = {\n trackId: trackId,\n pts: currentPts,\n dts: currentDts,\n data: data,\n nalUnitTypeCode: data[0] & 0x1f\n };\n switch (event.nalUnitTypeCode) {\n case 0x05:\n event.nalUnitType = 'slice_layer_without_partitioning_rbsp_idr';\n break;\n case 0x06:\n event.nalUnitType = 'sei_rbsp';\n event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));\n break;\n case 0x07:\n event.nalUnitType = 'seq_parameter_set_rbsp';\n event.escapedRBSP = discardEmulationPreventionBytes(data.subarray(1));\n event.config = readSequenceParameterSet(event.escapedRBSP);\n break;\n case 0x08:\n event.nalUnitType = 'pic_parameter_set_rbsp';\n break;\n case 0x09:\n event.nalUnitType = 'access_unit_delimiter_rbsp';\n break;\n } // This triggers data on the H264Stream\n\n self.trigger('data', event);\n });\n nalByteStream.on('done', function () {\n self.trigger('done');\n });\n nalByteStream.on('partialdone', function () {\n self.trigger('partialdone');\n });\n nalByteStream.on('reset', function () {\n self.trigger('reset');\n });\n nalByteStream.on('endedtimeline', function () {\n self.trigger('endedtimeline');\n });\n this.flush = function () {\n nalByteStream.flush();\n };\n this.partialFlush = function () {\n nalByteStream.partialFlush();\n };\n this.reset = function () {\n nalByteStream.reset();\n };\n this.endTimeline = function () {\n nalByteStream.endTimeline();\n };\n /**\n * Advance the ExpGolomb decoder past a scaling list. 
The scaling\n * list is optionally transmitted as part of a sequence parameter\n * set and is not relevant to transmuxing.\n * @param count {number} the number of entries in this scaling list\n * @param expGolombDecoder {object} an ExpGolomb pointed to the\n * start of a scaling list\n * @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1\n */\n\n skipScalingList = function (count, expGolombDecoder) {\n var lastScale = 8,\n nextScale = 8,\n j,\n deltaScale;\n for (j = 0; j < count; j++) {\n if (nextScale !== 0) {\n deltaScale = expGolombDecoder.readExpGolomb();\n nextScale = (lastScale + deltaScale + 256) % 256;\n }\n lastScale = nextScale === 0 ? lastScale : nextScale;\n }\n };\n /**\n * Expunge any \"Emulation Prevention\" bytes from a \"Raw Byte\n * Sequence Payload\"\n * @param data {Uint8Array} the bytes of a RBSP from a NAL\n * unit\n * @return {Uint8Array} the RBSP without any Emulation\n * Prevention Bytes\n */\n\n discardEmulationPreventionBytes = function (data) {\n var length = data.byteLength,\n emulationPreventionBytesPositions = [],\n i = 1,\n newLength,\n newData; // Find all `Emulation Prevention Bytes`\n\n while (i < length - 2) {\n if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) {\n emulationPreventionBytesPositions.push(i + 2);\n i += 2;\n } else {\n i++;\n }\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n if (emulationPreventionBytesPositions.length === 0) {\n return data;\n } // Create a new array to hold the NAL unit data\n\n newLength = length - emulationPreventionBytesPositions.length;\n newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === emulationPreventionBytesPositions[0]) {\n // Skip this byte\n sourceIndex++; // Remove this position index\n\n emulationPreventionBytesPositions.shift();\n }\n newData[i] = data[sourceIndex];\n }\n return newData;\n };\n /**\n * Read a sequence parameter set and return some interesting video\n * properties. 
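// Illustrative sketch (not part of mux.js): the effect of discardEmulationPreventionBytes above.
// Encoders insert a 0x03 byte whenever two zero bytes would otherwise be followed by 0x00, 0x01,
// 0x02 or 0x03, so RBSP payloads can never imitate a start code; decoding simply drops every
// 0x03 that directly follows a 0x00 0x00 pair.
var stripEmulationPreventionSketch = function (data) {
  var out = [];
  for (var i = 0; i < data.length; i++) {
    if (i >= 2 && data[i] === 0x03 && data[i - 1] === 0x00 && data[i - 2] === 0x00) {
      continue; // this byte exists only to break up the zero run
    }
    out.push(data[i]);
  }
  return new Uint8Array(out);
};
// stripEmulationPreventionSketch([0x67, 0x00, 0x00, 0x03, 0x00, 0x01]) returns the bytes
// [0x67, 0x00, 0x00, 0x00, 0x01]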
A sequence parameter set is the H264 metadata that\n * describes the properties of upcoming video frames.\n * @param data {Uint8Array} the bytes of a sequence parameter set\n * @return {object} an object with configuration parsed from the\n * sequence parameter set, including the dimensions of the\n * associated video frames.\n */\n\n readSequenceParameterSet = function (data) {\n var frameCropLeftOffset = 0,\n frameCropRightOffset = 0,\n frameCropTopOffset = 0,\n frameCropBottomOffset = 0,\n expGolombDecoder,\n profileIdc,\n levelIdc,\n profileCompatibility,\n chromaFormatIdc,\n picOrderCntType,\n numRefFramesInPicOrderCntCycle,\n picWidthInMbsMinus1,\n picHeightInMapUnitsMinus1,\n frameMbsOnlyFlag,\n scalingListCount,\n sarRatio = [1, 1],\n aspectRatioIdc,\n i;\n expGolombDecoder = new ExpGolomb(data);\n profileIdc = expGolombDecoder.readUnsignedByte(); // profile_idc\n\n profileCompatibility = expGolombDecoder.readUnsignedByte(); // constraint_set[0-5]_flag\n\n levelIdc = expGolombDecoder.readUnsignedByte(); // level_idc u(8)\n\n expGolombDecoder.skipUnsignedExpGolomb(); // seq_parameter_set_id\n // some profiles have more optional data we don't need\n\n if (PROFILES_WITH_OPTIONAL_SPS_DATA[profileIdc]) {\n chromaFormatIdc = expGolombDecoder.readUnsignedExpGolomb();\n if (chromaFormatIdc === 3) {\n expGolombDecoder.skipBits(1); // separate_colour_plane_flag\n }\n\n expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_luma_minus8\n\n expGolombDecoder.skipUnsignedExpGolomb(); // bit_depth_chroma_minus8\n\n expGolombDecoder.skipBits(1); // qpprime_y_zero_transform_bypass_flag\n\n if (expGolombDecoder.readBoolean()) {\n // seq_scaling_matrix_present_flag\n scalingListCount = chromaFormatIdc !== 3 ? 8 : 12;\n for (i = 0; i < scalingListCount; i++) {\n if (expGolombDecoder.readBoolean()) {\n // seq_scaling_list_present_flag[ i ]\n if (i < 6) {\n skipScalingList(16, expGolombDecoder);\n } else {\n skipScalingList(64, expGolombDecoder);\n }\n }\n }\n }\n }\n expGolombDecoder.skipUnsignedExpGolomb(); // log2_max_frame_num_minus4\n\n picOrderCntType = expGolombDecoder.readUnsignedExpGolomb();\n if (picOrderCntType === 0) {\n expGolombDecoder.readUnsignedExpGolomb(); // log2_max_pic_order_cnt_lsb_minus4\n } else if (picOrderCntType === 1) {\n expGolombDecoder.skipBits(1); // delta_pic_order_always_zero_flag\n\n expGolombDecoder.skipExpGolomb(); // offset_for_non_ref_pic\n\n expGolombDecoder.skipExpGolomb(); // offset_for_top_to_bottom_field\n\n numRefFramesInPicOrderCntCycle = expGolombDecoder.readUnsignedExpGolomb();\n for (i = 0; i < numRefFramesInPicOrderCntCycle; i++) {\n expGolombDecoder.skipExpGolomb(); // offset_for_ref_frame[ i ]\n }\n }\n\n expGolombDecoder.skipUnsignedExpGolomb(); // max_num_ref_frames\n\n expGolombDecoder.skipBits(1); // gaps_in_frame_num_value_allowed_flag\n\n picWidthInMbsMinus1 = expGolombDecoder.readUnsignedExpGolomb();\n picHeightInMapUnitsMinus1 = expGolombDecoder.readUnsignedExpGolomb();\n frameMbsOnlyFlag = expGolombDecoder.readBits(1);\n if (frameMbsOnlyFlag === 0) {\n expGolombDecoder.skipBits(1); // mb_adaptive_frame_field_flag\n }\n\n expGolombDecoder.skipBits(1); // direct_8x8_inference_flag\n\n if (expGolombDecoder.readBoolean()) {\n // frame_cropping_flag\n frameCropLeftOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropRightOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropTopOffset = expGolombDecoder.readUnsignedExpGolomb();\n frameCropBottomOffset = expGolombDecoder.readUnsignedExpGolomb();\n }\n if 
(expGolombDecoder.readBoolean()) {\n // vui_parameters_present_flag\n if (expGolombDecoder.readBoolean()) {\n // aspect_ratio_info_present_flag\n aspectRatioIdc = expGolombDecoder.readUnsignedByte();\n switch (aspectRatioIdc) {\n case 1:\n sarRatio = [1, 1];\n break;\n case 2:\n sarRatio = [12, 11];\n break;\n case 3:\n sarRatio = [10, 11];\n break;\n case 4:\n sarRatio = [16, 11];\n break;\n case 5:\n sarRatio = [40, 33];\n break;\n case 6:\n sarRatio = [24, 11];\n break;\n case 7:\n sarRatio = [20, 11];\n break;\n case 8:\n sarRatio = [32, 11];\n break;\n case 9:\n sarRatio = [80, 33];\n break;\n case 10:\n sarRatio = [18, 11];\n break;\n case 11:\n sarRatio = [15, 11];\n break;\n case 12:\n sarRatio = [64, 33];\n break;\n case 13:\n sarRatio = [160, 99];\n break;\n case 14:\n sarRatio = [4, 3];\n break;\n case 15:\n sarRatio = [3, 2];\n break;\n case 16:\n sarRatio = [2, 1];\n break;\n case 255:\n {\n sarRatio = [expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte(), expGolombDecoder.readUnsignedByte() << 8 | expGolombDecoder.readUnsignedByte()];\n break;\n }\n }\n if (sarRatio) {\n sarRatio[0] / sarRatio[1];\n }\n }\n }\n return {\n profileIdc: profileIdc,\n levelIdc: levelIdc,\n profileCompatibility: profileCompatibility,\n width: (picWidthInMbsMinus1 + 1) * 16 - frameCropLeftOffset * 2 - frameCropRightOffset * 2,\n height: (2 - frameMbsOnlyFlag) * (picHeightInMapUnitsMinus1 + 1) * 16 - frameCropTopOffset * 2 - frameCropBottomOffset * 2,\n // sar is sample aspect ratio\n sarRatio: sarRatio\n };\n };\n };\n H264Stream$1.prototype = new Stream$2();\n var h264 = {\n H264Stream: H264Stream$1,\n NalByteStream: NalByteStream\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about Aac data.\n */\n\n var ADTS_SAMPLING_FREQUENCIES = [96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350];\n var parseId3TagSize = function (header, byteIndex) {\n var returnSize = header[byteIndex + 6] << 21 | header[byteIndex + 7] << 14 | header[byteIndex + 8] << 7 | header[byteIndex + 9],\n flags = header[byteIndex + 5],\n footerPresent = (flags & 16) >> 4; // if we get a negative returnSize clamp it to 0\n\n returnSize = returnSize >= 0 ? 
returnSize : 0;\n if (footerPresent) {\n return returnSize + 20;\n }\n return returnSize + 10;\n };\n var getId3Offset = function (data, offset) {\n if (data.length - offset < 10 || data[offset] !== 'I'.charCodeAt(0) || data[offset + 1] !== 'D'.charCodeAt(0) || data[offset + 2] !== '3'.charCodeAt(0)) {\n return offset;\n }\n offset += parseId3TagSize(data, offset);\n return getId3Offset(data, offset);\n }; // TODO: use vhs-utils\n\n var isLikelyAacData$1 = function (data) {\n var offset = getId3Offset(data, 0);\n return data.length >= offset + 2 && (data[offset] & 0xFF) === 0xFF && (data[offset + 1] & 0xF0) === 0xF0 &&\n // verify that the 2 layer bits are 0, aka this\n // is not mp3 data but aac data.\n (data[offset + 1] & 0x16) === 0x10;\n };\n var parseSyncSafeInteger = function (data) {\n return data[0] << 21 | data[1] << 14 | data[2] << 7 | data[3];\n }; // return a percent-encoded representation of the specified byte range\n // @see http://en.wikipedia.org/wiki/Percent-encoding\n\n var percentEncode = function (bytes, start, end) {\n var i,\n result = '';\n for (i = start; i < end; i++) {\n result += '%' + ('00' + bytes[i].toString(16)).slice(-2);\n }\n return result;\n }; // return the string representation of the specified byte range,\n // interpreted as ISO-8859-1.\n\n var parseIso88591 = function (bytes, start, end) {\n return unescape(percentEncode(bytes, start, end)); // jshint ignore:line\n };\n\n var parseAdtsSize = function (header, byteIndex) {\n var lowThree = (header[byteIndex + 5] & 0xE0) >> 5,\n middle = header[byteIndex + 4] << 3,\n highTwo = header[byteIndex + 3] & 0x3 << 11;\n return highTwo | middle | lowThree;\n };\n var parseType$4 = function (header, byteIndex) {\n if (header[byteIndex] === 'I'.charCodeAt(0) && header[byteIndex + 1] === 'D'.charCodeAt(0) && header[byteIndex + 2] === '3'.charCodeAt(0)) {\n return 'timed-metadata';\n } else if (header[byteIndex] & 0xff === 0xff && (header[byteIndex + 1] & 0xf0) === 0xf0) {\n return 'audio';\n }\n return null;\n };\n var parseSampleRate = function (packet) {\n var i = 0;\n while (i + 5 < packet.length) {\n if (packet[i] !== 0xFF || (packet[i + 1] & 0xF6) !== 0xF0) {\n // If a valid header was not found, jump one forward and attempt to\n // find a valid ADTS header starting at the next byte\n i++;\n continue;\n }\n return ADTS_SAMPLING_FREQUENCIES[(packet[i + 2] & 0x3c) >>> 2];\n }\n return null;\n };\n var parseAacTimestamp = function (packet) {\n var frameStart, frameSize, frame, frameHeader; // find the start of the first frame and the end of the tag\n\n frameStart = 10;\n if (packet[5] & 0x40) {\n // advance the frame start past the extended header\n frameStart += 4; // header size field\n\n frameStart += parseSyncSafeInteger(packet.subarray(10, 14));\n } // parse one or more ID3 frames\n // http://id3.org/id3v2.3.0#ID3v2_frame_overview\n\n do {\n // determine the number of bytes in this frame\n frameSize = parseSyncSafeInteger(packet.subarray(frameStart + 4, frameStart + 8));\n if (frameSize < 1) {\n return null;\n }\n frameHeader = String.fromCharCode(packet[frameStart], packet[frameStart + 1], packet[frameStart + 2], packet[frameStart + 3]);\n if (frameHeader === 'PRIV') {\n frame = packet.subarray(frameStart + 10, frameStart + frameSize + 10);\n for (var i = 0; i < frame.byteLength; i++) {\n if (frame[i] === 0) {\n var owner = parseIso88591(frame, 0, i);\n if (owner === 'com.apple.streaming.transportStreamTimestamp') {\n var d = frame.subarray(i + 1);\n var size = (d[3] & 0x01) << 30 | d[4] << 22 | d[5] << 14 
| d[6] << 6 | d[7] >>> 2;\n size *= 4;\n size += d[7] & 0x03;\n return size;\n }\n break;\n }\n }\n }\n frameStart += 10; // advance past the frame header\n\n frameStart += frameSize; // advance past the frame body\n } while (frameStart < packet.byteLength);\n return null;\n };\n var utils = {\n isLikelyAacData: isLikelyAacData$1,\n parseId3TagSize: parseId3TagSize,\n parseAdtsSize: parseAdtsSize,\n parseType: parseType$4,\n parseSampleRate: parseSampleRate,\n parseAacTimestamp: parseAacTimestamp\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A stream-based aac to mp4 converter. This utility can be used to\n * deliver mp4s to a SourceBuffer on platforms that support native\n * Media Source Extensions.\n */\n\n var Stream$1 = stream;\n var aacUtils = utils; // Constants\n\n var AacStream$1;\n /**\n * Splits an incoming stream of binary data into ADTS and ID3 Frames.\n */\n\n AacStream$1 = function () {\n var everything = new Uint8Array(),\n timeStamp = 0;\n AacStream$1.prototype.init.call(this);\n this.setTimestamp = function (timestamp) {\n timeStamp = timestamp;\n };\n this.push = function (bytes) {\n var frameSize = 0,\n byteIndex = 0,\n bytesLeft,\n chunk,\n packet,\n tempLength; // If there are bytes remaining from the last segment, prepend them to the\n // bytes that were pushed in\n\n if (everything.length) {\n tempLength = everything.length;\n everything = new Uint8Array(bytes.byteLength + tempLength);\n everything.set(everything.subarray(0, tempLength));\n everything.set(bytes, tempLength);\n } else {\n everything = bytes;\n }\n while (everything.length - byteIndex >= 3) {\n if (everything[byteIndex] === 'I'.charCodeAt(0) && everything[byteIndex + 1] === 'D'.charCodeAt(0) && everything[byteIndex + 2] === '3'.charCodeAt(0)) {\n // Exit early because we don't have enough to parse\n // the ID3 tag header\n if (everything.length - byteIndex < 10) {\n break;\n } // check framesize\n\n frameSize = aacUtils.parseId3TagSize(everything, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n // Add to byteIndex to support multiple ID3 tags in sequence\n\n if (byteIndex + frameSize > everything.length) {\n break;\n }\n chunk = {\n type: 'timed-metadata',\n data: everything.subarray(byteIndex, byteIndex + frameSize)\n };\n this.trigger('data', chunk);\n byteIndex += frameSize;\n continue;\n } else if ((everything[byteIndex] & 0xff) === 0xff && (everything[byteIndex + 1] & 0xf0) === 0xf0) {\n // Exit early because we don't have enough to parse\n // the ADTS frame header\n if (everything.length - byteIndex < 7) {\n break;\n }\n frameSize = aacUtils.parseAdtsSize(everything, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (byteIndex + frameSize > everything.length) {\n break;\n }\n packet = {\n type: 'audio',\n data: everything.subarray(byteIndex, byteIndex + frameSize),\n pts: timeStamp,\n dts: timeStamp\n };\n this.trigger('data', packet);\n byteIndex += frameSize;\n continue;\n }\n byteIndex++;\n }\n bytesLeft = everything.length - byteIndex;\n if (bytesLeft > 0) {\n everything = everything.subarray(byteIndex);\n } else {\n everything = new Uint8Array();\n }\n };\n this.reset = function () {\n everything = new Uint8Array();\n this.trigger('reset');\n };\n this.endTimeline = function () {\n everything = new Uint8Array();\n this.trigger('endedtimeline');\n };\n };\n AacStream$1.prototype = new Stream$1();\n var aac = 
AacStream$1;\n var AUDIO_PROPERTIES$1 = ['audioobjecttype', 'channelcount', 'samplerate', 'samplingfrequencyindex', 'samplesize'];\n var audioProperties = AUDIO_PROPERTIES$1;\n var VIDEO_PROPERTIES$1 = ['width', 'height', 'profileIdc', 'levelIdc', 'profileCompatibility', 'sarRatio'];\n var videoProperties = VIDEO_PROPERTIES$1;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * A stream-based mp2t to mp4 converter. This utility can be used to\n * deliver mp4s to a SourceBuffer on platforms that support native\n * Media Source Extensions.\n */\n\n var Stream = stream;\n var mp4 = mp4Generator;\n var frameUtils = frameUtils$1;\n var audioFrameUtils = audioFrameUtils$1;\n var trackDecodeInfo = trackDecodeInfo$1;\n var m2ts = m2ts_1;\n var clock = clock$2;\n var AdtsStream = adts;\n var H264Stream = h264.H264Stream;\n var AacStream = aac;\n var isLikelyAacData = utils.isLikelyAacData;\n var ONE_SECOND_IN_TS$1 = clock$2.ONE_SECOND_IN_TS;\n var AUDIO_PROPERTIES = audioProperties;\n var VIDEO_PROPERTIES = videoProperties; // object types\n\n var VideoSegmentStream, AudioSegmentStream, Transmuxer, CoalesceStream;\n var retriggerForStream = function (key, event) {\n event.stream = key;\n this.trigger('log', event);\n };\n var addPipelineLogRetriggers = function (transmuxer, pipeline) {\n var keys = Object.keys(pipeline);\n for (var i = 0; i < keys.length; i++) {\n var key = keys[i]; // skip non-stream keys and headOfPipeline\n // which is just a duplicate\n\n if (key === 'headOfPipeline' || !pipeline[key].on) {\n continue;\n }\n pipeline[key].on('log', retriggerForStream.bind(transmuxer, key));\n }\n };\n /**\n * Compare two arrays (even typed) for same-ness\n */\n\n var arrayEquals = function (a, b) {\n var i;\n if (a.length !== b.length) {\n return false;\n } // compare the value of each element in the array\n\n for (i = 0; i < a.length; i++) {\n if (a[i] !== b[i]) {\n return false;\n }\n }\n return true;\n };\n var generateSegmentTimingInfo = function (baseMediaDecodeTime, startDts, startPts, endDts, endPts, prependedContentDuration) {\n var ptsOffsetFromDts = startPts - startDts,\n decodeDuration = endDts - startDts,\n presentationDuration = endPts - startPts; // The PTS and DTS values are based on the actual stream times from the segment,\n // however, the player time values will reflect a start from the baseMediaDecodeTime.\n // In order to provide relevant values for the player times, base timing info on the\n // baseMediaDecodeTime and the DTS and PTS durations of the segment.\n\n return {\n start: {\n dts: baseMediaDecodeTime,\n pts: baseMediaDecodeTime + ptsOffsetFromDts\n },\n end: {\n dts: baseMediaDecodeTime + decodeDuration,\n pts: baseMediaDecodeTime + presentationDuration\n },\n prependedContentDuration: prependedContentDuration,\n baseMediaDecodeTime: baseMediaDecodeTime\n };\n };\n /**\n * Constructs a single-track, ISO BMFF media segment from AAC data\n * events. 
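// Worked example (assumed numbers, not from a real stream) for generateSegmentTimingInfo above:
// generateSegmentTimingInfo(90000, 180000, 183000, 270000, 273000, 0) computes
//   ptsOffsetFromDts = 3000, decodeDuration = 90000, presentationDuration = 90000
// and therefore returns
//   { start: { dts: 90000, pts: 93000 },
//     end: { dts: 180000, pts: 180000 },
//     prependedContentDuration: 0,
//     baseMediaDecodeTime: 90000 }
// Note that only the start carries the PTS-to-DTS offset; the end times are the raw durations
// re-based onto baseMediaDecodeTime.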
The output of this stream can be fed to a SourceBuffer\n * configured with a suitable initialization segment.\n * @param track {object} track metadata configuration\n * @param options {object} transmuxer options object\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n AudioSegmentStream = function (track, options) {\n var adtsFrames = [],\n sequenceNumber,\n earliestAllowedDts = 0,\n audioAppendStartTs = 0,\n videoBaseMediaDecodeTime = Infinity;\n options = options || {};\n sequenceNumber = options.firstSequenceNumber || 0;\n AudioSegmentStream.prototype.init.call(this);\n this.push = function (data) {\n trackDecodeInfo.collectDtsInfo(track, data);\n if (track) {\n AUDIO_PROPERTIES.forEach(function (prop) {\n track[prop] = data[prop];\n });\n } // buffer audio data until end() is called\n\n adtsFrames.push(data);\n };\n this.setEarliestDts = function (earliestDts) {\n earliestAllowedDts = earliestDts;\n };\n this.setVideoBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n videoBaseMediaDecodeTime = baseMediaDecodeTime;\n };\n this.setAudioAppendStart = function (timestamp) {\n audioAppendStartTs = timestamp;\n };\n this.flush = function () {\n var frames, moof, mdat, boxes, frameDuration, segmentDuration, videoClockCyclesOfSilencePrefixed; // return early if no audio data has been observed\n\n if (adtsFrames.length === 0) {\n this.trigger('done', 'AudioSegmentStream');\n return;\n }\n frames = audioFrameUtils.trimAdtsFramesByEarliestDts(adtsFrames, track, earliestAllowedDts);\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps); // amount of audio filled but the value is in video clock rather than audio clock\n\n videoClockCyclesOfSilencePrefixed = audioFrameUtils.prefixWithSilence(track, frames, audioAppendStartTs, videoBaseMediaDecodeTime); // we have to build the index from byte locations to\n // samples (that is, adts frames) in the audio data\n\n track.samples = audioFrameUtils.generateSampleTable(frames); // concatenate the audio data to constuct the mdat\n\n mdat = mp4.mdat(audioFrameUtils.concatenateFrameData(frames));\n adtsFrames = [];\n moof = mp4.moof(sequenceNumber, [track]);\n boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // bump the sequence number for next time\n\n sequenceNumber++;\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n trackDecodeInfo.clearDtsInfo(track);\n frameDuration = Math.ceil(ONE_SECOND_IN_TS$1 * 1024 / track.samplerate); // TODO this check was added to maintain backwards compatibility (particularly with\n // tests) on adding the timingInfo event. However, it seems unlikely that there's a\n // valid use-case where an init segment/data should be triggered without associated\n // frames. Leaving for now, but should be looked into.\n\n if (frames.length) {\n segmentDuration = frames.length * frameDuration;\n this.trigger('segmentTimingInfo', generateSegmentTimingInfo(\n // The audio track's baseMediaDecodeTime is in audio clock cycles, but the\n // frame info is in video clock cycles. 
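// (Worked example with assumed values, not part of mux.js: ONE_SECOND_IN_TS$1 is the 90 kHz
// video clock rate, 90000, so for 44.1 kHz AAC each 1024-sample frame lasts
// Math.ceil(90000 * 1024 / 44100) = 2090 video-clock ticks, and a baseMediaDecodeTime of
// 44100 audio samples converts to 44100 / 44100 * 90000 = 90000 ticks, i.e. one second.)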
Convert to match expectation of\n // listeners (that all timestamps will be based on video clock cycles).\n clock.audioTsToVideoTs(track.baseMediaDecodeTime, track.samplerate),\n // frame times are already in video clock, as is segment duration\n frames[0].dts, frames[0].pts, frames[0].dts + segmentDuration, frames[0].pts + segmentDuration, videoClockCyclesOfSilencePrefixed || 0));\n this.trigger('timingInfo', {\n start: frames[0].pts,\n end: frames[0].pts + segmentDuration\n });\n }\n this.trigger('data', {\n track: track,\n boxes: boxes\n });\n this.trigger('done', 'AudioSegmentStream');\n };\n this.reset = function () {\n trackDecodeInfo.clearDtsInfo(track);\n adtsFrames = [];\n this.trigger('reset');\n };\n };\n AudioSegmentStream.prototype = new Stream();\n /**\n * Constructs a single-track, ISO BMFF media segment from H264 data\n * events. The output of this stream can be fed to a SourceBuffer\n * configured with a suitable initialization segment.\n * @param track {object} track metadata configuration\n * @param options {object} transmuxer options object\n * @param options.alignGopsAtEnd {boolean} If true, start from the end of the\n * gopsToAlignWith list when attempting to align gop pts\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at 0.\n */\n\n VideoSegmentStream = function (track, options) {\n var sequenceNumber,\n nalUnits = [],\n gopsToAlignWith = [],\n config,\n pps;\n options = options || {};\n sequenceNumber = options.firstSequenceNumber || 0;\n VideoSegmentStream.prototype.init.call(this);\n delete track.minPTS;\n this.gopCache_ = [];\n /**\n * Constructs a ISO BMFF segment given H264 nalUnits\n * @param {Object} nalUnit A data event representing a nalUnit\n * @param {String} nalUnit.nalUnitType\n * @param {Object} nalUnit.config Properties for a mp4 track\n * @param {Uint8Array} nalUnit.data The nalUnit bytes\n * @see lib/codecs/h264.js\n **/\n\n this.push = function (nalUnit) {\n trackDecodeInfo.collectDtsInfo(track, nalUnit); // record the track config\n\n if (nalUnit.nalUnitType === 'seq_parameter_set_rbsp' && !config) {\n config = nalUnit.config;\n track.sps = [nalUnit.data];\n VIDEO_PROPERTIES.forEach(function (prop) {\n track[prop] = config[prop];\n }, this);\n }\n if (nalUnit.nalUnitType === 'pic_parameter_set_rbsp' && !pps) {\n pps = nalUnit.data;\n track.pps = [nalUnit.data];\n } // buffer video until flush() is called\n\n nalUnits.push(nalUnit);\n };\n /**\n * Pass constructed ISO BMFF track and boxes on to the\n * next stream in the pipeline\n **/\n\n this.flush = function () {\n var frames,\n gopForFusion,\n gops,\n moof,\n mdat,\n boxes,\n prependedContentDuration = 0,\n firstGop,\n lastGop; // Throw away nalUnits at the start of the byte stream until\n // we find the first AUD\n\n while (nalUnits.length) {\n if (nalUnits[0].nalUnitType === 'access_unit_delimiter_rbsp') {\n break;\n }\n nalUnits.shift();\n } // Return early if no video data has been observed\n\n if (nalUnits.length === 0) {\n this.resetStream_();\n this.trigger('done', 'VideoSegmentStream');\n return;\n } // Organize the raw nal-units into arrays that represent\n // higher-level constructs such as frames and gops\n // (group-of-pictures)\n\n frames = frameUtils.groupNalsIntoFrames(nalUnits);\n gops = frameUtils.groupFramesIntoGops(frames); // If the first frame of this fragment is not a keyframe we have\n // a problem since MSE (on Chrome) requires a leading keyframe.\n //\n // We have two 
approaches to repairing this situation:\n // 1) GOP-FUSION:\n // This is where we keep track of the GOPS (group-of-pictures)\n // from previous fragments and attempt to find one that we can\n // prepend to the current fragment in order to create a valid\n // fragment.\n // 2) KEYFRAME-PULLING:\n // Here we search for the first keyframe in the fragment and\n // throw away all the frames between the start of the fragment\n // and that keyframe. We then extend the duration and pull the\n // PTS of the keyframe forward so that it covers the time range\n // of the frames that were disposed of.\n //\n // #1 is far preferable to #2, which can cause \"stuttering\" but\n // requires more things to be just right.\n\n if (!gops[0][0].keyFrame) {\n // Search for a gop for fusion from our gopCache\n gopForFusion = this.getGopForFusion_(nalUnits[0], track);\n if (gopForFusion) {\n // in order to provide more accurate timing information about the segment, save\n // the number of seconds prepended to the original segment due to GOP fusion\n prependedContentDuration = gopForFusion.duration;\n gops.unshift(gopForFusion); // Adjust Gops' metadata to account for the inclusion of the\n // new gop at the beginning\n\n gops.byteLength += gopForFusion.byteLength;\n gops.nalCount += gopForFusion.nalCount;\n gops.pts = gopForFusion.pts;\n gops.dts = gopForFusion.dts;\n gops.duration += gopForFusion.duration;\n } else {\n // If we didn't find a candidate gop fall back to keyframe-pulling\n gops = frameUtils.extendFirstKeyFrame(gops);\n }\n } // Trim gops to align with gopsToAlignWith\n\n if (gopsToAlignWith.length) {\n var alignedGops;\n if (options.alignGopsAtEnd) {\n alignedGops = this.alignGopsAtEnd_(gops);\n } else {\n alignedGops = this.alignGopsAtStart_(gops);\n }\n if (!alignedGops) {\n // save all the nals in the last GOP into the gop cache\n this.gopCache_.unshift({\n gop: gops.pop(),\n pps: track.pps,\n sps: track.sps\n }); // Keep a maximum of 6 GOPs in the cache\n\n this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits\n\n nalUnits = []; // return early; no gops can be aligned with desired gopsToAlignWith\n\n this.resetStream_();\n this.trigger('done', 'VideoSegmentStream');\n return;\n } // Some gops were trimmed. 
clear dts info so minSegmentDts and pts are correct\n // when recalculated before sending off to CoalesceStream\n\n trackDecodeInfo.clearDtsInfo(track);\n gops = alignedGops;\n }\n trackDecodeInfo.collectDtsInfo(track, gops); // First, we have to build the index from byte locations to\n // samples (that is, frames) in the video data\n\n track.samples = frameUtils.generateSampleTable(gops); // Concatenate the video data and construct the mdat\n\n mdat = mp4.mdat(frameUtils.concatenateNalData(gops));\n track.baseMediaDecodeTime = trackDecodeInfo.calculateTrackBaseMediaDecodeTime(track, options.keepOriginalTimestamps);\n this.trigger('processedGopsInfo', gops.map(function (gop) {\n return {\n pts: gop.pts,\n dts: gop.dts,\n byteLength: gop.byteLength\n };\n }));\n firstGop = gops[0];\n lastGop = gops[gops.length - 1];\n this.trigger('segmentTimingInfo', generateSegmentTimingInfo(track.baseMediaDecodeTime, firstGop.dts, firstGop.pts, lastGop.dts + lastGop.duration, lastGop.pts + lastGop.duration, prependedContentDuration));\n this.trigger('timingInfo', {\n start: gops[0].pts,\n end: gops[gops.length - 1].pts + gops[gops.length - 1].duration\n }); // save all the nals in the last GOP into the gop cache\n\n this.gopCache_.unshift({\n gop: gops.pop(),\n pps: track.pps,\n sps: track.sps\n }); // Keep a maximum of 6 GOPs in the cache\n\n this.gopCache_.length = Math.min(6, this.gopCache_.length); // Clear nalUnits\n\n nalUnits = [];\n this.trigger('baseMediaDecodeTime', track.baseMediaDecodeTime);\n this.trigger('timelineStartInfo', track.timelineStartInfo);\n moof = mp4.moof(sequenceNumber, [track]); // it would be great to allocate this array up front instead of\n // throwing away hundreds of media segment fragments\n\n boxes = new Uint8Array(moof.byteLength + mdat.byteLength); // Bump the sequence number for next time\n\n sequenceNumber++;\n boxes.set(moof);\n boxes.set(mdat, moof.byteLength);\n this.trigger('data', {\n track: track,\n boxes: boxes\n });\n this.resetStream_(); // Continue with the flush process now\n\n this.trigger('done', 'VideoSegmentStream');\n };\n this.reset = function () {\n this.resetStream_();\n nalUnits = [];\n this.gopCache_.length = 0;\n gopsToAlignWith.length = 0;\n this.trigger('reset');\n };\n this.resetStream_ = function () {\n trackDecodeInfo.clearDtsInfo(track); // reset config and pps because they may differ across segments\n // for instance, when we are rendition switching\n\n config = undefined;\n pps = undefined;\n }; // Search for a candidate Gop for gop-fusion from the gop cache and\n // return it or return null if no good candidate was found\n\n this.getGopForFusion_ = function (nalUnit) {\n var halfSecond = 45000,\n // Half-a-second in a 90khz clock\n allowableOverlap = 10000,\n // About 3 frames @ 30fps\n nearestDistance = Infinity,\n dtsDistance,\n nearestGopObj,\n currentGop,\n currentGopObj,\n i; // Search for the GOP nearest to the beginning of this nal unit\n\n for (i = 0; i < this.gopCache_.length; i++) {\n currentGopObj = this.gopCache_[i];\n currentGop = currentGopObj.gop; // Reject Gops with different SPS or PPS\n\n if (!(track.pps && arrayEquals(track.pps[0], currentGopObj.pps[0])) || !(track.sps && arrayEquals(track.sps[0], currentGopObj.sps[0]))) {\n continue;\n } // Reject Gops that would require a negative baseMediaDecodeTime\n\n if (currentGop.dts < track.timelineStartInfo.dts) {\n continue;\n } // The distance between the end of the gop and the start of the nalUnit\n\n dtsDistance = nalUnit.dts - currentGop.dts - currentGop.duration; // 
Only consider GOPS that start before the nal unit and end within\n // a half-second of the nal unit\n\n if (dtsDistance >= -allowableOverlap && dtsDistance <= halfSecond) {\n // Always use the closest GOP we found if there is more than\n // one candidate\n if (!nearestGopObj || nearestDistance > dtsDistance) {\n nearestGopObj = currentGopObj;\n nearestDistance = dtsDistance;\n }\n }\n }\n if (nearestGopObj) {\n return nearestGopObj.gop;\n }\n return null;\n }; // trim gop list to the first gop found that has a matching pts with a gop in the list\n // of gopsToAlignWith starting from the START of the list\n\n this.alignGopsAtStart_ = function (gops) {\n var alignIndex, gopIndex, align, gop, byteLength, nalCount, duration, alignedGops;\n byteLength = gops.byteLength;\n nalCount = gops.nalCount;\n duration = gops.duration;\n alignIndex = gopIndex = 0;\n while (alignIndex < gopsToAlignWith.length && gopIndex < gops.length) {\n align = gopsToAlignWith[alignIndex];\n gop = gops[gopIndex];\n if (align.pts === gop.pts) {\n break;\n }\n if (gop.pts > align.pts) {\n // this current gop starts after the current gop we want to align on, so increment\n // align index\n alignIndex++;\n continue;\n } // current gop starts before the current gop we want to align on. so increment gop\n // index\n\n gopIndex++;\n byteLength -= gop.byteLength;\n nalCount -= gop.nalCount;\n duration -= gop.duration;\n }\n if (gopIndex === 0) {\n // no gops to trim\n return gops;\n }\n if (gopIndex === gops.length) {\n // all gops trimmed, skip appending all gops\n return null;\n }\n alignedGops = gops.slice(gopIndex);\n alignedGops.byteLength = byteLength;\n alignedGops.duration = duration;\n alignedGops.nalCount = nalCount;\n alignedGops.pts = alignedGops[0].pts;\n alignedGops.dts = alignedGops[0].dts;\n return alignedGops;\n }; // trim gop list to the first gop found that has a matching pts with a gop in the list\n // of gopsToAlignWith starting from the END of the list\n\n this.alignGopsAtEnd_ = function (gops) {\n var alignIndex, gopIndex, align, gop, alignEndIndex, matchFound;\n alignIndex = gopsToAlignWith.length - 1;\n gopIndex = gops.length - 1;\n alignEndIndex = null;\n matchFound = false;\n while (alignIndex >= 0 && gopIndex >= 0) {\n align = gopsToAlignWith[alignIndex];\n gop = gops[gopIndex];\n if (align.pts === gop.pts) {\n matchFound = true;\n break;\n }\n if (align.pts > gop.pts) {\n alignIndex--;\n continue;\n }\n if (alignIndex === gopsToAlignWith.length - 1) {\n // gop.pts is greater than the last alignment candidate. 
If no match is found\n // by the end of this loop, we still want to append gops that come after this\n // point\n alignEndIndex = gopIndex;\n }\n gopIndex--;\n }\n if (!matchFound && alignEndIndex === null) {\n return null;\n }\n var trimIndex;\n if (matchFound) {\n trimIndex = gopIndex;\n } else {\n trimIndex = alignEndIndex;\n }\n if (trimIndex === 0) {\n return gops;\n }\n var alignedGops = gops.slice(trimIndex);\n var metadata = alignedGops.reduce(function (total, gop) {\n total.byteLength += gop.byteLength;\n total.duration += gop.duration;\n total.nalCount += gop.nalCount;\n return total;\n }, {\n byteLength: 0,\n duration: 0,\n nalCount: 0\n });\n alignedGops.byteLength = metadata.byteLength;\n alignedGops.duration = metadata.duration;\n alignedGops.nalCount = metadata.nalCount;\n alignedGops.pts = alignedGops[0].pts;\n alignedGops.dts = alignedGops[0].dts;\n return alignedGops;\n };\n this.alignGopsWith = function (newGopsToAlignWith) {\n gopsToAlignWith = newGopsToAlignWith;\n };\n };\n VideoSegmentStream.prototype = new Stream();\n /**\n * A Stream that can combine multiple streams (ie. audio & video)\n * into a single output segment for MSE. Also supports audio-only\n * and video-only streams.\n * @param options {object} transmuxer options object\n * @param options.keepOriginalTimestamps {boolean} If true, keep the timestamps\n * in the source; false to adjust the first segment to start at media timeline start.\n */\n\n CoalesceStream = function (options, metadataStream) {\n // Number of Tracks per output segment\n // If greater than 1, we combine multiple\n // tracks into a single segment\n this.numberOfTracks = 0;\n this.metadataStream = metadataStream;\n options = options || {};\n if (typeof options.remux !== 'undefined') {\n this.remuxTracks = !!options.remux;\n } else {\n this.remuxTracks = true;\n }\n if (typeof options.keepOriginalTimestamps === 'boolean') {\n this.keepOriginalTimestamps = options.keepOriginalTimestamps;\n } else {\n this.keepOriginalTimestamps = false;\n }\n this.pendingTracks = [];\n this.videoTrack = null;\n this.pendingBoxes = [];\n this.pendingCaptions = [];\n this.pendingMetadata = [];\n this.pendingBytes = 0;\n this.emittedTracks = 0;\n CoalesceStream.prototype.init.call(this); // Take output from multiple\n\n this.push = function (output) {\n // buffer incoming captions until the associated video segment\n // finishes\n if (output.content || output.text) {\n return this.pendingCaptions.push(output);\n } // buffer incoming id3 tags until the final flush\n\n if (output.frames) {\n return this.pendingMetadata.push(output);\n } // Add this track to the list of pending tracks and store\n // important information required for the construction of\n // the final segment\n\n this.pendingTracks.push(output.track);\n this.pendingBytes += output.boxes.byteLength; // TODO: is there an issue for this against chrome?\n // We unshift audio and push video because\n // as of Chrome 75 when switching from\n // one init segment to another if the video\n // mdat does not appear after the audio mdat\n // only audio will play for the duration of our transmux.\n\n if (output.track.type === 'video') {\n this.videoTrack = output.track;\n this.pendingBoxes.push(output.boxes);\n }\n if (output.track.type === 'audio') {\n this.audioTrack = output.track;\n this.pendingBoxes.unshift(output.boxes);\n }\n };\n };\n CoalesceStream.prototype = new Stream();\n CoalesceStream.prototype.flush = function (flushSource) {\n var offset = 0,\n event = {\n captions: [],\n captionStreams: 
{},\n metadata: [],\n info: {}\n },\n caption,\n id3,\n initSegment,\n timelineStartPts = 0,\n i;\n if (this.pendingTracks.length < this.numberOfTracks) {\n if (flushSource !== 'VideoSegmentStream' && flushSource !== 'AudioSegmentStream') {\n // Return because we haven't received a flush from a data-generating\n // portion of the segment (meaning that we have only recieved meta-data\n // or captions.)\n return;\n } else if (this.remuxTracks) {\n // Return until we have enough tracks from the pipeline to remux (if we\n // are remuxing audio and video into a single MP4)\n return;\n } else if (this.pendingTracks.length === 0) {\n // In the case where we receive a flush without any data having been\n // received we consider it an emitted track for the purposes of coalescing\n // `done` events.\n // We do this for the case where there is an audio and video track in the\n // segment but no audio data. (seen in several playlists with alternate\n // audio tracks and no audio present in the main TS segments.)\n this.emittedTracks++;\n if (this.emittedTracks >= this.numberOfTracks) {\n this.trigger('done');\n this.emittedTracks = 0;\n }\n return;\n }\n }\n if (this.videoTrack) {\n timelineStartPts = this.videoTrack.timelineStartInfo.pts;\n VIDEO_PROPERTIES.forEach(function (prop) {\n event.info[prop] = this.videoTrack[prop];\n }, this);\n } else if (this.audioTrack) {\n timelineStartPts = this.audioTrack.timelineStartInfo.pts;\n AUDIO_PROPERTIES.forEach(function (prop) {\n event.info[prop] = this.audioTrack[prop];\n }, this);\n }\n if (this.videoTrack || this.audioTrack) {\n if (this.pendingTracks.length === 1) {\n event.type = this.pendingTracks[0].type;\n } else {\n event.type = 'combined';\n }\n this.emittedTracks += this.pendingTracks.length;\n initSegment = mp4.initSegment(this.pendingTracks); // Create a new typed array to hold the init segment\n\n event.initSegment = new Uint8Array(initSegment.byteLength); // Create an init segment containing a moov\n // and track definitions\n\n event.initSegment.set(initSegment); // Create a new typed array to hold the moof+mdats\n\n event.data = new Uint8Array(this.pendingBytes); // Append each moof+mdat (one per track) together\n\n for (i = 0; i < this.pendingBoxes.length; i++) {\n event.data.set(this.pendingBoxes[i], offset);\n offset += this.pendingBoxes[i].byteLength;\n } // Translate caption PTS times into second offsets to match the\n // video timeline for the segment, and add track info\n\n for (i = 0; i < this.pendingCaptions.length; i++) {\n caption = this.pendingCaptions[i];\n caption.startTime = clock.metadataTsToSeconds(caption.startPts, timelineStartPts, this.keepOriginalTimestamps);\n caption.endTime = clock.metadataTsToSeconds(caption.endPts, timelineStartPts, this.keepOriginalTimestamps);\n event.captionStreams[caption.stream] = true;\n event.captions.push(caption);\n } // Translate ID3 frame PTS times into second offsets to match the\n // video timeline for the segment\n\n for (i = 0; i < this.pendingMetadata.length; i++) {\n id3 = this.pendingMetadata[i];\n id3.cueTime = clock.metadataTsToSeconds(id3.pts, timelineStartPts, this.keepOriginalTimestamps);\n event.metadata.push(id3);\n } // We add this to every single emitted segment even though we only need\n // it for the first\n\n event.metadata.dispatchType = this.metadataStream.dispatchType; // Reset stream state\n\n this.pendingTracks.length = 0;\n this.videoTrack = null;\n this.pendingBoxes.length = 0;\n this.pendingCaptions.length = 0;\n this.pendingBytes = 0;\n 
this.pendingMetadata.length = 0; // Emit the built segment\n // We include captions and ID3 tags for backwards compatibility,\n // ideally we should send only video and audio in the data event\n\n this.trigger('data', event); // Emit each caption to the outside world\n // Ideally, this would happen immediately on parsing captions,\n // but we need to ensure that video data is sent back first\n // so that caption timing can be adjusted to match video timing\n\n for (i = 0; i < event.captions.length; i++) {\n caption = event.captions[i];\n this.trigger('caption', caption);\n } // Emit each id3 tag to the outside world\n // Ideally, this would happen immediately on parsing the tag,\n // but we need to ensure that video data is sent back first\n // so that ID3 frame timing can be adjusted to match video timing\n\n for (i = 0; i < event.metadata.length; i++) {\n id3 = event.metadata[i];\n this.trigger('id3Frame', id3);\n }\n } // Only emit `done` if all tracks have been flushed and emitted\n\n if (this.emittedTracks >= this.numberOfTracks) {\n this.trigger('done');\n this.emittedTracks = 0;\n }\n };\n CoalesceStream.prototype.setRemux = function (val) {\n this.remuxTracks = val;\n };\n /**\n * A Stream that expects MP2T binary data as input and produces\n * corresponding media segments, suitable for use with Media Source\n * Extension (MSE) implementations that support the ISO BMFF byte\n * stream format, like Chrome.\n */\n\n Transmuxer = function (options) {\n var self = this,\n hasFlushed = true,\n videoTrack,\n audioTrack;\n Transmuxer.prototype.init.call(this);\n options = options || {};\n this.baseMediaDecodeTime = options.baseMediaDecodeTime || 0;\n this.transmuxPipeline_ = {};\n this.setupAacPipeline = function () {\n var pipeline = {};\n this.transmuxPipeline_ = pipeline;\n pipeline.type = 'aac';\n pipeline.metadataStream = new m2ts.MetadataStream(); // set up the parsing pipeline\n\n pipeline.aacStream = new AacStream();\n pipeline.audioTimestampRolloverStream = new m2ts.TimestampRolloverStream('audio');\n pipeline.timedMetadataTimestampRolloverStream = new m2ts.TimestampRolloverStream('timed-metadata');\n pipeline.adtsStream = new AdtsStream();\n pipeline.coalesceStream = new CoalesceStream(options, pipeline.metadataStream);\n pipeline.headOfPipeline = pipeline.aacStream;\n pipeline.aacStream.pipe(pipeline.audioTimestampRolloverStream).pipe(pipeline.adtsStream);\n pipeline.aacStream.pipe(pipeline.timedMetadataTimestampRolloverStream).pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream);\n pipeline.metadataStream.on('timestamp', function (frame) {\n pipeline.aacStream.setTimestamp(frame.timeStamp);\n });\n pipeline.aacStream.on('data', function (data) {\n if (data.type !== 'timed-metadata' && data.type !== 'audio' || pipeline.audioSegmentStream) {\n return;\n }\n audioTrack = audioTrack || {\n timelineStartInfo: {\n baseMediaDecodeTime: self.baseMediaDecodeTime\n },\n codec: 'adts',\n type: 'audio'\n }; // hook up the audio segment stream to the first track with aac data\n\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.audioSegmentStream = new AudioSegmentStream(audioTrack, options);\n pipeline.audioSegmentStream.on('log', self.getLogTrigger_('audioSegmentStream'));\n pipeline.audioSegmentStream.on('timingInfo', self.trigger.bind(self, 'audioTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream); // emit pmt info\n\n self.trigger('trackinfo', {\n hasAudio: !!audioTrack,\n 
hasVideo: !!videoTrack\n });\n }); // Re-emit any data coming from the coalesce stream to the outside world\n\n pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data')); // Let the consumer know we have finished flushing the entire pipeline\n\n pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));\n addPipelineLogRetriggers(this, pipeline);\n };\n this.setupTsPipeline = function () {\n var pipeline = {};\n this.transmuxPipeline_ = pipeline;\n pipeline.type = 'ts';\n pipeline.metadataStream = new m2ts.MetadataStream(); // set up the parsing pipeline\n\n pipeline.packetStream = new m2ts.TransportPacketStream();\n pipeline.parseStream = new m2ts.TransportParseStream();\n pipeline.elementaryStream = new m2ts.ElementaryStream();\n pipeline.timestampRolloverStream = new m2ts.TimestampRolloverStream();\n pipeline.adtsStream = new AdtsStream();\n pipeline.h264Stream = new H264Stream();\n pipeline.captionStream = new m2ts.CaptionStream(options);\n pipeline.coalesceStream = new CoalesceStream(options, pipeline.metadataStream);\n pipeline.headOfPipeline = pipeline.packetStream; // disassemble MPEG2-TS packets into elementary streams\n\n pipeline.packetStream.pipe(pipeline.parseStream).pipe(pipeline.elementaryStream).pipe(pipeline.timestampRolloverStream); // !!THIS ORDER IS IMPORTANT!!\n // demux the streams\n\n pipeline.timestampRolloverStream.pipe(pipeline.h264Stream);\n pipeline.timestampRolloverStream.pipe(pipeline.adtsStream);\n pipeline.timestampRolloverStream.pipe(pipeline.metadataStream).pipe(pipeline.coalesceStream); // Hook up CEA-608/708 caption stream\n\n pipeline.h264Stream.pipe(pipeline.captionStream).pipe(pipeline.coalesceStream);\n pipeline.elementaryStream.on('data', function (data) {\n var i;\n if (data.type === 'metadata') {\n i = data.tracks.length; // scan the tracks listed in the metadata\n\n while (i--) {\n if (!videoTrack && data.tracks[i].type === 'video') {\n videoTrack = data.tracks[i];\n videoTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;\n } else if (!audioTrack && data.tracks[i].type === 'audio') {\n audioTrack = data.tracks[i];\n audioTrack.timelineStartInfo.baseMediaDecodeTime = self.baseMediaDecodeTime;\n }\n } // hook up the video segment stream to the first track with h264 data\n\n if (videoTrack && !pipeline.videoSegmentStream) {\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.videoSegmentStream = new VideoSegmentStream(videoTrack, options);\n pipeline.videoSegmentStream.on('log', self.getLogTrigger_('videoSegmentStream'));\n pipeline.videoSegmentStream.on('timelineStartInfo', function (timelineStartInfo) {\n // When video emits timelineStartInfo data after a flush, we forward that\n // info to the AudioSegmentStream, if it exists, because video timeline\n // data takes precedence. 
Do not do this if keepOriginalTimestamps is set,\n // because this is a particularly subtle form of timestamp alteration.\n if (audioTrack && !options.keepOriginalTimestamps) {\n audioTrack.timelineStartInfo = timelineStartInfo; // On the first segment we trim AAC frames that exist before the\n // very earliest DTS we have seen in video because Chrome will\n // interpret any video track with a baseMediaDecodeTime that is\n // non-zero as a gap.\n\n pipeline.audioSegmentStream.setEarliestDts(timelineStartInfo.dts - self.baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('processedGopsInfo', self.trigger.bind(self, 'gopInfo'));\n pipeline.videoSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'videoSegmentTimingInfo'));\n pipeline.videoSegmentStream.on('baseMediaDecodeTime', function (baseMediaDecodeTime) {\n if (audioTrack) {\n pipeline.audioSegmentStream.setVideoBaseMediaDecodeTime(baseMediaDecodeTime);\n }\n });\n pipeline.videoSegmentStream.on('timingInfo', self.trigger.bind(self, 'videoTimingInfo')); // Set up the final part of the video pipeline\n\n pipeline.h264Stream.pipe(pipeline.videoSegmentStream).pipe(pipeline.coalesceStream);\n }\n if (audioTrack && !pipeline.audioSegmentStream) {\n // hook up the audio segment stream to the first track with aac data\n pipeline.coalesceStream.numberOfTracks++;\n pipeline.audioSegmentStream = new AudioSegmentStream(audioTrack, options);\n pipeline.audioSegmentStream.on('log', self.getLogTrigger_('audioSegmentStream'));\n pipeline.audioSegmentStream.on('timingInfo', self.trigger.bind(self, 'audioTimingInfo'));\n pipeline.audioSegmentStream.on('segmentTimingInfo', self.trigger.bind(self, 'audioSegmentTimingInfo')); // Set up the final part of the audio pipeline\n\n pipeline.adtsStream.pipe(pipeline.audioSegmentStream).pipe(pipeline.coalesceStream);\n } // emit pmt info\n\n self.trigger('trackinfo', {\n hasAudio: !!audioTrack,\n hasVideo: !!videoTrack\n });\n }\n }); // Re-emit any data coming from the coalesce stream to the outside world\n\n pipeline.coalesceStream.on('data', this.trigger.bind(this, 'data'));\n pipeline.coalesceStream.on('id3Frame', function (id3Frame) {\n id3Frame.dispatchType = pipeline.metadataStream.dispatchType;\n self.trigger('id3Frame', id3Frame);\n });\n pipeline.coalesceStream.on('caption', this.trigger.bind(this, 'caption')); // Let the consumer know we have finished flushing the entire pipeline\n\n pipeline.coalesceStream.on('done', this.trigger.bind(this, 'done'));\n addPipelineLogRetriggers(this, pipeline);\n }; // hook up the segment streams once track metadata is delivered\n\n this.setBaseMediaDecodeTime = function (baseMediaDecodeTime) {\n var pipeline = this.transmuxPipeline_;\n if (!options.keepOriginalTimestamps) {\n this.baseMediaDecodeTime = baseMediaDecodeTime;\n }\n if (audioTrack) {\n audioTrack.timelineStartInfo.dts = undefined;\n audioTrack.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(audioTrack);\n if (pipeline.audioTimestampRolloverStream) {\n pipeline.audioTimestampRolloverStream.discontinuity();\n }\n }\n if (videoTrack) {\n if (pipeline.videoSegmentStream) {\n pipeline.videoSegmentStream.gopCache_ = [];\n }\n videoTrack.timelineStartInfo.dts = undefined;\n videoTrack.timelineStartInfo.pts = undefined;\n trackDecodeInfo.clearDtsInfo(videoTrack);\n pipeline.captionStream.reset();\n }\n if (pipeline.timestampRolloverStream) {\n pipeline.timestampRolloverStream.discontinuity();\n }\n };\n this.setAudioAppendStart = function (timestamp) {\n if (audioTrack) {\n 
this.transmuxPipeline_.audioSegmentStream.setAudioAppendStart(timestamp);\n }\n };\n this.setRemux = function (val) {\n var pipeline = this.transmuxPipeline_;\n options.remux = val;\n if (pipeline && pipeline.coalesceStream) {\n pipeline.coalesceStream.setRemux(val);\n }\n };\n this.alignGopsWith = function (gopsToAlignWith) {\n if (videoTrack && this.transmuxPipeline_.videoSegmentStream) {\n this.transmuxPipeline_.videoSegmentStream.alignGopsWith(gopsToAlignWith);\n }\n };\n this.getLogTrigger_ = function (key) {\n var self = this;\n return function (event) {\n event.stream = key;\n self.trigger('log', event);\n };\n }; // feed incoming data to the front of the parsing pipeline\n\n this.push = function (data) {\n if (hasFlushed) {\n var isAac = isLikelyAacData(data);\n if (isAac && this.transmuxPipeline_.type !== 'aac') {\n this.setupAacPipeline();\n } else if (!isAac && this.transmuxPipeline_.type !== 'ts') {\n this.setupTsPipeline();\n }\n hasFlushed = false;\n }\n this.transmuxPipeline_.headOfPipeline.push(data);\n }; // flush any buffered data\n\n this.flush = function () {\n hasFlushed = true; // Start at the top of the pipeline and flush all pending work\n\n this.transmuxPipeline_.headOfPipeline.flush();\n };\n this.endTimeline = function () {\n this.transmuxPipeline_.headOfPipeline.endTimeline();\n };\n this.reset = function () {\n if (this.transmuxPipeline_.headOfPipeline) {\n this.transmuxPipeline_.headOfPipeline.reset();\n }\n }; // Caption data has to be reset when seeking outside buffered range\n\n this.resetCaptions = function () {\n if (this.transmuxPipeline_.captionStream) {\n this.transmuxPipeline_.captionStream.reset();\n }\n };\n };\n Transmuxer.prototype = new Stream();\n var transmuxer = {\n Transmuxer: Transmuxer,\n VideoSegmentStream: VideoSegmentStream,\n AudioSegmentStream: AudioSegmentStream,\n AUDIO_PROPERTIES: AUDIO_PROPERTIES,\n VIDEO_PROPERTIES: VIDEO_PROPERTIES,\n // exported for testing\n generateSegmentTimingInfo: generateSegmentTimingInfo\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n */\n\n var toUnsigned$3 = function (value) {\n return value >>> 0;\n };\n var toHexString$1 = function (value) {\n return ('00' + value.toString(16)).slice(-2);\n };\n var bin = {\n toUnsigned: toUnsigned$3,\n toHexString: toHexString$1\n };\n var parseType$3 = function (buffer) {\n var result = '';\n result += String.fromCharCode(buffer[0]);\n result += String.fromCharCode(buffer[1]);\n result += String.fromCharCode(buffer[2]);\n result += String.fromCharCode(buffer[3]);\n return result;\n };\n var parseType_1 = parseType$3;\n var toUnsigned$2 = bin.toUnsigned;\n var parseType$2 = parseType_1;\n var findBox$2 = function (data, path) {\n var results = [],\n i,\n size,\n type,\n end,\n subresults;\n if (!path.length) {\n // short-circuit the search for empty paths\n return null;\n }\n for (i = 0; i < data.byteLength;) {\n size = toUnsigned$2(data[i] << 24 | data[i + 1] << 16 | data[i + 2] << 8 | data[i + 3]);\n type = parseType$2(data.subarray(i + 4, i + 8));\n end = size > 1 ? 
i + size : data.byteLength;\n if (type === path[0]) {\n if (path.length === 1) {\n // this is the end of the path and we've found the box we were\n // looking for\n results.push(data.subarray(i + 8, end));\n } else {\n // recursively search for the next box along the path\n subresults = findBox$2(data.subarray(i + 8, end), path.slice(1));\n if (subresults.length) {\n results = results.concat(subresults);\n }\n }\n }\n i = end;\n } // we've finished searching all of data\n\n return results;\n };\n var findBox_1 = findBox$2;\n var toUnsigned$1 = bin.toUnsigned;\n var getUint64$2 = numbers.getUint64;\n var tfdt = function (data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4))\n };\n if (result.version === 1) {\n result.baseMediaDecodeTime = getUint64$2(data.subarray(4));\n } else {\n result.baseMediaDecodeTime = toUnsigned$1(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]);\n }\n return result;\n };\n var parseTfdt$2 = tfdt;\n var parseSampleFlags$1 = function (flags) {\n return {\n isLeading: (flags[0] & 0x0c) >>> 2,\n dependsOn: flags[0] & 0x03,\n isDependedOn: (flags[1] & 0xc0) >>> 6,\n hasRedundancy: (flags[1] & 0x30) >>> 4,\n paddingValue: (flags[1] & 0x0e) >>> 1,\n isNonSyncSample: flags[1] & 0x01,\n degradationPriority: flags[2] << 8 | flags[3]\n };\n };\n var parseSampleFlags_1 = parseSampleFlags$1;\n var parseSampleFlags = parseSampleFlags_1;\n var trun = function (data) {\n var result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n samples: []\n },\n view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n // Flag interpretation\n dataOffsetPresent = result.flags[2] & 0x01,\n // compare with 2nd byte of 0x1\n firstSampleFlagsPresent = result.flags[2] & 0x04,\n // compare with 2nd byte of 0x4\n sampleDurationPresent = result.flags[1] & 0x01,\n // compare with 2nd byte of 0x100\n sampleSizePresent = result.flags[1] & 0x02,\n // compare with 2nd byte of 0x200\n sampleFlagsPresent = result.flags[1] & 0x04,\n // compare with 2nd byte of 0x400\n sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08,\n // compare with 2nd byte of 0x800\n sampleCount = view.getUint32(4),\n offset = 8,\n sample;\n if (dataOffsetPresent) {\n // 32 bit signed integer\n result.dataOffset = view.getInt32(offset);\n offset += 4;\n } // Overrides the flags for the first sample only. 
The order of\n // optional values will be: duration, size, compositionTimeOffset\n\n if (firstSampleFlagsPresent && sampleCount) {\n sample = {\n flags: parseSampleFlags(data.subarray(offset, offset + 4))\n };\n offset += 4;\n if (sampleDurationPresent) {\n sample.duration = view.getUint32(offset);\n offset += 4;\n }\n if (sampleSizePresent) {\n sample.size = view.getUint32(offset);\n offset += 4;\n }\n if (sampleCompositionTimeOffsetPresent) {\n if (result.version === 1) {\n sample.compositionTimeOffset = view.getInt32(offset);\n } else {\n sample.compositionTimeOffset = view.getUint32(offset);\n }\n offset += 4;\n }\n result.samples.push(sample);\n sampleCount--;\n }\n while (sampleCount--) {\n sample = {};\n if (sampleDurationPresent) {\n sample.duration = view.getUint32(offset);\n offset += 4;\n }\n if (sampleSizePresent) {\n sample.size = view.getUint32(offset);\n offset += 4;\n }\n if (sampleFlagsPresent) {\n sample.flags = parseSampleFlags(data.subarray(offset, offset + 4));\n offset += 4;\n }\n if (sampleCompositionTimeOffsetPresent) {\n if (result.version === 1) {\n sample.compositionTimeOffset = view.getInt32(offset);\n } else {\n sample.compositionTimeOffset = view.getUint32(offset);\n }\n offset += 4;\n }\n result.samples.push(sample);\n }\n return result;\n };\n var parseTrun$2 = trun;\n var tfhd = function (data) {\n var view = new DataView(data.buffer, data.byteOffset, data.byteLength),\n result = {\n version: data[0],\n flags: new Uint8Array(data.subarray(1, 4)),\n trackId: view.getUint32(4)\n },\n baseDataOffsetPresent = result.flags[2] & 0x01,\n sampleDescriptionIndexPresent = result.flags[2] & 0x02,\n defaultSampleDurationPresent = result.flags[2] & 0x08,\n defaultSampleSizePresent = result.flags[2] & 0x10,\n defaultSampleFlagsPresent = result.flags[2] & 0x20,\n durationIsEmpty = result.flags[0] & 0x010000,\n defaultBaseIsMoof = result.flags[0] & 0x020000,\n i;\n i = 8;\n if (baseDataOffsetPresent) {\n i += 4; // truncate top 4 bytes\n // FIXME: should we read the full 64 bits?\n\n result.baseDataOffset = view.getUint32(12);\n i += 4;\n }\n if (sampleDescriptionIndexPresent) {\n result.sampleDescriptionIndex = view.getUint32(i);\n i += 4;\n }\n if (defaultSampleDurationPresent) {\n result.defaultSampleDuration = view.getUint32(i);\n i += 4;\n }\n if (defaultSampleSizePresent) {\n result.defaultSampleSize = view.getUint32(i);\n i += 4;\n }\n if (defaultSampleFlagsPresent) {\n result.defaultSampleFlags = view.getUint32(i);\n }\n if (durationIsEmpty) {\n result.durationIsEmpty = true;\n }\n if (!baseDataOffsetPresent && defaultBaseIsMoof) {\n result.baseDataOffsetIsMoof = true;\n }\n return result;\n };\n var parseTfhd$2 = tfhd;\n var win;\n if (typeof window !== \"undefined\") {\n win = window;\n } else if (typeof commonjsGlobal !== \"undefined\") {\n win = commonjsGlobal;\n } else if (typeof self !== \"undefined\") {\n win = self;\n } else {\n win = {};\n }\n var window_1 = win;\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Reads in-band CEA-708 captions out of FMP4 segments.\n * @see https://en.wikipedia.org/wiki/CEA-708\n */\n\n var discardEmulationPreventionBytes = captionPacketParser.discardEmulationPreventionBytes;\n var CaptionStream = captionStream.CaptionStream;\n var findBox$1 = findBox_1;\n var parseTfdt$1 = parseTfdt$2;\n var parseTrun$1 = parseTrun$2;\n var parseTfhd$1 = parseTfhd$2;\n var window$2 = window_1;\n /**\n * Maps an offset in the mdat to a sample based on the 
the size of the samples.\n * Assumes that `parseSamples` has been called first.\n *\n * @param {Number} offset - The offset into the mdat\n * @param {Object[]} samples - An array of samples, parsed using `parseSamples`\n * @return {?Object} The matching sample, or null if no match was found.\n *\n * @see ISO-BMFF-12/2015, Section 8.8.8\n **/\n\n var mapToSample = function (offset, samples) {\n var approximateOffset = offset;\n for (var i = 0; i < samples.length; i++) {\n var sample = samples[i];\n if (approximateOffset < sample.size) {\n return sample;\n }\n approximateOffset -= sample.size;\n }\n return null;\n };\n /**\n * Finds SEI nal units contained in a Media Data Box.\n * Assumes that `parseSamples` has been called first.\n *\n * @param {Uint8Array} avcStream - The bytes of the mdat\n * @param {Object[]} samples - The samples parsed out by `parseSamples`\n * @param {Number} trackId - The trackId of this video track\n * @return {Object[]} seiNals - the parsed SEI NALUs found.\n * The contents of the seiNal should match what is expected by\n * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)\n *\n * @see ISO-BMFF-12/2015, Section 8.1.1\n * @see Rec. ITU-T H.264, 7.3.2.3.1\n **/\n\n var findSeiNals = function (avcStream, samples, trackId) {\n var avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),\n result = {\n logs: [],\n seiNals: []\n },\n seiNal,\n i,\n length,\n lastMatchedSample;\n for (i = 0; i + 4 < avcStream.length; i += length) {\n length = avcView.getUint32(i);\n i += 4; // Bail if this doesn't appear to be an H264 stream\n\n if (length <= 0) {\n continue;\n }\n switch (avcStream[i] & 0x1F) {\n case 0x06:\n var data = avcStream.subarray(i + 1, i + 1 + length);\n var matchingSample = mapToSample(i, samples);\n seiNal = {\n nalUnitType: 'sei_rbsp',\n size: length,\n data: data,\n escapedRBSP: discardEmulationPreventionBytes(data),\n trackId: trackId\n };\n if (matchingSample) {\n seiNal.pts = matchingSample.pts;\n seiNal.dts = matchingSample.dts;\n lastMatchedSample = matchingSample;\n } else if (lastMatchedSample) {\n // If a matching sample cannot be found, use the last\n // sample's values as they should be as close as possible\n seiNal.pts = lastMatchedSample.pts;\n seiNal.dts = lastMatchedSample.dts;\n } else {\n result.logs.push({\n level: 'warn',\n message: 'We\\'ve encountered a nal unit without data at ' + i + ' for trackId ' + trackId + '. See mux.js#223.'\n });\n break;\n }\n result.seiNals.push(seiNal);\n break;\n }\n }\n return result;\n };\n /**\n * Parses sample information out of Track Run Boxes and calculates\n * the absolute presentation and decode timestamps of each sample.\n *\n * @param {Array} truns - The Trun Run boxes to be parsed\n * @param {Number|BigInt} baseMediaDecodeTime - base media decode time from tfdt\n @see ISO-BMFF-12/2015, Section 8.8.12\n * @param {Object} tfhd - The parsed Track Fragment Header\n * @see inspect.parseTfhd\n * @return {Object[]} the parsed samples\n *\n * @see ISO-BMFF-12/2015, Section 8.8.8\n **/\n\n var parseSamples = function (truns, baseMediaDecodeTime, tfhd) {\n var currentDts = baseMediaDecodeTime;\n var defaultSampleDuration = tfhd.defaultSampleDuration || 0;\n var defaultSampleSize = tfhd.defaultSampleSize || 0;\n var trackId = tfhd.trackId;\n var allSamples = [];\n truns.forEach(function (trun) {\n // Note: We currently do not parse the sample table as well\n // as the trun. 
It's possible some sources will require this.\n // moov > trak > mdia > minf > stbl\n var trackRun = parseTrun$1(trun);\n var samples = trackRun.samples;\n samples.forEach(function (sample) {\n if (sample.duration === undefined) {\n sample.duration = defaultSampleDuration;\n }\n if (sample.size === undefined) {\n sample.size = defaultSampleSize;\n }\n sample.trackId = trackId;\n sample.dts = currentDts;\n if (sample.compositionTimeOffset === undefined) {\n sample.compositionTimeOffset = 0;\n }\n if (typeof currentDts === 'bigint') {\n sample.pts = currentDts + window$2.BigInt(sample.compositionTimeOffset);\n currentDts += window$2.BigInt(sample.duration);\n } else {\n sample.pts = currentDts + sample.compositionTimeOffset;\n currentDts += sample.duration;\n }\n });\n allSamples = allSamples.concat(samples);\n });\n return allSamples;\n };\n /**\n * Parses out caption nals from an FMP4 segment's video tracks.\n *\n * @param {Uint8Array} segment - The bytes of a single segment\n * @param {Number} videoTrackId - The trackId of a video track in the segment\n * @return {Object.} A mapping of video trackId to\n * a list of seiNals found in that track\n **/\n\n var parseCaptionNals = function (segment, videoTrackId) {\n // To get the samples\n var trafs = findBox$1(segment, ['moof', 'traf']); // To get SEI NAL units\n\n var mdats = findBox$1(segment, ['mdat']);\n var captionNals = {};\n var mdatTrafPairs = []; // Pair up each traf with a mdat as moofs and mdats are in pairs\n\n mdats.forEach(function (mdat, index) {\n var matchingTraf = trafs[index];\n mdatTrafPairs.push({\n mdat: mdat,\n traf: matchingTraf\n });\n });\n mdatTrafPairs.forEach(function (pair) {\n var mdat = pair.mdat;\n var traf = pair.traf;\n var tfhd = findBox$1(traf, ['tfhd']); // Exactly 1 tfhd per traf\n\n var headerInfo = parseTfhd$1(tfhd[0]);\n var trackId = headerInfo.trackId;\n var tfdt = findBox$1(traf, ['tfdt']); // Either 0 or 1 tfdt per traf\n\n var baseMediaDecodeTime = tfdt.length > 0 ? 
parseTfdt$1(tfdt[0]).baseMediaDecodeTime : 0;\n var truns = findBox$1(traf, ['trun']);\n var samples;\n var result; // Only parse video data for the chosen video track\n\n if (videoTrackId === trackId && truns.length > 0) {\n samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);\n result = findSeiNals(mdat, samples, trackId);\n if (!captionNals[trackId]) {\n captionNals[trackId] = {\n seiNals: [],\n logs: []\n };\n }\n captionNals[trackId].seiNals = captionNals[trackId].seiNals.concat(result.seiNals);\n captionNals[trackId].logs = captionNals[trackId].logs.concat(result.logs);\n }\n });\n return captionNals;\n };\n /**\n * Parses out inband captions from an MP4 container and returns\n * caption objects that can be used by WebVTT and the TextTrack API.\n * @see https://developer.mozilla.org/en-US/docs/Web/API/VTTCue\n * @see https://developer.mozilla.org/en-US/docs/Web/API/TextTrack\n * Assumes that `probe.getVideoTrackIds` and `probe.timescale` have been called first\n *\n * @param {Uint8Array} segment - The fmp4 segment containing embedded captions\n * @param {Number} trackId - The id of the video track to parse\n * @param {Number} timescale - The timescale for the video track from the init segment\n *\n * @return {?Object[]} parsedCaptions - A list of captions or null if no video tracks\n * @return {Number} parsedCaptions[].startTime - The time to show the caption in seconds\n * @return {Number} parsedCaptions[].endTime - The time to stop showing the caption in seconds\n * @return {Object[]} parsedCaptions[].content - A list of individual caption segments\n * @return {String} parsedCaptions[].content.text - The visible content of the caption segment\n * @return {Number} parsedCaptions[].content.line - The line height from 1-15 for positioning of the caption segment\n * @return {Number} parsedCaptions[].content.position - The column indent percentage for cue positioning from 10-80\n **/\n\n var parseEmbeddedCaptions = function (segment, trackId, timescale) {\n var captionNals; // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there\n\n if (trackId === null) {\n return null;\n }\n captionNals = parseCaptionNals(segment, trackId);\n var trackNals = captionNals[trackId] || {};\n return {\n seiNals: trackNals.seiNals,\n logs: trackNals.logs,\n timescale: timescale\n };\n };\n /**\n * Converts SEI NALUs into captions that can be used by video.js\n **/\n\n var CaptionParser = function () {\n var isInitialized = false;\n var captionStream; // Stores segments seen before trackId and timescale are set\n\n var segmentCache; // Stores video track ID of the track being parsed\n\n var trackId; // Stores the timescale of the track being parsed\n\n var timescale; // Stores captions parsed so far\n\n var parsedCaptions; // Stores whether we are receiving partial data or not\n\n var parsingPartial;\n /**\n * A method to indicate whether a CaptionParser has been initalized\n * @returns {Boolean}\n **/\n\n this.isInitialized = function () {\n return isInitialized;\n };\n /**\n * Initializes the underlying CaptionStream, SEI NAL parsing\n * and management, and caption collection\n **/\n\n this.init = function (options) {\n captionStream = new CaptionStream();\n isInitialized = true;\n parsingPartial = options ? 
options.isPartial : false; // Collect dispatched captions\n\n captionStream.on('data', function (event) {\n // Convert to seconds in the source's timescale\n event.startTime = event.startPts / timescale;\n event.endTime = event.endPts / timescale;\n parsedCaptions.captions.push(event);\n parsedCaptions.captionStreams[event.stream] = true;\n });\n captionStream.on('log', function (log) {\n parsedCaptions.logs.push(log);\n });\n };\n /**\n * Determines if a new video track will be selected\n * or if the timescale changed\n * @return {Boolean}\n **/\n\n this.isNewInit = function (videoTrackIds, timescales) {\n if (videoTrackIds && videoTrackIds.length === 0 || timescales && typeof timescales === 'object' && Object.keys(timescales).length === 0) {\n return false;\n }\n return trackId !== videoTrackIds[0] || timescale !== timescales[trackId];\n };\n /**\n * Parses out SEI captions and interacts with underlying\n * CaptionStream to return dispatched captions\n *\n * @param {Uint8Array} segment - The fmp4 segment containing embedded captions\n * @param {Number[]} videoTrackIds - A list of video tracks found in the init segment\n * @param {Object.} timescales - The timescales found in the init segment\n * @see parseEmbeddedCaptions\n * @see m2ts/caption-stream.js\n **/\n\n this.parse = function (segment, videoTrackIds, timescales) {\n var parsedData;\n if (!this.isInitialized()) {\n return null; // This is not likely to be a video segment\n } else if (!videoTrackIds || !timescales) {\n return null;\n } else if (this.isNewInit(videoTrackIds, timescales)) {\n // Use the first video track only as there is no\n // mechanism to switch to other video tracks\n trackId = videoTrackIds[0];\n timescale = timescales[trackId]; // If an init segment has not been seen yet, hold onto segment\n // data until we have one.\n // the ISO-BMFF spec says that trackId can't be zero, but there's some broken content out there\n } else if (trackId === null || !timescale) {\n segmentCache.push(segment);\n return null;\n } // Now that a timescale and trackId is set, parse cached segments\n\n while (segmentCache.length > 0) {\n var cachedSegment = segmentCache.shift();\n this.parse(cachedSegment, videoTrackIds, timescales);\n }\n parsedData = parseEmbeddedCaptions(segment, trackId, timescale);\n if (parsedData && parsedData.logs) {\n parsedCaptions.logs = parsedCaptions.logs.concat(parsedData.logs);\n }\n if (parsedData === null || !parsedData.seiNals) {\n if (parsedCaptions.logs.length) {\n return {\n logs: parsedCaptions.logs,\n captions: [],\n captionStreams: []\n };\n }\n return null;\n }\n this.pushNals(parsedData.seiNals); // Force the parsed captions to be dispatched\n\n this.flushStream();\n return parsedCaptions;\n };\n /**\n * Pushes SEI NALUs onto CaptionStream\n * @param {Object[]} nals - A list of SEI nals parsed using `parseCaptionNals`\n * Assumes that `parseCaptionNals` has been called first\n * @see m2ts/caption-stream.js\n **/\n\n this.pushNals = function (nals) {\n if (!this.isInitialized() || !nals || nals.length === 0) {\n return null;\n }\n nals.forEach(function (nal) {\n captionStream.push(nal);\n });\n };\n /**\n * Flushes underlying CaptionStream to dispatch processed, displayable captions\n * @see m2ts/caption-stream.js\n **/\n\n this.flushStream = function () {\n if (!this.isInitialized()) {\n return null;\n }\n if (!parsingPartial) {\n captionStream.flush();\n } else {\n captionStream.partialFlush();\n }\n };\n /**\n * Reset caption buckets for new data\n **/\n\n this.clearParsedCaptions = function 
() {\n parsedCaptions.captions = [];\n parsedCaptions.captionStreams = {};\n parsedCaptions.logs = [];\n };\n /**\n * Resets underlying CaptionStream\n * @see m2ts/caption-stream.js\n **/\n\n this.resetCaptionStream = function () {\n if (!this.isInitialized()) {\n return null;\n }\n captionStream.reset();\n };\n /**\n * Convenience method to clear all captions flushed from the\n * CaptionStream and still being parsed\n * @see m2ts/caption-stream.js\n **/\n\n this.clearAllCaptions = function () {\n this.clearParsedCaptions();\n this.resetCaptionStream();\n };\n /**\n * Reset caption parser\n **/\n\n this.reset = function () {\n segmentCache = [];\n trackId = null;\n timescale = null;\n if (!parsedCaptions) {\n parsedCaptions = {\n captions: [],\n // CC1, CC2, CC3, CC4\n captionStreams: {},\n logs: []\n };\n } else {\n this.clearParsedCaptions();\n }\n this.resetCaptionStream();\n };\n this.reset();\n };\n var captionParser = CaptionParser;\n /**\n * Returns the first string in the data array ending with a null char '\\0'\n * @param {UInt8} data \n * @returns the string with the null char\n */\n\n var uint8ToCString$1 = function (data) {\n var index = 0;\n var curChar = String.fromCharCode(data[index]);\n var retString = '';\n while (curChar !== '\\0') {\n retString += curChar;\n index++;\n curChar = String.fromCharCode(data[index]);\n } // Add nullChar\n\n retString += curChar;\n return retString;\n };\n var string = {\n uint8ToCString: uint8ToCString$1\n };\n var uint8ToCString = string.uint8ToCString;\n var getUint64$1 = numbers.getUint64;\n /**\n * Based on: ISO/IEC 23009 Section: 5.10.3.3\n * References:\n * https://dashif-documents.azurewebsites.net/Events/master/event.html#emsg-format\n * https://aomediacodec.github.io/id3-emsg/\n * \n * Takes emsg box data as a uint8 array and returns a emsg box object\n * @param {UInt8Array} boxData data from emsg box\n * @returns A parsed emsg box object\n */\n\n var parseEmsgBox = function (boxData) {\n // version + flags\n var offset = 4;\n var version = boxData[0];\n var scheme_id_uri, value, timescale, presentation_time, presentation_time_delta, event_duration, id, message_data;\n if (version === 0) {\n scheme_id_uri = uint8ToCString(boxData.subarray(offset));\n offset += scheme_id_uri.length;\n value = uint8ToCString(boxData.subarray(offset));\n offset += value.length;\n var dv = new DataView(boxData.buffer);\n timescale = dv.getUint32(offset);\n offset += 4;\n presentation_time_delta = dv.getUint32(offset);\n offset += 4;\n event_duration = dv.getUint32(offset);\n offset += 4;\n id = dv.getUint32(offset);\n offset += 4;\n } else if (version === 1) {\n var dv = new DataView(boxData.buffer);\n timescale = dv.getUint32(offset);\n offset += 4;\n presentation_time = getUint64$1(boxData.subarray(offset));\n offset += 8;\n event_duration = dv.getUint32(offset);\n offset += 4;\n id = dv.getUint32(offset);\n offset += 4;\n scheme_id_uri = uint8ToCString(boxData.subarray(offset));\n offset += scheme_id_uri.length;\n value = uint8ToCString(boxData.subarray(offset));\n offset += value.length;\n }\n message_data = new Uint8Array(boxData.subarray(offset, boxData.byteLength));\n var emsgBox = {\n scheme_id_uri,\n value,\n // if timescale is undefined or 0 set to 1 \n timescale: timescale ? timescale : 1,\n presentation_time,\n presentation_time_delta,\n event_duration,\n id,\n message_data\n };\n return isValidEmsgBox(version, emsgBox) ? 
emsgBox : undefined;\n };\n /**\n * Scales a presentation time or time delta with an offset with a provided timescale\n * @param {number} presentationTime \n * @param {number} timescale \n * @param {number} timeDelta \n * @param {number} offset \n * @returns the scaled time as a number\n */\n\n var scaleTime = function (presentationTime, timescale, timeDelta, offset) {\n return presentationTime || presentationTime === 0 ? presentationTime / timescale : offset + timeDelta / timescale;\n };\n /**\n * Checks the emsg box data for validity based on the version\n * @param {number} version of the emsg box to validate\n * @param {Object} emsg the emsg data to validate\n * @returns if the box is valid as a boolean\n */\n\n var isValidEmsgBox = function (version, emsg) {\n var hasScheme = emsg.scheme_id_uri !== '\\0';\n var isValidV0Box = version === 0 && isDefined(emsg.presentation_time_delta) && hasScheme;\n var isValidV1Box = version === 1 && isDefined(emsg.presentation_time) && hasScheme; // Only valid versions of emsg are 0 and 1\n\n return !(version > 1) && isValidV0Box || isValidV1Box;\n }; // Utility function to check if a value is defined (neither undefined nor null)\n\n var isDefined = function (data) {\n return data !== undefined && data !== null;\n };\n var emsg$1 = {\n parseEmsgBox: parseEmsgBox,\n scaleTime: scaleTime\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about MP4s.\n */\n\n var toUnsigned = bin.toUnsigned;\n var toHexString = bin.toHexString;\n var findBox = findBox_1;\n var parseType$1 = parseType_1;\n var emsg = emsg$1;\n var parseTfhd = parseTfhd$2;\n var parseTrun = parseTrun$2;\n var parseTfdt = parseTfdt$2;\n var getUint64 = numbers.getUint64;\n var timescale, startTime, compositionStartTime, getVideoTrackIds, getTracks, getTimescaleFromMediaHeader, getEmsgID3;\n var window$1 = window_1;\n var parseId3Frames = parseId3.parseId3Frames;\n /**\n * Parses an MP4 initialization segment and extracts the timescale\n * values for any declared tracks. Timescale values indicate the\n * number of clock ticks per second to assume for time-based values\n * elsewhere in the MP4.\n *\n * To determine the start time of an MP4, you need two pieces of\n * information: the timescale unit and the earliest base media decode\n * time. Multiple timescales can be specified within an MP4 but the\n * base media decode time is always expressed in the timescale from\n * the media header box for the track:\n * ```\n * moov > trak > mdia > mdhd.timescale\n * ```\n * @param init {Uint8Array} the bytes of the init segment\n * @return {object} a hash of track ids to timescale values or null if\n * the init segment is malformed.\n */\n\n timescale = function (init) {\n var result = {},\n traks = findBox(init, ['moov', 'trak']); // mdhd timescale\n\n return traks.reduce(function (result, trak) {\n var tkhd, version, index, id, mdhd;\n tkhd = findBox(trak, ['tkhd'])[0];\n if (!tkhd) {\n return null;\n }\n version = tkhd[0];\n index = version === 0 ? 12 : 20;\n id = toUnsigned(tkhd[index] << 24 | tkhd[index + 1] << 16 | tkhd[index + 2] << 8 | tkhd[index + 3]);\n mdhd = findBox(trak, ['mdia', 'mdhd'])[0];\n if (!mdhd) {\n return null;\n }\n version = mdhd[0];\n index = version === 0 ? 
12 : 20;\n result[id] = toUnsigned(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);\n return result;\n }, result);\n };\n /**\n * Determine the base media decode start time, in seconds, for an MP4\n * fragment. If multiple fragments are specified, the earliest time is\n * returned.\n *\n * The base media decode time can be parsed from track fragment\n * metadata:\n * ```\n * moof > traf > tfdt.baseMediaDecodeTime\n * ```\n * It requires the timescale value from the mdhd to interpret.\n *\n * @param timescale {object} a hash of track ids to timescale values.\n * @return {number} the earliest base media decode start time for the\n * fragment, in seconds\n */\n\n startTime = function (timescale, fragment) {\n var trafs; // we need info from two childrend of each track fragment box\n\n trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track\n\n var lowestTime = trafs.reduce(function (acc, traf) {\n var tfhd = findBox(traf, ['tfhd'])[0]; // get the track id from the tfhd\n\n var id = toUnsigned(tfhd[4] << 24 | tfhd[5] << 16 | tfhd[6] << 8 | tfhd[7]); // assume a 90kHz clock if no timescale was specified\n\n var scale = timescale[id] || 90e3; // get the base media decode time from the tfdt\n\n var tfdt = findBox(traf, ['tfdt'])[0];\n var dv = new DataView(tfdt.buffer, tfdt.byteOffset, tfdt.byteLength);\n var baseTime; // version 1 is 64 bit\n\n if (tfdt[0] === 1) {\n baseTime = getUint64(tfdt.subarray(4, 12));\n } else {\n baseTime = dv.getUint32(4);\n } // convert base time to seconds if it is a valid number.\n\n let seconds;\n if (typeof baseTime === 'bigint') {\n seconds = baseTime / window$1.BigInt(scale);\n } else if (typeof baseTime === 'number' && !isNaN(baseTime)) {\n seconds = baseTime / scale;\n }\n if (seconds < Number.MAX_SAFE_INTEGER) {\n seconds = Number(seconds);\n }\n if (seconds < acc) {\n acc = seconds;\n }\n return acc;\n }, Infinity);\n return typeof lowestTime === 'bigint' || isFinite(lowestTime) ? 
lowestTime : 0;\n };\n /**\n * Determine the composition start, in seconds, for an MP4\n * fragment.\n *\n * The composition start time of a fragment can be calculated using the base\n * media decode time, composition time offset, and timescale, as follows:\n *\n * compositionStartTime = (baseMediaDecodeTime + compositionTimeOffset) / timescale\n *\n * All of the aforementioned information is contained within a media fragment's\n * `traf` box, except for timescale info, which comes from the initialization\n * segment, so a track id (also contained within a `traf`) is also necessary to\n * associate it with a timescale\n *\n *\n * @param timescales {object} - a hash of track ids to timescale values.\n * @param fragment {Unit8Array} - the bytes of a media segment\n * @return {number} the composition start time for the fragment, in seconds\n **/\n\n compositionStartTime = function (timescales, fragment) {\n var trafBoxes = findBox(fragment, ['moof', 'traf']);\n var baseMediaDecodeTime = 0;\n var compositionTimeOffset = 0;\n var trackId;\n if (trafBoxes && trafBoxes.length) {\n // The spec states that track run samples contained within a `traf` box are contiguous, but\n // it does not explicitly state whether the `traf` boxes themselves are contiguous.\n // We will assume that they are, so we only need the first to calculate start time.\n var tfhd = findBox(trafBoxes[0], ['tfhd'])[0];\n var trun = findBox(trafBoxes[0], ['trun'])[0];\n var tfdt = findBox(trafBoxes[0], ['tfdt'])[0];\n if (tfhd) {\n var parsedTfhd = parseTfhd(tfhd);\n trackId = parsedTfhd.trackId;\n }\n if (tfdt) {\n var parsedTfdt = parseTfdt(tfdt);\n baseMediaDecodeTime = parsedTfdt.baseMediaDecodeTime;\n }\n if (trun) {\n var parsedTrun = parseTrun(trun);\n if (parsedTrun.samples && parsedTrun.samples.length) {\n compositionTimeOffset = parsedTrun.samples[0].compositionTimeOffset || 0;\n }\n }\n } // Get timescale for this specific track. Assume a 90kHz clock if no timescale was\n // specified.\n\n var timescale = timescales[trackId] || 90e3; // return the composition start time, in seconds\n\n if (typeof baseMediaDecodeTime === 'bigint') {\n compositionTimeOffset = window$1.BigInt(compositionTimeOffset);\n timescale = window$1.BigInt(timescale);\n }\n var result = (baseMediaDecodeTime + compositionTimeOffset) / timescale;\n if (typeof result === 'bigint' && result < Number.MAX_SAFE_INTEGER) {\n result = Number(result);\n }\n return result;\n };\n /**\n * Find the trackIds of the video tracks in this source.\n * Found by parsing the Handler Reference and Track Header Boxes:\n * moov > trak > mdia > hdlr\n * moov > trak > tkhd\n *\n * @param {Uint8Array} init - The bytes of the init segment for this source\n * @return {Number[]} A list of trackIds\n *\n * @see ISO-BMFF-12/2015, Section 8.4.3\n **/\n\n getVideoTrackIds = function (init) {\n var traks = findBox(init, ['moov', 'trak']);\n var videoTrackIds = [];\n traks.forEach(function (trak) {\n var hdlrs = findBox(trak, ['mdia', 'hdlr']);\n var tkhds = findBox(trak, ['tkhd']);\n hdlrs.forEach(function (hdlr, index) {\n var handlerType = parseType$1(hdlr.subarray(8, 12));\n var tkhd = tkhds[index];\n var view;\n var version;\n var trackId;\n if (handlerType === 'vide') {\n view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n version = view.getUint8(0);\n trackId = version === 0 ? 
view.getUint32(12) : view.getUint32(20);\n videoTrackIds.push(trackId);\n }\n });\n });\n return videoTrackIds;\n };\n getTimescaleFromMediaHeader = function (mdhd) {\n // mdhd is a FullBox, meaning it will have its own version as the first byte\n var version = mdhd[0];\n var index = version === 0 ? 12 : 20;\n return toUnsigned(mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]);\n };\n /**\n * Get all the video, audio, and hint tracks from a non fragmented\n * mp4 segment\n */\n\n getTracks = function (init) {\n var traks = findBox(init, ['moov', 'trak']);\n var tracks = [];\n traks.forEach(function (trak) {\n var track = {};\n var tkhd = findBox(trak, ['tkhd'])[0];\n var view, tkhdVersion; // id\n\n if (tkhd) {\n view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n tkhdVersion = view.getUint8(0);\n track.id = tkhdVersion === 0 ? view.getUint32(12) : view.getUint32(20);\n }\n var hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; // type\n\n if (hdlr) {\n var type = parseType$1(hdlr.subarray(8, 12));\n if (type === 'vide') {\n track.type = 'video';\n } else if (type === 'soun') {\n track.type = 'audio';\n } else {\n track.type = type;\n }\n } // codec\n\n var stsd = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd'])[0];\n if (stsd) {\n var sampleDescriptions = stsd.subarray(8); // gives the codec type string\n\n track.codec = parseType$1(sampleDescriptions.subarray(4, 8));\n var codecBox = findBox(sampleDescriptions, [track.codec])[0];\n var codecConfig, codecConfigType;\n if (codecBox) {\n // https://tools.ietf.org/html/rfc6381#section-3.3\n if (/^[asm]vc[1-9]$/i.test(track.codec)) {\n // we don't need anything but the \"config\" parameter of the\n // avc1 codecBox\n codecConfig = codecBox.subarray(78);\n codecConfigType = parseType$1(codecConfig.subarray(4, 8));\n if (codecConfigType === 'avcC' && codecConfig.length > 11) {\n track.codec += '.'; // left padded with zeroes for single digit hex\n // profile idc\n\n track.codec += toHexString(codecConfig[9]); // the byte containing the constraint_set flags\n\n track.codec += toHexString(codecConfig[10]); // level idc\n\n track.codec += toHexString(codecConfig[11]);\n } else {\n // TODO: show a warning that we couldn't parse the codec\n // and are using the default\n track.codec = 'avc1.4d400d';\n }\n } else if (/^mp4[a,v]$/i.test(track.codec)) {\n // we do not need anything but the streamDescriptor of the mp4a codecBox\n codecConfig = codecBox.subarray(28);\n codecConfigType = parseType$1(codecConfig.subarray(4, 8));\n if (codecConfigType === 'esds' && codecConfig.length > 20 && codecConfig[19] !== 0) {\n track.codec += '.' + toHexString(codecConfig[19]); // this value is only a single digit\n\n track.codec += '.' + toHexString(codecConfig[20] >>> 2 & 0x3f).replace(/^0/, '');\n } else {\n // TODO: show a warning that we couldn't parse the codec\n // and are using the default\n track.codec = 'mp4a.40.2';\n }\n } else {\n // flac, opus, etc\n track.codec = track.codec.toLowerCase();\n }\n }\n }\n var mdhd = findBox(trak, ['mdia', 'mdhd'])[0];\n if (mdhd) {\n track.timescale = getTimescaleFromMediaHeader(mdhd);\n }\n tracks.push(track);\n });\n return tracks;\n };\n /**\n * Returns an array of emsg ID3 data from the provided segmentData.\n * An offset can also be provided as the Latest Arrival Time to calculate \n * the Event Start Time of v0 EMSG boxes. 
\n * See: https://dashif-documents.azurewebsites.net/Events/master/event.html#Inband-event-timing\n * \n * @param {Uint8Array} segmentData the segment byte array.\n * @param {number} offset the segment start time or Latest Arrival Time, \n * @return {Object[]} an array of ID3 parsed from EMSG boxes\n */\n\n getEmsgID3 = function (segmentData, offset = 0) {\n var emsgBoxes = findBox(segmentData, ['emsg']);\n return emsgBoxes.map(data => {\n var parsedBox = emsg.parseEmsgBox(new Uint8Array(data));\n var parsedId3Frames = parseId3Frames(parsedBox.message_data);\n return {\n cueTime: emsg.scaleTime(parsedBox.presentation_time, parsedBox.timescale, parsedBox.presentation_time_delta, offset),\n duration: emsg.scaleTime(parsedBox.event_duration, parsedBox.timescale),\n frames: parsedId3Frames\n };\n });\n };\n var probe$2 = {\n // export mp4 inspector's findBox and parseType for backwards compatibility\n findBox: findBox,\n parseType: parseType$1,\n timescale: timescale,\n startTime: startTime,\n compositionStartTime: compositionStartTime,\n videoTrackIds: getVideoTrackIds,\n tracks: getTracks,\n getTimescaleFromMediaHeader: getTimescaleFromMediaHeader,\n getEmsgID3: getEmsgID3\n };\n /**\n * mux.js\n *\n * Copyright (c) Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Utilities to detect basic properties and metadata about TS Segments.\n */\n\n var StreamTypes$1 = streamTypes;\n var parsePid = function (packet) {\n var pid = packet[1] & 0x1f;\n pid <<= 8;\n pid |= packet[2];\n return pid;\n };\n var parsePayloadUnitStartIndicator = function (packet) {\n return !!(packet[1] & 0x40);\n };\n var parseAdaptionField = function (packet) {\n var offset = 0; // if an adaption field is present, its length is specified by the\n // fifth byte of the TS packet header. The adaptation field is\n // used to add stuffing to PES packets that don't fill a complete\n // TS packet, and to specify some forms of timing and control data\n // that we do not currently use.\n\n if ((packet[3] & 0x30) >>> 4 > 0x01) {\n offset += packet[4] + 1;\n }\n return offset;\n };\n var parseType = function (packet, pmtPid) {\n var pid = parsePid(packet);\n if (pid === 0) {\n return 'pat';\n } else if (pid === pmtPid) {\n return 'pmt';\n } else if (pmtPid) {\n return 'pes';\n }\n return null;\n };\n var parsePat = function (packet) {\n var pusi = parsePayloadUnitStartIndicator(packet);\n var offset = 4 + parseAdaptionField(packet);\n if (pusi) {\n offset += packet[offset] + 1;\n }\n return (packet[offset + 10] & 0x1f) << 8 | packet[offset + 11];\n };\n var parsePmt = function (packet) {\n var programMapTable = {};\n var pusi = parsePayloadUnitStartIndicator(packet);\n var payloadOffset = 4 + parseAdaptionField(packet);\n if (pusi) {\n payloadOffset += packet[payloadOffset] + 1;\n } // PMTs can be sent ahead of the time when they should actually\n // take effect. We don't believe this should ever be the case\n // for HLS but we'll ignore \"forward\" PMT declarations if we see\n // them. 
Future PMT declarations have the current_next_indicator\n // set to zero.\n\n if (!(packet[payloadOffset + 5] & 0x01)) {\n return;\n }\n var sectionLength, tableEnd, programInfoLength; // the mapping table ends at the end of the current section\n\n sectionLength = (packet[payloadOffset + 1] & 0x0f) << 8 | packet[payloadOffset + 2];\n tableEnd = 3 + sectionLength - 4; // to determine where the table is, we have to figure out how\n // long the program info descriptors are\n\n programInfoLength = (packet[payloadOffset + 10] & 0x0f) << 8 | packet[payloadOffset + 11]; // advance the offset to the first entry in the mapping table\n\n var offset = 12 + programInfoLength;\n while (offset < tableEnd) {\n var i = payloadOffset + offset; // add an entry that maps the elementary_pid to the stream_type\n\n programMapTable[(packet[i + 1] & 0x1F) << 8 | packet[i + 2]] = packet[i]; // move to the next table entry\n // skip past the elementary stream descriptors, if present\n\n offset += ((packet[i + 3] & 0x0F) << 8 | packet[i + 4]) + 5;\n }\n return programMapTable;\n };\n var parsePesType = function (packet, programMapTable) {\n var pid = parsePid(packet);\n var type = programMapTable[pid];\n switch (type) {\n case StreamTypes$1.H264_STREAM_TYPE:\n return 'video';\n case StreamTypes$1.ADTS_STREAM_TYPE:\n return 'audio';\n case StreamTypes$1.METADATA_STREAM_TYPE:\n return 'timed-metadata';\n default:\n return null;\n }\n };\n var parsePesTime = function (packet) {\n var pusi = parsePayloadUnitStartIndicator(packet);\n if (!pusi) {\n return null;\n }\n var offset = 4 + parseAdaptionField(packet);\n if (offset >= packet.byteLength) {\n // From the H 222.0 MPEG-TS spec\n // \"For transport stream packets carrying PES packets, stuffing is needed when there\n // is insufficient PES packet data to completely fill the transport stream packet\n // payload bytes. Stuffing is accomplished by defining an adaptation field longer than\n // the sum of the lengths of the data elements in it, so that the payload bytes\n // remaining after the adaptation field exactly accommodates the available PES packet\n // data.\"\n //\n // If the offset is >= the length of the packet, then the packet contains no data\n // and instead is just adaption field stuffing bytes\n return null;\n }\n var pes = null;\n var ptsDtsFlags; // PES packets may be annotated with a PTS value, or a PTS value\n // and a DTS value. Determine what combination of values is\n // available to work with.\n\n ptsDtsFlags = packet[offset + 7]; // PTS and DTS are normally stored as a 33-bit number. Javascript\n // performs all bitwise operations on 32-bit integers but javascript\n // supports a much greater range (52-bits) of integer using standard\n // mathematical operations.\n // We construct a 31-bit value using bitwise operators over the 31\n // most significant bits and then multiply by 4 (equal to a left-shift\n // of 2) before we add the final 2 least significant bits of the\n // timestamp (equal to an OR.)\n\n if (ptsDtsFlags & 0xC0) {\n pes = {}; // the PTS and DTS are not written out directly. 
For information\n // on how they are encoded, see\n // http://dvd.sourceforge.net/dvdinfo/pes-hdr.html\n\n pes.pts = (packet[offset + 9] & 0x0E) << 27 | (packet[offset + 10] & 0xFF) << 20 | (packet[offset + 11] & 0xFE) << 12 | (packet[offset + 12] & 0xFF) << 5 | (packet[offset + 13] & 0xFE) >>> 3;\n pes.pts *= 4; // Left shift by 2\n\n pes.pts += (packet[offset + 13] & 0x06) >>> 1; // OR by the two LSBs\n\n pes.dts = pes.pts;\n if (ptsDtsFlags & 0x40) {\n pes.dts = (packet[offset + 14] & 0x0E) << 27 | (packet[offset + 15] & 0xFF) << 20 | (packet[offset + 16] & 0xFE) << 12 | (packet[offset + 17] & 0xFF) << 5 | (packet[offset + 18] & 0xFE) >>> 3;\n pes.dts *= 4; // Left shift by 2\n\n pes.dts += (packet[offset + 18] & 0x06) >>> 1; // OR by the two LSBs\n }\n }\n\n return pes;\n };\n var parseNalUnitType = function (type) {\n switch (type) {\n case 0x05:\n return 'slice_layer_without_partitioning_rbsp_idr';\n case 0x06:\n return 'sei_rbsp';\n case 0x07:\n return 'seq_parameter_set_rbsp';\n case 0x08:\n return 'pic_parameter_set_rbsp';\n case 0x09:\n return 'access_unit_delimiter_rbsp';\n default:\n return null;\n }\n };\n var videoPacketContainsKeyFrame = function (packet) {\n var offset = 4 + parseAdaptionField(packet);\n var frameBuffer = packet.subarray(offset);\n var frameI = 0;\n var frameSyncPoint = 0;\n var foundKeyFrame = false;\n var nalType; // advance the sync point to a NAL start, if necessary\n\n for (; frameSyncPoint < frameBuffer.byteLength - 3; frameSyncPoint++) {\n if (frameBuffer[frameSyncPoint + 2] === 1) {\n // the sync point is properly aligned\n frameI = frameSyncPoint + 5;\n break;\n }\n }\n while (frameI < frameBuffer.byteLength) {\n // look at the current byte to determine if we've hit the end of\n // a NAL unit boundary\n switch (frameBuffer[frameI]) {\n case 0:\n // skip past non-sync sequences\n if (frameBuffer[frameI - 1] !== 0) {\n frameI += 2;\n break;\n } else if (frameBuffer[frameI - 2] !== 0) {\n frameI++;\n break;\n }\n if (frameSyncPoint + 3 !== frameI - 2) {\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n } // drop trailing zeroes\n\n do {\n frameI++;\n } while (frameBuffer[frameI] !== 1 && frameI < frameBuffer.length);\n frameSyncPoint = frameI - 2;\n frameI += 3;\n break;\n case 1:\n // skip past non-sync sequences\n if (frameBuffer[frameI - 1] !== 0 || frameBuffer[frameI - 2] !== 0) {\n frameI += 3;\n break;\n }\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n frameSyncPoint = frameI - 2;\n frameI += 3;\n break;\n default:\n // the current byte isn't a one or zero, so it cannot be part\n // of a sync sequence\n frameI += 3;\n break;\n }\n }\n frameBuffer = frameBuffer.subarray(frameSyncPoint);\n frameI -= frameSyncPoint;\n frameSyncPoint = 0; // parse the final nal\n\n if (frameBuffer && frameBuffer.byteLength > 3) {\n nalType = parseNalUnitType(frameBuffer[frameSyncPoint + 3] & 0x1f);\n if (nalType === 'slice_layer_without_partitioning_rbsp_idr') {\n foundKeyFrame = true;\n }\n }\n return foundKeyFrame;\n };\n var probe$1 = {\n parseType: parseType,\n parsePat: parsePat,\n parsePmt: parsePmt,\n parsePayloadUnitStartIndicator: parsePayloadUnitStartIndicator,\n parsePesType: parsePesType,\n parsePesTime: parsePesTime,\n videoPacketContainsKeyFrame: videoPacketContainsKeyFrame\n };\n /**\n * mux.js\n *\n * Copyright (c) 
Brightcove\n * Licensed Apache-2.0 https://github.com/videojs/mux.js/blob/master/LICENSE\n *\n * Parse mpeg2 transport stream packets to extract basic timing information\n */\n\n var StreamTypes = streamTypes;\n var handleRollover = timestampRolloverStream.handleRollover;\n var probe = {};\n probe.ts = probe$1;\n probe.aac = utils;\n var ONE_SECOND_IN_TS = clock$2.ONE_SECOND_IN_TS;\n var MP2T_PACKET_LENGTH = 188,\n // bytes\n SYNC_BYTE = 0x47;\n /**\n * walks through segment data looking for pat and pmt packets to parse out\n * program map table information\n */\n\n var parsePsi_ = function (bytes, pmt) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n packet,\n type;\n while (endIndex < bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pat':\n pmt.pid = probe.ts.parsePat(packet);\n break;\n case 'pmt':\n var table = probe.ts.parsePmt(packet);\n pmt.table = pmt.table || {};\n Object.keys(table).forEach(function (key) {\n pmt.table[key] = table[key];\n });\n break;\n }\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n }\n };\n /**\n * walks through the segment data from the start and end to get timing information\n * for the first and last audio pes packets\n */\n\n var parseAudioPes_ = function (bytes, pmt, result) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n packet,\n type,\n pesType,\n pusi,\n parsed;\n var endLoop = false; // Start walking from start of segment to get first audio packet\n\n while (endIndex <= bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && (bytes[endIndex] === SYNC_BYTE || endIndex === bytes.byteLength)) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType === 'audio' && pusi) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'audio';\n result.audio.push(parsed);\n endLoop = true;\n }\n }\n break;\n }\n if (endLoop) {\n break;\n }\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n } // Start walking from end of segment to get last audio packet\n\n endIndex = bytes.byteLength;\n startIndex = endIndex - MP2T_PACKET_LENGTH;\n endLoop = false;\n while (startIndex >= 0) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && (bytes[endIndex] === SYNC_BYTE || endIndex === bytes.byteLength)) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType 
=== 'audio' && pusi) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'audio';\n result.audio.push(parsed);\n endLoop = true;\n }\n }\n break;\n }\n if (endLoop) {\n break;\n }\n startIndex -= MP2T_PACKET_LENGTH;\n endIndex -= MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex--;\n endIndex--;\n }\n };\n /**\n * walks through the segment data from the start and end to get timing information\n * for the first and last video pes packets as well as timing information for the first\n * key frame.\n */\n\n var parseVideoPes_ = function (bytes, pmt, result) {\n var startIndex = 0,\n endIndex = MP2T_PACKET_LENGTH,\n packet,\n type,\n pesType,\n pusi,\n parsed,\n frame,\n i,\n pes;\n var endLoop = false;\n var currentFrame = {\n data: [],\n size: 0\n }; // Start walking from start of segment to get first video packet\n\n while (endIndex < bytes.byteLength) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType === 'video') {\n if (pusi && !endLoop) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'video';\n result.video.push(parsed);\n endLoop = true;\n }\n }\n if (!result.firstKeyFrame) {\n if (pusi) {\n if (currentFrame.size !== 0) {\n frame = new Uint8Array(currentFrame.size);\n i = 0;\n while (currentFrame.data.length) {\n pes = currentFrame.data.shift();\n frame.set(pes, i);\n i += pes.byteLength;\n }\n if (probe.ts.videoPacketContainsKeyFrame(frame)) {\n var firstKeyFrame = probe.ts.parsePesTime(frame); // PTS/DTS may not be available. Simply *not* setting\n // the keyframe seems to work fine with HLS playback\n // and definitely preferable to a crash with TypeError...\n\n if (firstKeyFrame) {\n result.firstKeyFrame = firstKeyFrame;\n result.firstKeyFrame.type = 'video';\n } else {\n // eslint-disable-next-line\n console.warn('Failed to extract PTS/DTS from PES at first keyframe. ' + 'This could be an unusual TS segment, or else mux.js did not ' + 'parse your TS segment correctly. If you know your TS ' + 'segments do contain PTS/DTS on keyframes please file a bug ' + 'report! 
You can try ffprobe to double check for yourself.');\n }\n }\n currentFrame.size = 0;\n }\n }\n currentFrame.data.push(packet);\n currentFrame.size += packet.byteLength;\n }\n }\n break;\n }\n if (endLoop && result.firstKeyFrame) {\n break;\n }\n startIndex += MP2T_PACKET_LENGTH;\n endIndex += MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex++;\n endIndex++;\n } // Start walking from end of segment to get last video packet\n\n endIndex = bytes.byteLength;\n startIndex = endIndex - MP2T_PACKET_LENGTH;\n endLoop = false;\n while (startIndex >= 0) {\n // Look for a pair of start and end sync bytes in the data..\n if (bytes[startIndex] === SYNC_BYTE && bytes[endIndex] === SYNC_BYTE) {\n // We found a packet\n packet = bytes.subarray(startIndex, endIndex);\n type = probe.ts.parseType(packet, pmt.pid);\n switch (type) {\n case 'pes':\n pesType = probe.ts.parsePesType(packet, pmt.table);\n pusi = probe.ts.parsePayloadUnitStartIndicator(packet);\n if (pesType === 'video' && pusi) {\n parsed = probe.ts.parsePesTime(packet);\n if (parsed) {\n parsed.type = 'video';\n result.video.push(parsed);\n endLoop = true;\n }\n }\n break;\n }\n if (endLoop) {\n break;\n }\n startIndex -= MP2T_PACKET_LENGTH;\n endIndex -= MP2T_PACKET_LENGTH;\n continue;\n } // If we get here, we have somehow become de-synchronized and we need to step\n // forward one byte at a time until we find a pair of sync bytes that denote\n // a packet\n\n startIndex--;\n endIndex--;\n }\n };\n /**\n * Adjusts the timestamp information for the segment to account for\n * rollover and convert to seconds based on pes packet timescale (90khz clock)\n */\n\n var adjustTimestamp_ = function (segmentInfo, baseTimestamp) {\n if (segmentInfo.audio && segmentInfo.audio.length) {\n var audioBaseTimestamp = baseTimestamp;\n if (typeof audioBaseTimestamp === 'undefined' || isNaN(audioBaseTimestamp)) {\n audioBaseTimestamp = segmentInfo.audio[0].dts;\n }\n segmentInfo.audio.forEach(function (info) {\n info.dts = handleRollover(info.dts, audioBaseTimestamp);\n info.pts = handleRollover(info.pts, audioBaseTimestamp); // time in seconds\n\n info.dtsTime = info.dts / ONE_SECOND_IN_TS;\n info.ptsTime = info.pts / ONE_SECOND_IN_TS;\n });\n }\n if (segmentInfo.video && segmentInfo.video.length) {\n var videoBaseTimestamp = baseTimestamp;\n if (typeof videoBaseTimestamp === 'undefined' || isNaN(videoBaseTimestamp)) {\n videoBaseTimestamp = segmentInfo.video[0].dts;\n }\n segmentInfo.video.forEach(function (info) {\n info.dts = handleRollover(info.dts, videoBaseTimestamp);\n info.pts = handleRollover(info.pts, videoBaseTimestamp); // time in seconds\n\n info.dtsTime = info.dts / ONE_SECOND_IN_TS;\n info.ptsTime = info.pts / ONE_SECOND_IN_TS;\n });\n if (segmentInfo.firstKeyFrame) {\n var frame = segmentInfo.firstKeyFrame;\n frame.dts = handleRollover(frame.dts, videoBaseTimestamp);\n frame.pts = handleRollover(frame.pts, videoBaseTimestamp); // time in seconds\n\n frame.dtsTime = frame.dts / ONE_SECOND_IN_TS;\n frame.ptsTime = frame.pts / ONE_SECOND_IN_TS;\n }\n }\n };\n /**\n * inspects the aac data stream for start and end time information\n */\n\n var inspectAac_ = function (bytes) {\n var endLoop = false,\n audioCount = 0,\n sampleRate = null,\n timestamp = null,\n frameSize = 0,\n byteIndex = 0,\n packet;\n while (bytes.length - byteIndex >= 3) {\n var type = probe.aac.parseType(bytes, 
byteIndex);\n switch (type) {\n case 'timed-metadata':\n // Exit early because we don't have enough to parse\n // the ID3 tag header\n if (bytes.length - byteIndex < 10) {\n endLoop = true;\n break;\n }\n frameSize = probe.aac.parseId3TagSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (frameSize > bytes.length) {\n endLoop = true;\n break;\n }\n if (timestamp === null) {\n packet = bytes.subarray(byteIndex, byteIndex + frameSize);\n timestamp = probe.aac.parseAacTimestamp(packet);\n }\n byteIndex += frameSize;\n break;\n case 'audio':\n // Exit early because we don't have enough to parse\n // the ADTS frame header\n if (bytes.length - byteIndex < 7) {\n endLoop = true;\n break;\n }\n frameSize = probe.aac.parseAdtsSize(bytes, byteIndex); // Exit early if we don't have enough in the buffer\n // to emit a full packet\n\n if (frameSize > bytes.length) {\n endLoop = true;\n break;\n }\n if (sampleRate === null) {\n packet = bytes.subarray(byteIndex, byteIndex + frameSize);\n sampleRate = probe.aac.parseSampleRate(packet);\n }\n audioCount++;\n byteIndex += frameSize;\n break;\n default:\n byteIndex++;\n break;\n }\n if (endLoop) {\n return null;\n }\n }\n if (sampleRate === null || timestamp === null) {\n return null;\n }\n var audioTimescale = ONE_SECOND_IN_TS / sampleRate;\n var result = {\n audio: [{\n type: 'audio',\n dts: timestamp,\n pts: timestamp\n }, {\n type: 'audio',\n dts: timestamp + audioCount * 1024 * audioTimescale,\n pts: timestamp + audioCount * 1024 * audioTimescale\n }]\n };\n return result;\n };\n /**\n * inspects the transport stream segment data for start and end time information\n * of the audio and video tracks (when present) as well as the first key frame's\n * start time.\n */\n\n var inspectTs_ = function (bytes) {\n var pmt = {\n pid: null,\n table: null\n };\n var result = {};\n parsePsi_(bytes, pmt);\n for (var pid in pmt.table) {\n if (pmt.table.hasOwnProperty(pid)) {\n var type = pmt.table[pid];\n switch (type) {\n case StreamTypes.H264_STREAM_TYPE:\n result.video = [];\n parseVideoPes_(bytes, pmt, result);\n if (result.video.length === 0) {\n delete result.video;\n }\n break;\n case StreamTypes.ADTS_STREAM_TYPE:\n result.audio = [];\n parseAudioPes_(bytes, pmt, result);\n if (result.audio.length === 0) {\n delete result.audio;\n }\n break;\n }\n }\n }\n return result;\n };\n /**\n * Inspects segment byte data and returns an object with start and end timing information\n *\n * @param {Uint8Array} bytes The segment byte data\n * @param {Number} baseTimestamp Relative reference timestamp used when adjusting frame\n * timestamps for rollover. 
This value must be in 90khz clock.\n * @return {Object} Object containing start and end frame timing info of segment.\n */\n\n var inspect = function (bytes, baseTimestamp) {\n var isAacData = probe.aac.isLikelyAacData(bytes);\n var result;\n if (isAacData) {\n result = inspectAac_(bytes);\n } else {\n result = inspectTs_(bytes);\n }\n if (!result || !result.audio && !result.video) {\n return null;\n }\n adjustTimestamp_(result, baseTimestamp);\n return result;\n };\n var tsInspector = {\n inspect: inspect,\n parseAudioPes_: parseAudioPes_\n };\n /* global self */\n\n /**\n * Re-emits transmuxer events by converting them into messages to the\n * world outside the worker.\n *\n * @param {Object} transmuxer the transmuxer to wire events on\n * @private\n */\n\n const wireTransmuxerEvents = function (self, transmuxer) {\n transmuxer.on('data', function (segment) {\n // transfer ownership of the underlying ArrayBuffer\n // instead of doing a copy to save memory\n // ArrayBuffers are transferable but generic TypedArrays are not\n // @link https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers#Passing_data_by_transferring_ownership_(transferable_objects)\n const initArray = segment.initSegment;\n segment.initSegment = {\n data: initArray.buffer,\n byteOffset: initArray.byteOffset,\n byteLength: initArray.byteLength\n };\n const typedArray = segment.data;\n segment.data = typedArray.buffer;\n self.postMessage({\n action: 'data',\n segment,\n byteOffset: typedArray.byteOffset,\n byteLength: typedArray.byteLength\n }, [segment.data]);\n });\n transmuxer.on('done', function (data) {\n self.postMessage({\n action: 'done'\n });\n });\n transmuxer.on('gopInfo', function (gopInfo) {\n self.postMessage({\n action: 'gopInfo',\n gopInfo\n });\n });\n transmuxer.on('videoSegmentTimingInfo', function (timingInfo) {\n const videoSegmentTimingInfo = {\n start: {\n decode: clock$2.videoTsToSeconds(timingInfo.start.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.start.pts)\n },\n end: {\n decode: clock$2.videoTsToSeconds(timingInfo.end.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.end.pts)\n },\n baseMediaDecodeTime: clock$2.videoTsToSeconds(timingInfo.baseMediaDecodeTime)\n };\n if (timingInfo.prependedContentDuration) {\n videoSegmentTimingInfo.prependedContentDuration = clock$2.videoTsToSeconds(timingInfo.prependedContentDuration);\n }\n self.postMessage({\n action: 'videoSegmentTimingInfo',\n videoSegmentTimingInfo\n });\n });\n transmuxer.on('audioSegmentTimingInfo', function (timingInfo) {\n // Note that all times for [audio/video]SegmentTimingInfo events are in video clock\n const audioSegmentTimingInfo = {\n start: {\n decode: clock$2.videoTsToSeconds(timingInfo.start.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.start.pts)\n },\n end: {\n decode: clock$2.videoTsToSeconds(timingInfo.end.dts),\n presentation: clock$2.videoTsToSeconds(timingInfo.end.pts)\n },\n baseMediaDecodeTime: clock$2.videoTsToSeconds(timingInfo.baseMediaDecodeTime)\n };\n if (timingInfo.prependedContentDuration) {\n audioSegmentTimingInfo.prependedContentDuration = clock$2.videoTsToSeconds(timingInfo.prependedContentDuration);\n }\n self.postMessage({\n action: 'audioSegmentTimingInfo',\n audioSegmentTimingInfo\n });\n });\n transmuxer.on('id3Frame', function (id3Frame) {\n self.postMessage({\n action: 'id3Frame',\n id3Frame\n });\n });\n transmuxer.on('caption', function (caption) {\n self.postMessage({\n action: 'caption',\n caption\n });\n });\n 
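// The handlers below re-emit purely informational transmuxer events (track info, audio and\n // video timing info, and log messages) as plain postMessage calls with no transferred buffers;\n // the timing values are converted from the 90kHz video clock to seconds via clock$2.videoTsToSeconds.\n // For illustration only, a main-thread consumer could route these messages by action, e.g.\n // worker.onmessage = (e) => { if (e.data.action === 'trackinfo') { /* ... */ } };\n 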
transmuxer.on('trackinfo', function (trackInfo) {\n self.postMessage({\n action: 'trackinfo',\n trackInfo\n });\n });\n transmuxer.on('audioTimingInfo', function (audioTimingInfo) {\n // convert to video TS since we prioritize video time over audio\n self.postMessage({\n action: 'audioTimingInfo',\n audioTimingInfo: {\n start: clock$2.videoTsToSeconds(audioTimingInfo.start),\n end: clock$2.videoTsToSeconds(audioTimingInfo.end)\n }\n });\n });\n transmuxer.on('videoTimingInfo', function (videoTimingInfo) {\n self.postMessage({\n action: 'videoTimingInfo',\n videoTimingInfo: {\n start: clock$2.videoTsToSeconds(videoTimingInfo.start),\n end: clock$2.videoTsToSeconds(videoTimingInfo.end)\n }\n });\n });\n transmuxer.on('log', function (log) {\n self.postMessage({\n action: 'log',\n log\n });\n });\n };\n /**\n * All incoming messages route through this hash. If no function exists\n * to handle an incoming message, then we ignore the message.\n *\n * @class MessageHandlers\n * @param {Object} options the options to initialize with\n */\n\n class MessageHandlers {\n constructor(self, options) {\n this.options = options || {};\n this.self = self;\n this.init();\n }\n /**\n * initialize our web worker and wire all the events.\n */\n\n init() {\n if (this.transmuxer) {\n this.transmuxer.dispose();\n }\n this.transmuxer = new transmuxer.Transmuxer(this.options);\n wireTransmuxerEvents(this.self, this.transmuxer);\n }\n pushMp4Captions(data) {\n if (!this.captionParser) {\n this.captionParser = new captionParser();\n this.captionParser.init();\n }\n const segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);\n const parsed = this.captionParser.parse(segment, data.trackIds, data.timescales);\n this.self.postMessage({\n action: 'mp4Captions',\n captions: parsed && parsed.captions || [],\n logs: parsed && parsed.logs || [],\n data: segment.buffer\n }, [segment.buffer]);\n }\n probeMp4StartTime({\n timescales,\n data\n }) {\n const startTime = probe$2.startTime(timescales, data);\n this.self.postMessage({\n action: 'probeMp4StartTime',\n startTime,\n data\n }, [data.buffer]);\n }\n probeMp4Tracks({\n data\n }) {\n const tracks = probe$2.tracks(data);\n this.self.postMessage({\n action: 'probeMp4Tracks',\n tracks,\n data\n }, [data.buffer]);\n }\n /**\n * Probes an mp4 segment for EMSG boxes containing ID3 data.\n * https://aomediacodec.github.io/id3-emsg/\n *\n * @param {Uint8Array} data segment data\n * @param {number} offset segment start time\n * @return {Object[]} an array of ID3 frames\n */\n\n probeEmsgID3({\n data,\n offset\n }) {\n const id3Frames = probe$2.getEmsgID3(data, offset);\n this.self.postMessage({\n action: 'probeEmsgID3',\n id3Frames,\n emsgData: data\n }, [data.buffer]);\n }\n /**\n * Probe an mpeg2-ts segment to determine the start time of the segment in it's\n * internal \"media time,\" as well as whether it contains video and/or audio.\n *\n * @private\n * @param {Uint8Array} bytes - segment bytes\n * @param {number} baseStartTime\n * Relative reference timestamp used when adjusting frame timestamps for rollover.\n * This value should be in seconds, as it's converted to a 90khz clock within the\n * function body.\n * @return {Object} The start time of the current segment in \"media time\" as well as\n * whether it contains video and/or audio\n */\n\n probeTs({\n data,\n baseStartTime\n }) {\n const tsStartTime = typeof baseStartTime === 'number' && !isNaN(baseStartTime) ? 
baseStartTime * clock$2.ONE_SECOND_IN_TS : void 0;\n const timeInfo = tsInspector.inspect(data, tsStartTime);\n let result = null;\n if (timeInfo) {\n result = {\n // each type's time info comes back as an array of 2 times, start and end\n hasVideo: timeInfo.video && timeInfo.video.length === 2 || false,\n hasAudio: timeInfo.audio && timeInfo.audio.length === 2 || false\n };\n if (result.hasVideo) {\n result.videoStart = timeInfo.video[0].ptsTime;\n }\n if (result.hasAudio) {\n result.audioStart = timeInfo.audio[0].ptsTime;\n }\n }\n this.self.postMessage({\n action: 'probeTs',\n result,\n data\n }, [data.buffer]);\n }\n clearAllMp4Captions() {\n if (this.captionParser) {\n this.captionParser.clearAllCaptions();\n }\n }\n clearParsedMp4Captions() {\n if (this.captionParser) {\n this.captionParser.clearParsedCaptions();\n }\n }\n /**\n * Adds data (a ts segment) to the start of the transmuxer pipeline for\n * processing.\n *\n * @param {ArrayBuffer} data data to push into the muxer\n */\n\n push(data) {\n // Cast array buffer to correct type for transmuxer\n const segment = new Uint8Array(data.data, data.byteOffset, data.byteLength);\n this.transmuxer.push(segment);\n }\n /**\n * Recreate the transmuxer so that the next segment added via `push`\n * start with a fresh transmuxer.\n */\n\n reset() {\n this.transmuxer.reset();\n }\n /**\n * Set the value that will be used as the `baseMediaDecodeTime` time for the\n * next segment pushed in. Subsequent segments will have their `baseMediaDecodeTime`\n * set relative to the first based on the PTS values.\n *\n * @param {Object} data used to set the timestamp offset in the muxer\n */\n\n setTimestampOffset(data) {\n const timestampOffset = data.timestampOffset || 0;\n this.transmuxer.setBaseMediaDecodeTime(Math.round(clock$2.secondsToVideoTs(timestampOffset)));\n }\n setAudioAppendStart(data) {\n this.transmuxer.setAudioAppendStart(Math.ceil(clock$2.secondsToVideoTs(data.appendStart)));\n }\n setRemux(data) {\n this.transmuxer.setRemux(data.remux);\n }\n /**\n * Forces the pipeline to finish processing the last segment and emit it's\n * results.\n *\n * @param {Object} data event data, not really used\n */\n\n flush(data) {\n this.transmuxer.flush(); // transmuxed done action is fired after both audio/video pipelines are flushed\n\n self.postMessage({\n action: 'done',\n type: 'transmuxed'\n });\n }\n endTimeline() {\n this.transmuxer.endTimeline(); // transmuxed endedtimeline action is fired after both audio/video pipelines end their\n // timelines\n\n self.postMessage({\n action: 'endedtimeline',\n type: 'transmuxed'\n });\n }\n alignGopsWith(data) {\n this.transmuxer.alignGopsWith(data.gopsToAlignWith.slice());\n }\n }\n /**\n * Our web worker interface so that things can talk to mux.js\n * that will be running in a web worker. 
the scope is passed to this by\n * webworkify.\n *\n * @param {Object} self the scope for the web worker\n */\n\n self.onmessage = function (event) {\n if (event.data.action === 'init' && event.data.options) {\n this.messageHandlers = new MessageHandlers(self, event.data.options);\n return;\n }\n if (!this.messageHandlers) {\n this.messageHandlers = new MessageHandlers(self);\n }\n if (event.data && event.data.action && event.data.action !== 'init') {\n if (this.messageHandlers[event.data.action]) {\n this.messageHandlers[event.data.action](event.data);\n }\n }\n };\n}));\nvar TransmuxWorker = factory(workerCode$1);\n/* rollup-plugin-worker-factory end for worker!/home/runner/work/http-streaming/http-streaming/src/transmuxer-worker.js */\n\nconst handleData_ = (event, transmuxedData, callback) => {\n const {\n type,\n initSegment,\n captions,\n captionStreams,\n metadata,\n videoFrameDtsTime,\n videoFramePtsTime\n } = event.data.segment;\n transmuxedData.buffer.push({\n captions,\n captionStreams,\n metadata\n });\n const boxes = event.data.segment.boxes || {\n data: event.data.segment.data\n };\n const result = {\n type,\n // cast ArrayBuffer to TypedArray\n data: new Uint8Array(boxes.data, boxes.data.byteOffset, boxes.data.byteLength),\n initSegment: new Uint8Array(initSegment.data, initSegment.byteOffset, initSegment.byteLength)\n };\n if (typeof videoFrameDtsTime !== 'undefined') {\n result.videoFrameDtsTime = videoFrameDtsTime;\n }\n if (typeof videoFramePtsTime !== 'undefined') {\n result.videoFramePtsTime = videoFramePtsTime;\n }\n callback(result);\n};\nconst handleDone_ = ({\n transmuxedData,\n callback\n}) => {\n // Previously we only returned data on data events,\n // not on done events. Clear out the buffer to keep that consistent.\n transmuxedData.buffer = []; // all buffers should have been flushed from the muxer, so start processing anything we\n // have received\n\n callback(transmuxedData);\n};\nconst handleGopInfo_ = (event, transmuxedData) => {\n transmuxedData.gopInfo = event.data.gopInfo;\n};\nconst processTransmux = options => {\n const {\n transmuxer,\n bytes,\n audioAppendStart,\n gopsToAlignWith,\n remux,\n onData,\n onTrackInfo,\n onAudioTimingInfo,\n onVideoTimingInfo,\n onVideoSegmentTimingInfo,\n onAudioSegmentTimingInfo,\n onId3,\n onCaptions,\n onDone,\n onEndedTimeline,\n onTransmuxerLog,\n isEndOfTimeline\n } = options;\n const transmuxedData = {\n buffer: []\n };\n let waitForEndedTimelineEvent = isEndOfTimeline;\n const handleMessage = event => {\n if (transmuxer.currentTransmux !== options) {\n // disposed\n return;\n }\n if (event.data.action === 'data') {\n handleData_(event, transmuxedData, onData);\n }\n if (event.data.action === 'trackinfo') {\n onTrackInfo(event.data.trackInfo);\n }\n if (event.data.action === 'gopInfo') {\n handleGopInfo_(event, transmuxedData);\n }\n if (event.data.action === 'audioTimingInfo') {\n onAudioTimingInfo(event.data.audioTimingInfo);\n }\n if (event.data.action === 'videoTimingInfo') {\n onVideoTimingInfo(event.data.videoTimingInfo);\n }\n if (event.data.action === 'videoSegmentTimingInfo') {\n onVideoSegmentTimingInfo(event.data.videoSegmentTimingInfo);\n }\n if (event.data.action === 'audioSegmentTimingInfo') {\n onAudioSegmentTimingInfo(event.data.audioSegmentTimingInfo);\n }\n if (event.data.action === 'id3Frame') {\n onId3([event.data.id3Frame], event.data.id3Frame.dispatchType);\n }\n if (event.data.action === 'caption') {\n onCaptions(event.data.caption);\n }\n if (event.data.action === 'endedtimeline') {\n 
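// the worker has signalled that both the audio and video pipelines ended their timelines;\n // clear the flag so the 'transmuxed' done handling further down can complete this transmux\n 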
waitForEndedTimelineEvent = false;\n onEndedTimeline();\n }\n if (event.data.action === 'log') {\n onTransmuxerLog(event.data.log);\n } // wait for the transmuxed event since we may have audio and video\n\n if (event.data.type !== 'transmuxed') {\n return;\n } // If the \"endedtimeline\" event has not yet fired, and this segment represents the end\n // of a timeline, that means there may still be data events before the segment\n // processing can be considerred complete. In that case, the final event should be\n // an \"endedtimeline\" event with the type \"transmuxed.\"\n\n if (waitForEndedTimelineEvent) {\n return;\n }\n transmuxer.onmessage = null;\n handleDone_({\n transmuxedData,\n callback: onDone\n });\n /* eslint-disable no-use-before-define */\n\n dequeue(transmuxer);\n /* eslint-enable */\n };\n\n transmuxer.onmessage = handleMessage;\n if (audioAppendStart) {\n transmuxer.postMessage({\n action: 'setAudioAppendStart',\n appendStart: audioAppendStart\n });\n } // allow empty arrays to be passed to clear out GOPs\n\n if (Array.isArray(gopsToAlignWith)) {\n transmuxer.postMessage({\n action: 'alignGopsWith',\n gopsToAlignWith\n });\n }\n if (typeof remux !== 'undefined') {\n transmuxer.postMessage({\n action: 'setRemux',\n remux\n });\n }\n if (bytes.byteLength) {\n const buffer = bytes instanceof ArrayBuffer ? bytes : bytes.buffer;\n const byteOffset = bytes instanceof ArrayBuffer ? 0 : bytes.byteOffset;\n transmuxer.postMessage({\n action: 'push',\n // Send the typed-array of data as an ArrayBuffer so that\n // it can be sent as a \"Transferable\" and avoid the costly\n // memory copy\n data: buffer,\n // To recreate the original typed-array, we need information\n // about what portion of the ArrayBuffer it was a view into\n byteOffset,\n byteLength: bytes.byteLength\n }, [buffer]);\n }\n if (isEndOfTimeline) {\n transmuxer.postMessage({\n action: 'endTimeline'\n });\n } // even if we didn't push any bytes, we have to make sure we flush in case we reached\n // the end of the segment\n\n transmuxer.postMessage({\n action: 'flush'\n });\n};\nconst dequeue = transmuxer => {\n transmuxer.currentTransmux = null;\n if (transmuxer.transmuxQueue.length) {\n transmuxer.currentTransmux = transmuxer.transmuxQueue.shift();\n if (typeof transmuxer.currentTransmux === 'function') {\n transmuxer.currentTransmux();\n } else {\n processTransmux(transmuxer.currentTransmux);\n }\n }\n};\nconst processAction = (transmuxer, action) => {\n transmuxer.postMessage({\n action\n });\n dequeue(transmuxer);\n};\nconst enqueueAction = (action, transmuxer) => {\n if (!transmuxer.currentTransmux) {\n transmuxer.currentTransmux = action;\n processAction(transmuxer, action);\n return;\n }\n transmuxer.transmuxQueue.push(processAction.bind(null, transmuxer, action));\n};\nconst reset = transmuxer => {\n enqueueAction('reset', transmuxer);\n};\nconst endTimeline = transmuxer => {\n enqueueAction('endTimeline', transmuxer);\n};\nconst transmux = options => {\n if (!options.transmuxer.currentTransmux) {\n options.transmuxer.currentTransmux = options;\n processTransmux(options);\n return;\n }\n options.transmuxer.transmuxQueue.push(options);\n};\nconst createTransmuxer = options => {\n const transmuxer = new TransmuxWorker();\n transmuxer.currentTransmux = null;\n transmuxer.transmuxQueue = [];\n const term = transmuxer.terminate;\n transmuxer.terminate = () => {\n transmuxer.currentTransmux = null;\n transmuxer.transmuxQueue.length = 0;\n return term.call(transmuxer);\n };\n transmuxer.postMessage({\n action: 
'init',\n options\n });\n return transmuxer;\n};\nvar segmentTransmuxer = {\n reset,\n endTimeline,\n transmux,\n createTransmuxer\n};\nconst workerCallback = function (options) {\n const transmuxer = options.transmuxer;\n const endAction = options.endAction || options.action;\n const callback = options.callback;\n const message = _extends({}, options, {\n endAction: null,\n transmuxer: null,\n callback: null\n });\n const listenForEndEvent = event => {\n if (event.data.action !== endAction) {\n return;\n }\n transmuxer.removeEventListener('message', listenForEndEvent); // transfer ownership of bytes back to us.\n\n if (event.data.data) {\n event.data.data = new Uint8Array(event.data.data, options.byteOffset || 0, options.byteLength || event.data.data.byteLength);\n if (options.data) {\n options.data = event.data.data;\n }\n }\n callback(event.data);\n };\n transmuxer.addEventListener('message', listenForEndEvent);\n if (options.data) {\n const isArrayBuffer = options.data instanceof ArrayBuffer;\n message.byteOffset = isArrayBuffer ? 0 : options.data.byteOffset;\n message.byteLength = options.data.byteLength;\n const transfers = [isArrayBuffer ? options.data : options.data.buffer];\n transmuxer.postMessage(message, transfers);\n } else {\n transmuxer.postMessage(message);\n }\n};\nconst REQUEST_ERRORS = {\n FAILURE: 2,\n TIMEOUT: -101,\n ABORTED: -102\n};\n/**\n * Abort all requests\n *\n * @param {Object} activeXhrs - an object that tracks all XHR requests\n */\n\nconst abortAll = activeXhrs => {\n activeXhrs.forEach(xhr => {\n xhr.abort();\n });\n};\n/**\n * Gather important bandwidth stats once a request has completed\n *\n * @param {Object} request - the XHR request from which to gather stats\n */\n\nconst getRequestStats = request => {\n return {\n bandwidth: request.bandwidth,\n bytesReceived: request.bytesReceived || 0,\n roundTripTime: request.roundTripTime || 0\n };\n};\n/**\n * If possible gather bandwidth stats as a request is in\n * progress\n *\n * @param {Event} progressEvent - an event object from an XHR's progress event\n */\n\nconst getProgressStats = progressEvent => {\n const request = progressEvent.target;\n const roundTripTime = Date.now() - request.requestTime;\n const stats = {\n bandwidth: Infinity,\n bytesReceived: 0,\n roundTripTime: roundTripTime || 0\n };\n stats.bytesReceived = progressEvent.loaded; // This can result in Infinity if stats.roundTripTime is 0 but that is ok\n // because we should only use bandwidth stats on progress to determine when\n // abort a request early due to insufficient bandwidth\n\n stats.bandwidth = Math.floor(stats.bytesReceived / stats.roundTripTime * 8 * 1000);\n return stats;\n};\n/**\n * Handle all error conditions in one place and return an object\n * with all the information\n *\n * @param {Error|null} error - if non-null signals an error occured with the XHR\n * @param {Object} request - the XHR request that possibly generated the error\n */\n\nconst handleErrors = (error, request) => {\n if (request.timedout) {\n return {\n status: request.status,\n message: 'HLS request timed-out at URL: ' + request.uri,\n code: REQUEST_ERRORS.TIMEOUT,\n xhr: request\n };\n }\n if (request.aborted) {\n return {\n status: request.status,\n message: 'HLS request aborted at URL: ' + request.uri,\n code: REQUEST_ERRORS.ABORTED,\n xhr: request\n };\n }\n if (error) {\n return {\n status: request.status,\n message: 'HLS request errored at URL: ' + request.uri,\n code: REQUEST_ERRORS.FAILURE,\n xhr: request\n };\n }\n if (request.responseType === 
'arraybuffer' && request.response.byteLength === 0) {\n return {\n status: request.status,\n message: 'Empty HLS response at URL: ' + request.uri,\n code: REQUEST_ERRORS.FAILURE,\n xhr: request\n };\n }\n return null;\n};\n/**\n * Handle responses for key data and convert the key data to the correct format\n * for the decryption step later\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Array} objects - objects to add the key bytes to.\n * @param {Function} finishProcessingFn - a callback to execute to continue processing\n * this request\n */\n\nconst handleKeyResponse = (segment, objects, finishProcessingFn) => (error, request) => {\n const response = request.response;\n const errorObj = handleErrors(error, request);\n if (errorObj) {\n return finishProcessingFn(errorObj, segment);\n }\n if (response.byteLength !== 16) {\n return finishProcessingFn({\n status: request.status,\n message: 'Invalid HLS key at URL: ' + request.uri,\n code: REQUEST_ERRORS.FAILURE,\n xhr: request\n }, segment);\n }\n const view = new DataView(response);\n const bytes = new Uint32Array([view.getUint32(0), view.getUint32(4), view.getUint32(8), view.getUint32(12)]);\n for (let i = 0; i < objects.length; i++) {\n objects[i].bytes = bytes;\n }\n return finishProcessingFn(null, segment);\n};\nconst parseInitSegment = (segment, callback) => {\n const type = detectContainerForBytes(segment.map.bytes); // TODO: We should also handle ts init segments here, but we\n // only know how to parse mp4 init segments at the moment\n\n if (type !== 'mp4') {\n const uri = segment.map.resolvedUri || segment.map.uri;\n const mediaType = type || 'unknown';\n return callback({\n internal: true,\n message: `Found unsupported ${mediaType} container for initialization segment at URL: ${uri}`,\n code: REQUEST_ERRORS.FAILURE,\n metadata: {\n errorType: videojs.Error.UnsupportedMediaInitialization,\n mediaType\n }\n });\n }\n workerCallback({\n action: 'probeMp4Tracks',\n data: segment.map.bytes,\n transmuxer: segment.transmuxer,\n callback: ({\n tracks,\n data\n }) => {\n // transfer bytes back to us\n segment.map.bytes = data;\n tracks.forEach(function (track) {\n segment.map.tracks = segment.map.tracks || {}; // only support one track of each type for now\n\n if (segment.map.tracks[track.type]) {\n return;\n }\n segment.map.tracks[track.type] = track;\n if (typeof track.id === 'number' && track.timescale) {\n segment.map.timescales = segment.map.timescales || {};\n segment.map.timescales[track.id] = track.timescale;\n }\n });\n return callback(null);\n }\n });\n};\n/**\n * Handle init-segment responses\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} finishProcessingFn - a callback to execute to continue processing\n * this request\n */\n\nconst handleInitSegmentResponse = ({\n segment,\n finishProcessingFn\n}) => (error, request) => {\n const errorObj = handleErrors(error, request);\n if (errorObj) {\n return finishProcessingFn(errorObj, segment);\n }\n const bytes = new Uint8Array(request.response); // init segment is encypted, we will have to wait\n // until the key request is done to decrypt.\n\n if (segment.map.key) {\n segment.map.encryptedBytes = bytes;\n return finishProcessingFn(null, segment);\n }\n segment.map.bytes = bytes;\n parseInitSegment(segment, function (parseError) {\n if (parseError) {\n parseError.xhr = request;\n parseError.status = request.status;\n return 
finishProcessingFn(parseError, segment);\n }\n finishProcessingFn(null, segment);\n });\n};\n/**\n * Response handler for segment-requests being sure to set the correct\n * property depending on whether the segment is encryped or not\n * Also records and keeps track of stats that are used for ABR purposes\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} finishProcessingFn - a callback to execute to continue processing\n * this request\n */\n\nconst handleSegmentResponse = ({\n segment,\n finishProcessingFn,\n responseType\n}) => (error, request) => {\n const errorObj = handleErrors(error, request);\n if (errorObj) {\n return finishProcessingFn(errorObj, segment);\n }\n const newBytes =\n // although responseText \"should\" exist, this guard serves to prevent an error being\n // thrown for two primary cases:\n // 1. the mime type override stops working, or is not implemented for a specific\n // browser\n // 2. when using mock XHR libraries like sinon that do not allow the override behavior\n responseType === 'arraybuffer' || !request.responseText ? request.response : stringToArrayBuffer(request.responseText.substring(segment.lastReachedChar || 0));\n segment.stats = getRequestStats(request);\n if (segment.key) {\n segment.encryptedBytes = new Uint8Array(newBytes);\n } else {\n segment.bytes = new Uint8Array(newBytes);\n }\n return finishProcessingFn(null, segment);\n};\nconst transmuxAndNotify = ({\n segment,\n bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n}) => {\n const fmp4Tracks = segment.map && segment.map.tracks || {};\n const isMuxed = Boolean(fmp4Tracks.audio && fmp4Tracks.video); // Keep references to each function so we can null them out after we're done with them.\n // One reason for this is that in the case of full segments, we want to trust start\n // times from the probe, rather than the transmuxer.\n\n let audioStartFn = timingInfoFn.bind(null, segment, 'audio', 'start');\n const audioEndFn = timingInfoFn.bind(null, segment, 'audio', 'end');\n let videoStartFn = timingInfoFn.bind(null, segment, 'video', 'start');\n const videoEndFn = timingInfoFn.bind(null, segment, 'video', 'end');\n const finish = () => transmux({\n bytes,\n transmuxer: segment.transmuxer,\n audioAppendStart: segment.audioAppendStart,\n gopsToAlignWith: segment.gopsToAlignWith,\n remux: isMuxed,\n onData: result => {\n result.type = result.type === 'combined' ? 
'video' : result.type;\n dataFn(segment, result);\n },\n onTrackInfo: trackInfo => {\n if (trackInfoFn) {\n if (isMuxed) {\n trackInfo.isMuxed = true;\n }\n trackInfoFn(segment, trackInfo);\n }\n },\n onAudioTimingInfo: audioTimingInfo => {\n // we only want the first start value we encounter\n if (audioStartFn && typeof audioTimingInfo.start !== 'undefined') {\n audioStartFn(audioTimingInfo.start);\n audioStartFn = null;\n } // we want to continually update the end time\n\n if (audioEndFn && typeof audioTimingInfo.end !== 'undefined') {\n audioEndFn(audioTimingInfo.end);\n }\n },\n onVideoTimingInfo: videoTimingInfo => {\n // we only want the first start value we encounter\n if (videoStartFn && typeof videoTimingInfo.start !== 'undefined') {\n videoStartFn(videoTimingInfo.start);\n videoStartFn = null;\n } // we want to continually update the end time\n\n if (videoEndFn && typeof videoTimingInfo.end !== 'undefined') {\n videoEndFn(videoTimingInfo.end);\n }\n },\n onVideoSegmentTimingInfo: videoSegmentTimingInfo => {\n videoSegmentTimingInfoFn(videoSegmentTimingInfo);\n },\n onAudioSegmentTimingInfo: audioSegmentTimingInfo => {\n audioSegmentTimingInfoFn(audioSegmentTimingInfo);\n },\n onId3: (id3Frames, dispatchType) => {\n id3Fn(segment, id3Frames, dispatchType);\n },\n onCaptions: captions => {\n captionsFn(segment, [captions]);\n },\n isEndOfTimeline,\n onEndedTimeline: () => {\n endedTimelineFn();\n },\n onTransmuxerLog,\n onDone: result => {\n if (!doneFn) {\n return;\n }\n result.type = result.type === 'combined' ? 'video' : result.type;\n doneFn(null, segment, result);\n }\n }); // In the transmuxer, we don't yet have the ability to extract a \"proper\" start time.\n // Meaning cached frame data may corrupt our notion of where this segment\n // really starts. To get around this, probe for the info needed.\n\n workerCallback({\n action: 'probeTs',\n transmuxer: segment.transmuxer,\n data: bytes,\n baseStartTime: segment.baseStartTime,\n callback: data => {\n segment.bytes = bytes = data.data;\n const probeResult = data.result;\n if (probeResult) {\n trackInfoFn(segment, {\n hasAudio: probeResult.hasAudio,\n hasVideo: probeResult.hasVideo,\n isMuxed\n });\n trackInfoFn = null;\n }\n finish();\n }\n });\n};\nconst handleSegmentBytes = ({\n segment,\n bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n}) => {\n let bytesAsUint8Array = new Uint8Array(bytes); // TODO:\n // We should have a handler that fetches the number of bytes required\n // to check if something is fmp4. 
This will allow us to save bandwidth\n // because we can only exclude a playlist and abort requests\n // by codec after trackinfo triggers.\n\n if (isLikelyFmp4MediaSegment(bytesAsUint8Array)) {\n segment.isFmp4 = true;\n const {\n tracks\n } = segment.map;\n const trackInfo = {\n isFmp4: true,\n hasVideo: !!tracks.video,\n hasAudio: !!tracks.audio\n }; // if we have a audio track, with a codec that is not set to\n // encrypted audio\n\n if (tracks.audio && tracks.audio.codec && tracks.audio.codec !== 'enca') {\n trackInfo.audioCodec = tracks.audio.codec;\n } // if we have a video track, with a codec that is not set to\n // encrypted video\n\n if (tracks.video && tracks.video.codec && tracks.video.codec !== 'encv') {\n trackInfo.videoCodec = tracks.video.codec;\n }\n if (tracks.video && tracks.audio) {\n trackInfo.isMuxed = true;\n } // since we don't support appending fmp4 data on progress, we know we have the full\n // segment here\n\n trackInfoFn(segment, trackInfo); // The probe doesn't provide the segment end time, so only callback with the start\n // time. The end time can be roughly calculated by the receiver using the duration.\n //\n // Note that the start time returned by the probe reflects the baseMediaDecodeTime, as\n // that is the true start of the segment (where the playback engine should begin\n // decoding).\n\n const finishLoading = (captions, id3Frames) => {\n // if the track still has audio at this point it is only possible\n // for it to be audio only. See `tracks.video && tracks.audio` if statement\n // above.\n // we make sure to use segment.bytes here as that\n dataFn(segment, {\n data: bytesAsUint8Array,\n type: trackInfo.hasAudio && !trackInfo.isMuxed ? 'audio' : 'video'\n });\n if (id3Frames && id3Frames.length) {\n id3Fn(segment, id3Frames);\n }\n if (captions && captions.length) {\n captionsFn(segment, captions);\n }\n doneFn(null, segment, {});\n };\n workerCallback({\n action: 'probeMp4StartTime',\n timescales: segment.map.timescales,\n data: bytesAsUint8Array,\n transmuxer: segment.transmuxer,\n callback: ({\n data,\n startTime\n }) => {\n // transfer bytes back to us\n bytes = data.buffer;\n segment.bytes = bytesAsUint8Array = data;\n if (trackInfo.hasAudio && !trackInfo.isMuxed) {\n timingInfoFn(segment, 'audio', 'start', startTime);\n }\n if (trackInfo.hasVideo) {\n timingInfoFn(segment, 'video', 'start', startTime);\n }\n workerCallback({\n action: 'probeEmsgID3',\n data: bytesAsUint8Array,\n transmuxer: segment.transmuxer,\n offset: startTime,\n callback: ({\n emsgData,\n id3Frames\n }) => {\n // transfer bytes back to us\n bytes = emsgData.buffer;\n segment.bytes = bytesAsUint8Array = emsgData; // Run through the CaptionParser in case there are captions.\n // Initialize CaptionParser if it hasn't been yet\n\n if (!tracks.video || !emsgData.byteLength || !segment.transmuxer) {\n finishLoading(undefined, id3Frames);\n return;\n }\n workerCallback({\n action: 'pushMp4Captions',\n endAction: 'mp4Captions',\n transmuxer: segment.transmuxer,\n data: bytesAsUint8Array,\n timescales: segment.map.timescales,\n trackIds: [tracks.video.id],\n callback: message => {\n // transfer bytes back to us\n bytes = message.data.buffer;\n segment.bytes = bytesAsUint8Array = message.data;\n message.logs.forEach(function (log) {\n onTransmuxerLog(merge(log, {\n stream: 'mp4CaptionParser'\n }));\n });\n finishLoading(message.captions, id3Frames);\n }\n });\n }\n });\n }\n });\n return;\n } // VTT or other segments that don't need processing\n\n if (!segment.transmuxer) {\n 
doneFn(null, segment, {});\n return;\n }\n if (typeof segment.container === 'undefined') {\n segment.container = detectContainerForBytes(bytesAsUint8Array);\n }\n if (segment.container !== 'ts' && segment.container !== 'aac') {\n trackInfoFn(segment, {\n hasAudio: false,\n hasVideo: false\n });\n doneFn(null, segment, {});\n return;\n } // ts or aac\n\n transmuxAndNotify({\n segment,\n bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n });\n};\nconst decrypt = function ({\n id,\n key,\n encryptedBytes,\n decryptionWorker\n}, callback) {\n const decryptionHandler = event => {\n if (event.data.source === id) {\n decryptionWorker.removeEventListener('message', decryptionHandler);\n const decrypted = event.data.decrypted;\n callback(new Uint8Array(decrypted.bytes, decrypted.byteOffset, decrypted.byteLength));\n }\n };\n decryptionWorker.addEventListener('message', decryptionHandler);\n let keyBytes;\n if (key.bytes.slice) {\n keyBytes = key.bytes.slice();\n } else {\n keyBytes = new Uint32Array(Array.prototype.slice.call(key.bytes));\n } // incrementally decrypt the bytes\n\n decryptionWorker.postMessage(createTransferableMessage({\n source: id,\n encrypted: encryptedBytes,\n key: keyBytes,\n iv: key.iv\n }), [encryptedBytes.buffer, keyBytes.buffer]);\n};\n/**\n * Decrypt the segment via the decryption web worker\n *\n * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128 decryption\n * routines\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that is executed when segment bytes are available\n * and ready to use\n * @param {Function} doneFn - a callback that is executed after decryption has completed\n */\n\nconst decryptSegment = ({\n decryptionWorker,\n segment,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n}) => {\n decrypt({\n id: segment.requestId,\n key: segment.key,\n encryptedBytes: segment.encryptedBytes,\n decryptionWorker\n }, decryptedBytes => {\n segment.bytes = decryptedBytes;\n handleSegmentBytes({\n segment,\n bytes: segment.bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n });\n });\n};\n/**\n * This function waits for all XHRs to finish (with either success or failure)\n * before continueing processing via it's callback. 
The function gathers errors\n * from each request into a single errors array so that the error status for\n * each request can be examined later.\n *\n * @param {Object} activeXhrs - an object that tracks all XHR requests\n * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128 decryption\n * routines\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} id3Fn - a callback that receives ID3 metadata\n * @param {Function} captionsFn - a callback that receives captions\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that is executed when segment bytes are available\n * and ready to use\n * @param {Function} doneFn - a callback that is executed after all resources have been\n * downloaded and any decryption completed\n */\n\nconst waitForCompletion = ({\n activeXhrs,\n decryptionWorker,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n}) => {\n let count = 0;\n let didError = false;\n return (error, segment) => {\n if (didError) {\n return;\n }\n if (error) {\n didError = true; // If there are errors, we have to abort any outstanding requests\n\n abortAll(activeXhrs); // Even though the requests above are aborted, and in theory we could wait until we\n // handle the aborted events from those requests, there are some cases where we may\n // never get an aborted event. For instance, if the network connection is lost and\n // there were two requests, the first may have triggered an error immediately, while\n // the second request remains unsent. 
In that case, the aborted algorithm will not\n // trigger an abort: see https://xhr.spec.whatwg.org/#the-abort()-method\n //\n // We also can't rely on the ready state of the XHR, since the request that\n // triggered the connection error may also show as a ready state of 0 (unsent).\n // Therefore, we have to finish this group of requests immediately after the first\n // seen error.\n\n return doneFn(error, segment);\n }\n count += 1;\n if (count === activeXhrs.length) {\n const segmentFinish = function () {\n if (segment.encryptedBytes) {\n return decryptSegment({\n decryptionWorker,\n segment,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n });\n } // Otherwise, everything is ready just continue\n\n handleSegmentBytes({\n segment,\n bytes: segment.bytes,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n });\n }; // Keep track of when *all* of the requests have completed\n\n segment.endOfAllRequests = Date.now();\n if (segment.map && segment.map.encryptedBytes && !segment.map.bytes) {\n return decrypt({\n decryptionWorker,\n // add -init to the \"id\" to differentiate between segment\n // and init segment decryption, just in case they happen\n // at the same time at some point in the future.\n id: segment.requestId + '-init',\n encryptedBytes: segment.map.encryptedBytes,\n key: segment.map.key\n }, decryptedBytes => {\n segment.map.bytes = decryptedBytes;\n parseInitSegment(segment, parseError => {\n if (parseError) {\n abortAll(activeXhrs);\n return doneFn(parseError, segment);\n }\n segmentFinish();\n });\n });\n }\n segmentFinish();\n }\n };\n};\n/**\n * Calls the abort callback if any request within the batch was aborted. 
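 *
 * For illustration (not part of the library), a sketch of how the shared loadendState
 * object is meant to be used: every listener created by handleLoadEnd for a batch shares
 * the same state object, so abortFn runs at most once per batch (xhrA, xhrB and abortFn
 * here are hypothetical):
 *
 *   const loadendState = {};
 *   const abortFn = () => console.log('batch aborted');
 *   xhrA.addEventListener('loadend', handleLoadEnd({ loadendState, abortFn }));
 *   xhrB.addEventListener('loadend', handleLoadEnd({ loadendState, abortFn }));
 *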
Will only call\n * the callback once per batch of requests, even if multiple were aborted.\n *\n * @param {Object} loadendState - state to check to see if the abort function was called\n * @param {Function} abortFn - callback to call for abort\n */\n\nconst handleLoadEnd = ({\n loadendState,\n abortFn\n}) => event => {\n const request = event.target;\n if (request.aborted && abortFn && !loadendState.calledAbortFn) {\n abortFn();\n loadendState.calledAbortFn = true;\n }\n};\n/**\n * Simple progress event callback handler that gathers some stats before\n * executing a provided callback with the `segment` object\n *\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} progressFn - a callback that is executed each time a progress event\n * is received\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that is executed when segment bytes are available\n * and ready to use\n * @param {Event} event - the progress event object from XMLHttpRequest\n */\n\nconst handleProgress = ({\n segment,\n progressFn,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn\n}) => event => {\n const request = event.target;\n if (request.aborted) {\n return;\n }\n segment.stats = merge(segment.stats, getProgressStats(event)); // record the time that we receive the first byte of data\n\n if (!segment.stats.firstBytesReceivedAt && segment.stats.bytesReceived) {\n segment.stats.firstBytesReceivedAt = Date.now();\n }\n return progressFn(event, segment);\n};\n/**\n * Load all resources and does any processing necessary for a media-segment\n *\n * Features:\n * decrypts the media-segment if it has a key uri and an iv\n * aborts *all* requests if *any* one request fails\n *\n * The segment object, at minimum, has the following format:\n * {\n * resolvedUri: String,\n * [transmuxer]: Object,\n * [byterange]: {\n * offset: Number,\n * length: Number\n * },\n * [key]: {\n * resolvedUri: String\n * [byterange]: {\n * offset: Number,\n * length: Number\n * },\n * iv: {\n * bytes: Uint32Array\n * }\n * },\n * [map]: {\n * resolvedUri: String,\n * [byterange]: {\n * offset: Number,\n * length: Number\n * },\n * [bytes]: Uint8Array\n * }\n * }\n * ...where [name] denotes optional properties\n *\n * @param {Function} xhr - an instance of the xhr wrapper in xhr.js\n * @param {Object} xhrOptions - the base options to provide to all xhr requests\n * @param {WebWorker} decryptionWorker - a WebWorker interface to AES-128\n * decryption routines\n * @param {Object} segment - a simplified copy of the segmentInfo object\n * from SegmentLoader\n * @param {Function} abortFn - a callback called (only once) if any piece of a request was\n * aborted\n * @param {Function} progressFn - a 
callback that receives progress events from the main\n * segment's xhr request\n * @param {Function} trackInfoFn - a callback that receives track info\n * @param {Function} timingInfoFn - a callback that receives timing info\n * @param {Function} videoSegmentTimingInfoFn\n * a callback that receives video timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} audioSegmentTimingInfoFn\n * a callback that receives audio timing info based on media times and\n * any adjustments made by the transmuxer\n * @param {Function} id3Fn - a callback that receives ID3 metadata\n * @param {Function} captionsFn - a callback that receives captions\n * @param {boolean} isEndOfTimeline\n * true if this segment represents the last segment in a timeline\n * @param {Function} endedTimelineFn\n * a callback made when a timeline is ended, will only be called if\n * isEndOfTimeline is true\n * @param {Function} dataFn - a callback that receives data from the main segment's xhr\n * request, transmuxed if needed\n * @param {Function} doneFn - a callback that is executed only once all requests have\n * succeeded or failed\n * @return {Function} a function that, when invoked, immediately aborts all\n * outstanding requests\n */\n\nconst mediaSegmentRequest = ({\n xhr,\n xhrOptions,\n decryptionWorker,\n segment,\n abortFn,\n progressFn,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n}) => {\n const activeXhrs = [];\n const finishProcessingFn = waitForCompletion({\n activeXhrs,\n decryptionWorker,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn,\n doneFn,\n onTransmuxerLog\n }); // optionally, request the decryption key\n\n if (segment.key && !segment.key.bytes) {\n const objects = [segment.key];\n if (segment.map && !segment.map.bytes && segment.map.key && segment.map.key.resolvedUri === segment.key.resolvedUri) {\n objects.push(segment.map.key);\n }\n const keyRequestOptions = merge(xhrOptions, {\n uri: segment.key.resolvedUri,\n responseType: 'arraybuffer',\n requestType: 'segment-key'\n });\n const keyRequestCallback = handleKeyResponse(segment, objects, finishProcessingFn);\n const keyXhr = xhr(keyRequestOptions, keyRequestCallback);\n activeXhrs.push(keyXhr);\n } // optionally, request the associated media init segment\n\n if (segment.map && !segment.map.bytes) {\n const differentMapKey = segment.map.key && (!segment.key || segment.key.resolvedUri !== segment.map.key.resolvedUri);\n if (differentMapKey) {\n const mapKeyRequestOptions = merge(xhrOptions, {\n uri: segment.map.key.resolvedUri,\n responseType: 'arraybuffer',\n requestType: 'segment-key'\n });\n const mapKeyRequestCallback = handleKeyResponse(segment, [segment.map.key], finishProcessingFn);\n const mapKeyXhr = xhr(mapKeyRequestOptions, mapKeyRequestCallback);\n activeXhrs.push(mapKeyXhr);\n }\n const initSegmentOptions = merge(xhrOptions, {\n uri: segment.map.resolvedUri,\n responseType: 'arraybuffer',\n headers: segmentXhrHeaders(segment.map),\n requestType: 'segment-media-initialization'\n });\n const initSegmentRequestCallback = handleInitSegmentResponse({\n segment,\n finishProcessingFn\n });\n const initSegmentXhr = xhr(initSegmentOptions, initSegmentRequestCallback);\n activeXhrs.push(initSegmentXhr);\n }\n const segmentRequestOptions = merge(xhrOptions, 
{\n uri: segment.part && segment.part.resolvedUri || segment.resolvedUri,\n responseType: 'arraybuffer',\n headers: segmentXhrHeaders(segment),\n requestType: 'segment'\n });\n const segmentRequestCallback = handleSegmentResponse({\n segment,\n finishProcessingFn,\n responseType: segmentRequestOptions.responseType\n });\n const segmentXhr = xhr(segmentRequestOptions, segmentRequestCallback);\n segmentXhr.addEventListener('progress', handleProgress({\n segment,\n progressFn,\n trackInfoFn,\n timingInfoFn,\n videoSegmentTimingInfoFn,\n audioSegmentTimingInfoFn,\n id3Fn,\n captionsFn,\n isEndOfTimeline,\n endedTimelineFn,\n dataFn\n }));\n activeXhrs.push(segmentXhr); // since all parts of the request must be considered, but should not make callbacks\n // multiple times, provide a shared state object\n\n const loadendState = {};\n activeXhrs.forEach(activeXhr => {\n activeXhr.addEventListener('loadend', handleLoadEnd({\n loadendState,\n abortFn\n }));\n });\n return () => abortAll(activeXhrs);\n};\n\n/**\n * @file - codecs.js - Handles tasks regarding codec strings such as translating them to\n * codec strings, or translating codec strings into objects that can be examined.\n */\nconst logFn$1 = logger('CodecUtils');\n/**\n * Returns a set of codec strings parsed from the playlist or the default\n * codec strings if no codecs were specified in the playlist\n *\n * @param {Playlist} media the current media playlist\n * @return {Object} an object with the video and audio codecs\n */\n\nconst getCodecs = function (media) {\n // if the codecs were explicitly specified, use them instead of the\n // defaults\n const mediaAttributes = media.attributes || {};\n if (mediaAttributes.CODECS) {\n return parseCodecs(mediaAttributes.CODECS);\n }\n};\nconst isMaat = (main, media) => {\n const mediaAttributes = media.attributes || {};\n return main && main.mediaGroups && main.mediaGroups.AUDIO && mediaAttributes.AUDIO && main.mediaGroups.AUDIO[mediaAttributes.AUDIO];\n};\nconst isMuxed = (main, media) => {\n if (!isMaat(main, media)) {\n return true;\n }\n const mediaAttributes = media.attributes || {};\n const audioGroup = main.mediaGroups.AUDIO[mediaAttributes.AUDIO];\n for (const groupId in audioGroup) {\n // If an audio group has a URI (the case for HLS, as HLS will use external playlists),\n // or there are listed playlists (the case for DASH, as the manifest will have already\n // provided all of the details necessary to generate the audio playlist, as opposed to\n // HLS' externally requested playlists), then the content is demuxed.\n if (!audioGroup[groupId].uri && !audioGroup[groupId].playlists) {\n return true;\n }\n }\n return false;\n};\nconst unwrapCodecList = function (codecList) {\n const codecs = {};\n codecList.forEach(({\n mediaType,\n type,\n details\n }) => {\n codecs[mediaType] = codecs[mediaType] || [];\n codecs[mediaType].push(translateLegacyCodec(`${type}${details}`));\n });\n Object.keys(codecs).forEach(function (mediaType) {\n if (codecs[mediaType].length > 1) {\n logFn$1(`multiple ${mediaType} codecs found as attributes: ${codecs[mediaType].join(', ')}. 
Setting playlist codecs to null so that we wait for mux.js to probe segments for real codecs.`);\n codecs[mediaType] = null;\n return;\n }\n codecs[mediaType] = codecs[mediaType][0];\n });\n return codecs;\n};\nconst codecCount = function (codecObj) {\n let count = 0;\n if (codecObj.audio) {\n count++;\n }\n if (codecObj.video) {\n count++;\n }\n return count;\n};\n/**\n * Calculates the codec strings for a working configuration of\n * SourceBuffers to play variant streams in a main playlist. If\n * there is no possible working configuration, an empty object will be\n * returned.\n *\n * @param main {Object} the m3u8 object for the main playlist\n * @param media {Object} the m3u8 object for the variant playlist\n * @return {Object} the codec strings.\n *\n * @private\n */\n\nconst codecsForPlaylist = function (main, media) {\n const mediaAttributes = media.attributes || {};\n const codecInfo = unwrapCodecList(getCodecs(media) || []); // HLS with multiple-audio tracks must always get an audio codec.\n // Put another way, there is no way to have a video-only multiple-audio HLS!\n\n if (isMaat(main, media) && !codecInfo.audio) {\n if (!isMuxed(main, media)) {\n // It is possible for codecs to be specified on the audio media group playlist but\n // not on the rendition playlist. This is mostly the case for DASH, where audio and\n // video are always separate (and separately specified).\n const defaultCodecs = unwrapCodecList(codecsFromDefault(main, mediaAttributes.AUDIO) || []);\n if (defaultCodecs.audio) {\n codecInfo.audio = defaultCodecs.audio;\n }\n }\n }\n return codecInfo;\n};\nconst logFn = logger('PlaylistSelector');\nconst representationToString = function (representation) {\n if (!representation || !representation.playlist) {\n return;\n }\n const playlist = representation.playlist;\n return JSON.stringify({\n id: playlist.id,\n bandwidth: representation.bandwidth,\n width: representation.width,\n height: representation.height,\n codecs: playlist.attributes && playlist.attributes.CODECS || ''\n });\n}; // Utilities\n\n/**\n * Returns the CSS value for the specified property on an element\n * using `getComputedStyle`. Firefox has a long-standing issue where\n * getComputedStyle() may return null when running in an iframe with\n * `display: none`.\n *\n * @see https://bugzilla.mozilla.org/show_bug.cgi?id=548397\n * @param {HTMLElement} el the HTMLElement to work on\n * @param {string} property the property to get the style for\n */\n\nconst safeGetComputedStyle = function (el, property) {\n if (!el) {\n return '';\n }\n const result = window$1.getComputedStyle(el);\n if (!result) {\n return '';\n }\n return result[property];\n};\n/**\n * Reusable stable sort function\n *\n * @param {Playlists} array\n * @param {Function} sortFn Different comparators\n * @function stableSort\n */\n\nconst stableSort = function (array, sortFn) {\n const newArray = array.slice();\n array.sort(function (left, right) {\n const cmp = sortFn(left, right);\n if (cmp === 0) {\n return newArray.indexOf(left) - newArray.indexOf(right);\n }\n return cmp;\n });\n};\n/**\n * A comparator function to sort two playlist objects by bandwidth.\n *\n * @param {Object} left a media playlist object\n * @param {Object} right a media playlist object\n * @return {number} Greater than zero if the bandwidth attribute of\n * left is greater than the corresponding attribute of right. 
Less\n * than zero if the bandwidth of right is greater than left and\n * exactly zero if the two are equal.\n */\n\nconst comparePlaylistBandwidth = function (left, right) {\n let leftBandwidth;\n let rightBandwidth;\n if (left.attributes.BANDWIDTH) {\n leftBandwidth = left.attributes.BANDWIDTH;\n }\n leftBandwidth = leftBandwidth || window$1.Number.MAX_VALUE;\n if (right.attributes.BANDWIDTH) {\n rightBandwidth = right.attributes.BANDWIDTH;\n }\n rightBandwidth = rightBandwidth || window$1.Number.MAX_VALUE;\n return leftBandwidth - rightBandwidth;\n};\n/**\n * A comparator function to sort two playlist object by resolution (width).\n *\n * @param {Object} left a media playlist object\n * @param {Object} right a media playlist object\n * @return {number} Greater than zero if the resolution.width attribute of\n * left is greater than the corresponding attribute of right. Less\n * than zero if the resolution.width of right is greater than left and\n * exactly zero if the two are equal.\n */\n\nconst comparePlaylistResolution = function (left, right) {\n let leftWidth;\n let rightWidth;\n if (left.attributes.RESOLUTION && left.attributes.RESOLUTION.width) {\n leftWidth = left.attributes.RESOLUTION.width;\n }\n leftWidth = leftWidth || window$1.Number.MAX_VALUE;\n if (right.attributes.RESOLUTION && right.attributes.RESOLUTION.width) {\n rightWidth = right.attributes.RESOLUTION.width;\n }\n rightWidth = rightWidth || window$1.Number.MAX_VALUE; // NOTE - Fallback to bandwidth sort as appropriate in cases where multiple renditions\n // have the same media dimensions/ resolution\n\n if (leftWidth === rightWidth && left.attributes.BANDWIDTH && right.attributes.BANDWIDTH) {\n return left.attributes.BANDWIDTH - right.attributes.BANDWIDTH;\n }\n return leftWidth - rightWidth;\n};\n/**\n * Chooses the appropriate media playlist based on bandwidth and player size\n *\n * @param {Object} main\n * Object representation of the main manifest\n * @param {number} playerBandwidth\n * Current calculated bandwidth of the player\n * @param {number} playerWidth\n * Current width of the player element (should account for the device pixel ratio)\n * @param {number} playerHeight\n * Current height of the player element (should account for the device pixel ratio)\n * @param {boolean} limitRenditionByPlayerDimensions\n * True if the player width and height should be used during the selection, false otherwise\n * @param {Object} playlistController\n * the current playlistController object\n * @return {Playlist} the highest bitrate playlist less than the\n * currently detected bandwidth, accounting for some amount of\n * bandwidth variance\n */\n\nlet simpleSelector = function (main, playerBandwidth, playerWidth, playerHeight, limitRenditionByPlayerDimensions, playlistController) {\n // If we end up getting called before `main` is available, exit early\n if (!main) {\n return;\n }\n const options = {\n bandwidth: playerBandwidth,\n width: playerWidth,\n height: playerHeight,\n limitRenditionByPlayerDimensions\n };\n let playlists = main.playlists; // if playlist is audio only, select between currently active audio group playlists.\n\n if (Playlist.isAudioOnly(main)) {\n playlists = playlistController.getAudioTrackPlaylists_(); // add audioOnly to options so that we log audioOnly: true\n // at the buttom of this function for debugging.\n\n options.audioOnly = true;\n } // convert the playlists to an intermediary representation to make comparisons easier\n\n let sortedPlaylistReps = playlists.map(playlist => {\n let 
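/* Illustrative sketch (not part of the library): the comparators above are meant to be fed
 * to stableSort; missing BANDWIDTH or RESOLUTION attributes fall back to Number.MAX_VALUE,
 * and comparePlaylistResolution falls back to bandwidth when widths are equal. With two
 * hypothetical renditions:
 *
 *   const low = { attributes: { BANDWIDTH: 500000 } };
 *   const high = { attributes: { BANDWIDTH: 2000000 } };
 *   comparePlaylistBandwidth(low, high); // -> -1500000, so the lower-bandwidth rendition sorts first
 *   const renditions = [high, low];
 *   stableSort(renditions, comparePlaylistBandwidth); // renditions is now [low, high]
 */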
bandwidth;\n const width = playlist.attributes && playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.width;\n const height = playlist.attributes && playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.height;\n bandwidth = playlist.attributes && playlist.attributes.BANDWIDTH;\n bandwidth = bandwidth || window$1.Number.MAX_VALUE;\n return {\n bandwidth,\n width,\n height,\n playlist\n };\n });\n stableSort(sortedPlaylistReps, (left, right) => left.bandwidth - right.bandwidth); // filter out any playlists that have been excluded due to\n // incompatible configurations\n\n sortedPlaylistReps = sortedPlaylistReps.filter(rep => !Playlist.isIncompatible(rep.playlist)); // filter out any playlists that have been disabled manually through the representations\n // api or excluded temporarily due to playback errors.\n\n let enabledPlaylistReps = sortedPlaylistReps.filter(rep => Playlist.isEnabled(rep.playlist));\n if (!enabledPlaylistReps.length) {\n // if there are no enabled playlists, then they have all been excluded or disabled\n // by the user through the representations api. In this case, ignore exclusion and\n // fallback to what the user wants by using playlists the user has not disabled.\n enabledPlaylistReps = sortedPlaylistReps.filter(rep => !Playlist.isDisabled(rep.playlist));\n } // filter out any variant that has greater effective bitrate\n // than the current estimated bandwidth\n\n const bandwidthPlaylistReps = enabledPlaylistReps.filter(rep => rep.bandwidth * Config.BANDWIDTH_VARIANCE < playerBandwidth);\n let highestRemainingBandwidthRep = bandwidthPlaylistReps[bandwidthPlaylistReps.length - 1]; // get all of the renditions with the same (highest) bandwidth\n // and then taking the very first element\n\n const bandwidthBestRep = bandwidthPlaylistReps.filter(rep => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0]; // if we're not going to limit renditions by player size, make an early decision.\n\n if (limitRenditionByPlayerDimensions === false) {\n const chosenRep = bandwidthBestRep || enabledPlaylistReps[0] || sortedPlaylistReps[0];\n if (chosenRep && chosenRep.playlist) {\n let type = 'sortedPlaylistReps';\n if (bandwidthBestRep) {\n type = 'bandwidthBestRep';\n }\n if (enabledPlaylistReps[0]) {\n type = 'enabledPlaylistReps';\n }\n logFn(`choosing ${representationToString(chosenRep)} using ${type} with options`, options);\n return chosenRep.playlist;\n }\n logFn('could not choose a playlist with options', options);\n return null;\n } // filter out playlists without resolution information\n\n const haveResolution = bandwidthPlaylistReps.filter(rep => rep.width && rep.height); // sort variants by resolution\n\n stableSort(haveResolution, (left, right) => left.width - right.width); // if we have the exact resolution as the player use it\n\n const resolutionBestRepList = haveResolution.filter(rep => rep.width === playerWidth && rep.height === playerHeight);\n highestRemainingBandwidthRep = resolutionBestRepList[resolutionBestRepList.length - 1]; // ensure that we pick the highest bandwidth variant that have exact resolution\n\n const resolutionBestRep = resolutionBestRepList.filter(rep => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];\n let resolutionPlusOneList;\n let resolutionPlusOneSmallest;\n let resolutionPlusOneRep; // find the smallest variant that is larger than the player\n // if there is no match of exact resolution\n\n if (!resolutionBestRep) {\n resolutionPlusOneList = haveResolution.filter(rep => rep.width > 
playerWidth || rep.height > playerHeight); // find all the variants have the same smallest resolution\n\n resolutionPlusOneSmallest = resolutionPlusOneList.filter(rep => rep.width === resolutionPlusOneList[0].width && rep.height === resolutionPlusOneList[0].height); // ensure that we also pick the highest bandwidth variant that\n // is just-larger-than the video player\n\n highestRemainingBandwidthRep = resolutionPlusOneSmallest[resolutionPlusOneSmallest.length - 1];\n resolutionPlusOneRep = resolutionPlusOneSmallest.filter(rep => rep.bandwidth === highestRemainingBandwidthRep.bandwidth)[0];\n }\n let leastPixelDiffRep; // If this selector proves to be better than others,\n // resolutionPlusOneRep and resolutionBestRep and all\n // the code involving them should be removed.\n\n if (playlistController.leastPixelDiffSelector) {\n // find the variant that is closest to the player's pixel size\n const leastPixelDiffList = haveResolution.map(rep => {\n rep.pixelDiff = Math.abs(rep.width - playerWidth) + Math.abs(rep.height - playerHeight);\n return rep;\n }); // get the highest bandwidth, closest resolution playlist\n\n stableSort(leastPixelDiffList, (left, right) => {\n // sort by highest bandwidth if pixelDiff is the same\n if (left.pixelDiff === right.pixelDiff) {\n return right.bandwidth - left.bandwidth;\n }\n return left.pixelDiff - right.pixelDiff;\n });\n leastPixelDiffRep = leastPixelDiffList[0];\n } // fallback chain of variants\n\n const chosenRep = leastPixelDiffRep || resolutionPlusOneRep || resolutionBestRep || bandwidthBestRep || enabledPlaylistReps[0] || sortedPlaylistReps[0];\n if (chosenRep && chosenRep.playlist) {\n let type = 'sortedPlaylistReps';\n if (leastPixelDiffRep) {\n type = 'leastPixelDiffRep';\n } else if (resolutionPlusOneRep) {\n type = 'resolutionPlusOneRep';\n } else if (resolutionBestRep) {\n type = 'resolutionBestRep';\n } else if (bandwidthBestRep) {\n type = 'bandwidthBestRep';\n } else if (enabledPlaylistReps[0]) {\n type = 'enabledPlaylistReps';\n }\n logFn(`choosing ${representationToString(chosenRep)} using ${type} with options`, options);\n return chosenRep.playlist;\n }\n logFn('could not choose a playlist with options', options);\n return null;\n};\n\n/**\n * Chooses the appropriate media playlist based on the most recent\n * bandwidth estimate and the player size.\n *\n * Expects to be called within the context of an instance of VhsHandler\n *\n * @return {Playlist} the highest bitrate playlist less than the\n * currently detected bandwidth, accounting for some amount of\n * bandwidth variance\n */\n\nconst lastBandwidthSelector = function () {\n let pixelRatio = this.useDevicePixelRatio ? window$1.devicePixelRatio || 1 : 1;\n if (!isNaN(this.customPixelRatio)) {\n pixelRatio = this.customPixelRatio;\n }\n return simpleSelector(this.playlists.main, this.systemBandwidth, parseInt(safeGetComputedStyle(this.tech_.el(), 'width'), 10) * pixelRatio, parseInt(safeGetComputedStyle(this.tech_.el(), 'height'), 10) * pixelRatio, this.limitRenditionByPlayerDimensions, this.playlistController_);\n};\n/**\n * Chooses the appropriate media playlist based on an\n * exponential-weighted moving average of the bandwidth after\n * filtering for player size.\n *\n * Expects to be called within the context of an instance of VhsHandler\n *\n * @param {number} decay - a number between 0 and 1. 
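 *
 * For illustration (not part of the library), the update applied by the returned selector
 * is average = decay * systemBandwidth + (1 - decay) * average, e.g. with hypothetical
 * numbers:
 *
 *   let average = 4e6;            // previous estimate: 4 Mbps
 *   const decay = 0.5;
 *   const systemBandwidth = 2e6;  // newest measurement: 2 Mbps
 *   average = decay * systemBandwidth + (1 - decay) * average; // -> 3e6 (3 Mbps)
 *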
Higher values of\n * this parameter will cause previous bandwidth estimates to lose\n * significance more quickly.\n * @return {Function} a function which can be invoked to create a new\n * playlist selector function.\n * @see https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average\n */\n\nconst movingAverageBandwidthSelector = function (decay) {\n let average = -1;\n let lastSystemBandwidth = -1;\n if (decay < 0 || decay > 1) {\n throw new Error('Moving average bandwidth decay must be between 0 and 1.');\n }\n return function () {\n let pixelRatio = this.useDevicePixelRatio ? window$1.devicePixelRatio || 1 : 1;\n if (!isNaN(this.customPixelRatio)) {\n pixelRatio = this.customPixelRatio;\n }\n if (average < 0) {\n average = this.systemBandwidth;\n lastSystemBandwidth = this.systemBandwidth;\n } // stop the average value from decaying for every 250ms\n // when the systemBandwidth is constant\n // and\n // stop average from setting to a very low value when the\n // systemBandwidth becomes 0 in case of chunk cancellation\n\n if (this.systemBandwidth > 0 && this.systemBandwidth !== lastSystemBandwidth) {\n average = decay * this.systemBandwidth + (1 - decay) * average;\n lastSystemBandwidth = this.systemBandwidth;\n }\n return simpleSelector(this.playlists.main, average, parseInt(safeGetComputedStyle(this.tech_.el(), 'width'), 10) * pixelRatio, parseInt(safeGetComputedStyle(this.tech_.el(), 'height'), 10) * pixelRatio, this.limitRenditionByPlayerDimensions, this.playlistController_);\n };\n};\n/**\n * Chooses the appropriate media playlist based on the potential to rebuffer\n *\n * @param {Object} settings\n * Object of information required to use this selector\n * @param {Object} settings.main\n * Object representation of the main manifest\n * @param {number} settings.currentTime\n * The current time of the player\n * @param {number} settings.bandwidth\n * Current measured bandwidth\n * @param {number} settings.duration\n * Duration of the media\n * @param {number} settings.segmentDuration\n * Segment duration to be used in round trip time calculations\n * @param {number} settings.timeUntilRebuffer\n * Time left in seconds until the player has to rebuffer\n * @param {number} settings.currentTimeline\n * The current timeline segments are being loaded from\n * @param {SyncController} settings.syncController\n * SyncController for determining if we have a sync point for a given playlist\n * @return {Object|null}\n * {Object} return.playlist\n * The highest bandwidth playlist with the least amount of rebuffering\n * {Number} return.rebufferingImpact\n * The amount of time in seconds switching to this playlist will rebuffer. 
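 *
 * For illustration (not part of the library), the impact estimated for each candidate
 * playlist is rebufferingImpact = requestTimeEstimate * numRequests - timeUntilRebuffer,
 * e.g. with hypothetical numbers:
 *
 *   const requestTimeEstimate = 2; // seconds to fetch one segment at the current bandwidth
 *   const numRequests = 2;         // no sync point yet, so a sync request doubles the cost
 *   const timeUntilRebuffer = 5;   // seconds of buffer remaining
 *   requestTimeEstimate * numRequests - timeUntilRebuffer; // -> -1, switching should not rebuffer
 *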
A\n * negative value means that switching will cause zero rebuffering.\n */\n\nconst minRebufferMaxBandwidthSelector = function (settings) {\n const {\n main,\n currentTime,\n bandwidth,\n duration,\n segmentDuration,\n timeUntilRebuffer,\n currentTimeline,\n syncController\n } = settings; // filter out any playlists that have been excluded due to\n // incompatible configurations\n\n const compatiblePlaylists = main.playlists.filter(playlist => !Playlist.isIncompatible(playlist)); // filter out any playlists that have been disabled manually through the representations\n // api or excluded temporarily due to playback errors.\n\n let enabledPlaylists = compatiblePlaylists.filter(Playlist.isEnabled);\n if (!enabledPlaylists.length) {\n // if there are no enabled playlists, then they have all been excluded or disabled\n // by the user through the representations api. In this case, ignore exclusion and\n // fallback to what the user wants by using playlists the user has not disabled.\n enabledPlaylists = compatiblePlaylists.filter(playlist => !Playlist.isDisabled(playlist));\n }\n const bandwidthPlaylists = enabledPlaylists.filter(Playlist.hasAttribute.bind(null, 'BANDWIDTH'));\n const rebufferingEstimates = bandwidthPlaylists.map(playlist => {\n const syncPoint = syncController.getSyncPoint(playlist, duration, currentTimeline, currentTime); // If there is no sync point for this playlist, switching to it will require a\n // sync request first. This will double the request time\n\n const numRequests = syncPoint ? 1 : 2;\n const requestTimeEstimate = Playlist.estimateSegmentRequestTime(segmentDuration, bandwidth, playlist);\n const rebufferingImpact = requestTimeEstimate * numRequests - timeUntilRebuffer;\n return {\n playlist,\n rebufferingImpact\n };\n });\n const noRebufferingPlaylists = rebufferingEstimates.filter(estimate => estimate.rebufferingImpact <= 0); // Sort by bandwidth DESC\n\n stableSort(noRebufferingPlaylists, (a, b) => comparePlaylistBandwidth(b.playlist, a.playlist));\n if (noRebufferingPlaylists.length) {\n return noRebufferingPlaylists[0];\n }\n stableSort(rebufferingEstimates, (a, b) => a.rebufferingImpact - b.rebufferingImpact);\n return rebufferingEstimates[0] || null;\n};\n/**\n * Chooses the appropriate media playlist, which in this case is the lowest bitrate\n * one with video. If no renditions with video exist, return the lowest audio rendition.\n *\n * Expects to be called within the context of an instance of VhsHandler\n *\n * @return {Object|null}\n * {Object} return.playlist\n * The lowest bitrate playlist that contains a video codec. 
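 *
 * For illustration (not part of the library), a minimal sketch of invoking the selector
 * with a VhsHandler-like `this` (fakeVhs, audioOnlyPlaylist and videoPlaylist are
 * hypothetical, and both renditions are assumed to be enabled):
 *
 *   const fakeVhs = { playlists: { main: { playlists: [audioOnlyPlaylist, videoPlaylist] } } };
 *   lowestBitrateCompatibleVariantSelector.call(fakeVhs);
 *   // -> videoPlaylist, the cheapest rendition whose parsed codecs include video
 *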
If no such rendition\n * exists pick the lowest audio rendition.\n */\n\nconst lowestBitrateCompatibleVariantSelector = function () {\n // filter out any playlists that have been excluded due to\n // incompatible configurations or playback errors\n const playlists = this.playlists.main.playlists.filter(Playlist.isEnabled); // Sort ascending by bitrate\n\n stableSort(playlists, (a, b) => comparePlaylistBandwidth(a, b)); // Parse and assume that playlists with no video codec have no video\n // (this is not necessarily true, although it is generally true).\n //\n // If an entire manifest has no valid videos everything will get filtered\n // out.\n\n const playlistsWithVideo = playlists.filter(playlist => !!codecsForPlaylist(this.playlists.main, playlist).video);\n return playlistsWithVideo[0] || null;\n};\n\n/**\n * Combine all segments into a single Uint8Array\n *\n * @param {Object} segmentObj\n * @return {Uint8Array} concatenated bytes\n * @private\n */\nconst concatSegments = segmentObj => {\n let offset = 0;\n let tempBuffer;\n if (segmentObj.bytes) {\n tempBuffer = new Uint8Array(segmentObj.bytes); // combine the individual segments into one large typed-array\n\n segmentObj.segments.forEach(segment => {\n tempBuffer.set(segment, offset);\n offset += segment.byteLength;\n });\n }\n return tempBuffer;\n};\n/**\n * Example:\n * https://host.com/path1/path2/path3/segment.ts?arg1=val1\n * -->\n * path3/segment.ts\n *\n * @param resolvedUri\n * @return {string}\n */\n\nfunction compactSegmentUrlDescription(resolvedUri) {\n try {\n return new URL(resolvedUri).pathname.split('/').slice(-2).join('/');\n } catch (e) {\n return '';\n }\n}\n\n/**\n * @file text-tracks.js\n */\n/**\n * Create captions text tracks on video.js if they do not exist\n *\n * @param {Object} inbandTextTracks a reference to current inbandTextTracks\n * @param {Object} tech the video.js tech\n * @param {Object} captionStream the caption stream to create\n * @private\n */\n\nconst createCaptionsTrackIfNotExists = function (inbandTextTracks, tech, captionStream) {\n if (!inbandTextTracks[captionStream]) {\n tech.trigger({\n type: 'usage',\n name: 'vhs-608'\n });\n let instreamId = captionStream; // we need to translate SERVICEn for 708 to how mux.js currently labels them\n\n if (/^cc708_/.test(captionStream)) {\n instreamId = 'SERVICE' + captionStream.split('_')[1];\n }\n const track = tech.textTracks().getTrackById(instreamId);\n if (track) {\n // Resuse an existing track with a CC# id because this was\n // very likely created by videojs-contrib-hls from information\n // in the m3u8 for us to use\n inbandTextTracks[captionStream] = track;\n } else {\n // This section gets called when we have caption services that aren't specified in the manifest.\n // Manifest level caption services are handled in media-groups.js under CLOSED-CAPTIONS.\n const captionServices = tech.options_.vhs && tech.options_.vhs.captionServices || {};\n let label = captionStream;\n let language = captionStream;\n let def = false;\n const captionService = captionServices[instreamId];\n if (captionService) {\n label = captionService.label;\n language = captionService.language;\n def = captionService.default;\n } // Otherwise, create a track with the default `CC#` label and\n // without a language\n\n inbandTextTracks[captionStream] = tech.addRemoteTextTrack({\n kind: 'captions',\n id: instreamId,\n // TODO: investigate why this doesn't seem to turn the caption on by default\n default: def,\n label,\n language\n }, false).track;\n }\n }\n};\n/**\n * Add 
caption text track data to a source handler given an array of captions\n *\n * @param {Object}\n * @param {Object} inbandTextTracks the inband text tracks\n * @param {number} timestampOffset the timestamp offset of the source buffer\n * @param {Array} captionArray an array of caption data\n * @private\n */\n\nconst addCaptionData = function ({\n inbandTextTracks,\n captionArray,\n timestampOffset\n}) {\n if (!captionArray) {\n return;\n }\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n captionArray.forEach(caption => {\n const track = caption.stream; // in CEA 608 captions, video.js/mux.js sends a content array\n // with positioning data\n\n if (caption.content) {\n caption.content.forEach(value => {\n const cue = new Cue(caption.startTime + timestampOffset, caption.endTime + timestampOffset, value.text);\n cue.line = value.line;\n cue.align = 'left';\n cue.position = value.position;\n cue.positionAlign = 'line-left';\n inbandTextTracks[track].addCue(cue);\n });\n } else {\n // otherwise, a text value with combined captions is sent\n inbandTextTracks[track].addCue(new Cue(caption.startTime + timestampOffset, caption.endTime + timestampOffset, caption.text));\n }\n });\n};\n/**\n * Define properties on a cue for backwards compatibility,\n * but warn the user that the way that they are using it\n * is deprecated and will be removed at a later date.\n *\n * @param {Cue} cue the cue to add the properties on\n * @private\n */\n\nconst deprecateOldCue = function (cue) {\n Object.defineProperties(cue.frame, {\n id: {\n get() {\n videojs.log.warn('cue.frame.id is deprecated. Use cue.value.key instead.');\n return cue.value.key;\n }\n },\n value: {\n get() {\n videojs.log.warn('cue.frame.value is deprecated. Use cue.value.data instead.');\n return cue.value.data;\n }\n },\n privateData: {\n get() {\n videojs.log.warn('cue.frame.privateData is deprecated. Use cue.value.data instead.');\n return cue.value.data;\n }\n }\n });\n};\n/**\n * Add metadata text track data to a source handler given an array of metadata\n *\n * @param {Object}\n * @param {Object} inbandTextTracks the inband text tracks\n * @param {Array} metadataArray an array of metadata\n * @param {number} timestampOffset the timestamp offset of the source buffer\n * @param {number} videoDuration the duration of the video\n * @private\n */\n\nconst addMetadata = ({\n inbandTextTracks,\n metadataArray,\n timestampOffset,\n videoDuration\n}) => {\n if (!metadataArray) {\n return;\n }\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n const metadataTrack = inbandTextTracks.metadataTrack_;\n if (!metadataTrack) {\n return;\n }\n metadataArray.forEach(metadata => {\n const time = metadata.cueTime + timestampOffset; // if time isn't a finite number between 0 and Infinity, like NaN,\n // ignore this bit of metadata.\n // This likely occurs when you have a non-timed ID3 tag like TIT2,\n // which is the \"Title/Songname/Content description\" frame\n\n if (typeof time !== 'number' || window$1.isNaN(time) || time < 0 || !(time < Infinity)) {\n return;\n } // If we have no frames, we can't create a cue.\n\n if (!metadata.frames || !metadata.frames.length) {\n return;\n }\n metadata.frames.forEach(frame => {\n const cue = new Cue(time, time, frame.value || frame.url || frame.data || '');\n cue.frame = frame;\n cue.value = frame;\n deprecateOldCue(cue);\n metadataTrack.addCue(cue);\n });\n });\n if (!metadataTrack.cues || !metadataTrack.cues.length) {\n return;\n } // Updating the metadata cues so that\n // the endTime of each cue is the startTime of the next cue\n // the endTime of the last cue is the duration of the video\n\n const cues = metadataTrack.cues;\n const cuesArray = []; // Create a copy of the TextTrackCueList...\n // ...disregarding cues with a falsey value\n\n for (let i = 0; i < cues.length; i++) {\n if (cues[i]) {\n cuesArray.push(cues[i]);\n }\n } // Group cues by their startTime value\n\n const cuesGroupedByStartTime = cuesArray.reduce((obj, cue) => {\n const timeSlot = obj[cue.startTime] || [];\n timeSlot.push(cue);\n obj[cue.startTime] = timeSlot;\n return obj;\n }, {}); // Sort startTimes by ascending order\n\n const sortedStartTimes = Object.keys(cuesGroupedByStartTime).sort((a, b) => Number(a) - Number(b)); // Map each cue group's endTime to the next group's startTime\n\n sortedStartTimes.forEach((startTime, idx) => {\n const cueGroup = cuesGroupedByStartTime[startTime];\n const finiteDuration = isFinite(videoDuration) ? 
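/* Illustrative sketch (not part of the library): the surrounding loop chains cue end times
 * so that each group of cues ends where the next group starts, and the last group ends at
 * the video duration, e.g. with hypothetical values:
 *
 *   // sortedStartTimes = ['0', '10', '20'], videoDuration = 30
 *   // cues starting at 0  -> endTime 10
 *   // cues starting at 10 -> endTime 20
 *   // cues starting at 20 -> endTime 30 (the duration, since there is no next group)
 */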
videoDuration : startTime;\n const nextTime = Number(sortedStartTimes[idx + 1]) || finiteDuration; // Map each cue's endTime the next group's startTime\n\n cueGroup.forEach(cue => {\n cue.endTime = nextTime;\n });\n });\n}; // object for mapping daterange attributes\n\nconst dateRangeAttr = {\n id: 'ID',\n class: 'CLASS',\n startDate: 'START-DATE',\n duration: 'DURATION',\n endDate: 'END-DATE',\n endOnNext: 'END-ON-NEXT',\n plannedDuration: 'PLANNED-DURATION',\n scte35Out: 'SCTE35-OUT',\n scte35In: 'SCTE35-IN'\n};\nconst dateRangeKeysToOmit = new Set(['id', 'class', 'startDate', 'duration', 'endDate', 'endOnNext', 'startTime', 'endTime', 'processDateRange']);\n/**\n * Add DateRange metadata text track to a source handler given an array of metadata\n *\n * @param {Object}\n * @param {Object} inbandTextTracks the inband text tracks\n * @param {Array} dateRanges parsed media playlist\n * @private\n */\n\nconst addDateRangeMetadata = ({\n inbandTextTracks,\n dateRanges\n}) => {\n const metadataTrack = inbandTextTracks.metadataTrack_;\n if (!metadataTrack) {\n return;\n }\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n dateRanges.forEach(dateRange => {\n // we generate multiple cues for each date range with different attributes\n for (const key of Object.keys(dateRange)) {\n if (dateRangeKeysToOmit.has(key)) {\n continue;\n }\n const cue = new Cue(dateRange.startTime, dateRange.endTime, '');\n cue.id = dateRange.id;\n cue.type = 'com.apple.quicktime.HLS';\n cue.value = {\n key: dateRangeAttr[key],\n data: dateRange[key]\n };\n if (key === 'scte35Out' || key === 'scte35In') {\n cue.value.data = new Uint8Array(cue.value.data.match(/[\\da-f]{2}/gi)).buffer;\n }\n metadataTrack.addCue(cue);\n }\n dateRange.processDateRange();\n });\n};\n/**\n * Create metadata text track on video.js if it does not exist\n *\n * @param {Object} inbandTextTracks a reference to current inbandTextTracks\n * @param {string} dispatchType the inband metadata track dispatch type\n * @param {Object} tech the video.js tech\n * @private\n */\n\nconst createMetadataTrackIfNotExists = (inbandTextTracks, dispatchType, tech) => {\n if (inbandTextTracks.metadataTrack_) {\n return;\n }\n inbandTextTracks.metadataTrack_ = tech.addRemoteTextTrack({\n kind: 'metadata',\n label: 'Timed Metadata'\n }, false).track;\n if (!videojs.browser.IS_ANY_SAFARI) {\n inbandTextTracks.metadataTrack_.inBandMetadataTrackDispatchType = dispatchType;\n }\n};\n/**\n * Remove cues from a track on video.js.\n *\n * @param {Double} start start of where we should remove the cue\n * @param {Double} end end of where the we should remove the cue\n * @param {Object} track the text track to remove the cues from\n * @private\n */\n\nconst removeCuesFromTrack = function (start, end, track) {\n let i;\n let cue;\n if (!track) {\n return;\n }\n if (!track.cues) {\n return;\n }\n i = track.cues.length;\n while (i--) {\n cue = track.cues[i]; // Remove any cue within the provided start and end time\n\n if (cue.startTime >= start && cue.endTime <= end) {\n track.removeCue(cue);\n }\n }\n};\n/**\n * Remove duplicate cues from a track on video.js (a cue is considered a\n * duplicate if it has the same time interval and text as another)\n *\n * @param {Object} track the text track to remove the duplicate cues from\n * @private\n */\n\nconst removeDuplicateCuesFromTrack = function (track) {\n const cues = track.cues;\n if (!cues) {\n return;\n }\n const uniqueCues = {};\n for (let i = cues.length - 1; i >= 0; i--) {\n const cue = cues[i];\n const cueKey = 
`${cue.startTime}-${cue.endTime}-${cue.text}`;\n if (uniqueCues[cueKey]) {\n track.removeCue(cue);\n } else {\n uniqueCues[cueKey] = cue;\n }\n }\n};\n\n/**\n * Returns a list of gops in the buffer that have a pts value of 3 seconds or more in\n * front of current time.\n *\n * @param {Array} buffer\n * The current buffer of gop information\n * @param {number} currentTime\n * The current time\n * @param {Double} mapping\n * Offset to map display time to stream presentation time\n * @return {Array}\n * List of gops considered safe to append over\n */\n\nconst gopsSafeToAlignWith = (buffer, currentTime, mapping) => {\n if (typeof currentTime === 'undefined' || currentTime === null || !buffer.length) {\n return [];\n } // pts value for current time + 3 seconds to give a bit more wiggle room\n\n const currentTimePts = Math.ceil((currentTime - mapping + 3) * ONE_SECOND_IN_TS);\n let i;\n for (i = 0; i < buffer.length; i++) {\n if (buffer[i].pts > currentTimePts) {\n break;\n }\n }\n return buffer.slice(i);\n};\n/**\n * Appends gop information (timing and byteLength) received by the transmuxer for the\n * gops appended in the last call to appendBuffer\n *\n * @param {Array} buffer\n * The current buffer of gop information\n * @param {Array} gops\n * List of new gop information\n * @param {boolean} replace\n * If true, replace the buffer with the new gop information. If false, append the\n * new gop information to the buffer in the right location of time.\n * @return {Array}\n * Updated list of gop information\n */\n\nconst updateGopBuffer = (buffer, gops, replace) => {\n if (!gops.length) {\n return buffer;\n }\n if (replace) {\n // If we are in safe append mode, then completely overwrite the gop buffer\n // with the most recent appeneded data. This will make sure that when appending\n // future segments, we only try to align with gops that are both ahead of current\n // time and in the last segment appended.\n return gops.slice();\n }\n const start = gops[0].pts;\n let i = 0;\n for (i; i < buffer.length; i++) {\n if (buffer[i].pts >= start) {\n break;\n }\n }\n return buffer.slice(0, i).concat(gops);\n};\n/**\n * Removes gop information in buffer that overlaps with provided start and end\n *\n * @param {Array} buffer\n * The current buffer of gop information\n * @param {Double} start\n * position to start the remove at\n * @param {Double} end\n * position to end the remove at\n * @param {Double} mapping\n * Offset to map display time to stream presentation time\n */\n\nconst removeGopBuffer = (buffer, start, end, mapping) => {\n const startPts = Math.ceil((start - mapping) * ONE_SECOND_IN_TS);\n const endPts = Math.ceil((end - mapping) * ONE_SECOND_IN_TS);\n const updatedBuffer = buffer.slice();\n let i = buffer.length;\n while (i--) {\n if (buffer[i].pts <= endPts) {\n break;\n }\n }\n if (i === -1) {\n // no removal because end of remove range is before start of buffer\n return updatedBuffer;\n }\n let j = i + 1;\n while (j--) {\n if (buffer[j].pts <= startPts) {\n break;\n }\n } // clamp remove range start to 0 index\n\n j = Math.max(j, 0);\n updatedBuffer.splice(j, i - j + 1);\n return updatedBuffer;\n};\nconst shallowEqual = function (a, b) {\n // if both are undefined\n // or one or the other is undefined\n // they are not equal\n if (!a && !b || !a && b || a && !b) {\n return false;\n } // they are the same object and thus, equal\n\n if (a === b) {\n return true;\n } // sort keys so we can make sure they have\n // all the same keys later.\n\n const akeys = Object.keys(a).sort();\n const 
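/* Illustrative sketch (not part of the library): gopsSafeToAlignWith above keeps only gops
 * whose pts lies more than three seconds past the playhead. Assuming the 90kHz MPEG-TS
 * clock (ONE_SECOND_IN_TS === 90000), the cutoff for a hypothetical playhead is:
 *
 *   const currentTime = 10; // seconds
 *   const mapping = 0;      // display-time to stream-time offset
 *   const currentTimePts = Math.ceil((currentTime - mapping + 3) * 90000); // -> 1170000
 *   // gops at or below 1170000 pts are skipped; everything after is returned
 */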
bkeys = Object.keys(b).sort(); // different number of keys, not equal\n\n if (akeys.length !== bkeys.length) {\n return false;\n }\n for (let i = 0; i < akeys.length; i++) {\n const key = akeys[i]; // different sorted keys, not equal\n\n if (key !== bkeys[i]) {\n return false;\n } // different values, not equal\n\n if (a[key] !== b[key]) {\n return false;\n }\n }\n return true;\n};\n\n// https://www.w3.org/TR/WebIDL-1/#quotaexceedederror\nconst QUOTA_EXCEEDED_ERR = 22;\n\n/**\n * The segment loader has no recourse except to fetch a segment in the\n * current playlist and use the internal timestamps in that segment to\n * generate a syncPoint. This function returns a good candidate index\n * for that process.\n *\n * @param {number} currentTimeline - the timeline that candidate segments must belong to\n * @param {Array} segments - the segments array from a playlist.\n * @param {number} targetTime - the cumulative segment duration, in seconds, to cover before\n * settling on a candidate\n * @return {number} An index of a segment from the playlist to load\n */\n\nconst getSyncSegmentCandidate = function (currentTimeline, segments, targetTime) {\n segments = segments || [];\n const timelineSegments = [];\n let time = 0;\n for (let i = 0; i < segments.length; i++) {\n const segment = segments[i];\n if (currentTimeline === segment.timeline) {\n timelineSegments.push(i);\n time += segment.duration;\n if (time > targetTime) {\n return i;\n }\n }\n }\n if (timelineSegments.length === 0) {\n return 0;\n } // default to the last timeline segment\n\n return timelineSegments[timelineSegments.length - 1];\n}; // In the event of a quota exceeded error, keep at least one second of back buffer. This\n// number was arbitrarily chosen and may be updated in the future, but seemed reasonable\n// as a start to prevent any potential issues with removing content too close to the\n// playhead.\n\nconst MIN_BACK_BUFFER = 1; // in seconds\n\n// in ms\nconst CHECK_BUFFER_DELAY = 500;\nconst finite = num => typeof num === 'number' && isFinite(num); // With most content hovering around 30fps, if a segment has a duration less than a half\n// frame at 30fps or one frame at 60fps, the bandwidth and throughput calculations will\n// not accurately reflect the rest of the content.\n\nconst MIN_SEGMENT_DURATION_TO_SAVE_STATS = 1 / 60;\nconst illegalMediaSwitch = (loaderType, startingMedia, trackInfo) => {\n // Although these checks should most likely cover non 'main' types, for now it narrows\n // the scope of our checks.\n if (loaderType !== 'main' || !startingMedia || !trackInfo) {\n return null;\n }\n if (!trackInfo.hasAudio && !trackInfo.hasVideo) {\n return 'Neither audio nor video found in segment.';\n }\n if (startingMedia.hasVideo && !trackInfo.hasVideo) {\n return 'Only audio found in segment when we expected video.' + ' We can\'t switch to audio only from a stream that had video.' + ' To get rid of this message, please add codec information to the manifest.';\n }\n if (!startingMedia.hasVideo && trackInfo.hasVideo) {\n return 'Video found in segment when we expected only audio.' + ' We can\'t switch to a stream with video from an audio only stream.' 
+ ' To get rid of this message, please add codec information to the manifest.';\n }\n return null;\n};\n/**\n * Calculates a time value that is safe to remove from the back buffer without interrupting\n * playback.\n *\n * @param {TimeRange} seekable\n * The current seekable range\n * @param {number} currentTime\n * The current time of the player\n * @param {number} targetDuration\n * The target duration of the current playlist\n * @return {number}\n * Time that is safe to remove from the back buffer without interrupting playback\n */\n\nconst safeBackBufferTrimTime = (seekable, currentTime, targetDuration) => {\n // 30 seconds before the playhead provides a safe default for trimming.\n //\n // Choosing a reasonable default is particularly important for high bitrate content and\n // VOD videos/live streams with large windows, as the buffer may end up overfilled and\n // throw an APPEND_BUFFER_ERR.\n let trimTime = currentTime - Config.BACK_BUFFER_LENGTH;\n if (seekable.length) {\n // Some live playlists may have a shorter window of content than the full allowed back\n // buffer. For these playlists, don't save content that's no longer within the window.\n trimTime = Math.max(trimTime, seekable.start(0));\n } // Don't remove within target duration of the current time to avoid the possibility of\n // removing the GOP currently being played, as removing it can cause playback stalls.\n\n const maxTrimTime = currentTime - targetDuration;\n return Math.min(maxTrimTime, trimTime);\n};\nconst segmentInfoString = segmentInfo => {\n const {\n startOfSegment,\n duration,\n segment,\n part,\n playlist: {\n mediaSequence: seq,\n id,\n segments = []\n },\n mediaIndex: index,\n partIndex,\n timeline\n } = segmentInfo;\n const segmentLen = segments.length - 1;\n let selection = 'mediaIndex/partIndex increment';\n if (segmentInfo.getMediaInfoForTime) {\n selection = `getMediaInfoForTime (${segmentInfo.getMediaInfoForTime})`;\n } else if (segmentInfo.isSyncRequest) {\n selection = 'getSyncSegmentCandidate (isSyncRequest)';\n }\n if (segmentInfo.independent) {\n selection += ` with independent ${segmentInfo.independent}`;\n }\n const hasPartIndex = typeof partIndex === 'number';\n const name = segmentInfo.segment.uri ? 'segment' : 'pre-segment';\n const zeroBasedPartCount = hasPartIndex ? getKnownPartCount({\n preloadSegment: segment\n }) - 1 : 0;\n return `${name} [${seq + index}/${seq + segmentLen}]` + (hasPartIndex ? ` part [${partIndex}/${zeroBasedPartCount}]` : '') + ` segment start/end [${segment.start} => ${segment.end}]` + (hasPartIndex ? 
` part start/end [${part.start} => ${part.end}]` : '') + ` startOfSegment [${startOfSegment}]` + ` duration [${duration}]` + ` timeline [${timeline}]` + ` selected by [${selection}]` + ` playlist [${id}]`;\n};\nconst timingInfoPropertyForMedia = mediaType => `${mediaType}TimingInfo`;\n/**\n * Returns the timestamp offset to use for the segment.\n *\n * @param {number} segmentTimeline\n * The timeline of the segment\n * @param {number} currentTimeline\n * The timeline currently being followed by the loader\n * @param {number} startOfSegment\n * The estimated segment start\n * @param {TimeRange[]} buffered\n * The loader's buffer\n * @param {boolean} overrideCheck\n * If true, no checks are made to see if the timestamp offset value should be set,\n * but sets it directly to a value.\n *\n * @return {number|null}\n * Either a number representing a new timestamp offset, or null if the segment is\n * part of the same timeline\n */\n\nconst timestampOffsetForSegment = ({\n segmentTimeline,\n currentTimeline,\n startOfSegment,\n buffered,\n overrideCheck\n}) => {\n // Check to see if we are crossing a discontinuity to see if we need to set the\n // timestamp offset on the transmuxer and source buffer.\n //\n // Previously, we changed the timestampOffset if the start of this segment was less than\n // the currently set timestampOffset, but this isn't desirable as it can produce bad\n // behavior, especially around long running live streams.\n if (!overrideCheck && segmentTimeline === currentTimeline) {\n return null;\n } // When changing renditions, it's possible to request a segment on an older timeline. For\n // instance, given two renditions with the following:\n //\n // #EXTINF:10\n // segment1\n // #EXT-X-DISCONTINUITY\n // #EXTINF:10\n // segment2\n // #EXTINF:10\n // segment3\n //\n // And the current player state:\n //\n // current time: 8\n // buffer: 0 => 20\n //\n // The next segment on the current rendition would be segment3, filling the buffer from\n // 20s onwards. However, if a rendition switch happens after segment2 was requested,\n // then the next segment to be requested will be segment1 from the new rendition in\n // order to fill time 8 and onwards. Using the buffered end would result in repeated\n // content (since it would position segment1 of the new rendition starting at 20s). This\n // case can be identified when the new segment's timeline is a prior value. Instead of\n // using the buffered end, the startOfSegment can be used, which, hopefully, will be\n // more accurate to the actual start time of the segment.\n\n if (segmentTimeline < currentTimeline) {\n return startOfSegment;\n } // segmentInfo.startOfSegment used to be used as the timestamp offset, however, that\n // value uses the end of the last segment if it is available. While this value\n // should often be correct, it's better to rely on the buffered end, as the new\n // content post discontinuity should line up with the buffered end as if it were\n // time 0 for the new content.\n\n return buffered.length ? buffered.end(buffered.length - 1) : startOfSegment;\n};\n/**\n * Returns whether or not the loader should wait for a timeline change from the timeline\n * change controller before processing the segment.\n *\n * Primary timing in VHS goes by video. This is different from most media players, as\n * audio is more often used as the primary timing source. 
For the foreseeable future, VHS\n * will continue to use video as the primary timing source, due to the current logic and\n * expectations built around it.\n\n * Since the timing follows video, in order to maintain sync, the video loader is\n * responsible for setting both audio and video source buffer timestamp offsets.\n *\n * Setting different values for audio and video source buffers could lead to\n * desyncing. The following examples demonstrate some of the situations where this\n * distinction is important. Note that all of these cases involve demuxed content. When\n * content is muxed, the audio and video are packaged together, therefore syncing\n * separate media playlists is not an issue.\n *\n * CASE 1: Audio prepares to load a new timeline before video:\n *\n * Timeline: 0 1\n * Audio Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Audio Loader: ^\n * Video Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Video Loader ^\n *\n * In the above example, the audio loader is preparing to load the 6th segment, the first\n * after a discontinuity, while the video loader is still loading the 5th segment, before\n * the discontinuity.\n *\n * If the audio loader goes ahead and loads and appends the 6th segment before the video\n * loader crosses the discontinuity, then when appended, the 6th audio segment will use\n * the timestamp offset from timeline 0. This will likely lead to desyncing. In addition,\n * the audio loader must provide the audioAppendStart value to trim the content in the\n * transmuxer, and that value relies on the audio timestamp offset. Since the audio\n * timestamp offset is set by the video (main) loader, the audio loader shouldn't load the\n * segment until that value is provided.\n *\n * CASE 2: Video prepares to load a new timeline before audio:\n *\n * Timeline: 0 1\n * Audio Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Audio Loader: ^\n * Video Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Video Loader ^\n *\n * In the above example, the video loader is preparing to load the 6th segment, the first\n * after a discontinuity, while the audio loader is still loading the 5th segment, before\n * the discontinuity.\n *\n * If the video loader goes ahead and loads and appends the 6th segment, then once the\n * segment is loaded and processed, both the video and audio timestamp offsets will be\n * set, since video is used as the primary timing source. This is to ensure content lines\n * up appropriately, as any modifications to the video timing are reflected by audio when\n * the video loader sets the audio and video timestamp offsets to the same value. However,\n * setting the timestamp offset for audio before audio has had a chance to change\n * timelines will likely lead to desyncing, as the audio loader will append segment 5 with\n * a timestamp intended to apply to segments from timeline 1 rather than timeline 0.\n *\n * CASE 3: When seeking, audio prepares to load a new timeline before video\n *\n * Timeline: 0 1\n * Audio Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Audio Loader: ^\n * Video Segments: 0 1 2 3 4 5 DISCO 6 7 8 9\n * Video Loader ^\n *\n * In the above example, both audio and video loaders are loading segments from timeline\n * 0, but imagine that the seek originated from timeline 1.\n *\n * When seeking to a new timeline, the timestamp offset will be set based on the expected\n * segment start of the loaded video segment. 
In order to maintain sync, the audio loader\n * must wait for the video loader to load its segment and update both the audio and video\n * timestamp offsets before it may load and append its own segment. This is the case\n * whether the seek results in a mismatched segment request (e.g., the audio loader\n * chooses to load segment 3 and the video loader chooses to load segment 4) or the\n * loaders choose to load the same segment index from each playlist, as the segments may\n * not be aligned perfectly, even for matching segment indexes.\n *\n * @param {Object} timelinechangeController\n * @param {number} currentTimeline\n * The timeline currently being followed by the loader\n * @param {number} segmentTimeline\n * The timeline of the segment being loaded\n * @param {('main'|'audio')} loaderType\n * The loader type\n * @param {boolean} audioDisabled\n * Whether the audio is disabled for the loader. This should only be true when the\n * loader may have muxed audio in its segment, but should not append it, e.g., for\n * the main loader when an alternate audio playlist is active.\n *\n * @return {boolean}\n * Whether the loader should wait for a timeline change from the timeline change\n * controller before processing the segment\n */\n\nconst shouldWaitForTimelineChange = ({\n timelineChangeController,\n currentTimeline,\n segmentTimeline,\n loaderType,\n audioDisabled\n}) => {\n if (currentTimeline === segmentTimeline) {\n return false;\n }\n if (loaderType === 'audio') {\n const lastMainTimelineChange = timelineChangeController.lastTimelineChange({\n type: 'main'\n }); // Audio loader should wait if:\n //\n // * main hasn't had a timeline change yet (thus has not loaded its first segment)\n // * main hasn't yet changed to the timeline audio is looking to load\n\n return !lastMainTimelineChange || lastMainTimelineChange.to !== segmentTimeline;\n } // The main loader only needs to wait for timeline changes if there's demuxed audio.\n // Otherwise, there's nothing to wait for, since audio would be muxed into the main\n // loader's segments (or the content is audio/video only and handled by the main\n // loader).\n\n if (loaderType === 'main' && audioDisabled) {\n const pendingAudioTimelineChange = timelineChangeController.pendingTimelineChange({\n type: 'audio'\n }); // Main loader should wait for the audio loader if audio is not pending a timeline\n // change to the current timeline.\n //\n // Since the main loader is responsible for setting the timestamp offset for both\n // audio and video, the main loader must wait for audio to be about to change to its\n // timeline before setting the offset, otherwise, if audio is behind in loading,\n // segments from the previous timeline would be adjusted by the new timestamp offset.\n //\n // This requirement means that video will not cross a timeline until the audio is\n // about to cross to it, so that way audio and video will always cross the timeline\n // together.\n //\n // In addition to normal timeline changes, these rules also apply to the start of a\n // stream (going from a non-existent timeline, -1, to timeline 0). It's important\n // that these rules apply to the first timeline change because if they did not, it's\n // possible that the main loader will cross two timelines before the audio loader has\n // crossed one. 
Logic may be implemented to handle the startup as a special case, but\n // it's easier to simply treat all timeline changes the same.\n\n if (pendingAudioTimelineChange && pendingAudioTimelineChange.to === segmentTimeline) {\n return false;\n }\n return true;\n }\n return false;\n};\nconst mediaDuration = timingInfos => {\n let maxDuration = 0;\n ['video', 'audio'].forEach(function (type) {\n const typeTimingInfo = timingInfos[`${type}TimingInfo`];\n if (!typeTimingInfo) {\n return;\n }\n const {\n start,\n end\n } = typeTimingInfo;\n let duration;\n if (typeof start === 'bigint' || typeof end === 'bigint') {\n duration = window$1.BigInt(end) - window$1.BigInt(start);\n } else if (typeof start === 'number' && typeof end === 'number') {\n duration = end - start;\n }\n if (typeof duration !== 'undefined' && duration > maxDuration) {\n maxDuration = duration;\n }\n }); // convert back to a number if it is lower than MAX_SAFE_INTEGER\n // as we only need BigInt when we are above that.\n\n if (typeof maxDuration === 'bigint' && maxDuration < Number.MAX_SAFE_INTEGER) {\n maxDuration = Number(maxDuration);\n }\n return maxDuration;\n};\nconst segmentTooLong = ({\n segmentDuration,\n maxDuration\n}) => {\n // 0 duration segments are most likely due to metadata only segments or a lack of\n // information.\n if (!segmentDuration) {\n return false;\n } // For HLS:\n //\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.3.1\n // The EXTINF duration of each Media Segment in the Playlist\n // file, when rounded to the nearest integer, MUST be less than or equal\n // to the target duration; longer segments can trigger playback stalls\n // or other errors.\n //\n // For DASH, the mpd-parser uses the largest reported segment duration as the target\n // duration. Although that reported duration is occasionally approximate (i.e., not\n // exact), a strict check may report that a segment is too long more often in DASH.\n\n return Math.round(segmentDuration) > maxDuration + TIME_FUDGE_FACTOR;\n};\nconst getTroublesomeSegmentDurationMessage = (segmentInfo, sourceType) => {\n // Right now we aren't following DASH's timing model exactly, so only perform\n // this check for HLS content.\n if (sourceType !== 'hls') {\n return null;\n }\n const segmentDuration = mediaDuration({\n audioTimingInfo: segmentInfo.audioTimingInfo,\n videoTimingInfo: segmentInfo.videoTimingInfo\n }); // Don't report if we lack information.\n //\n // If the segment has a duration of 0 it is either a lack of information or a\n // metadata only segment and shouldn't be reported here.\n\n if (!segmentDuration) {\n return null;\n }\n const targetDuration = segmentInfo.playlist.targetDuration;\n const isSegmentWayTooLong = segmentTooLong({\n segmentDuration,\n maxDuration: targetDuration * 2\n });\n const isSegmentSlightlyTooLong = segmentTooLong({\n segmentDuration,\n maxDuration: targetDuration\n });\n const segmentTooLongMessage = `Segment with index ${segmentInfo.mediaIndex} ` + `from playlist ${segmentInfo.playlist.id} ` + `has a duration of ${segmentDuration} ` + `when the reported duration is ${segmentInfo.duration} ` + `and the target duration is ${targetDuration}. ` + 'For HLS content, a duration in excess of the target duration may result in ' + 'playback issues. 
See the HLS specification section on EXT-X-TARGETDURATION for ' + 'more details: ' + 'https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.3.1';\n if (isSegmentWayTooLong || isSegmentSlightlyTooLong) {\n return {\n severity: isSegmentWayTooLong ? 'warn' : 'info',\n message: segmentTooLongMessage\n };\n }\n return null;\n};\n/**\n * An object that manages segment loading and appending.\n *\n * @class SegmentLoader\n * @param {Object} options required and optional options\n * @extends videojs.EventTarget\n */\n\nclass SegmentLoader extends videojs.EventTarget {\n constructor(settings, options = {}) {\n super(); // check pre-conditions\n\n if (!settings) {\n throw new TypeError('Initialization settings are required');\n }\n if (typeof settings.currentTime !== 'function') {\n throw new TypeError('No currentTime getter specified');\n }\n if (!settings.mediaSource) {\n throw new TypeError('No MediaSource specified');\n } // public properties\n\n this.bandwidth = settings.bandwidth;\n this.throughput = {\n rate: 0,\n count: 0\n };\n this.roundTrip = NaN;\n this.resetStats_();\n this.mediaIndex = null;\n this.partIndex = null; // private settings\n\n this.hasPlayed_ = settings.hasPlayed;\n this.currentTime_ = settings.currentTime;\n this.seekable_ = settings.seekable;\n this.seeking_ = settings.seeking;\n this.duration_ = settings.duration;\n this.mediaSource_ = settings.mediaSource;\n this.vhs_ = settings.vhs;\n this.loaderType_ = settings.loaderType;\n this.currentMediaInfo_ = void 0;\n this.startingMediaInfo_ = void 0;\n this.segmentMetadataTrack_ = settings.segmentMetadataTrack;\n this.goalBufferLength_ = settings.goalBufferLength;\n this.sourceType_ = settings.sourceType;\n this.sourceUpdater_ = settings.sourceUpdater;\n this.inbandTextTracks_ = settings.inbandTextTracks;\n this.state_ = 'INIT';\n this.timelineChangeController_ = settings.timelineChangeController;\n this.shouldSaveSegmentTimingInfo_ = true;\n this.parse708captions_ = settings.parse708captions;\n this.useDtsForTimestampOffset_ = settings.useDtsForTimestampOffset;\n this.captionServices_ = settings.captionServices;\n this.exactManifestTimings = settings.exactManifestTimings;\n this.addMetadataToTextTrack = settings.addMetadataToTextTrack; // private instance variables\n\n this.checkBufferTimeout_ = null;\n this.error_ = void 0;\n this.currentTimeline_ = -1;\n this.shouldForceTimestampOffsetAfterResync_ = false;\n this.pendingSegment_ = null;\n this.xhrOptions_ = null;\n this.pendingSegments_ = [];\n this.audioDisabled_ = false;\n this.isPendingTimestampOffset_ = false; // TODO possibly move gopBuffer and timeMapping info to a separate controller\n\n this.gopBuffer_ = [];\n this.timeMapping_ = 0;\n this.safeAppend_ = false;\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n this.playlistOfLastInitSegment_ = {\n audio: null,\n video: null\n };\n this.callQueue_ = []; // If the segment loader prepares to load a segment, but does not have enough\n // information yet to start the loading process (e.g., if the audio loader wants to\n // load a segment from the next timeline but the main loader hasn't yet crossed that\n // timeline), then the load call will be added to the queue until it is ready to be\n // processed.\n\n this.loadQueue_ = [];\n this.metadataQueue_ = {\n id3: [],\n caption: []\n };\n this.waitingOnRemove_ = false;\n this.quotaExceededErrorRetryTimeout_ = null; // Fragmented mp4 playback\n\n this.activeInitSegmentId_ = null;\n this.initSegments_ = {}; // HLSe playback\n\n 
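// When cacheEncryptionKeys_ is enabled, AES-128 (HLSe) key bytes are kept in keyCache_\n // (keyed by segmentKeyId, see segmentKey()) so a key URI does not have to be fetched\n // again for every encrypted segment; decrypter_ performs the actual segment decryption.\n 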
this.cacheEncryptionKeys_ = settings.cacheEncryptionKeys;\n this.keyCache_ = {};\n this.decrypter_ = settings.decrypter; // Manages the tracking and generation of sync-points, mappings\n // between a time in the display time and a segment index within\n // a playlist\n\n this.syncController_ = settings.syncController;\n this.syncPoint_ = {\n segmentIndex: 0,\n time: 0\n };\n this.transmuxer_ = this.createTransmuxer_();\n this.triggerSyncInfoUpdate_ = () => this.trigger('syncinfoupdate');\n this.syncController_.on('syncinfoupdate', this.triggerSyncInfoUpdate_);\n this.mediaSource_.addEventListener('sourceopen', () => {\n if (!this.isEndOfStream_()) {\n this.ended_ = false;\n }\n }); // ...for determining the fetch location\n\n this.fetchAtBuffer_ = false;\n this.logger_ = logger(`SegmentLoader[${this.loaderType_}]`);\n Object.defineProperty(this, 'state', {\n get() {\n return this.state_;\n },\n set(newState) {\n if (newState !== this.state_) {\n this.logger_(`${this.state_} -> ${newState}`);\n this.state_ = newState;\n this.trigger('statechange');\n }\n }\n });\n this.sourceUpdater_.on('ready', () => {\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n }); // Only the main loader needs to listen for pending timeline changes, as the main\n // loader should wait for audio to be ready to change its timeline so that both main\n // and audio timelines change together. For more details, see the\n // shouldWaitForTimelineChange function.\n\n if (this.loaderType_ === 'main') {\n this.timelineChangeController_.on('pendingtimelinechange', () => {\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n });\n } // The main loader only listens on pending timeline changes, but the audio loader,\n // since its loads follow main, needs to listen on timeline changes. 
For more details,\n // see the shouldWaitForTimelineChange function.\n\n if (this.loaderType_ === 'audio') {\n this.timelineChangeController_.on('timelinechange', () => {\n if (this.hasEnoughInfoToLoad_()) {\n this.processLoadQueue_();\n }\n if (this.hasEnoughInfoToAppend_()) {\n this.processCallQueue_();\n }\n });\n }\n }\n /**\n * TODO: Current sync controller consists of many hls-specific strategies\n * media sequence sync is also hls-specific, and we would like to be protocol-agnostic on this level\n * this should be a part of the sync-controller and sync controller should expect different strategy list based on the protocol.\n *\n * @return {MediaSequenceSync|null}\n * @private\n */\n\n get mediaSequenceSync_() {\n return this.syncController_.getMediaSequenceSync(this.loaderType_);\n }\n createTransmuxer_() {\n return segmentTransmuxer.createTransmuxer({\n remux: false,\n alignGopsAtEnd: this.safeAppend_,\n keepOriginalTimestamps: true,\n parse708captions: this.parse708captions_,\n captionServices: this.captionServices_\n });\n }\n /**\n * reset all of our media stats\n *\n * @private\n */\n\n resetStats_() {\n this.mediaBytesTransferred = 0;\n this.mediaRequests = 0;\n this.mediaRequestsAborted = 0;\n this.mediaRequestsTimedout = 0;\n this.mediaRequestsErrored = 0;\n this.mediaTransferDuration = 0;\n this.mediaSecondsLoaded = 0;\n this.mediaAppends = 0;\n }\n /**\n * dispose of the SegmentLoader and reset to the default state\n */\n\n dispose() {\n this.trigger('dispose');\n this.state = 'DISPOSED';\n this.pause();\n this.abort_();\n if (this.transmuxer_) {\n this.transmuxer_.terminate();\n }\n this.resetStats_();\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n }\n if (this.syncController_ && this.triggerSyncInfoUpdate_) {\n this.syncController_.off('syncinfoupdate', this.triggerSyncInfoUpdate_);\n }\n this.off();\n }\n setAudio(enable) {\n this.audioDisabled_ = !enable;\n if (enable) {\n this.appendInitSegment_.audio = true;\n } else {\n // remove current track audio if it gets disabled\n this.sourceUpdater_.removeAudio(0, this.duration_());\n }\n }\n /**\n * abort anything that is currently doing on with the SegmentLoader\n * and reset to a default state\n */\n\n abort() {\n if (this.state !== 'WAITING') {\n if (this.pendingSegment_) {\n this.pendingSegment_ = null;\n }\n return;\n }\n this.abort_(); // We aborted the requests we were waiting on, so reset the loader's state to READY\n // since we are no longer \"waiting\" on any requests. XHR callback is not always run\n // when the request is aborted. 
This will prevent the loader from being stuck in the\n // WAITING state indefinitely.\n\n this.state = 'READY'; // don't wait for buffer check timeouts to begin fetching the\n // next segment\n\n if (!this.paused()) {\n this.monitorBuffer_();\n }\n }\n /**\n * abort all pending xhr requests and null any pending segements\n *\n * @private\n */\n\n abort_() {\n if (this.pendingSegment_ && this.pendingSegment_.abortRequests) {\n this.pendingSegment_.abortRequests();\n } // clear out the segment being processed\n\n this.pendingSegment_ = null;\n this.callQueue_ = [];\n this.loadQueue_ = [];\n this.metadataQueue_.id3 = [];\n this.metadataQueue_.caption = [];\n this.timelineChangeController_.clearPendingTimelineChange(this.loaderType_);\n this.waitingOnRemove_ = false;\n window$1.clearTimeout(this.quotaExceededErrorRetryTimeout_);\n this.quotaExceededErrorRetryTimeout_ = null;\n }\n checkForAbort_(requestId) {\n // If the state is APPENDING, then aborts will not modify the state, meaning the first\n // callback that happens should reset the state to READY so that loading can continue.\n if (this.state === 'APPENDING' && !this.pendingSegment_) {\n this.state = 'READY';\n return true;\n }\n if (!this.pendingSegment_ || this.pendingSegment_.requestId !== requestId) {\n return true;\n }\n return false;\n }\n /**\n * set an error on the segment loader and null out any pending segements\n *\n * @param {Error} error the error to set on the SegmentLoader\n * @return {Error} the error that was set or that is currently set\n */\n\n error(error) {\n if (typeof error !== 'undefined') {\n this.logger_('error occurred:', error);\n this.error_ = error;\n }\n this.pendingSegment_ = null;\n return this.error_;\n }\n endOfStream() {\n this.ended_ = true;\n if (this.transmuxer_) {\n // need to clear out any cached data to prepare for the new segment\n segmentTransmuxer.reset(this.transmuxer_);\n }\n this.gopBuffer_.length = 0;\n this.pause();\n this.trigger('ended');\n }\n /**\n * Indicates which time ranges are buffered\n *\n * @return {TimeRange}\n * TimeRange object representing the current buffered ranges\n */\n\n buffered_() {\n const trackInfo = this.getMediaInfo_();\n if (!this.sourceUpdater_ || !trackInfo) {\n return createTimeRanges();\n }\n if (this.loaderType_ === 'main') {\n const {\n hasAudio,\n hasVideo,\n isMuxed\n } = trackInfo;\n if (hasVideo && hasAudio && !this.audioDisabled_ && !isMuxed) {\n return this.sourceUpdater_.buffered();\n }\n if (hasVideo) {\n return this.sourceUpdater_.videoBuffered();\n }\n } // One case that can be ignored for now is audio only with alt audio,\n // as we don't yet have proper support for that.\n\n return this.sourceUpdater_.audioBuffered();\n }\n /**\n * Gets and sets init segment for the provided map\n *\n * @param {Object} map\n * The map object representing the init segment to get or set\n * @param {boolean=} set\n * If true, the init segment for the provided map should be saved\n * @return {Object}\n * map object for desired init segment\n */\n\n initSegmentForMap(map, set = false) {\n if (!map) {\n return null;\n }\n const id = initSegmentId(map);\n let storedMap = this.initSegments_[id];\n if (set && !storedMap && map.bytes) {\n this.initSegments_[id] = storedMap = {\n resolvedUri: map.resolvedUri,\n byterange: map.byterange,\n bytes: map.bytes,\n tracks: map.tracks,\n timescales: map.timescales\n };\n }\n return storedMap || map;\n }\n /**\n * Gets and sets key for the provided key\n *\n * @param {Object} key\n * The key object representing the key to get or 
set\n * @param {boolean=} set\n * If true, the key for the provided key should be saved\n * @return {Object}\n * Key object for desired key\n */\n\n segmentKey(key, set = false) {\n if (!key) {\n return null;\n }\n const id = segmentKeyId(key);\n let storedKey = this.keyCache_[id]; // TODO: We should use the HTTP Expires header to invalidate our cache per\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-6.2.3\n\n if (this.cacheEncryptionKeys_ && set && !storedKey && key.bytes) {\n this.keyCache_[id] = storedKey = {\n resolvedUri: key.resolvedUri,\n bytes: key.bytes\n };\n }\n const result = {\n resolvedUri: (storedKey || key).resolvedUri\n };\n if (storedKey) {\n result.bytes = storedKey.bytes;\n }\n return result;\n }\n /**\n * Returns true if all configuration required for loading is present, otherwise false.\n *\n * @return {boolean} True if the all configuration is ready for loading\n * @private\n */\n\n couldBeginLoading_() {\n return this.playlist_ && !this.paused();\n }\n /**\n * load a playlist and start to fill the buffer\n */\n\n load() {\n // un-pause\n this.monitorBuffer_(); // if we don't have a playlist yet, keep waiting for one to be\n // specified\n\n if (!this.playlist_) {\n return;\n } // if all the configuration is ready, initialize and begin loading\n\n if (this.state === 'INIT' && this.couldBeginLoading_()) {\n return this.init_();\n } // if we're in the middle of processing a segment already, don't\n // kick off an additional segment request\n\n if (!this.couldBeginLoading_() || this.state !== 'READY' && this.state !== 'INIT') {\n return;\n }\n this.state = 'READY';\n }\n /**\n * Once all the starting parameters have been specified, begin\n * operation. This method should only be invoked from the INIT\n * state.\n *\n * @private\n */\n\n init_() {\n this.state = 'READY'; // if this is the audio segment loader, and it hasn't been inited before, then any old\n // audio data from the muxed content should be removed\n\n this.resetEverything();\n return this.monitorBuffer_();\n }\n /**\n * set a playlist on the segment loader\n *\n * @param {PlaylistLoader} media the playlist to set on the segment loader\n */\n\n playlist(newPlaylist, options = {}) {\n if (!newPlaylist) {\n return;\n }\n const oldPlaylist = this.playlist_;\n const segmentInfo = this.pendingSegment_;\n this.playlist_ = newPlaylist;\n this.xhrOptions_ = options; // when we haven't started playing yet, the start of a live playlist\n // is always our zero-time so force a sync update each time the playlist\n // is refreshed from the server\n //\n // Use the INIT state to determine if playback has started, as the playlist sync info\n // should be fixed once requests begin (as sync points are generated based on sync\n // info), but not before then.\n\n if (this.state === 'INIT') {\n newPlaylist.syncInfo = {\n mediaSequence: newPlaylist.mediaSequence,\n time: 0\n }; // Setting the date time mapping means mapping the program date time (if available)\n // to time 0 on the player's timeline. The playlist's syncInfo serves a similar\n // purpose, mapping the initial mediaSequence to time zero. 
Since the syncInfo can\n // be updated as the playlist is refreshed before the loader starts loading, the\n // program date time mapping needs to be updated as well.\n //\n // This mapping is only done for the main loader because a program date time should\n // map equivalently between playlists.\n\n if (this.loaderType_ === 'main') {\n this.syncController_.setDateTimeMappingForStart(newPlaylist);\n }\n }\n let oldId = null;\n if (oldPlaylist) {\n if (oldPlaylist.id) {\n oldId = oldPlaylist.id;\n } else if (oldPlaylist.uri) {\n oldId = oldPlaylist.uri;\n }\n }\n this.logger_(`playlist update [${oldId} => ${newPlaylist.id || newPlaylist.uri}]`);\n if (this.mediaSequenceSync_) {\n this.mediaSequenceSync_.update(newPlaylist, this.currentTime_());\n this.logger_(`Playlist update:\ncurrentTime: ${this.currentTime_()}\nbufferedEnd: ${lastBufferedEnd(this.buffered_())}\n`, this.mediaSequenceSync_.diagnostics);\n } // in VOD, this is always a rendition switch (or we updated our syncInfo above)\n // in LIVE, we always want to update with new playlists (including refreshes)\n\n this.trigger('syncinfoupdate'); // if we were unpaused but waiting for a playlist, start\n // buffering now\n\n if (this.state === 'INIT' && this.couldBeginLoading_()) {\n return this.init_();\n }\n if (!oldPlaylist || oldPlaylist.uri !== newPlaylist.uri) {\n if (this.mediaIndex !== null) {\n // we must reset/resync the segment loader when we switch renditions and\n // the segment loader is already synced to the previous rendition\n // We only want to reset the loader here for LLHLS playback, as resetLoader sets fetchAtBuffer_\n // to false, resulting in fetching segments at currentTime and causing repeated\n // same-segment requests on playlist change. This erroneously drives up the playback watcher\n // stalled segment count, as re-requesting segments at the currentTime or browser cached segments\n // will not change the buffer.\n // Reference for LLHLS fixes: https://github.com/videojs/http-streaming/pull/1201\n const isLLHLS = !newPlaylist.endList && typeof newPlaylist.partTargetDuration === 'number';\n if (isLLHLS) {\n this.resetLoader();\n } else {\n this.resyncLoader();\n }\n }\n this.currentMediaInfo_ = void 0;\n this.trigger('playlistupdate'); // the rest of this function depends on `oldPlaylist` being defined\n\n return;\n } // we reloaded the same playlist so we are in a live scenario\n // and we will likely need to adjust the mediaIndex\n\n const mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence;\n this.logger_(`live window shift [${mediaSequenceDiff}]`); // update the mediaIndex on the SegmentLoader\n // this is important because we can abort a request and this value must be\n // equal to the last appended mediaIndex\n\n if (this.mediaIndex !== null) {\n this.mediaIndex -= mediaSequenceDiff; // this can happen if we are going to load the first segment, but get a playlist\n // update during that. 
mediaIndex would go from 0 to -1 if mediaSequence in the\n // new playlist was incremented by 1.\n\n if (this.mediaIndex < 0) {\n this.mediaIndex = null;\n this.partIndex = null;\n } else {\n const segment = this.playlist_.segments[this.mediaIndex]; // partIndex should remain the same for the same segment\n // unless parts fell off of the playlist for this segment.\n // In that case we need to reset partIndex and resync\n\n if (this.partIndex && (!segment.parts || !segment.parts.length || !segment.parts[this.partIndex])) {\n const mediaIndex = this.mediaIndex;\n this.logger_(`currently processing part (index ${this.partIndex}) no longer exists.`);\n this.resetLoader(); // We want to throw away the partIndex and the data associated with it,\n // as the part was dropped from our current playlists segment.\n // The mediaIndex will still be valid so keep that around.\n\n this.mediaIndex = mediaIndex;\n }\n }\n } // update the mediaIndex on the SegmentInfo object\n // this is important because we will update this.mediaIndex with this value\n // in `handleAppendsDone_` after the segment has been successfully appended\n\n if (segmentInfo) {\n segmentInfo.mediaIndex -= mediaSequenceDiff;\n if (segmentInfo.mediaIndex < 0) {\n segmentInfo.mediaIndex = null;\n segmentInfo.partIndex = null;\n } else {\n // we need to update the referenced segment so that timing information is\n // saved for the new playlist's segment, however, if the segment fell off the\n // playlist, we can leave the old reference and just lose the timing info\n if (segmentInfo.mediaIndex >= 0) {\n segmentInfo.segment = newPlaylist.segments[segmentInfo.mediaIndex];\n }\n if (segmentInfo.partIndex >= 0 && segmentInfo.segment.parts) {\n segmentInfo.part = segmentInfo.segment.parts[segmentInfo.partIndex];\n }\n }\n }\n this.syncController_.saveExpiredSegmentInfo(oldPlaylist, newPlaylist);\n }\n /**\n * Prevent the loader from fetching additional segments. If there\n * is a segment request outstanding, it will finish processing\n * before the loader halts. A segment loader can be unpaused by\n * calling load().\n */\n\n pause() {\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n this.checkBufferTimeout_ = null;\n }\n }\n /**\n * Returns whether the segment loader is fetching additional\n * segments when given the opportunity. 
This property can be\n * modified through calls to pause() and load().\n */\n\n paused() {\n return this.checkBufferTimeout_ === null;\n }\n /**\n * Delete all the buffered data and reset the SegmentLoader\n *\n * @param {Function} [done] an optional callback to be executed when the remove\n * operation is complete\n */\n\n resetEverything(done) {\n this.ended_ = false;\n this.activeInitSegmentId_ = null;\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n this.resetLoader(); // remove from 0, the earliest point, to Infinity, to signify removal of everything.\n // VTT Segment Loader doesn't need to do anything but in the regular SegmentLoader,\n // we then clamp the value to duration if necessary.\n\n this.remove(0, Infinity, done); // clears fmp4 captions\n\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearAllMp4Captions'\n }); // reset the cache in the transmuxer\n\n this.transmuxer_.postMessage({\n action: 'reset'\n });\n }\n }\n /**\n * Force the SegmentLoader to resync and start loading around the currentTime instead\n * of starting at the end of the buffer\n *\n * Useful for fast quality changes\n */\n\n resetLoader() {\n this.fetchAtBuffer_ = false;\n if (this.mediaSequenceSync_) {\n this.mediaSequenceSync_.resetAppendedStatus();\n }\n this.resyncLoader();\n }\n /**\n * Force the SegmentLoader to restart synchronization and make a conservative guess\n * before returning to the simple walk-forward method\n */\n\n resyncLoader() {\n if (this.transmuxer_) {\n // need to clear out any cached data to prepare for the new segment\n segmentTransmuxer.reset(this.transmuxer_);\n }\n this.mediaIndex = null;\n this.partIndex = null;\n this.syncPoint_ = null;\n this.isPendingTimestampOffset_ = false; // this is mainly to sync timing-info when switching between renditions with and without timestamp-rollover,\n // so we don't want it for DASH or fragmented mp4 segments.\n\n const isFmp4 = this.currentMediaInfo_ && this.currentMediaInfo_.isFmp4;\n const isHlsTs = this.sourceType_ === 'hls' && !isFmp4;\n if (isHlsTs) {\n this.shouldForceTimestampOffsetAfterResync_ = true;\n }\n this.callQueue_ = [];\n this.loadQueue_ = [];\n this.metadataQueue_.id3 = [];\n this.metadataQueue_.caption = [];\n this.abort();\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearParsedMp4Captions'\n });\n }\n }\n /**\n * Remove any data in the source buffer between start and end times\n *\n * @param {number} start - the start time of the region to remove from the buffer\n * @param {number} end - the end time of the region to remove from the buffer\n * @param {Function} [done] - an optional callback to be executed when the remove\n * operation is complete\n * @param {boolean} force - force all remove operations to happen\n */\n\n remove(start, end, done = () => {}, force = false) {\n // clamp end to duration if we need to remove everything.\n // This is due to a browser bug that causes issues if we remove to Infinity.\n // videojs/videojs-contrib-hls#1225\n if (end === Infinity) {\n end = this.duration_();\n } // skip removes that would throw an error\n // commonly happens during a rendition switch at the start of a video\n // from start 0 to end 0\n\n if (end <= start) {\n this.logger_(`skipping remove because end ${end} is <= start ${start}`);\n return;\n }\n if (!this.sourceUpdater_ || !this.getMediaInfo_()) {\n this.logger_('skipping remove because no source updater or starting media info'); // nothing to remove if we haven't processed any media\n\n 
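// getMediaInfo_() stays falsy until track info has been received for a segment (see\n // handleTrackInfo_), so nothing has been appended yet and there are no buffers to clear.\n 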
return;\n } // set it to one to complete this function's removes\n\n let removesRemaining = 1;\n const removeFinished = () => {\n removesRemaining--;\n if (removesRemaining === 0) {\n done();\n }\n };\n if (force || !this.audioDisabled_) {\n removesRemaining++;\n this.sourceUpdater_.removeAudio(start, end, removeFinished);\n } // While it would be better to only remove video if the main loader has video, this\n // should be safe with audio only as removeVideo will call back even if there's no\n // video buffer.\n //\n // In theory we can check to see if there's video before calling the remove, but in\n // the event that we're switching between renditions and from video to audio only\n // (when we add support for that), we may need to clear the video contents despite\n // what the new media will contain.\n\n if (force || this.loaderType_ === 'main') {\n this.gopBuffer_ = removeGopBuffer(this.gopBuffer_, start, end, this.timeMapping_);\n removesRemaining++;\n this.sourceUpdater_.removeVideo(start, end, removeFinished);\n } // remove any captions and ID3 tags\n\n for (const track in this.inbandTextTracks_) {\n removeCuesFromTrack(start, end, this.inbandTextTracks_[track]);\n }\n removeCuesFromTrack(start, end, this.segmentMetadataTrack_); // finished this function's removes\n\n removeFinished();\n }\n /**\n * (re-)schedule monitorBufferTick_ to run as soon as possible\n *\n * @private\n */\n\n monitorBuffer_() {\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n }\n this.checkBufferTimeout_ = window$1.setTimeout(this.monitorBufferTick_.bind(this), 1);\n }\n /**\n * As long as the SegmentLoader is in the READY state, periodically\n * invoke fillBuffer_().\n *\n * @private\n */\n\n monitorBufferTick_() {\n if (this.state === 'READY') {\n this.fillBuffer_();\n }\n if (this.checkBufferTimeout_) {\n window$1.clearTimeout(this.checkBufferTimeout_);\n }\n this.checkBufferTimeout_ = window$1.setTimeout(this.monitorBufferTick_.bind(this), CHECK_BUFFER_DELAY);\n }\n /**\n * fill the buffer with segements unless the sourceBuffers are\n * currently updating\n *\n * Note: this function should only ever be called by monitorBuffer_\n * and never directly\n *\n * @private\n */\n\n fillBuffer_() {\n // TODO since the source buffer maintains a queue, and we shouldn't call this function\n // except when we're ready for the next segment, this check can most likely be removed\n if (this.sourceUpdater_.updating()) {\n return;\n } // see if we need to begin loading immediately\n\n const segmentInfo = this.chooseNextRequest_();\n if (!segmentInfo) {\n return;\n }\n if (typeof segmentInfo.timestampOffset === 'number') {\n this.isPendingTimestampOffset_ = false;\n this.timelineChangeController_.pendingTimelineChange({\n type: this.loaderType_,\n from: this.currentTimeline_,\n to: segmentInfo.timeline\n });\n }\n this.loadSegment_(segmentInfo);\n }\n /**\n * Determines if we should call endOfStream on the media source based\n * on the state of the buffer or if appened segment was the final\n * segment in the playlist.\n *\n * @param {number} [mediaIndex] the media index of segment we last appended\n * @param {Object} [playlist] a media playlist object\n * @return {boolean} do we need to call endOfStream on the MediaSource\n */\n\n isEndOfStream_(mediaIndex = this.mediaIndex, playlist = this.playlist_, partIndex = this.partIndex) {\n if (!playlist || !this.mediaSource_) {\n return false;\n }\n const segment = typeof mediaIndex === 'number' && playlist.segments[mediaIndex]; // 
mediaIndex is zero based but length is 1 based\n\n const appendedLastSegment = mediaIndex + 1 === playlist.segments.length; // true if there are no parts, or this is the last part.\n\n const appendedLastPart = !segment || !segment.parts || partIndex + 1 === segment.parts.length; // if we've buffered to the end of the video, we need to call endOfStream\n // so that MediaSources can trigger the `ended` event when it runs out of\n // buffered data instead of waiting for me\n\n return playlist.endList && this.mediaSource_.readyState === 'open' && appendedLastSegment && appendedLastPart;\n }\n /**\n * Determines what request should be made given current segment loader state.\n *\n * @return {Object} a request object that describes the segment/part to load\n */\n\n chooseNextRequest_() {\n const buffered = this.buffered_();\n const bufferedEnd = lastBufferedEnd(buffered) || 0;\n const bufferedTime = timeAheadOf(buffered, this.currentTime_());\n const preloaded = !this.hasPlayed_() && bufferedTime >= 1;\n const haveEnoughBuffer = bufferedTime >= this.goalBufferLength_();\n const segments = this.playlist_.segments; // return no segment if:\n // 1. we don't have segments\n // 2. The video has not yet played and we already downloaded a segment\n // 3. we already have enough buffered time\n\n if (!segments.length || preloaded || haveEnoughBuffer) {\n return null;\n }\n this.syncPoint_ = this.syncPoint_ || this.syncController_.getSyncPoint(this.playlist_, this.duration_(), this.currentTimeline_, this.currentTime_(), this.loaderType_);\n const next = {\n partIndex: null,\n mediaIndex: null,\n startOfSegment: null,\n playlist: this.playlist_,\n isSyncRequest: Boolean(!this.syncPoint_)\n };\n if (next.isSyncRequest) {\n next.mediaIndex = getSyncSegmentCandidate(this.currentTimeline_, segments, bufferedEnd);\n this.logger_(`choose next request. Can not find sync point. Fallback to media Index: ${next.mediaIndex}`);\n } else if (this.mediaIndex !== null) {\n const segment = segments[this.mediaIndex];\n const partIndex = typeof this.partIndex === 'number' ? this.partIndex : -1;\n next.startOfSegment = segment.end ? segment.end : bufferedEnd;\n if (segment.parts && segment.parts[partIndex + 1]) {\n next.mediaIndex = this.mediaIndex;\n next.partIndex = partIndex + 1;\n } else {\n next.mediaIndex = this.mediaIndex + 1;\n }\n } else {\n let segmentIndex;\n let partIndex;\n let startTime;\n const targetTime = this.fetchAtBuffer_ ? 
bufferedEnd : this.currentTime_();\n if (this.mediaSequenceSync_) {\n this.logger_(`chooseNextRequest_ request after Quality Switch:\nFor TargetTime: ${targetTime}.\nCurrentTime: ${this.currentTime_()}\nBufferedEnd: ${bufferedEnd}\nFetch At Buffer: ${this.fetchAtBuffer_}\n`, this.mediaSequenceSync_.diagnostics);\n }\n if (this.mediaSequenceSync_ && this.mediaSequenceSync_.isReliable) {\n const syncInfo = this.getSyncInfoFromMediaSequenceSync_(targetTime);\n if (!syncInfo) {\n this.logger_('chooseNextRequest_ - no sync info found using media sequence sync'); // no match\n\n return null;\n }\n this.logger_(`chooseNextRequest_ mediaSequence syncInfo (${syncInfo.start} --> ${syncInfo.end})`);\n segmentIndex = syncInfo.segmentIndex;\n partIndex = syncInfo.partIndex;\n startTime = syncInfo.start;\n } else {\n this.logger_('chooseNextRequest_ - fallback to a regular segment selection algorithm, based on a syncPoint.'); // fallback\n\n const mediaInfoForTime = Playlist.getMediaInfoForTime({\n exactManifestTimings: this.exactManifestTimings,\n playlist: this.playlist_,\n currentTime: targetTime,\n startingPartIndex: this.syncPoint_.partIndex,\n startingSegmentIndex: this.syncPoint_.segmentIndex,\n startTime: this.syncPoint_.time\n });\n segmentIndex = mediaInfoForTime.segmentIndex;\n partIndex = mediaInfoForTime.partIndex;\n startTime = mediaInfoForTime.startTime;\n }\n next.getMediaInfoForTime = this.fetchAtBuffer_ ? `bufferedEnd ${targetTime}` : `currentTime ${targetTime}`;\n next.mediaIndex = segmentIndex;\n next.startOfSegment = startTime;\n next.partIndex = partIndex;\n this.logger_(`choose next request. Playlist switched and we have a sync point. Media Index: ${next.mediaIndex} `);\n }\n const nextSegment = segments[next.mediaIndex];\n let nextPart = nextSegment && typeof next.partIndex === 'number' && nextSegment.parts && nextSegment.parts[next.partIndex]; // if the next segment index is invalid or\n // the next partIndex is invalid do not choose a next segment.\n\n if (!nextSegment || typeof next.partIndex === 'number' && !nextPart) {\n return null;\n } // if the next segment has parts, and we don't have a partIndex.\n // Set partIndex to 0\n\n if (typeof next.partIndex !== 'number' && nextSegment.parts) {\n next.partIndex = 0;\n nextPart = nextSegment.parts[0];\n } // independentSegments applies to every segment in a playlist. 
If independentSegments appears in a main playlist,\n // it applies to each segment in each media playlist.\n // https://datatracker.ietf.org/doc/html/draft-pantos-http-live-streaming-23#section-4.3.5.1\n\n const hasIndependentSegments = this.vhs_.playlists && this.vhs_.playlists.main && this.vhs_.playlists.main.independentSegments || this.playlist_.independentSegments; // if we have no buffered data then we need to make sure\n // that the next part we append is \"independent\" if possible.\n // So we check if the previous part is independent, and request\n // it if it is.\n\n if (!bufferedTime && nextPart && !hasIndependentSegments && !nextPart.independent) {\n if (next.partIndex === 0) {\n const lastSegment = segments[next.mediaIndex - 1];\n const lastSegmentLastPart = lastSegment.parts && lastSegment.parts.length && lastSegment.parts[lastSegment.parts.length - 1];\n if (lastSegmentLastPart && lastSegmentLastPart.independent) {\n next.mediaIndex -= 1;\n next.partIndex = lastSegment.parts.length - 1;\n next.independent = 'previous segment';\n }\n } else if (nextSegment.parts[next.partIndex - 1].independent) {\n next.partIndex -= 1;\n next.independent = 'previous part';\n }\n }\n const ended = this.mediaSource_ && this.mediaSource_.readyState === 'ended'; // do not choose a next segment if all of the following:\n // 1. this is the last segment in the playlist\n // 2. end of stream has been called on the media source already\n // 3. the player is not seeking\n\n if (next.mediaIndex >= segments.length - 1 && ended && !this.seeking_()) {\n return null;\n }\n if (this.shouldForceTimestampOffsetAfterResync_) {\n this.shouldForceTimestampOffsetAfterResync_ = false;\n next.forceTimestampOffset = true;\n this.logger_('choose next request. Force timestamp offset after loader resync');\n }\n return this.generateSegmentInfo_(next);\n }\n getSyncInfoFromMediaSequenceSync_(targetTime) {\n if (!this.mediaSequenceSync_) {\n return null;\n } // we should pull the target time to the least available time if we drop out of sync for any reason\n\n const finalTargetTime = Math.max(targetTime, this.mediaSequenceSync_.start);\n if (targetTime !== finalTargetTime) {\n this.logger_(`getSyncInfoFromMediaSequenceSync_. 
Pulled target time from ${targetTime} to ${finalTargetTime}`);\n }\n const mediaSequenceSyncInfo = this.mediaSequenceSync_.getSyncInfoForTime(finalTargetTime);\n if (!mediaSequenceSyncInfo) {\n // no match at all\n return null;\n }\n if (!mediaSequenceSyncInfo.isAppended) {\n // has a perfect match\n return mediaSequenceSyncInfo;\n } // has match, but segment was already appended.\n // attempt to auto-advance to the nearest next segment:\n\n const nextMediaSequenceSyncInfo = this.mediaSequenceSync_.getSyncInfoForTime(mediaSequenceSyncInfo.end);\n if (!nextMediaSequenceSyncInfo) {\n // no match at all\n return null;\n }\n if (nextMediaSequenceSyncInfo.isAppended) {\n this.logger_('getSyncInfoFromMediaSequenceSync_: We encounter unexpected scenario where next media sequence sync info is also appended!');\n } // got match with the nearest next segment\n\n return nextMediaSequenceSyncInfo;\n }\n generateSegmentInfo_(options) {\n const {\n independent,\n playlist,\n mediaIndex,\n startOfSegment,\n isSyncRequest,\n partIndex,\n forceTimestampOffset,\n getMediaInfoForTime\n } = options;\n const segment = playlist.segments[mediaIndex];\n const part = typeof partIndex === 'number' && segment.parts[partIndex];\n const segmentInfo = {\n requestId: 'segment-loader-' + Math.random(),\n // resolve the segment URL relative to the playlist\n uri: part && part.resolvedUri || segment.resolvedUri,\n // the segment's mediaIndex at the time it was requested\n mediaIndex,\n partIndex: part ? partIndex : null,\n // whether or not to update the SegmentLoader's state with this\n // segment's mediaIndex\n isSyncRequest,\n startOfSegment,\n // the segment's playlist\n playlist,\n // unencrypted bytes of the segment\n bytes: null,\n // when a key is defined for this segment, the encrypted bytes\n encryptedBytes: null,\n // The target timestampOffset for this segment when we append it\n // to the source buffer\n timestampOffset: null,\n // The timeline that the segment is in\n timeline: segment.timeline,\n // The expected duration of the segment in seconds\n duration: part && part.duration || segment.duration,\n // retain the segment in case the playlist updates while doing an async process\n segment,\n part,\n byteLength: 0,\n transmuxer: this.transmuxer_,\n // type of getMediaInfoForTime that was used to get this segment\n getMediaInfoForTime,\n independent\n };\n const overrideCheck = typeof forceTimestampOffset !== 'undefined' ? 
forceTimestampOffset : this.isPendingTimestampOffset_;\n segmentInfo.timestampOffset = this.timestampOffsetForSegment_({\n segmentTimeline: segment.timeline,\n currentTimeline: this.currentTimeline_,\n startOfSegment,\n buffered: this.buffered_(),\n overrideCheck\n });\n const audioBufferedEnd = lastBufferedEnd(this.sourceUpdater_.audioBuffered());\n if (typeof audioBufferedEnd === 'number') {\n // since the transmuxer is using the actual timing values, but the buffer is\n // adjusted by the timestamp offset, we must adjust the value here\n segmentInfo.audioAppendStart = audioBufferedEnd - this.sourceUpdater_.audioTimestampOffset();\n }\n if (this.sourceUpdater_.videoBuffered().length) {\n segmentInfo.gopsToAlignWith = gopsSafeToAlignWith(this.gopBuffer_,\n // since the transmuxer is using the actual timing values, but the time is\n // adjusted by the timestmap offset, we must adjust the value here\n this.currentTime_() - this.sourceUpdater_.videoTimestampOffset(), this.timeMapping_);\n }\n return segmentInfo;\n } // get the timestampoffset for a segment,\n // added so that vtt segment loader can override and prevent\n // adding timestamp offsets.\n\n timestampOffsetForSegment_(options) {\n return timestampOffsetForSegment(options);\n }\n /**\n * Determines if the network has enough bandwidth to complete the current segment\n * request in a timely manner. If not, the request will be aborted early and bandwidth\n * updated to trigger a playlist switch.\n *\n * @param {Object} stats\n * Object containing stats about the request timing and size\n * @private\n */\n\n earlyAbortWhenNeeded_(stats) {\n if (this.vhs_.tech_.paused() ||\n // Don't abort if the current playlist is on the lowestEnabledRendition\n // TODO: Replace using timeout with a boolean indicating whether this playlist is\n // the lowestEnabledRendition.\n !this.xhrOptions_.timeout ||\n // Don't abort if we have no bandwidth information to estimate segment sizes\n !this.playlist_.attributes.BANDWIDTH) {\n return;\n } // Wait at least 1 second since the first byte of data has been received before\n // using the calculated bandwidth from the progress event to allow the bitrate\n // to stabilize\n\n if (Date.now() - (stats.firstBytesReceivedAt || Date.now()) < 1000) {\n return;\n }\n const currentTime = this.currentTime_();\n const measuredBandwidth = stats.bandwidth;\n const segmentDuration = this.pendingSegment_.duration;\n const requestTimeRemaining = Playlist.estimateSegmentRequestTime(segmentDuration, measuredBandwidth, this.playlist_, stats.bytesReceived); // Subtract 1 from the timeUntilRebuffer so we still consider an early abort\n // if we are only left with less than 1 second when the request completes.\n // A negative timeUntilRebuffering indicates we are already rebuffering\n\n const timeUntilRebuffer$1 = timeUntilRebuffer(this.buffered_(), currentTime, this.vhs_.tech_.playbackRate()) - 1; // Only consider aborting early if the estimated time to finish the download\n // is larger than the estimated time until the player runs out of forward buffer\n\n if (requestTimeRemaining <= timeUntilRebuffer$1) {\n return;\n }\n const switchCandidate = minRebufferMaxBandwidthSelector({\n main: this.vhs_.playlists.main,\n currentTime,\n bandwidth: measuredBandwidth,\n duration: this.duration_(),\n segmentDuration,\n timeUntilRebuffer: timeUntilRebuffer$1,\n currentTimeline: this.currentTimeline_,\n syncController: this.syncController_\n });\n if (!switchCandidate) {\n return;\n }\n const rebufferingImpact = requestTimeRemaining - 
timeUntilRebuffer$1;\n    const timeSavedBySwitching = rebufferingImpact - switchCandidate.rebufferingImpact;\n    let minimumTimeSaving = 0.5; // If we are already rebuffering, increase the amount of variance we add to the\n    // potential round trip time of the new request so that we are not too aggressive\n    // with switching to a playlist that might save us a fraction of a second.\n\n    if (timeUntilRebuffer$1 <= TIME_FUDGE_FACTOR) {\n      minimumTimeSaving = 1;\n    }\n    if (!switchCandidate.playlist || switchCandidate.playlist.uri === this.playlist_.uri || timeSavedBySwitching < minimumTimeSaving) {\n      return;\n    } // set the bandwidth to that of the desired playlist being sure to scale by\n    // BANDWIDTH_VARIANCE and add one so the playlist selector does not exclude it\n    // don't trigger a bandwidthupdate as the bandwidth is artificial\n\n    this.bandwidth = switchCandidate.playlist.attributes.BANDWIDTH * Config.BANDWIDTH_VARIANCE + 1;\n    this.trigger('earlyabort');\n  }\n  handleAbort_(segmentInfo) {\n    this.logger_(`Aborting ${segmentInfoString(segmentInfo)}`);\n    this.mediaRequestsAborted += 1;\n  }\n  /**\n   * XHR `progress` event handler\n   *\n   * @param {Event}\n   *        The XHR `progress` event\n   * @param {Object} simpleSegment\n   *        A simplified segment object copy\n   * @private\n   */\n\n  handleProgress_(event, simpleSegment) {\n    this.earlyAbortWhenNeeded_(simpleSegment.stats);\n    if (this.checkForAbort_(simpleSegment.requestId)) {\n      return;\n    }\n    this.trigger('progress');\n  }\n  handleTrackInfo_(simpleSegment, trackInfo) {\n    this.earlyAbortWhenNeeded_(simpleSegment.stats);\n    if (this.checkForAbort_(simpleSegment.requestId)) {\n      return;\n    }\n    if (this.checkForIllegalMediaSwitch(trackInfo)) {\n      return;\n    }\n    trackInfo = trackInfo || {}; // When we have track info, determine what media types this loader is dealing with.\n    // Guard against cases where we're not getting track info at all until we are\n    // certain that all streams will provide it.\n\n    if (!shallowEqual(this.currentMediaInfo_, trackInfo)) {\n      this.appendInitSegment_ = {\n        audio: true,\n        video: true\n      };\n      this.startingMediaInfo_ = trackInfo;\n      this.currentMediaInfo_ = trackInfo;\n      this.logger_('trackinfo update', trackInfo);\n      this.trigger('trackinfo');\n    } // trackinfo may cause an abort if the trackinfo\n    // causes a codec change to an unsupported codec.\n\n    if (this.checkForAbort_(simpleSegment.requestId)) {\n      return;\n    } // set trackinfo on the pending segment so that\n    // it can append.\n\n    this.pendingSegment_.trackInfo = trackInfo; // check if any calls were waiting on the track info\n\n    if (this.hasEnoughInfoToAppend_()) {\n      this.processCallQueue_();\n    }\n  }\n  handleTimingInfo_(simpleSegment, mediaType, timeType, time) {\n    this.earlyAbortWhenNeeded_(simpleSegment.stats);\n    if (this.checkForAbort_(simpleSegment.requestId)) {\n      return;\n    }\n    const segmentInfo = this.pendingSegment_;\n    const timingInfoProperty = timingInfoPropertyForMedia(mediaType);\n    segmentInfo[timingInfoProperty] = segmentInfo[timingInfoProperty] || {};\n    segmentInfo[timingInfoProperty][timeType] = time;\n    this.logger_(`timinginfo: ${mediaType} - ${timeType} - ${time}`); // check if any calls were waiting on the timing info\n\n    if (this.hasEnoughInfoToAppend_()) {\n      this.processCallQueue_();\n    }\n  }\n  handleCaptions_(simpleSegment, captionData) {\n    this.earlyAbortWhenNeeded_(simpleSegment.stats);\n    if (this.checkForAbort_(simpleSegment.requestId)) {\n      return;\n    } // This could only happen with fmp4 segments, but\n    // should still not happen in general\n\n    if (captionData.length === 0) 
{\n      this.logger_('SegmentLoader received no captions from a caption event');\n      return;\n    }\n    const segmentInfo = this.pendingSegment_; // Wait until we have some video data so that caption timing\n    // can be adjusted by the timestamp offset\n\n    if (!segmentInfo.hasAppendedData_) {\n      this.metadataQueue_.caption.push(this.handleCaptions_.bind(this, simpleSegment, captionData));\n      return;\n    }\n    const timestampOffset = this.sourceUpdater_.videoTimestampOffset() === null ? this.sourceUpdater_.audioTimestampOffset() : this.sourceUpdater_.videoTimestampOffset();\n    const captionTracks = {}; // get total start/end and captions for each track/stream\n\n    captionData.forEach(caption => {\n      // caption.stream is actually a track name...\n      // set to the existing values in tracks or default values\n      captionTracks[caption.stream] = captionTracks[caption.stream] || {\n        // Infinity, as any other value will be less than this\n        startTime: Infinity,\n        captions: [],\n        // 0, as any other value will be more than this\n        endTime: 0\n      };\n      const captionTrack = captionTracks[caption.stream];\n      captionTrack.startTime = Math.min(captionTrack.startTime, caption.startTime + timestampOffset);\n      captionTrack.endTime = Math.max(captionTrack.endTime, caption.endTime + timestampOffset);\n      captionTrack.captions.push(caption);\n    });\n    Object.keys(captionTracks).forEach(trackName => {\n      const {\n        startTime,\n        endTime,\n        captions\n      } = captionTracks[trackName];\n      const inbandTextTracks = this.inbandTextTracks_;\n      this.logger_(`adding cues from ${startTime} -> ${endTime} for ${trackName}`);\n      createCaptionsTrackIfNotExists(inbandTextTracks, this.vhs_.tech_, trackName); // clear out any cues that start and end at the same time period for the same track.\n      // We do this because a rendition change that also changes the timescale for captions\n      // will result in captions being re-parsed for certain segments. 
If we add them again\n // without clearing we will have two of the same captions visible.\n\n removeCuesFromTrack(startTime, endTime, inbandTextTracks[trackName]);\n addCaptionData({\n captionArray: captions,\n inbandTextTracks,\n timestampOffset\n });\n }); // Reset stored captions since we added parsed\n // captions to a text track at this point\n\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearParsedMp4Captions'\n });\n }\n }\n handleId3_(simpleSegment, id3Frames, dispatchType) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n }\n const segmentInfo = this.pendingSegment_; // we need to have appended data in order for the timestamp offset to be set\n\n if (!segmentInfo.hasAppendedData_) {\n this.metadataQueue_.id3.push(this.handleId3_.bind(this, simpleSegment, id3Frames, dispatchType));\n return;\n }\n this.addMetadataToTextTrack(dispatchType, id3Frames, this.duration_());\n }\n processMetadataQueue_() {\n this.metadataQueue_.id3.forEach(fn => fn());\n this.metadataQueue_.caption.forEach(fn => fn());\n this.metadataQueue_.id3 = [];\n this.metadataQueue_.caption = [];\n }\n processCallQueue_() {\n const callQueue = this.callQueue_; // Clear out the queue before the queued functions are run, since some of the\n // functions may check the length of the load queue and default to pushing themselves\n // back onto the queue.\n\n this.callQueue_ = [];\n callQueue.forEach(fun => fun());\n }\n processLoadQueue_() {\n const loadQueue = this.loadQueue_; // Clear out the queue before the queued functions are run, since some of the\n // functions may check the length of the load queue and default to pushing themselves\n // back onto the queue.\n\n this.loadQueue_ = [];\n loadQueue.forEach(fun => fun());\n }\n /**\n * Determines whether the loader has enough info to load the next segment.\n *\n * @return {boolean}\n * Whether or not the loader has enough info to load the next segment\n */\n\n hasEnoughInfoToLoad_() {\n // Since primary timing goes by video, only the audio loader potentially needs to wait\n // to load.\n if (this.loaderType_ !== 'audio') {\n return true;\n }\n const segmentInfo = this.pendingSegment_; // A fill buffer must have already run to establish a pending segment before there's\n // enough info to load.\n\n if (!segmentInfo) {\n return false;\n } // The first segment can and should be loaded immediately so that source buffers are\n // created together (before appending). Source buffer creation uses the presence of\n // audio and video data to determine whether to create audio/video source buffers, and\n // uses processed (transmuxed or parsed) media to determine the types required.\n\n if (!this.getCurrentMediaInfo_()) {\n return true;\n }\n if (\n // Technically, instead of waiting to load a segment on timeline changes, a segment\n // can be requested and downloaded and only wait before it is transmuxed or parsed.\n // But in practice, there are a few reasons why it is better to wait until a loader\n // is ready to append that segment before requesting and downloading:\n //\n // 1. Because audio and main loaders cross discontinuities together, if this loader\n // is waiting for the other to catch up, then instead of requesting another\n // segment and using up more bandwidth, by not yet loading, more bandwidth is\n // allotted to the loader currently behind.\n // 2. 
media-segment-request doesn't have to have logic to consider whether a segment\n // is ready to be processed or not, isolating the queueing behavior to the loader.\n // 3. The audio loader bases some of its segment properties on timing information\n // provided by the main loader, meaning that, if the logic for waiting on\n // processing was in media-segment-request, then it would also need to know how\n // to re-generate the segment information after the main loader caught up.\n shouldWaitForTimelineChange({\n timelineChangeController: this.timelineChangeController_,\n currentTimeline: this.currentTimeline_,\n segmentTimeline: segmentInfo.timeline,\n loaderType: this.loaderType_,\n audioDisabled: this.audioDisabled_\n })) {\n return false;\n }\n return true;\n }\n getCurrentMediaInfo_(segmentInfo = this.pendingSegment_) {\n return segmentInfo && segmentInfo.trackInfo || this.currentMediaInfo_;\n }\n getMediaInfo_(segmentInfo = this.pendingSegment_) {\n return this.getCurrentMediaInfo_(segmentInfo) || this.startingMediaInfo_;\n }\n getPendingSegmentPlaylist() {\n return this.pendingSegment_ ? this.pendingSegment_.playlist : null;\n }\n hasEnoughInfoToAppend_() {\n if (!this.sourceUpdater_.ready()) {\n return false;\n } // If content needs to be removed or the loader is waiting on an append reattempt,\n // then no additional content should be appended until the prior append is resolved.\n\n if (this.waitingOnRemove_ || this.quotaExceededErrorRetryTimeout_) {\n return false;\n }\n const segmentInfo = this.pendingSegment_;\n const trackInfo = this.getCurrentMediaInfo_(); // no segment to append any data for or\n // we do not have information on this specific\n // segment yet\n\n if (!segmentInfo || !trackInfo) {\n return false;\n }\n const {\n hasAudio,\n hasVideo,\n isMuxed\n } = trackInfo;\n if (hasVideo && !segmentInfo.videoTimingInfo) {\n return false;\n } // muxed content only relies on video timing information for now.\n\n if (hasAudio && !this.audioDisabled_ && !isMuxed && !segmentInfo.audioTimingInfo) {\n return false;\n }\n if (shouldWaitForTimelineChange({\n timelineChangeController: this.timelineChangeController_,\n currentTimeline: this.currentTimeline_,\n segmentTimeline: segmentInfo.timeline,\n loaderType: this.loaderType_,\n audioDisabled: this.audioDisabled_\n })) {\n return false;\n }\n return true;\n }\n handleData_(simpleSegment, result) {\n this.earlyAbortWhenNeeded_(simpleSegment.stats);\n if (this.checkForAbort_(simpleSegment.requestId)) {\n return;\n } // If there's anything in the call queue, then this data came later and should be\n // executed after the calls currently queued.\n\n if (this.callQueue_.length || !this.hasEnoughInfoToAppend_()) {\n this.callQueue_.push(this.handleData_.bind(this, simpleSegment, result));\n return;\n }\n const segmentInfo = this.pendingSegment_; // update the time mapping so we can translate from display time to media time\n\n this.setTimeMapping_(segmentInfo.timeline); // for tracking overall stats\n\n this.updateMediaSecondsLoaded_(segmentInfo.part || segmentInfo.segment); // Note that the state isn't changed from loading to appending. This is because abort\n // logic may change behavior depending on the state, and changing state too early may\n // inflate our estimates of bandwidth. 
In the future this should be re-examined to\n // note more granular states.\n // don't process and append data if the mediaSource is closed\n\n if (this.mediaSource_.readyState === 'closed') {\n return;\n } // if this request included an initialization segment, save that data\n // to the initSegment cache\n\n if (simpleSegment.map) {\n simpleSegment.map = this.initSegmentForMap(simpleSegment.map, true); // move over init segment properties to media request\n\n segmentInfo.segment.map = simpleSegment.map;\n } // if this request included a segment key, save that data in the cache\n\n if (simpleSegment.key) {\n this.segmentKey(simpleSegment.key, true);\n }\n segmentInfo.isFmp4 = simpleSegment.isFmp4;\n segmentInfo.timingInfo = segmentInfo.timingInfo || {};\n if (segmentInfo.isFmp4) {\n this.trigger('fmp4');\n segmentInfo.timingInfo.start = segmentInfo[timingInfoPropertyForMedia(result.type)].start;\n } else {\n const trackInfo = this.getCurrentMediaInfo_();\n const useVideoTimingInfo = this.loaderType_ === 'main' && trackInfo && trackInfo.hasVideo;\n let firstVideoFrameTimeForData;\n if (useVideoTimingInfo) {\n firstVideoFrameTimeForData = segmentInfo.videoTimingInfo.start;\n } // Segment loader knows more about segment timing than the transmuxer (in certain\n // aspects), so make any changes required for a more accurate start time.\n // Don't set the end time yet, as the segment may not be finished processing.\n\n segmentInfo.timingInfo.start = this.trueSegmentStart_({\n currentStart: segmentInfo.timingInfo.start,\n playlist: segmentInfo.playlist,\n mediaIndex: segmentInfo.mediaIndex,\n currentVideoTimestampOffset: this.sourceUpdater_.videoTimestampOffset(),\n useVideoTimingInfo,\n firstVideoFrameTimeForData,\n videoTimingInfo: segmentInfo.videoTimingInfo,\n audioTimingInfo: segmentInfo.audioTimingInfo\n });\n } // Init segments for audio and video only need to be appended in certain cases. Now\n // that data is about to be appended, we can check the final cases to determine\n // whether we should append an init segment.\n\n this.updateAppendInitSegmentStatus(segmentInfo, result.type); // Timestamp offset should be updated once we get new data and have its timing info,\n // as we use the start of the segment to offset the best guess (playlist provided)\n // timestamp offset.\n\n this.updateSourceBufferTimestampOffset_(segmentInfo); // if this is a sync request we need to determine whether it should\n // be appended or not.\n\n if (segmentInfo.isSyncRequest) {\n // first save/update our timing info for this segment.\n // this is what allows us to choose an accurate segment\n // and the main reason we make a sync request.\n this.updateTimingInfoEnd_(segmentInfo);\n this.syncController_.saveSegmentTimingInfo({\n segmentInfo,\n shouldSaveTimelineMapping: this.loaderType_ === 'main'\n });\n const next = this.chooseNextRequest_(); // If the sync request isn't the segment that would be requested next\n // after taking into account its timing info, do not append it.\n\n if (next.mediaIndex !== segmentInfo.mediaIndex || next.partIndex !== segmentInfo.partIndex) {\n this.logger_('sync segment was incorrect, not appending');\n return;\n } // otherwise append it like any other segment as our guess was correct.\n\n this.logger_('sync segment was correct, appending');\n } // Save some state so that in the future anything waiting on first append (and/or\n // timestamp offset(s)) can process immediately. 
While the extra state isn't optimal,\n // we need some notion of whether the timestamp offset or other relevant information\n // has had a chance to be set.\n\n segmentInfo.hasAppendedData_ = true; // Now that the timestamp offset should be set, we can append any waiting ID3 tags.\n\n this.processMetadataQueue_();\n this.appendData_(segmentInfo, result);\n }\n updateAppendInitSegmentStatus(segmentInfo, type) {\n // alt audio doesn't manage timestamp offset\n if (this.loaderType_ === 'main' && typeof segmentInfo.timestampOffset === 'number' &&\n // in the case that we're handling partial data, we don't want to append an init\n // segment for each chunk\n !segmentInfo.changedTimestampOffset) {\n // if the timestamp offset changed, the timeline may have changed, so we have to re-\n // append init segments\n this.appendInitSegment_ = {\n audio: true,\n video: true\n };\n }\n if (this.playlistOfLastInitSegment_[type] !== segmentInfo.playlist) {\n // make sure we append init segment on playlist changes, in case the media config\n // changed\n this.appendInitSegment_[type] = true;\n }\n }\n getInitSegmentAndUpdateState_({\n type,\n initSegment,\n map,\n playlist\n }) {\n // \"The EXT-X-MAP tag specifies how to obtain the Media Initialization Section\n // (Section 3) required to parse the applicable Media Segments. It applies to every\n // Media Segment that appears after it in the Playlist until the next EXT-X-MAP tag\n // or until the end of the playlist.\"\n // https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.2.5\n if (map) {\n const id = initSegmentId(map);\n if (this.activeInitSegmentId_ === id) {\n // don't need to re-append the init segment if the ID matches\n return null;\n } // a map-specified init segment takes priority over any transmuxed (or otherwise\n // obtained) init segment\n //\n // this also caches the init segment for later use\n\n initSegment = this.initSegmentForMap(map, true).bytes;\n this.activeInitSegmentId_ = id;\n } // We used to always prepend init segments for video, however, that shouldn't be\n // necessary. Instead, we should only append on changes, similar to what we've always\n // done for audio. This is more important (though may not be that important) for\n // frame-by-frame appending for LHLS, simply because of the increased quantity of\n // appends.\n\n if (initSegment && this.appendInitSegment_[type]) {\n // Make sure we track the playlist that we last used for the init segment, so that\n // we can re-append the init segment in the event that we get data from a new\n // playlist. Discontinuities and track changes are handled in other sections.\n this.playlistOfLastInitSegment_[type] = playlist; // Disable future init segment appends for this type. Until a change is necessary.\n\n this.appendInitSegment_[type] = false; // we need to clear out the fmp4 active init segment id, since\n // we are appending the muxer init segment\n\n this.activeInitSegmentId_ = null;\n return initSegment;\n }\n return null;\n }\n handleQuotaExceededError_({\n segmentInfo,\n type,\n bytes\n }, error) {\n const audioBuffered = this.sourceUpdater_.audioBuffered();\n const videoBuffered = this.sourceUpdater_.videoBuffered(); // For now we're ignoring any notion of gaps in the buffer, but they, in theory,\n // should be cleared out during the buffer removals. 
However, log in case it helps\n // debug.\n\n if (audioBuffered.length > 1) {\n this.logger_('On QUOTA_EXCEEDED_ERR, found gaps in the audio buffer: ' + timeRangesToArray(audioBuffered).join(', '));\n }\n if (videoBuffered.length > 1) {\n this.logger_('On QUOTA_EXCEEDED_ERR, found gaps in the video buffer: ' + timeRangesToArray(videoBuffered).join(', '));\n }\n const audioBufferStart = audioBuffered.length ? audioBuffered.start(0) : 0;\n const audioBufferEnd = audioBuffered.length ? audioBuffered.end(audioBuffered.length - 1) : 0;\n const videoBufferStart = videoBuffered.length ? videoBuffered.start(0) : 0;\n const videoBufferEnd = videoBuffered.length ? videoBuffered.end(videoBuffered.length - 1) : 0;\n if (audioBufferEnd - audioBufferStart <= MIN_BACK_BUFFER && videoBufferEnd - videoBufferStart <= MIN_BACK_BUFFER) {\n // Can't remove enough buffer to make room for new segment (or the browser doesn't\n // allow for appends of segments this size). In the future, it may be possible to\n // split up the segment and append in pieces, but for now, error out this playlist\n // in an attempt to switch to a more manageable rendition.\n this.logger_('On QUOTA_EXCEEDED_ERR, single segment too large to append to ' + 'buffer, triggering an error. ' + `Appended byte length: ${bytes.byteLength}, ` + `audio buffer: ${timeRangesToArray(audioBuffered).join(', ')}, ` + `video buffer: ${timeRangesToArray(videoBuffered).join(', ')}, `);\n this.error({\n message: 'Quota exceeded error with append of a single segment of content',\n excludeUntil: Infinity,\n metadata: {\n errorType: videojs.Error.SegmentExceedsSourceBufferQuota\n }\n });\n this.trigger('error');\n return;\n } // To try to resolve the quota exceeded error, clear back buffer and retry. This means\n // that the segment-loader should block on future events until this one is handled, so\n // that it doesn't keep moving onto further segments. Adding the call to the call\n // queue will prevent further appends until waitingOnRemove_ and\n // quotaExceededErrorRetryTimeout_ are cleared.\n //\n // Note that this will only block the current loader. In the case of demuxed content,\n // the other load may keep filling as fast as possible. 
In practice, this should be\n    // OK, as it is a rare case when either audio has a high enough bitrate to fill up a\n    // source buffer, or video fills without enough room for audio to append (and without\n    // the availability of clearing out seconds of back buffer to make room for audio).\n    // But it might still be good to handle this case in the future as a TODO.\n\n    this.waitingOnRemove_ = true;\n    this.callQueue_.push(this.appendToSourceBuffer_.bind(this, {\n      segmentInfo,\n      type,\n      bytes\n    }));\n    const currentTime = this.currentTime_(); // Try to remove as much audio and video as possible to make room for new content\n    // before retrying.\n\n    const timeToRemoveUntil = currentTime - MIN_BACK_BUFFER;\n    this.logger_(`On QUOTA_EXCEEDED_ERR, removing audio/video from 0 to ${timeToRemoveUntil}`);\n    this.remove(0, timeToRemoveUntil, () => {\n      this.logger_(`On QUOTA_EXCEEDED_ERR, retrying append in ${MIN_BACK_BUFFER}s`);\n      this.waitingOnRemove_ = false; // wait the length of time allotted in the back buffer to prevent wasted\n      // attempts (since we can't clear less than the minimum)\n\n      this.quotaExceededErrorRetryTimeout_ = window$1.setTimeout(() => {\n        this.logger_('On QUOTA_EXCEEDED_ERR, re-processing call queue');\n        this.quotaExceededErrorRetryTimeout_ = null;\n        this.processCallQueue_();\n      }, MIN_BACK_BUFFER * 1000);\n    }, true);\n  }\n  handleAppendError_({\n    segmentInfo,\n    type,\n    bytes\n  }, error) {\n    // if there's no error, nothing to do\n    if (!error) {\n      return;\n    }\n    if (error.code === QUOTA_EXCEEDED_ERR) {\n      this.handleQuotaExceededError_({\n        segmentInfo,\n        type,\n        bytes\n      }); // A quota exceeded error should be recoverable with a future re-append, so no need\n      // to trigger an append error.\n\n      return;\n    }\n    this.logger_('Received non QUOTA_EXCEEDED_ERR on append', error); // If an append errors, we often can't recover.\n    // (see https://w3c.github.io/media-source/#sourcebuffer-append-error).\n    //\n    // Trigger a special error so that it can be handled separately from normal,\n    // recoverable errors.\n\n    this.error({\n      message: `${type} append of ${bytes.length}b failed for segment ` + `#${segmentInfo.mediaIndex} in playlist ${segmentInfo.playlist.id}`,\n      metadata: {\n        errorType: videojs.Error.SegmentAppendError\n      }\n    });\n    this.trigger('appenderror');\n  }\n  appendToSourceBuffer_({\n    segmentInfo,\n    type,\n    initSegment,\n    data,\n    bytes\n  }) {\n    // If this is a re-append, bytes were already created and don't need to be recreated\n    if (!bytes) {\n      const segments = [data];\n      let byteLength = data.byteLength;\n      if (initSegment) {\n        // if the media initialization segment is changing, append it before the content\n        // segment\n        segments.unshift(initSegment);\n        byteLength += initSegment.byteLength;\n      } // Technically we should be OK appending the init segment separately, however, we\n      // haven't yet tested that, and prepending is how we have always done things.\n\n      bytes = concatSegments({\n        bytes: byteLength,\n        segments\n      });\n    }\n    this.sourceUpdater_.appendBuffer({\n      segmentInfo,\n      type,\n      bytes\n    }, this.handleAppendError_.bind(this, {\n      segmentInfo,\n      type,\n      bytes\n    }));\n  }\n  handleSegmentTimingInfo_(type, requestId, segmentTimingInfo) {\n    if (!this.pendingSegment_ || requestId !== this.pendingSegment_.requestId) {\n      return;\n    }\n    const segment = this.pendingSegment_.segment;\n    const timingInfoProperty = `${type}TimingInfo`;\n    if (!segment[timingInfoProperty]) {\n      segment[timingInfoProperty] = {};\n    }\n    segment[timingInfoProperty].transmuxerPrependedSeconds = 
segmentTimingInfo.prependedContentDuration || 0;\n segment[timingInfoProperty].transmuxedPresentationStart = segmentTimingInfo.start.presentation;\n segment[timingInfoProperty].transmuxedDecodeStart = segmentTimingInfo.start.decode;\n segment[timingInfoProperty].transmuxedPresentationEnd = segmentTimingInfo.end.presentation;\n segment[timingInfoProperty].transmuxedDecodeEnd = segmentTimingInfo.end.decode; // mainly used as a reference for debugging\n\n segment[timingInfoProperty].baseMediaDecodeTime = segmentTimingInfo.baseMediaDecodeTime;\n }\n appendData_(segmentInfo, result) {\n const {\n type,\n data\n } = result;\n if (!data || !data.byteLength) {\n return;\n }\n if (type === 'audio' && this.audioDisabled_) {\n return;\n }\n const initSegment = this.getInitSegmentAndUpdateState_({\n type,\n initSegment: result.initSegment,\n playlist: segmentInfo.playlist,\n map: segmentInfo.isFmp4 ? segmentInfo.segment.map : null\n });\n this.appendToSourceBuffer_({\n segmentInfo,\n type,\n initSegment,\n data\n });\n }\n /**\n * load a specific segment from a request into the buffer\n *\n * @private\n */\n\n loadSegment_(segmentInfo) {\n this.state = 'WAITING';\n this.pendingSegment_ = segmentInfo;\n this.trimBackBuffer_(segmentInfo);\n if (typeof segmentInfo.timestampOffset === 'number') {\n if (this.transmuxer_) {\n this.transmuxer_.postMessage({\n action: 'clearAllMp4Captions'\n });\n }\n }\n if (!this.hasEnoughInfoToLoad_()) {\n this.loadQueue_.push(() => {\n // regenerate the audioAppendStart, timestampOffset, etc as they\n // may have changed since this function was added to the queue.\n const options = _extends({}, segmentInfo, {\n forceTimestampOffset: true\n });\n _extends(segmentInfo, this.generateSegmentInfo_(options));\n this.isPendingTimestampOffset_ = false;\n this.updateTransmuxerAndRequestSegment_(segmentInfo);\n });\n return;\n }\n this.updateTransmuxerAndRequestSegment_(segmentInfo);\n }\n updateTransmuxerAndRequestSegment_(segmentInfo) {\n // We'll update the source buffer's timestamp offset once we have transmuxed data, but\n // the transmuxer still needs to be updated before then.\n //\n // Even though keepOriginalTimestamps is set to true for the transmuxer, timestamp\n // offset must be passed to the transmuxer for stream correcting adjustments.\n if (this.shouldUpdateTransmuxerTimestampOffset_(segmentInfo.timestampOffset)) {\n this.gopBuffer_.length = 0; // gopsToAlignWith was set before the GOP buffer was cleared\n\n segmentInfo.gopsToAlignWith = [];\n this.timeMapping_ = 0; // reset values in the transmuxer since a discontinuity should start fresh\n\n this.transmuxer_.postMessage({\n action: 'reset'\n });\n this.transmuxer_.postMessage({\n action: 'setTimestampOffset',\n timestampOffset: segmentInfo.timestampOffset\n });\n }\n const simpleSegment = this.createSimplifiedSegmentObj_(segmentInfo);\n const isEndOfStream = this.isEndOfStream_(segmentInfo.mediaIndex, segmentInfo.playlist, segmentInfo.partIndex);\n const isWalkingForward = this.mediaIndex !== null;\n const isDiscontinuity = segmentInfo.timeline !== this.currentTimeline_ &&\n // currentTimeline starts at -1, so we shouldn't end the timeline switching to 0,\n // the first timeline\n segmentInfo.timeline > 0;\n const isEndOfTimeline = isEndOfStream || isWalkingForward && isDiscontinuity;\n this.logger_(`Requesting\n${compactSegmentUrlDescription(segmentInfo.uri)}\n${segmentInfoString(segmentInfo)}`); // If there's an init segment associated with this segment, but it is not cached (identified by a lack of bytes),\n // 
then this init segment has never been seen before and should be appended.\n //\n // At this point the content type (audio/video or both) is not yet known, but it should be safe to set\n // both to true and leave the decision of whether to append the init segment to append time.\n\n if (simpleSegment.map && !simpleSegment.map.bytes) {\n this.logger_('going to request init segment.');\n this.appendInitSegment_ = {\n video: true,\n audio: true\n };\n }\n segmentInfo.abortRequests = mediaSegmentRequest({\n xhr: this.vhs_.xhr,\n xhrOptions: this.xhrOptions_,\n decryptionWorker: this.decrypter_,\n segment: simpleSegment,\n abortFn: this.handleAbort_.bind(this, segmentInfo),\n progressFn: this.handleProgress_.bind(this),\n trackInfoFn: this.handleTrackInfo_.bind(this),\n timingInfoFn: this.handleTimingInfo_.bind(this),\n videoSegmentTimingInfoFn: this.handleSegmentTimingInfo_.bind(this, 'video', segmentInfo.requestId),\n audioSegmentTimingInfoFn: this.handleSegmentTimingInfo_.bind(this, 'audio', segmentInfo.requestId),\n captionsFn: this.handleCaptions_.bind(this),\n isEndOfTimeline,\n endedTimelineFn: () => {\n this.logger_('received endedtimeline callback');\n },\n id3Fn: this.handleId3_.bind(this),\n dataFn: this.handleData_.bind(this),\n doneFn: this.segmentRequestFinished_.bind(this),\n onTransmuxerLog: ({\n message,\n level,\n stream\n }) => {\n this.logger_(`${segmentInfoString(segmentInfo)} logged from transmuxer stream ${stream} as a ${level}: ${message}`);\n }\n });\n }\n /**\n * trim the back buffer so that we don't have too much data\n * in the source buffer\n *\n * @private\n *\n * @param {Object} segmentInfo - the current segment\n */\n\n trimBackBuffer_(segmentInfo) {\n const removeToTime = safeBackBufferTrimTime(this.seekable_(), this.currentTime_(), this.playlist_.targetDuration || 10); // Chrome has a hard limit of 150MB of\n // buffer and a very conservative \"garbage collector\"\n // We manually clear out the old buffer to ensure\n // we don't trigger the QuotaExceeded error\n // on the source buffer during subsequent appends\n\n if (removeToTime > 0) {\n this.remove(0, removeToTime);\n }\n }\n /**\n * created a simplified copy of the segment object with just the\n * information necessary to perform the XHR and decryption\n *\n * @private\n *\n * @param {Object} segmentInfo - the current segment\n * @return {Object} a simplified segment object copy\n */\n\n createSimplifiedSegmentObj_(segmentInfo) {\n const segment = segmentInfo.segment;\n const part = segmentInfo.part;\n const simpleSegment = {\n resolvedUri: part ? part.resolvedUri : segment.resolvedUri,\n byterange: part ? part.byterange : segment.byterange,\n requestId: segmentInfo.requestId,\n transmuxer: segmentInfo.transmuxer,\n audioAppendStart: segmentInfo.audioAppendStart,\n gopsToAlignWith: segmentInfo.gopsToAlignWith,\n part: segmentInfo.part\n };\n const previousSegment = segmentInfo.playlist.segments[segmentInfo.mediaIndex - 1];\n if (previousSegment && previousSegment.timeline === segment.timeline) {\n // The baseStartTime of a segment is used to handle rollover when probing the TS\n // segment to retrieve timing information. Since the probe only looks at the media's\n // times (e.g., PTS and DTS values of the segment), and doesn't consider the\n // player's time (e.g., player.currentTime()), baseStartTime should reflect the\n // media time as well. transmuxedDecodeEnd represents the end time of a segment, in\n // seconds of media time, so should be used here. 
The previous segment is used since\n      // the end of the previous segment should represent the beginning of the current\n      // segment, so long as they are on the same timeline.\n      if (previousSegment.videoTimingInfo) {\n        simpleSegment.baseStartTime = previousSegment.videoTimingInfo.transmuxedDecodeEnd;\n      } else if (previousSegment.audioTimingInfo) {\n        simpleSegment.baseStartTime = previousSegment.audioTimingInfo.transmuxedDecodeEnd;\n      }\n    }\n    if (segment.key) {\n      // if the media sequence is greater than 2^32, the IV will be incorrect\n      // assuming 10s segments, that would be about 1300 years\n      const iv = segment.key.iv || new Uint32Array([0, 0, 0, segmentInfo.mediaIndex + segmentInfo.playlist.mediaSequence]);\n      simpleSegment.key = this.segmentKey(segment.key);\n      simpleSegment.key.iv = iv;\n    }\n    if (segment.map) {\n      simpleSegment.map = this.initSegmentForMap(segment.map);\n    }\n    return simpleSegment;\n  }\n  saveTransferStats_(stats) {\n    // every request counts as a media request even if it has been aborted\n    // or canceled due to a timeout\n    this.mediaRequests += 1;\n    if (stats) {\n      this.mediaBytesTransferred += stats.bytesReceived;\n      this.mediaTransferDuration += stats.roundTripTime;\n    }\n  }\n  saveBandwidthRelatedStats_(duration, stats) {\n    // byteLength will be used for throughput, and should be based on bytes received,\n    // which we only know at the end of the request and should reflect total bytes\n    // downloaded rather than just bytes processed from components of the segment\n    this.pendingSegment_.byteLength = stats.bytesReceived;\n    if (duration < MIN_SEGMENT_DURATION_TO_SAVE_STATS) {\n      this.logger_(`Ignoring segment's bandwidth because its duration of ${duration}` + ` is less than the min to record ${MIN_SEGMENT_DURATION_TO_SAVE_STATS}`);\n      return;\n    }\n    this.bandwidth = stats.bandwidth;\n    this.roundTrip = stats.roundTripTime;\n  }\n  handleTimeout_() {\n    // although the VTT segment loader bandwidth isn't really used, it's good to\n    // maintain functionality between segment loaders\n    this.mediaRequestsTimedout += 1;\n    this.bandwidth = 1;\n    this.roundTrip = NaN;\n    this.trigger('bandwidthupdate');\n    this.trigger('timeout');\n  }\n  /**\n   * Handle the callback from the segmentRequest function and set the\n   * associated SegmentLoader state and errors if necessary\n   *\n   * @private\n   */\n\n  segmentRequestFinished_(error, simpleSegment, result) {\n    // TODO handle special cases, e.g., muxed audio/video but only audio in the segment\n    // check the call queue directly since this function doesn't need to deal with any\n    // data, and can continue even if the source buffers are not set up and we didn't get\n    // any data from the segment\n    if (this.callQueue_.length) {\n      this.callQueue_.push(this.segmentRequestFinished_.bind(this, error, simpleSegment, result));\n      return;\n    }\n    this.saveTransferStats_(simpleSegment.stats); // The request was aborted and the SegmentLoader has already been reset\n\n    if (!this.pendingSegment_) {\n      return;\n    } // the request was aborted and the SegmentLoader has already started\n    // another request. 
this can happen when the timeout for an aborted\n // request triggers due to a limitation in the XHR library\n // do not count this as any sort of request or we risk double-counting\n\n if (simpleSegment.requestId !== this.pendingSegment_.requestId) {\n return;\n } // an error occurred from the active pendingSegment_ so reset everything\n\n if (error) {\n this.pendingSegment_ = null;\n this.state = 'READY'; // aborts are not a true error condition and nothing corrective needs to be done\n\n if (error.code === REQUEST_ERRORS.ABORTED) {\n return;\n }\n this.pause(); // the error is really just that at least one of the requests timed-out\n // set the bandwidth to a very low value and trigger an ABR switch to\n // take emergency action\n\n if (error.code === REQUEST_ERRORS.TIMEOUT) {\n this.handleTimeout_();\n return;\n } // if control-flow has arrived here, then the error is real\n // emit an error event to exclude the current playlist\n\n this.mediaRequestsErrored += 1;\n this.error(error);\n this.trigger('error');\n return;\n }\n const segmentInfo = this.pendingSegment_; // the response was a success so set any bandwidth stats the request\n // generated for ABR purposes\n\n this.saveBandwidthRelatedStats_(segmentInfo.duration, simpleSegment.stats);\n segmentInfo.endOfAllRequests = simpleSegment.endOfAllRequests;\n if (result.gopInfo) {\n this.gopBuffer_ = updateGopBuffer(this.gopBuffer_, result.gopInfo, this.safeAppend_);\n } // Although we may have already started appending on progress, we shouldn't switch the\n // state away from loading until we are officially done loading the segment data.\n\n this.state = 'APPENDING'; // used for testing\n\n this.trigger('appending');\n this.waitForAppendsToComplete_(segmentInfo);\n }\n setTimeMapping_(timeline) {\n const timelineMapping = this.syncController_.mappingForTimeline(timeline);\n if (timelineMapping !== null) {\n this.timeMapping_ = timelineMapping;\n }\n }\n updateMediaSecondsLoaded_(segment) {\n if (typeof segment.start === 'number' && typeof segment.end === 'number') {\n this.mediaSecondsLoaded += segment.end - segment.start;\n } else {\n this.mediaSecondsLoaded += segment.duration;\n }\n }\n shouldUpdateTransmuxerTimestampOffset_(timestampOffset) {\n if (timestampOffset === null) {\n return false;\n } // note that we're potentially using the same timestamp offset for both video and\n // audio\n\n if (this.loaderType_ === 'main' && timestampOffset !== this.sourceUpdater_.videoTimestampOffset()) {\n return true;\n }\n if (!this.audioDisabled_ && timestampOffset !== this.sourceUpdater_.audioTimestampOffset()) {\n return true;\n }\n return false;\n }\n trueSegmentStart_({\n currentStart,\n playlist,\n mediaIndex,\n firstVideoFrameTimeForData,\n currentVideoTimestampOffset,\n useVideoTimingInfo,\n videoTimingInfo,\n audioTimingInfo\n }) {\n if (typeof currentStart !== 'undefined') {\n // if start was set once, keep using it\n return currentStart;\n }\n if (!useVideoTimingInfo) {\n return audioTimingInfo.start;\n }\n const previousSegment = playlist.segments[mediaIndex - 1]; // The start of a segment should be the start of the first full frame contained\n // within that segment. Since the transmuxer maintains a cache of incomplete data\n // from and/or the last frame seen, the start time may reflect a frame that starts\n // in the previous segment. 
Check for that case and ensure the start time is\n // accurate for the segment.\n\n if (mediaIndex === 0 || !previousSegment || typeof previousSegment.start === 'undefined' || previousSegment.end !== firstVideoFrameTimeForData + currentVideoTimestampOffset) {\n return firstVideoFrameTimeForData;\n }\n return videoTimingInfo.start;\n }\n waitForAppendsToComplete_(segmentInfo) {\n const trackInfo = this.getCurrentMediaInfo_(segmentInfo);\n if (!trackInfo) {\n this.error({\n message: 'No starting media returned, likely due to an unsupported media format.',\n playlistExclusionDuration: Infinity,\n metadata: {\n errorType: videojs.Error.SegmentUnsupportedMediaFormat\n }\n });\n this.trigger('error');\n return;\n } // Although transmuxing is done, appends may not yet be finished. Throw a marker\n // on each queue this loader is responsible for to ensure that the appends are\n // complete.\n\n const {\n hasAudio,\n hasVideo,\n isMuxed\n } = trackInfo;\n const waitForVideo = this.loaderType_ === 'main' && hasVideo;\n const waitForAudio = !this.audioDisabled_ && hasAudio && !isMuxed;\n segmentInfo.waitingOnAppends = 0; // segments with no data\n\n if (!segmentInfo.hasAppendedData_) {\n if (!segmentInfo.timingInfo && typeof segmentInfo.timestampOffset === 'number') {\n // When there's no audio or video data in the segment, there's no audio or video\n // timing information.\n //\n // If there's no audio or video timing information, then the timestamp offset\n // can't be adjusted to the appropriate value for the transmuxer and source\n // buffers.\n //\n // Therefore, the next segment should be used to set the timestamp offset.\n this.isPendingTimestampOffset_ = true;\n } // override settings for metadata only segments\n\n segmentInfo.timingInfo = {\n start: 0\n };\n segmentInfo.waitingOnAppends++;\n if (!this.isPendingTimestampOffset_) {\n // update the timestampoffset\n this.updateSourceBufferTimestampOffset_(segmentInfo); // make sure the metadata queue is processed even though we have\n // no video/audio data.\n\n this.processMetadataQueue_();\n } // append is \"done\" instantly with no data.\n\n this.checkAppendsDone_(segmentInfo);\n return;\n } // Since source updater could call back synchronously, do the increments first.\n\n if (waitForVideo) {\n segmentInfo.waitingOnAppends++;\n }\n if (waitForAudio) {\n segmentInfo.waitingOnAppends++;\n }\n if (waitForVideo) {\n this.sourceUpdater_.videoQueueCallback(this.checkAppendsDone_.bind(this, segmentInfo));\n }\n if (waitForAudio) {\n this.sourceUpdater_.audioQueueCallback(this.checkAppendsDone_.bind(this, segmentInfo));\n }\n }\n checkAppendsDone_(segmentInfo) {\n if (this.checkForAbort_(segmentInfo.requestId)) {\n return;\n }\n segmentInfo.waitingOnAppends--;\n if (segmentInfo.waitingOnAppends === 0) {\n this.handleAppendsDone_();\n }\n }\n checkForIllegalMediaSwitch(trackInfo) {\n const illegalMediaSwitchError = illegalMediaSwitch(this.loaderType_, this.getCurrentMediaInfo_(), trackInfo);\n if (illegalMediaSwitchError) {\n this.error({\n message: illegalMediaSwitchError,\n playlistExclusionDuration: Infinity,\n metadata: {\n errorType: videojs.Error.SegmentSwitchError\n }\n });\n this.trigger('error');\n return true;\n }\n return false;\n }\n updateSourceBufferTimestampOffset_(segmentInfo) {\n if (segmentInfo.timestampOffset === null ||\n // we don't yet have the start for whatever media type (video or audio) has\n // priority, timing-wise, so we must wait\n typeof segmentInfo.timingInfo.start !== 'number' ||\n // already updated the timestamp 
offset for this segment\n segmentInfo.changedTimestampOffset ||\n // the alt audio loader should not be responsible for setting the timestamp offset\n this.loaderType_ !== 'main') {\n return;\n }\n let didChange = false; // Primary timing goes by video, and audio is trimmed in the transmuxer, meaning that\n // the timing info here comes from video. In the event that the audio is longer than\n // the video, this will trim the start of the audio.\n // This also trims any offset from 0 at the beginning of the media\n\n segmentInfo.timestampOffset -= this.getSegmentStartTimeForTimestampOffsetCalculation_({\n videoTimingInfo: segmentInfo.segment.videoTimingInfo,\n audioTimingInfo: segmentInfo.segment.audioTimingInfo,\n timingInfo: segmentInfo.timingInfo\n }); // In the event that there are part segment downloads, each will try to update the\n // timestamp offset. Retaining this bit of state prevents us from updating in the\n // future (within the same segment), however, there may be a better way to handle it.\n\n segmentInfo.changedTimestampOffset = true;\n if (segmentInfo.timestampOffset !== this.sourceUpdater_.videoTimestampOffset()) {\n this.sourceUpdater_.videoTimestampOffset(segmentInfo.timestampOffset);\n didChange = true;\n }\n if (segmentInfo.timestampOffset !== this.sourceUpdater_.audioTimestampOffset()) {\n this.sourceUpdater_.audioTimestampOffset(segmentInfo.timestampOffset);\n didChange = true;\n }\n if (didChange) {\n this.trigger('timestampoffset');\n }\n }\n getSegmentStartTimeForTimestampOffsetCalculation_({\n videoTimingInfo,\n audioTimingInfo,\n timingInfo\n }) {\n if (!this.useDtsForTimestampOffset_) {\n return timingInfo.start;\n }\n if (videoTimingInfo && typeof videoTimingInfo.transmuxedDecodeStart === 'number') {\n return videoTimingInfo.transmuxedDecodeStart;\n } // handle audio only\n\n if (audioTimingInfo && typeof audioTimingInfo.transmuxedDecodeStart === 'number') {\n return audioTimingInfo.transmuxedDecodeStart;\n } // handle content not transmuxed (e.g., MP4)\n\n return timingInfo.start;\n }\n updateTimingInfoEnd_(segmentInfo) {\n segmentInfo.timingInfo = segmentInfo.timingInfo || {};\n const trackInfo = this.getMediaInfo_();\n const useVideoTimingInfo = this.loaderType_ === 'main' && trackInfo && trackInfo.hasVideo;\n const prioritizedTimingInfo = useVideoTimingInfo && segmentInfo.videoTimingInfo ? segmentInfo.videoTimingInfo : segmentInfo.audioTimingInfo;\n if (!prioritizedTimingInfo) {\n return;\n }\n segmentInfo.timingInfo.end = typeof prioritizedTimingInfo.end === 'number' ?\n // End time may not exist in a case where we aren't parsing the full segment (one\n // current example is the case of fmp4), so use the rough duration to calculate an\n // end time.\n prioritizedTimingInfo.end : prioritizedTimingInfo.start + segmentInfo.duration;\n }\n /**\n * callback to run when appendBuffer is finished. 
detects if we are\n * in a good state to do things with the data we got, or if we need\n * to wait for more\n *\n * @private\n */\n\n handleAppendsDone_() {\n // appendsdone can cause an abort\n if (this.pendingSegment_) {\n this.trigger('appendsdone');\n }\n if (!this.pendingSegment_) {\n this.state = 'READY'; // TODO should this move into this.checkForAbort to speed up requests post abort in\n // all appending cases?\n\n if (!this.paused()) {\n this.monitorBuffer_();\n }\n return;\n }\n const segmentInfo = this.pendingSegment_;\n if (segmentInfo.part && segmentInfo.part.syncInfo) {\n // low-latency flow\n segmentInfo.part.syncInfo.markAppended();\n } else if (segmentInfo.segment.syncInfo) {\n // normal flow\n segmentInfo.segment.syncInfo.markAppended();\n } // Now that the end of the segment has been reached, we can set the end time. It's\n // best to wait until all appends are done so we're sure that the primary media is\n // finished (and we have its end time).\n\n this.updateTimingInfoEnd_(segmentInfo);\n if (this.shouldSaveSegmentTimingInfo_) {\n // Timeline mappings should only be saved for the main loader. This is for multiple\n // reasons:\n //\n // 1) Only one mapping is saved per timeline, meaning that if both the audio loader\n // and the main loader try to save the timeline mapping, whichever comes later\n // will overwrite the first. In theory this is OK, as the mappings should be the\n // same, however, it breaks for (2)\n // 2) In the event of a live stream, the initial live point will make for a somewhat\n // arbitrary mapping. If audio and video streams are not perfectly in-sync, then\n // the mapping will be off for one of the streams, dependent on which one was\n // first saved (see (1)).\n // 3) Primary timing goes by video in VHS, so the mapping should be video.\n //\n // Since the audio loader will wait for the main loader to load the first segment,\n // the main loader will save the first timeline mapping, and ensure that there won't\n // be a case where audio loads two segments without saving a mapping (thus leading\n // to missing segment timing info).\n this.syncController_.saveSegmentTimingInfo({\n segmentInfo,\n shouldSaveTimelineMapping: this.loaderType_ === 'main'\n });\n }\n const segmentDurationMessage = getTroublesomeSegmentDurationMessage(segmentInfo, this.sourceType_);\n if (segmentDurationMessage) {\n if (segmentDurationMessage.severity === 'warn') {\n videojs.log.warn(segmentDurationMessage.message);\n } else {\n this.logger_(segmentDurationMessage.message);\n }\n }\n this.recordThroughput_(segmentInfo);\n this.pendingSegment_ = null;\n this.state = 'READY';\n if (segmentInfo.isSyncRequest) {\n this.trigger('syncinfoupdate'); // if the sync request was not appended\n // then it was not the correct segment.\n // throw it away and use the data it gave us\n // to get the correct one.\n\n if (!segmentInfo.hasAppendedData_) {\n this.logger_(`Throwing away un-appended sync request ${segmentInfoString(segmentInfo)}`);\n return;\n }\n }\n this.logger_(`Appended ${segmentInfoString(segmentInfo)}`);\n this.addSegmentMetadataCue_(segmentInfo);\n this.fetchAtBuffer_ = true;\n if (this.currentTimeline_ !== segmentInfo.timeline) {\n this.timelineChangeController_.lastTimelineChange({\n type: this.loaderType_,\n from: this.currentTimeline_,\n to: segmentInfo.timeline\n }); // If audio is not disabled, the main segment loader is responsible for updating\n // the audio timeline as well. 
If the content is video only, this won't have any\n      // impact.\n\n      if (this.loaderType_ === 'main' && !this.audioDisabled_) {\n        this.timelineChangeController_.lastTimelineChange({\n          type: 'audio',\n          from: this.currentTimeline_,\n          to: segmentInfo.timeline\n        });\n      }\n    }\n    this.currentTimeline_ = segmentInfo.timeline; // We must update the syncinfo to recalculate the seekable range before\n    // the following conditional otherwise it may consider this a bad \"guess\"\n    // and attempt to resync when the post-update seekable window and live\n    // point would mean that this was the perfect segment to fetch\n\n    this.trigger('syncinfoupdate');\n    const segment = segmentInfo.segment;\n    const part = segmentInfo.part;\n    const badSegmentGuess = segment.end && this.currentTime_() - segment.end > segmentInfo.playlist.targetDuration * 3;\n    const badPartGuess = part && part.end && this.currentTime_() - part.end > segmentInfo.playlist.partTargetDuration * 3; // If we previously appended a segment/part that ends more than 3 part/targetDurations before\n    // the currentTime_ that means that our conservative guess was too conservative.\n    // In that case, reset the loader state so that we try to use any information gained\n    // from the previous request to create a new, more accurate, sync-point.\n\n    if (badSegmentGuess || badPartGuess) {\n      this.logger_(`bad ${badSegmentGuess ? 'segment' : 'part'} ${segmentInfoString(segmentInfo)}`);\n      this.resetEverything();\n      return;\n    }\n    const isWalkingForward = this.mediaIndex !== null; // Don't do a rendition switch unless we have enough time to get a sync segment\n    // and conservatively guess\n\n    if (isWalkingForward) {\n      this.trigger('bandwidthupdate');\n    }\n    this.trigger('progress');\n    this.mediaIndex = segmentInfo.mediaIndex;\n    this.partIndex = segmentInfo.partIndex; // any time an update finishes and the last segment is in the\n    // buffer, end the stream. this ensures the \"ended\" event will\n    // fire if playback reaches that point.\n\n    if (this.isEndOfStream_(segmentInfo.mediaIndex, segmentInfo.playlist, segmentInfo.partIndex)) {\n      this.endOfStream();\n    } // used for testing\n\n    this.trigger('appended');\n    if (segmentInfo.hasAppendedData_) {\n      this.mediaAppends++;\n    }\n    if (!this.paused()) {\n      this.monitorBuffer_();\n    }\n  }\n  /**\n   * Records the current throughput of the decrypt, transmux, and append\n   * portion of the segment pipeline. `throughput.rate` is the cumulative\n   * moving average of the throughput. 
`throughput.count` is the number of\n * data points in the average.\n *\n * @private\n * @param {Object} segmentInfo the object returned by loadSegment\n */\n\n recordThroughput_(segmentInfo) {\n if (segmentInfo.duration < MIN_SEGMENT_DURATION_TO_SAVE_STATS) {\n this.logger_(`Ignoring segment's throughput because its duration of ${segmentInfo.duration}` + ` is less than the min to record ${MIN_SEGMENT_DURATION_TO_SAVE_STATS}`);\n return;\n }\n const rate = this.throughput.rate; // Add one to the time to ensure that we don't accidentally attempt to divide\n // by zero in the case where the throughput is ridiculously high\n\n const segmentProcessingTime = Date.now() - segmentInfo.endOfAllRequests + 1; // Multiply by 8000 to convert from bytes/millisecond to bits/second\n\n const segmentProcessingThroughput = Math.floor(segmentInfo.byteLength / segmentProcessingTime * 8 * 1000); // This is just a cumulative moving average calculation:\n // newAvg = oldAvg + (sample - oldAvg) / (sampleCount + 1)\n\n this.throughput.rate += (segmentProcessingThroughput - rate) / ++this.throughput.count;\n }\n /**\n * Adds a cue to the segment-metadata track with some metadata information about the\n * segment\n *\n * @private\n * @param {Object} segmentInfo\n * the object returned by loadSegment\n * @method addSegmentMetadataCue_\n */\n\n addSegmentMetadataCue_(segmentInfo) {\n if (!this.segmentMetadataTrack_) {\n return;\n }\n const segment = segmentInfo.segment;\n const start = segment.start;\n const end = segment.end; // Do not try adding the cue if the start and end times are invalid.\n\n if (!finite(start) || !finite(end)) {\n return;\n }\n removeCuesFromTrack(start, end, this.segmentMetadataTrack_);\n const Cue = window$1.WebKitDataCue || window$1.VTTCue;\n const value = {\n custom: segment.custom,\n dateTimeObject: segment.dateTimeObject,\n dateTimeString: segment.dateTimeString,\n programDateTime: segment.programDateTime,\n bandwidth: segmentInfo.playlist.attributes.BANDWIDTH,\n resolution: segmentInfo.playlist.attributes.RESOLUTION,\n codecs: segmentInfo.playlist.attributes.CODECS,\n byteLength: segmentInfo.byteLength,\n uri: segmentInfo.uri,\n timeline: segmentInfo.timeline,\n playlist: segmentInfo.playlist.id,\n start,\n end\n };\n const data = JSON.stringify(value);\n const cue = new Cue(start, end, data); // Attach the metadata to the value property of the cue to keep consistency between\n // the differences of WebKitDataCue in safari and VTTCue in other browsers\n\n cue.value = value;\n this.segmentMetadataTrack_.addCue(cue);\n }\n}\nfunction noop() {}\nconst toTitleCase = function (string) {\n if (typeof string !== 'string') {\n return string;\n }\n return string.replace(/./, w => w.toUpperCase());\n};\n\n/**\n * @file source-updater.js\n */\nconst bufferTypes = ['video', 'audio'];\nconst updating = (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`];\n return sourceBuffer && sourceBuffer.updating || sourceUpdater.queuePending[type];\n};\nconst nextQueueIndexOfType = (type, queue) => {\n for (let i = 0; i < queue.length; i++) {\n const queueEntry = queue[i];\n if (queueEntry.type === 'mediaSource') {\n // If the next entry is a media source entry (uses multiple source buffers), block\n // processing to allow it to go through first.\n return null;\n }\n if (queueEntry.type === type) {\n return i;\n }\n }\n return null;\n};\nconst shiftQueue = (type, sourceUpdater) => {\n if (sourceUpdater.queue.length === 0) {\n return;\n }\n let queueIndex = 0;\n let queueEntry = 
sourceUpdater.queue[queueIndex];\n if (queueEntry.type === 'mediaSource') {\n if (!sourceUpdater.updating() && sourceUpdater.mediaSource.readyState !== 'closed') {\n sourceUpdater.queue.shift();\n queueEntry.action(sourceUpdater);\n if (queueEntry.doneFn) {\n queueEntry.doneFn();\n } // Only specific source buffer actions must wait for async updateend events. Media\n // Source actions process synchronously. Therefore, both audio and video source\n // buffers are now clear to process the next queue entries.\n\n shiftQueue('audio', sourceUpdater);\n shiftQueue('video', sourceUpdater);\n } // Media Source actions require both source buffers, so if the media source action\n // couldn't process yet (because one or both source buffers are busy), block other\n // queue actions until both are available and the media source action can process.\n\n return;\n }\n if (type === 'mediaSource') {\n // If the queue was shifted by a media source action (this happens when pushing a\n // media source action onto the queue), then it wasn't from an updateend event from an\n // audio or video source buffer, so there's no change from previous state, and no\n // processing should be done.\n return;\n } // Media source queue entries don't need to consider whether the source updater is\n // started (i.e., source buffers are created) as they don't need the source buffers, but\n // source buffer queue entries do.\n\n if (!sourceUpdater.ready() || sourceUpdater.mediaSource.readyState === 'closed' || updating(type, sourceUpdater)) {\n return;\n }\n if (queueEntry.type !== type) {\n queueIndex = nextQueueIndexOfType(type, sourceUpdater.queue);\n if (queueIndex === null) {\n // Either there's no queue entry that uses this source buffer type in the queue, or\n // there's a media source queue entry before the next entry of this type, in which\n // case wait for that action to process first.\n return;\n }\n queueEntry = sourceUpdater.queue[queueIndex];\n }\n sourceUpdater.queue.splice(queueIndex, 1); // Keep a record that this source buffer type is in use.\n //\n // The queue pending operation must be set before the action is performed in the event\n // that the action results in a synchronous event that is acted upon. 
For instance, if\n // an exception is thrown that can be handled, it's possible that new actions will be\n // appended to an empty queue and immediately executed, but would not have the correct\n // pending information if this property was set after the action was performed.\n\n sourceUpdater.queuePending[type] = queueEntry;\n queueEntry.action(type, sourceUpdater);\n if (!queueEntry.doneFn) {\n // synchronous operation, process next entry\n sourceUpdater.queuePending[type] = null;\n shiftQueue(type, sourceUpdater);\n return;\n }\n};\nconst cleanupBuffer = (type, sourceUpdater) => {\n const buffer = sourceUpdater[`${type}Buffer`];\n const titleType = toTitleCase(type);\n if (!buffer) {\n return;\n }\n buffer.removeEventListener('updateend', sourceUpdater[`on${titleType}UpdateEnd_`]);\n buffer.removeEventListener('error', sourceUpdater[`on${titleType}Error_`]);\n sourceUpdater.codecs[type] = null;\n sourceUpdater[`${type}Buffer`] = null;\n};\nconst inSourceBuffers = (mediaSource, sourceBuffer) => mediaSource && sourceBuffer && Array.prototype.indexOf.call(mediaSource.sourceBuffers, sourceBuffer) !== -1;\nconst actions = {\n appendBuffer: (bytes, segmentInfo, onError) => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Appending segment ${segmentInfo.mediaIndex}'s ${bytes.length} bytes to ${type}Buffer`);\n try {\n sourceBuffer.appendBuffer(bytes);\n } catch (e) {\n sourceUpdater.logger_(`Error with code ${e.code} ` + (e.code === QUOTA_EXCEEDED_ERR ? '(QUOTA_EXCEEDED_ERR) ' : '') + `when appending segment ${segmentInfo.mediaIndex} to ${type}Buffer`);\n sourceUpdater.queuePending[type] = null;\n onError(e);\n }\n },\n remove: (start, end) => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Removing ${start} to ${end} from ${type}Buffer`);\n try {\n sourceBuffer.remove(start, end);\n } catch (e) {\n sourceUpdater.logger_(`Remove ${start} to ${end} from ${type}Buffer failed`);\n }\n },\n timestampOffset: offset => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Setting ${type}timestampOffset to ${offset}`);\n sourceBuffer.timestampOffset = offset;\n },\n callback: callback => (type, sourceUpdater) => {\n callback();\n },\n endOfStream: error => sourceUpdater => {\n if (sourceUpdater.mediaSource.readyState !== 'open') {\n return;\n }\n sourceUpdater.logger_(`Calling mediaSource endOfStream(${error || ''})`);\n try {\n sourceUpdater.mediaSource.endOfStream(error);\n } catch (e) {\n videojs.log.warn('Failed to call media source endOfStream', e);\n }\n },\n duration: duration => sourceUpdater => {\n sourceUpdater.logger_(`Setting mediaSource duration to ${duration}`);\n try {\n sourceUpdater.mediaSource.duration = duration;\n } catch (e) {\n videojs.log.warn('Failed to set media source duration', e);\n }\n 
},\n abort: () => (type, sourceUpdater) => {\n if (sourceUpdater.mediaSource.readyState !== 'open') {\n return;\n }\n const sourceBuffer = sourceUpdater[`${type}Buffer`]; // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`calling abort on ${type}Buffer`);\n try {\n sourceBuffer.abort();\n } catch (e) {\n videojs.log.warn(`Failed to abort on ${type}Buffer`, e);\n }\n },\n addSourceBuffer: (type, codec) => sourceUpdater => {\n const titleType = toTitleCase(type);\n const mime = getMimeForCodec(codec);\n sourceUpdater.logger_(`Adding ${type}Buffer with codec ${codec} to mediaSource`);\n const sourceBuffer = sourceUpdater.mediaSource.addSourceBuffer(mime);\n sourceBuffer.addEventListener('updateend', sourceUpdater[`on${titleType}UpdateEnd_`]);\n sourceBuffer.addEventListener('error', sourceUpdater[`on${titleType}Error_`]);\n sourceUpdater.codecs[type] = codec;\n sourceUpdater[`${type}Buffer`] = sourceBuffer;\n },\n removeSourceBuffer: type => sourceUpdater => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`];\n cleanupBuffer(type, sourceUpdater); // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n }\n sourceUpdater.logger_(`Removing ${type}Buffer with codec ${sourceUpdater.codecs[type]} from mediaSource`);\n try {\n sourceUpdater.mediaSource.removeSourceBuffer(sourceBuffer);\n } catch (e) {\n videojs.log.warn(`Failed to removeSourceBuffer ${type}Buffer`, e);\n }\n },\n changeType: codec => (type, sourceUpdater) => {\n const sourceBuffer = sourceUpdater[`${type}Buffer`];\n const mime = getMimeForCodec(codec); // can't do anything if the media source / source buffer is null\n // or the media source does not contain this source buffer.\n\n if (!inSourceBuffers(sourceUpdater.mediaSource, sourceBuffer)) {\n return;\n } // do not update codec if we don't need to.\n // Only update if we change the codec base.\n // For example, going from avc1.640028 to avc1.64001f does not require a changeType call.\n\n const newCodecBase = codec.substring(0, codec.indexOf('.'));\n const oldCodec = sourceUpdater.codecs[type];\n const oldCodecBase = oldCodec.substring(0, oldCodec.indexOf('.'));\n if (oldCodecBase === newCodecBase) {\n return;\n }\n sourceUpdater.logger_(`changing ${type}Buffer codec from ${sourceUpdater.codecs[type]} to ${codec}`); // check if change to the provided type is supported\n\n try {\n sourceBuffer.changeType(mime);\n sourceUpdater.codecs[type] = codec;\n } catch (e) {\n videojs.log.warn(`Failed to changeType on ${type}Buffer`, e);\n }\n }\n};\nconst pushQueue = ({\n type,\n sourceUpdater,\n action,\n doneFn,\n name\n}) => {\n sourceUpdater.queue.push({\n type,\n action,\n doneFn,\n name\n });\n shiftQueue(type, sourceUpdater);\n};\nconst onUpdateend = (type, sourceUpdater) => e => {\n // Although there should, in theory, be a pending action for any updateend receieved,\n // there are some actions that may trigger updateend events without set definitions in\n // the w3c spec. For instance, setting the duration on the media source may trigger\n // updateend events on source buffers. This does not appear to be in the spec. 
As such,\n // if we encounter an updateend without a corresponding pending action from our queue\n // for that source buffer type, process the next action.\n const bufferedRangesForType = sourceUpdater[`${type}Buffered`]();\n const descriptiveString = bufferedRangesToString(bufferedRangesForType);\n sourceUpdater.logger_(`received \"updateend\" event for ${type} Source Buffer: `, descriptiveString);\n if (sourceUpdater.queuePending[type]) {\n const doneFn = sourceUpdater.queuePending[type].doneFn;\n sourceUpdater.queuePending[type] = null;\n if (doneFn) {\n // if there's an error, report it\n doneFn(sourceUpdater[`${type}Error_`]);\n }\n }\n shiftQueue(type, sourceUpdater);\n};\n/**\n * A queue of callbacks to be serialized and applied when a\n * MediaSource and its associated SourceBuffers are not in the\n * updating state. It is used by the segment loader to update the\n * underlying SourceBuffers when new data is loaded, for instance.\n *\n * @class SourceUpdater\n * @param {MediaSource} mediaSource the MediaSource to create the SourceBuffer from\n * @param {string} mimeType the desired MIME type of the underlying SourceBuffer\n */\n\nclass SourceUpdater extends videojs.EventTarget {\n constructor(mediaSource) {\n super();\n this.mediaSource = mediaSource;\n this.sourceopenListener_ = () => shiftQueue('mediaSource', this);\n this.mediaSource.addEventListener('sourceopen', this.sourceopenListener_);\n this.logger_ = logger('SourceUpdater'); // initial timestamp offset is 0\n\n this.audioTimestampOffset_ = 0;\n this.videoTimestampOffset_ = 0;\n this.queue = [];\n this.queuePending = {\n audio: null,\n video: null\n };\n this.delayedAudioAppendQueue_ = [];\n this.videoAppendQueued_ = false;\n this.codecs = {};\n this.onVideoUpdateEnd_ = onUpdateend('video', this);\n this.onAudioUpdateEnd_ = onUpdateend('audio', this);\n this.onVideoError_ = e => {\n // used for debugging\n this.videoError_ = e;\n };\n this.onAudioError_ = e => {\n // used for debugging\n this.audioError_ = e;\n };\n this.createdSourceBuffers_ = false;\n this.initializedEme_ = false;\n this.triggeredReady_ = false;\n }\n initializedEme() {\n this.initializedEme_ = true;\n this.triggerReady();\n }\n hasCreatedSourceBuffers() {\n // if false, likely waiting on one of the segment loaders to get enough data to create\n // source buffers\n return this.createdSourceBuffers_;\n }\n hasInitializedAnyEme() {\n return this.initializedEme_;\n }\n ready() {\n return this.hasCreatedSourceBuffers() && this.hasInitializedAnyEme();\n }\n createSourceBuffers(codecs) {\n if (this.hasCreatedSourceBuffers()) {\n // already created them before\n return;\n } // the intial addOrChangeSourceBuffers will always be\n // two add buffers.\n\n this.addOrChangeSourceBuffers(codecs);\n this.createdSourceBuffers_ = true;\n this.trigger('createdsourcebuffers');\n this.triggerReady();\n }\n triggerReady() {\n // only allow ready to be triggered once, this prevents the case\n // where:\n // 1. we trigger createdsourcebuffers\n // 2. ie 11 synchronously initializates eme\n // 3. the synchronous initialization causes us to trigger ready\n // 4. 
We go back to the ready check in createSourceBuffers and ready is triggered again.\n if (this.ready() && !this.triggeredReady_) {\n this.triggeredReady_ = true;\n this.trigger('ready');\n }\n }\n /**\n * Add a type of source buffer to the media source.\n *\n * @param {string} type\n * The type of source buffer to add.\n *\n * @param {string} codec\n * The codec to add the source buffer with.\n */\n\n addSourceBuffer(type, codec) {\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.addSourceBuffer(type, codec),\n name: 'addSourceBuffer'\n });\n }\n /**\n * call abort on a source buffer.\n *\n * @param {string} type\n * The type of source buffer to call abort on.\n */\n\n abort(type) {\n pushQueue({\n type,\n sourceUpdater: this,\n action: actions.abort(type),\n name: 'abort'\n });\n }\n /**\n * Call removeSourceBuffer and remove a specific type\n * of source buffer on the mediaSource.\n *\n * @param {string} type\n * The type of source buffer to remove.\n */\n\n removeSourceBuffer(type) {\n if (!this.canRemoveSourceBuffer()) {\n videojs.log.error('removeSourceBuffer is not supported!');\n return;\n }\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.removeSourceBuffer(type),\n name: 'removeSourceBuffer'\n });\n }\n /**\n * Whether or not the removeSourceBuffer function is supported\n * on the mediaSource.\n *\n * @return {boolean}\n * if removeSourceBuffer can be called.\n */\n\n canRemoveSourceBuffer() {\n // As of Firefox 83 removeSourceBuffer\n // throws errors, so we report that it does not support this.\n return !videojs.browser.IS_FIREFOX && window$1.MediaSource && window$1.MediaSource.prototype && typeof window$1.MediaSource.prototype.removeSourceBuffer === 'function';\n }\n /**\n * Whether or not the changeType function is supported\n * on our SourceBuffers.\n *\n * @return {boolean}\n * if changeType can be called.\n */\n\n static canChangeType() {\n return window$1.SourceBuffer && window$1.SourceBuffer.prototype && typeof window$1.SourceBuffer.prototype.changeType === 'function';\n }\n /**\n * Whether or not the changeType function is supported\n * on our SourceBuffers.\n *\n * @return {boolean}\n * if changeType can be called.\n */\n\n canChangeType() {\n return this.constructor.canChangeType();\n }\n /**\n * Call the changeType function on a source buffer, given the code and type.\n *\n * @param {string} type\n * The type of source buffer to call changeType on.\n *\n * @param {string} codec\n * The codec string to change type with on the source buffer.\n */\n\n changeType(type, codec) {\n if (!this.canChangeType()) {\n videojs.log.error('changeType is not supported!');\n return;\n }\n pushQueue({\n type,\n sourceUpdater: this,\n action: actions.changeType(codec),\n name: 'changeType'\n });\n }\n /**\n * Add source buffers with a codec or, if they are already created,\n * call changeType on source buffers using changeType.\n *\n * @param {Object} codecs\n * Codecs to switch to\n */\n\n addOrChangeSourceBuffers(codecs) {\n if (!codecs || typeof codecs !== 'object' || Object.keys(codecs).length === 0) {\n throw new Error('Cannot addOrChangeSourceBuffers to undefined codecs');\n }\n Object.keys(codecs).forEach(type => {\n const codec = codecs[type];\n if (!this.hasCreatedSourceBuffers()) {\n return this.addSourceBuffer(type, codec);\n }\n if (this.canChangeType()) {\n this.changeType(type, codec);\n }\n });\n }\n /**\n * Queue an update to append an ArrayBuffer.\n *\n * @param {MediaObject} object containing audioBytes and/or 
videoBytes\n * @param {Function} done the function to call when done\n * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-appendBuffer-void-ArrayBuffer-data\n */\n\n appendBuffer(options, doneFn) {\n const {\n segmentInfo,\n type,\n bytes\n } = options;\n this.processedAppend_ = true;\n if (type === 'audio' && this.videoBuffer && !this.videoAppendQueued_) {\n this.delayedAudioAppendQueue_.push([options, doneFn]);\n this.logger_(`delayed audio append of ${bytes.length} until video append`);\n return;\n } // In the case of certain errors, for instance, QUOTA_EXCEEDED_ERR, updateend will\n // not be fired. This means that the queue will be blocked until the next action\n // taken by the segment-loader. Provide a mechanism for segment-loader to handle\n // these errors by calling the doneFn with the specific error.\n\n const onError = doneFn;\n pushQueue({\n type,\n sourceUpdater: this,\n action: actions.appendBuffer(bytes, segmentInfo || {\n mediaIndex: -1\n }, onError),\n doneFn,\n name: 'appendBuffer'\n });\n if (type === 'video') {\n this.videoAppendQueued_ = true;\n if (!this.delayedAudioAppendQueue_.length) {\n return;\n }\n const queue = this.delayedAudioAppendQueue_.slice();\n this.logger_(`queuing delayed audio ${queue.length} appendBuffers`);\n this.delayedAudioAppendQueue_.length = 0;\n queue.forEach(que => {\n this.appendBuffer.apply(this, que);\n });\n }\n }\n /**\n * Get the audio buffer's buffered timerange.\n *\n * @return {TimeRange}\n * The audio buffer's buffered time range\n */\n\n audioBuffered() {\n // no media source/source buffer or it isn't in the media sources\n // source buffer list\n if (!inSourceBuffers(this.mediaSource, this.audioBuffer)) {\n return createTimeRanges();\n }\n return this.audioBuffer.buffered ? this.audioBuffer.buffered : createTimeRanges();\n }\n /**\n * Get the video buffer's buffered timerange.\n *\n * @return {TimeRange}\n * The video buffer's buffered time range\n */\n\n videoBuffered() {\n // no media source/source buffer or it isn't in the media sources\n // source buffer list\n if (!inSourceBuffers(this.mediaSource, this.videoBuffer)) {\n return createTimeRanges();\n }\n return this.videoBuffer.buffered ? this.videoBuffer.buffered : createTimeRanges();\n }\n /**\n * Get a combined video/audio buffer's buffered timerange.\n *\n * @return {TimeRange}\n * the combined time range\n */\n\n buffered() {\n const video = inSourceBuffers(this.mediaSource, this.videoBuffer) ? this.videoBuffer : null;\n const audio = inSourceBuffers(this.mediaSource, this.audioBuffer) ? this.audioBuffer : null;\n if (audio && !video) {\n return this.audioBuffered();\n }\n if (video && !audio) {\n return this.videoBuffered();\n }\n return bufferIntersection(this.audioBuffered(), this.videoBuffered());\n }\n /**\n * Add a callback to the queue that will set duration on the mediaSource.\n *\n * @param {number} duration\n * The duration to set\n *\n * @param {Function} [doneFn]\n * function to run after duration has been set.\n */\n\n setDuration(duration, doneFn = noop) {\n // In order to set the duration on the media source, it's necessary to wait for all\n // source buffers to no longer be updating. 
\"If the updating attribute equals true on\n // any SourceBuffer in sourceBuffers, then throw an InvalidStateError exception and\n // abort these steps.\" (source: https://www.w3.org/TR/media-source/#attributes).\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.duration(duration),\n name: 'duration',\n doneFn\n });\n }\n /**\n * Add a mediaSource endOfStream call to the queue\n *\n * @param {Error} [error]\n * Call endOfStream with an error\n *\n * @param {Function} [doneFn]\n * A function that should be called when the\n * endOfStream call has finished.\n */\n\n endOfStream(error = null, doneFn = noop) {\n if (typeof error !== 'string') {\n error = undefined;\n } // In order to set the duration on the media source, it's necessary to wait for all\n // source buffers to no longer be updating. \"If the updating attribute equals true on\n // any SourceBuffer in sourceBuffers, then throw an InvalidStateError exception and\n // abort these steps.\" (source: https://www.w3.org/TR/media-source/#attributes).\n\n pushQueue({\n type: 'mediaSource',\n sourceUpdater: this,\n action: actions.endOfStream(error),\n name: 'endOfStream',\n doneFn\n });\n }\n /**\n * Queue an update to remove a time range from the buffer.\n *\n * @param {number} start where to start the removal\n * @param {number} end where to end the removal\n * @param {Function} [done=noop] optional callback to be executed when the remove\n * operation is complete\n * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end\n */\n\n removeAudio(start, end, done = noop) {\n if (!this.audioBuffered().length || this.audioBuffered().end(0) === 0) {\n done();\n return;\n }\n pushQueue({\n type: 'audio',\n sourceUpdater: this,\n action: actions.remove(start, end),\n doneFn: done,\n name: 'remove'\n });\n }\n /**\n * Queue an update to remove a time range from the buffer.\n *\n * @param {number} start where to start the removal\n * @param {number} end where to end the removal\n * @param {Function} [done=noop] optional callback to be executed when the remove\n * operation is complete\n * @see http://www.w3.org/TR/media-source/#widl-SourceBuffer-remove-void-double-start-unrestricted-double-end\n */\n\n removeVideo(start, end, done = noop) {\n if (!this.videoBuffered().length || this.videoBuffered().end(0) === 0) {\n done();\n return;\n }\n pushQueue({\n type: 'video',\n sourceUpdater: this,\n action: actions.remove(start, end),\n doneFn: done,\n name: 'remove'\n });\n }\n /**\n * Whether the underlying sourceBuffer is updating or not\n *\n * @return {boolean} the updating status of the SourceBuffer\n */\n\n updating() {\n // the audio/video source buffer is updating\n if (updating('audio', this) || updating('video', this)) {\n return true;\n }\n return false;\n }\n /**\n * Set/get the timestampoffset on the audio SourceBuffer\n *\n * @return {number} the timestamp offset\n */\n\n audioTimestampOffset(offset) {\n if (typeof offset !== 'undefined' && this.audioBuffer &&\n // no point in updating if it's the same\n this.audioTimestampOffset_ !== offset) {\n pushQueue({\n type: 'audio',\n sourceUpdater: this,\n action: actions.timestampOffset(offset),\n name: 'timestampOffset'\n });\n this.audioTimestampOffset_ = offset;\n }\n return this.audioTimestampOffset_;\n }\n /**\n * Set/get the timestampoffset on the video SourceBuffer\n *\n * @return {number} the timestamp offset\n */\n\n videoTimestampOffset(offset) {\n if (typeof offset !== 'undefined' && this.videoBuffer &&\n 
// no point in updating if it's the same\n this.videoTimestampOffset_ !== offset) {\n pushQueue({\n type: 'video',\n sourceUpdater: this,\n action: actions.timestampOffset(offset),\n name: 'timestampOffset'\n });\n this.videoTimestampOffset_ = offset;\n }\n return this.videoTimestampOffset_;\n }\n /**\n * Add a function to the queue that will be called\n * when it is its turn to run in the audio queue.\n *\n * @param {Function} callback\n * The callback to queue.\n */\n\n audioQueueCallback(callback) {\n if (!this.audioBuffer) {\n return;\n }\n pushQueue({\n type: 'audio',\n sourceUpdater: this,\n action: actions.callback(callback),\n name: 'callback'\n });\n }\n /**\n * Add a function to the queue that will be called\n * when it is its turn to run in the video queue.\n *\n * @param {Function} callback\n * The callback to queue.\n */\n\n videoQueueCallback(callback) {\n if (!this.videoBuffer) {\n return;\n }\n pushQueue({\n type: 'video',\n sourceUpdater: this,\n action: actions.callback(callback),\n name: 'callback'\n });\n }\n /**\n * dispose of the source updater and the underlying sourceBuffer\n */\n\n dispose() {\n this.trigger('dispose');\n bufferTypes.forEach(type => {\n this.abort(type);\n if (this.canRemoveSourceBuffer()) {\n this.removeSourceBuffer(type);\n } else {\n this[`${type}QueueCallback`](() => cleanupBuffer(type, this));\n }\n });\n this.videoAppendQueued_ = false;\n this.delayedAudioAppendQueue_.length = 0;\n if (this.sourceopenListener_) {\n this.mediaSource.removeEventListener('sourceopen', this.sourceopenListener_);\n }\n this.off();\n }\n}\nconst uint8ToUtf8 = uintArray => decodeURIComponent(escape(String.fromCharCode.apply(null, uintArray)));\nconst bufferToHexString = buffer => {\n const uInt8Buffer = new Uint8Array(buffer);\n return Array.from(uInt8Buffer).map(byte => byte.toString(16).padStart(2, '0')).join('');\n};\n\n/**\n * @file vtt-segment-loader.js\n */\nconst VTT_LINE_TERMINATORS = new Uint8Array('\\n\\n'.split('').map(char => char.charCodeAt(0)));\nclass NoVttJsError extends Error {\n constructor() {\n super('Trying to parse received VTT cues, but there is no WebVTT. Make sure vtt.js is loaded.');\n }\n}\n/**\n * An object that manages segment loading and appending.\n *\n * @class VTTSegmentLoader\n * @param {Object} options required and optional options\n * @extends videojs.EventTarget\n */\n\nclass VTTSegmentLoader extends SegmentLoader {\n constructor(settings, options = {}) {\n super(settings, options); // SegmentLoader requires a MediaSource be specified or it will throw an error;\n // however, VTTSegmentLoader has no need of a media source, so delete the reference\n\n this.mediaSource_ = null;\n this.subtitlesTrack_ = null;\n this.featuresNativeTextTracks_ = settings.featuresNativeTextTracks;\n this.loadVttJs = settings.loadVttJs; // The VTT segment will have its own time mappings. 
Saving VTT segment timing info in\n // the sync controller leads to improper behavior.\n\n this.shouldSaveSegmentTimingInfo_ = false;\n }\n createTransmuxer_() {\n // don't need to transmux any subtitles\n return null;\n }\n /**\n * Indicates which time ranges are buffered\n *\n * @return {TimeRange}\n * TimeRange object representing the current buffered ranges\n */\n\n buffered_() {\n if (!this.subtitlesTrack_ || !this.subtitlesTrack_.cues || !this.subtitlesTrack_.cues.length) {\n return createTimeRanges();\n }\n const cues = this.subtitlesTrack_.cues;\n const start = cues[0].startTime;\n const end = cues[cues.length - 1].startTime;\n return createTimeRanges([[start, end]]);\n }\n /**\n * Gets and sets init segment for the provided map\n *\n * @param {Object} map\n * The map object representing the init segment to get or set\n * @param {boolean=} set\n * If true, the init segment for the provided map should be saved\n * @return {Object}\n * map object for desired init segment\n */\n\n initSegmentForMap(map, set = false) {\n if (!map) {\n return null;\n }\n const id = initSegmentId(map);\n let storedMap = this.initSegments_[id];\n if (set && !storedMap && map.bytes) {\n // append WebVTT line terminators to the media initialization segment if it exists\n // to follow the WebVTT spec (https://w3c.github.io/webvtt/#file-structure) that\n // requires two or more WebVTT line terminators between the WebVTT header and the\n // rest of the file\n const combinedByteLength = VTT_LINE_TERMINATORS.byteLength + map.bytes.byteLength;\n const combinedSegment = new Uint8Array(combinedByteLength);\n combinedSegment.set(map.bytes);\n combinedSegment.set(VTT_LINE_TERMINATORS, map.bytes.byteLength);\n this.initSegments_[id] = storedMap = {\n resolvedUri: map.resolvedUri,\n byterange: map.byterange,\n bytes: combinedSegment\n };\n }\n return storedMap || map;\n }\n /**\n * Returns true if all configuration required for loading is present, otherwise false.\n *\n * @return {boolean} True if the all configuration is ready for loading\n * @private\n */\n\n couldBeginLoading_() {\n return this.playlist_ && this.subtitlesTrack_ && !this.paused();\n }\n /**\n * Once all the starting parameters have been specified, begin\n * operation. 
This method should only be invoked from the INIT\n * state.\n *\n * @private\n */\n\n init_() {\n this.state = 'READY';\n this.resetEverything();\n return this.monitorBuffer_();\n }\n /**\n * Set a subtitle track on the segment loader to add subtitles to\n *\n * @param {TextTrack=} track\n * The text track to add loaded subtitles to\n * @return {TextTrack}\n * Returns the subtitles track\n */\n\n track(track) {\n if (typeof track === 'undefined') {\n return this.subtitlesTrack_;\n }\n this.subtitlesTrack_ = track; // if we were unpaused but waiting for a sourceUpdater, start\n // buffering now\n\n if (this.state === 'INIT' && this.couldBeginLoading_()) {\n this.init_();\n }\n return this.subtitlesTrack_;\n }\n /**\n * Remove any data in the source buffer between start and end times\n *\n * @param {number} start - the start time of the region to remove from the buffer\n * @param {number} end - the end time of the region to remove from the buffer\n */\n\n remove(start, end) {\n removeCuesFromTrack(start, end, this.subtitlesTrack_);\n }\n /**\n * fill the buffer with segements unless the sourceBuffers are\n * currently updating\n *\n * Note: this function should only ever be called by monitorBuffer_\n * and never directly\n *\n * @private\n */\n\n fillBuffer_() {\n // see if we need to begin loading immediately\n const segmentInfo = this.chooseNextRequest_();\n if (!segmentInfo) {\n return;\n }\n if (this.syncController_.timestampOffsetForTimeline(segmentInfo.timeline) === null) {\n // We don't have the timestamp offset that we need to sync subtitles.\n // Rerun on a timestamp offset or user interaction.\n const checkTimestampOffset = () => {\n this.state = 'READY';\n if (!this.paused()) {\n // if not paused, queue a buffer check as soon as possible\n this.monitorBuffer_();\n }\n };\n this.syncController_.one('timestampoffset', checkTimestampOffset);\n this.state = 'WAITING_ON_TIMELINE';\n return;\n }\n this.loadSegment_(segmentInfo);\n } // never set a timestamp offset for vtt segments.\n\n timestampOffsetForSegment_() {\n return null;\n }\n chooseNextRequest_() {\n return this.skipEmptySegments_(super.chooseNextRequest_());\n }\n /**\n * Prevents the segment loader from requesting segments we know contain no subtitles\n * by walking forward until we find the next segment that we don't know whether it is\n * empty or not.\n *\n * @param {Object} segmentInfo\n * a segment info object that describes the current segment\n * @return {Object}\n * a segment info object that describes the current segment\n */\n\n skipEmptySegments_(segmentInfo) {\n while (segmentInfo && segmentInfo.segment.empty) {\n // stop at the last possible segmentInfo\n if (segmentInfo.mediaIndex + 1 >= segmentInfo.playlist.segments.length) {\n segmentInfo = null;\n break;\n }\n segmentInfo = this.generateSegmentInfo_({\n playlist: segmentInfo.playlist,\n mediaIndex: segmentInfo.mediaIndex + 1,\n startOfSegment: segmentInfo.startOfSegment + segmentInfo.duration,\n isSyncRequest: segmentInfo.isSyncRequest\n });\n }\n return segmentInfo;\n }\n stopForError(error) {\n this.error(error);\n this.state = 'READY';\n this.pause();\n this.trigger('error');\n }\n /**\n * append a decrypted segement to the SourceBuffer through a SourceUpdater\n *\n * @private\n */\n\n segmentRequestFinished_(error, simpleSegment, result) {\n if (!this.subtitlesTrack_) {\n this.state = 'READY';\n return;\n }\n this.saveTransferStats_(simpleSegment.stats); // the request was aborted\n\n if (!this.pendingSegment_) {\n this.state = 'READY';\n 
this.mediaRequestsAborted += 1;\n return;\n }\n if (error) {\n if (error.code === REQUEST_ERRORS.TIMEOUT) {\n this.handleTimeout_();\n }\n if (error.code === REQUEST_ERRORS.ABORTED) {\n this.mediaRequestsAborted += 1;\n } else {\n this.mediaRequestsErrored += 1;\n }\n this.stopForError(error);\n return;\n }\n const segmentInfo = this.pendingSegment_; // although the VTT segment loader bandwidth isn't really used, it's good to\n // maintain functionality between segment loaders\n\n this.saveBandwidthRelatedStats_(segmentInfo.duration, simpleSegment.stats); // if this request included a segment key, save that data in the cache\n\n if (simpleSegment.key) {\n this.segmentKey(simpleSegment.key, true);\n }\n this.state = 'APPENDING'; // used for tests\n\n this.trigger('appending');\n const segment = segmentInfo.segment;\n if (segment.map) {\n segment.map.bytes = simpleSegment.map.bytes;\n }\n segmentInfo.bytes = simpleSegment.bytes; // Make sure that vttjs has loaded, otherwise, load it and wait till it finished loading\n\n if (typeof window$1.WebVTT !== 'function' && typeof this.loadVttJs === 'function') {\n this.state = 'WAITING_ON_VTTJS'; // should be fine to call multiple times\n // script will be loaded once but multiple listeners will be added to the queue, which is expected.\n\n this.loadVttJs().then(() => this.segmentRequestFinished_(error, simpleSegment, result), () => this.stopForError({\n message: 'Error loading vtt.js',\n metadata: {\n errorType: videojs.Error.VttLoadError\n }\n }));\n return;\n }\n segment.requested = true;\n try {\n this.parseVTTCues_(segmentInfo);\n } catch (e) {\n this.stopForError({\n message: e.message,\n metadata: {\n errorType: videojs.Error.VttCueParsingError\n }\n });\n return;\n }\n this.updateTimeMapping_(segmentInfo, this.syncController_.timelines[segmentInfo.timeline], this.playlist_);\n if (segmentInfo.cues.length) {\n segmentInfo.timingInfo = {\n start: segmentInfo.cues[0].startTime,\n end: segmentInfo.cues[segmentInfo.cues.length - 1].endTime\n };\n } else {\n segmentInfo.timingInfo = {\n start: segmentInfo.startOfSegment,\n end: segmentInfo.startOfSegment + segmentInfo.duration\n };\n }\n if (segmentInfo.isSyncRequest) {\n this.trigger('syncinfoupdate');\n this.pendingSegment_ = null;\n this.state = 'READY';\n return;\n }\n segmentInfo.byteLength = segmentInfo.bytes.byteLength;\n this.mediaSecondsLoaded += segment.duration; // Create VTTCue instances for each cue in the new segment and add them to\n // the subtitle track\n\n segmentInfo.cues.forEach(cue => {\n this.subtitlesTrack_.addCue(this.featuresNativeTextTracks_ ? new window$1.VTTCue(cue.startTime, cue.endTime, cue.text) : cue);\n }); // Remove any duplicate cues from the subtitle track. The WebVTT spec allows\n // cues to have identical time-intervals, but if the text is also identical\n // we can safely assume it is a duplicate that can be removed (ex. 
when a cue\n // \"overlaps\" VTT segments)\n\n removeDuplicateCuesFromTrack(this.subtitlesTrack_);\n this.handleAppendsDone_();\n }\n handleData_() {// noop as we shouldn't be getting video/audio data captions\n // that we do not support here.\n }\n updateTimingInfoEnd_() {// noop\n }\n /**\n * Uses the WebVTT parser to parse the segment response\n *\n * @throws NoVttJsError\n *\n * @param {Object} segmentInfo\n * a segment info object that describes the current segment\n * @private\n */\n\n parseVTTCues_(segmentInfo) {\n let decoder;\n let decodeBytesToString = false;\n if (typeof window$1.WebVTT !== 'function') {\n // caller is responsible for exception handling.\n throw new NoVttJsError();\n }\n if (typeof window$1.TextDecoder === 'function') {\n decoder = new window$1.TextDecoder('utf8');\n } else {\n decoder = window$1.WebVTT.StringDecoder();\n decodeBytesToString = true;\n }\n const parser = new window$1.WebVTT.Parser(window$1, window$1.vttjs, decoder);\n segmentInfo.cues = [];\n segmentInfo.timestampmap = {\n MPEGTS: 0,\n LOCAL: 0\n };\n parser.oncue = segmentInfo.cues.push.bind(segmentInfo.cues);\n parser.ontimestampmap = map => {\n segmentInfo.timestampmap = map;\n };\n parser.onparsingerror = error => {\n videojs.log.warn('Error encountered when parsing cues: ' + error.message);\n };\n if (segmentInfo.segment.map) {\n let mapData = segmentInfo.segment.map.bytes;\n if (decodeBytesToString) {\n mapData = uint8ToUtf8(mapData);\n }\n parser.parse(mapData);\n }\n let segmentData = segmentInfo.bytes;\n if (decodeBytesToString) {\n segmentData = uint8ToUtf8(segmentData);\n }\n parser.parse(segmentData);\n parser.flush();\n }\n /**\n * Updates the start and end times of any cues parsed by the WebVTT parser using\n * the information parsed from the X-TIMESTAMP-MAP header and a TS to media time mapping\n * from the SyncController\n *\n * @param {Object} segmentInfo\n * a segment info object that describes the current segment\n * @param {Object} mappingObj\n * object containing a mapping from TS to media time\n * @param {Object} playlist\n * the playlist object containing the segment\n * @private\n */\n\n updateTimeMapping_(segmentInfo, mappingObj, playlist) {\n const segment = segmentInfo.segment;\n if (!mappingObj) {\n // If the sync controller does not have a mapping of TS to Media Time for the\n // timeline, then we don't have enough information to update the cue\n // start/end times\n return;\n }\n if (!segmentInfo.cues.length) {\n // If there are no cues, we also do not have enough information to figure out\n // segment timing. Mark that the segment contains no cues so we don't re-request\n // an empty segment.\n segment.empty = true;\n return;\n }\n const {\n MPEGTS,\n LOCAL\n } = segmentInfo.timestampmap;\n /**\n * From the spec:\n * The MPEGTS media timestamp MUST use a 90KHz timescale,\n * even when non-WebVTT Media Segments use a different timescale.\n */\n\n const mpegTsInSeconds = MPEGTS / ONE_SECOND_IN_TS;\n const diff = mpegTsInSeconds - LOCAL + mappingObj.mapping;\n segmentInfo.cues.forEach(cue => {\n const duration = cue.endTime - cue.startTime;\n const startTime = MPEGTS === 0 ? 
cue.startTime + diff : this.handleRollover_(cue.startTime + diff, mappingObj.time);\n cue.startTime = Math.max(startTime, 0);\n cue.endTime = Math.max(startTime + duration, 0);\n });\n if (!playlist.syncInfo) {\n const firstStart = segmentInfo.cues[0].startTime;\n const lastStart = segmentInfo.cues[segmentInfo.cues.length - 1].startTime;\n playlist.syncInfo = {\n mediaSequence: playlist.mediaSequence + segmentInfo.mediaIndex,\n time: Math.min(firstStart, lastStart - segment.duration)\n };\n }\n }\n /**\n * MPEG-TS PES timestamps are limited to 2^33.\n * Once they reach 2^33, they roll over to 0.\n * mux.js handles PES timestamp rollover for the following scenarios:\n * [forward rollover(right)] ->\n * PES timestamps monotonically increase, and once they reach 2^33, they roll over to 0\n * [backward rollover(left)] -->\n * we seek back to position before rollover.\n *\n * According to the HLS SPEC:\n * When synchronizing WebVTT with PES timestamps, clients SHOULD account\n * for cases where the 33-bit PES timestamps have wrapped and the WebVTT\n * cue times have not. When the PES timestamp wraps, the WebVTT Segment\n * SHOULD have a X-TIMESTAMP-MAP header that maps the current WebVTT\n * time to the new (low valued) PES timestamp.\n *\n * So we want to handle rollover here and align VTT Cue start/end time to the player's time.\n */\n\n handleRollover_(value, reference) {\n if (reference === null) {\n return value;\n }\n let valueIn90khz = value * ONE_SECOND_IN_TS;\n const referenceIn90khz = reference * ONE_SECOND_IN_TS;\n let offset;\n if (referenceIn90khz < valueIn90khz) {\n // - 2^33\n offset = -8589934592;\n } else {\n // + 2^33\n offset = 8589934592;\n } // distance(value - reference) > 2^32\n\n while (Math.abs(valueIn90khz - referenceIn90khz) > 4294967296) {\n valueIn90khz += offset;\n }\n return valueIn90khz / ONE_SECOND_IN_TS;\n }\n}\n\n/**\n * @file ad-cue-tags.js\n */\n/**\n * Searches for an ad cue that overlaps with the given mediaTime\n *\n * @param {Object} track\n * the track to find the cue for\n *\n * @param {number} mediaTime\n * the time to find the cue at\n *\n * @return {Object|null}\n * the found cue or null\n */\n\nconst findAdCue = function (track, mediaTime) {\n const cues = track.cues;\n for (let i = 0; i < cues.length; i++) {\n const cue = cues[i];\n if (mediaTime >= cue.adStartTime && mediaTime <= cue.adEndTime) {\n return cue;\n }\n }\n return null;\n};\nconst updateAdCues = function (media, track, offset = 0) {\n if (!media.segments) {\n return;\n }\n let mediaTime = offset;\n let cue;\n for (let i = 0; i < media.segments.length; i++) {\n const segment = media.segments[i];\n if (!cue) {\n // Since the cues will span for at least the segment duration, adding a fudge\n // factor of half segment duration will prevent duplicate cues from being\n // created when timing info is not exact (e.g. 
cue start time initialized\n // at 10.006677, but next call mediaTime is 10.003332 )\n cue = findAdCue(track, mediaTime + segment.duration / 2);\n }\n if (cue) {\n if ('cueIn' in segment) {\n // Found a CUE-IN so end the cue\n cue.endTime = mediaTime;\n cue.adEndTime = mediaTime;\n mediaTime += segment.duration;\n cue = null;\n continue;\n }\n if (mediaTime < cue.endTime) {\n // Already processed this mediaTime for this cue\n mediaTime += segment.duration;\n continue;\n } // otherwise extend cue until a CUE-IN is found\n\n cue.endTime += segment.duration;\n } else {\n if ('cueOut' in segment) {\n cue = new window$1.VTTCue(mediaTime, mediaTime + segment.duration, segment.cueOut);\n cue.adStartTime = mediaTime; // Assumes tag format to be\n // #EXT-X-CUE-OUT:30\n\n cue.adEndTime = mediaTime + parseFloat(segment.cueOut);\n track.addCue(cue);\n }\n if ('cueOutCont' in segment) {\n // Entered into the middle of an ad cue\n // Assumes tag formate to be\n // #EXT-X-CUE-OUT-CONT:10/30\n const [adOffset, adTotal] = segment.cueOutCont.split('/').map(parseFloat);\n cue = new window$1.VTTCue(mediaTime, mediaTime + segment.duration, '');\n cue.adStartTime = mediaTime - adOffset;\n cue.adEndTime = cue.adStartTime + adTotal;\n track.addCue(cue);\n }\n }\n mediaTime += segment.duration;\n }\n};\nclass SyncInfo {\n /**\n * @param {number} start - media sequence start\n * @param {number} end - media sequence end\n * @param {number} segmentIndex - index for associated segment\n * @param {number|null} [partIndex] - index for associated part\n * @param {boolean} [appended] - appended indicator\n *\n */\n constructor({\n start,\n end,\n segmentIndex,\n partIndex = null,\n appended = false\n }) {\n this.start_ = start;\n this.end_ = end;\n this.segmentIndex_ = segmentIndex;\n this.partIndex_ = partIndex;\n this.appended_ = appended;\n }\n isInRange(targetTime) {\n return targetTime >= this.start && targetTime < this.end;\n }\n markAppended() {\n this.appended_ = true;\n }\n resetAppendedStatus() {\n this.appended_ = false;\n }\n get isAppended() {\n return this.appended_;\n }\n get start() {\n return this.start_;\n }\n get end() {\n return this.end_;\n }\n get segmentIndex() {\n return this.segmentIndex_;\n }\n get partIndex() {\n return this.partIndex_;\n }\n}\nclass SyncInfoData {\n /**\n *\n * @param {SyncInfo} segmentSyncInfo - sync info for a given segment\n * @param {Array} [partsSyncInfo] - sync infos for a list of parts for a given segment\n */\n constructor(segmentSyncInfo, partsSyncInfo = []) {\n this.segmentSyncInfo_ = segmentSyncInfo;\n this.partsSyncInfo_ = partsSyncInfo;\n }\n get segmentSyncInfo() {\n return this.segmentSyncInfo_;\n }\n get partsSyncInfo() {\n return this.partsSyncInfo_;\n }\n get hasPartsSyncInfo() {\n return this.partsSyncInfo_.length > 0;\n }\n resetAppendStatus() {\n this.segmentSyncInfo_.resetAppendedStatus();\n this.partsSyncInfo_.forEach(partSyncInfo => partSyncInfo.resetAppendedStatus());\n }\n}\nclass MediaSequenceSync {\n constructor() {\n /**\n * @type {Map}\n * @private\n */\n this.storage_ = new Map();\n this.diagnostics_ = '';\n this.isReliable_ = false;\n this.start_ = -Infinity;\n this.end_ = Infinity;\n }\n get start() {\n return this.start_;\n }\n get end() {\n return this.end_;\n }\n get diagnostics() {\n return this.diagnostics_;\n }\n get isReliable() {\n return this.isReliable_;\n }\n resetAppendedStatus() {\n this.storage_.forEach(syncInfoData => syncInfoData.resetAppendStatus());\n }\n /**\n * update sync storage\n *\n * @param {Object} playlist\n * @param 
{number} currentTime\n *\n * @return {void}\n */\n\n update(playlist, currentTime) {\n const {\n mediaSequence,\n segments\n } = playlist;\n this.isReliable_ = this.isReliablePlaylist_(mediaSequence, segments);\n if (!this.isReliable_) {\n return;\n }\n return this.updateStorage_(segments, mediaSequence, this.calculateBaseTime_(mediaSequence, currentTime));\n }\n /**\n * @param {number} targetTime\n * @return {SyncInfo|null}\n */\n\n getSyncInfoForTime(targetTime) {\n for (const {\n segmentSyncInfo,\n partsSyncInfo\n } of this.storage_.values()) {\n // Normal segment flow:\n if (!partsSyncInfo.length) {\n if (segmentSyncInfo.isInRange(targetTime)) {\n return segmentSyncInfo;\n }\n } else {\n // Low latency flow:\n for (const partSyncInfo of partsSyncInfo) {\n if (partSyncInfo.isInRange(targetTime)) {\n return partSyncInfo;\n }\n }\n }\n }\n return null;\n }\n updateStorage_(segments, startingMediaSequence, startingTime) {\n const newStorage = new Map();\n let newDiagnostics = '\\n';\n let currentStart = startingTime;\n let currentMediaSequence = startingMediaSequence;\n this.start_ = currentStart;\n segments.forEach((segment, segmentIndex) => {\n const prevSyncInfoData = this.storage_.get(currentMediaSequence);\n const segmentStart = currentStart;\n const segmentEnd = segmentStart + segment.duration;\n const segmentIsAppended = Boolean(prevSyncInfoData && prevSyncInfoData.segmentSyncInfo && prevSyncInfoData.segmentSyncInfo.isAppended);\n const segmentSyncInfo = new SyncInfo({\n start: segmentStart,\n end: segmentEnd,\n appended: segmentIsAppended,\n segmentIndex\n });\n segment.syncInfo = segmentSyncInfo;\n let currentPartStart = currentStart;\n const partsSyncInfo = (segment.parts || []).map((part, partIndex) => {\n const partStart = currentPartStart;\n const partEnd = currentPartStart + part.duration;\n const partIsAppended = Boolean(prevSyncInfoData && prevSyncInfoData.partsSyncInfo && prevSyncInfoData.partsSyncInfo[partIndex] && prevSyncInfoData.partsSyncInfo[partIndex].isAppended);\n const partSyncInfo = new SyncInfo({\n start: partStart,\n end: partEnd,\n appended: partIsAppended,\n segmentIndex,\n partIndex\n });\n currentPartStart = partEnd;\n newDiagnostics += `Media Sequence: ${currentMediaSequence}.${partIndex} | Range: ${partStart} --> ${partEnd} | Appended: ${partIsAppended}\\n`;\n part.syncInfo = partSyncInfo;\n return partSyncInfo;\n });\n newStorage.set(currentMediaSequence, new SyncInfoData(segmentSyncInfo, partsSyncInfo));\n newDiagnostics += `${compactSegmentUrlDescription(segment.resolvedUri)} | Media Sequence: ${currentMediaSequence} | Range: ${segmentStart} --> ${segmentEnd} | Appended: ${segmentIsAppended}\\n`;\n currentMediaSequence++;\n currentStart = segmentEnd;\n });\n this.end_ = currentStart;\n this.storage_ = newStorage;\n this.diagnostics_ = newDiagnostics;\n }\n calculateBaseTime_(mediaSequence, fallback) {\n if (!this.storage_.size) {\n // Initial setup flow.\n return 0;\n }\n if (this.storage_.has(mediaSequence)) {\n // Normal flow.\n return this.storage_.get(mediaSequence).segmentSyncInfo.start;\n } // Fallback flow.\n // There is a gap between last recorded playlist and a new one received.\n\n return fallback;\n }\n isReliablePlaylist_(mediaSequence, segments) {\n return mediaSequence !== undefined && mediaSequence !== null && Array.isArray(segments) && segments.length;\n }\n}\n\n/**\n * @file sync-controller.js\n */\n// synchronize expired playlist segments.\n// the max media sequence diff is 48 hours of live stream\n// content with two second segments. 
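(48 hours * 3600 seconds / 2 seconds per segment = 86400). 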
Anything larger than that\n// will likely be invalid.\n\nconst MAX_MEDIA_SEQUENCE_DIFF_FOR_SYNC = 86400;\nconst syncPointStrategies = [\n// Stategy \"VOD\": Handle the VOD-case where the sync-point is *always*\n// the equivalence display-time 0 === segment-index 0\n{\n name: 'VOD',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n if (duration !== Infinity) {\n const syncPoint = {\n time: 0,\n segmentIndex: 0,\n partIndex: null\n };\n return syncPoint;\n }\n return null;\n }\n}, {\n name: 'MediaSequence',\n /**\n * run media sequence strategy\n *\n * @param {SyncController} syncController\n * @param {Object} playlist\n * @param {number} duration\n * @param {number} currentTimeline\n * @param {number} currentTime\n * @param {string} type\n */\n run: (syncController, playlist, duration, currentTimeline, currentTime, type) => {\n const mediaSequenceSync = syncController.getMediaSequenceSync(type);\n if (!mediaSequenceSync) {\n return null;\n }\n if (!mediaSequenceSync.isReliable) {\n return null;\n }\n const syncInfo = mediaSequenceSync.getSyncInfoForTime(currentTime);\n if (!syncInfo) {\n return null;\n }\n return {\n time: syncInfo.start,\n partIndex: syncInfo.partIndex,\n segmentIndex: syncInfo.segmentIndex\n };\n }\n},\n// Stategy \"ProgramDateTime\": We have a program-date-time tag in this playlist\n{\n name: 'ProgramDateTime',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n if (!Object.keys(syncController.timelineToDatetimeMappings).length) {\n return null;\n }\n let syncPoint = null;\n let lastDistance = null;\n const partsAndSegments = getPartsAndSegments(playlist);\n currentTime = currentTime || 0;\n for (let i = 0; i < partsAndSegments.length; i++) {\n // start from the end and loop backwards for live\n // or start from the front and loop forwards for non-live\n const index = playlist.endList || currentTime === 0 ? 
i : partsAndSegments.length - (i + 1);\n const partAndSegment = partsAndSegments[index];\n const segment = partAndSegment.segment;\n const datetimeMapping = syncController.timelineToDatetimeMappings[segment.timeline];\n if (!datetimeMapping || !segment.dateTimeObject) {\n continue;\n }\n const segmentTime = segment.dateTimeObject.getTime() / 1000;\n let start = segmentTime + datetimeMapping; // take part duration into account.\n\n if (segment.parts && typeof partAndSegment.partIndex === 'number') {\n for (let z = 0; z < partAndSegment.partIndex; z++) {\n start += segment.parts[z].duration;\n }\n }\n const distance = Math.abs(currentTime - start); // Once the distance begins to increase, or if distance is 0, we have passed\n // currentTime and can stop looking for better candidates\n\n if (lastDistance !== null && (distance === 0 || lastDistance < distance)) {\n break;\n }\n lastDistance = distance;\n syncPoint = {\n time: start,\n segmentIndex: partAndSegment.segmentIndex,\n partIndex: partAndSegment.partIndex\n };\n }\n return syncPoint;\n }\n},\n// Stategy \"Segment\": We have a known time mapping for a timeline and a\n// segment in the current timeline with timing data\n{\n name: 'Segment',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n let syncPoint = null;\n let lastDistance = null;\n currentTime = currentTime || 0;\n const partsAndSegments = getPartsAndSegments(playlist);\n for (let i = 0; i < partsAndSegments.length; i++) {\n // start from the end and loop backwards for live\n // or start from the front and loop forwards for non-live\n const index = playlist.endList || currentTime === 0 ? i : partsAndSegments.length - (i + 1);\n const partAndSegment = partsAndSegments[index];\n const segment = partAndSegment.segment;\n const start = partAndSegment.part && partAndSegment.part.start || segment && segment.start;\n if (segment.timeline === currentTimeline && typeof start !== 'undefined') {\n const distance = Math.abs(currentTime - start); // Once the distance begins to increase, we have passed\n // currentTime and can stop looking for better candidates\n\n if (lastDistance !== null && lastDistance < distance) {\n break;\n }\n if (!syncPoint || lastDistance === null || lastDistance >= distance) {\n lastDistance = distance;\n syncPoint = {\n time: start,\n segmentIndex: partAndSegment.segmentIndex,\n partIndex: partAndSegment.partIndex\n };\n }\n }\n }\n return syncPoint;\n }\n},\n// Stategy \"Discontinuity\": We have a discontinuity with a known\n// display-time\n{\n name: 'Discontinuity',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n let syncPoint = null;\n currentTime = currentTime || 0;\n if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {\n let lastDistance = null;\n for (let i = 0; i < playlist.discontinuityStarts.length; i++) {\n const segmentIndex = playlist.discontinuityStarts[i];\n const discontinuity = playlist.discontinuitySequence + i + 1;\n const discontinuitySync = syncController.discontinuities[discontinuity];\n if (discontinuitySync) {\n const distance = Math.abs(currentTime - discontinuitySync.time); // Once the distance begins to increase, we have passed\n // currentTime and can stop looking for better candidates\n\n if (lastDistance !== null && lastDistance < distance) {\n break;\n }\n if (!syncPoint || lastDistance === null || lastDistance >= distance) {\n lastDistance = distance;\n syncPoint = {\n time: discontinuitySync.time,\n segmentIndex,\n partIndex: null\n };\n }\n }\n 
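// For example (illustrative numbers only): with currentTime = 100 and recorded\n // discontinuity sync times of 40, 90 and 130, the distances are 60, 10 and 30,\n // so the 90-second entry is kept and the loop exits once the distance starts\n // increasing again.\n 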
}\n }\n return syncPoint;\n }\n},\n// Stategy \"Playlist\": We have a playlist with a known mapping of\n// segment index to display time\n{\n name: 'Playlist',\n run: (syncController, playlist, duration, currentTimeline, currentTime) => {\n if (playlist.syncInfo) {\n const syncPoint = {\n time: playlist.syncInfo.time,\n segmentIndex: playlist.syncInfo.mediaSequence - playlist.mediaSequence,\n partIndex: null\n };\n return syncPoint;\n }\n return null;\n }\n}];\nclass SyncController extends videojs.EventTarget {\n constructor(options = {}) {\n super(); // ...for synching across variants\n\n this.timelines = [];\n this.discontinuities = [];\n this.timelineToDatetimeMappings = {}; // TODO: this map should be only available for HLS. Since only HLS has MediaSequence.\n // For some reason this map helps with syncing between quality switch for MPEG-DASH as well.\n // Moreover if we disable this map for MPEG-DASH - quality switch will be broken.\n // MPEG-DASH should have its own separate sync strategy\n\n this.mediaSequenceStorage_ = {\n main: new MediaSequenceSync(),\n audio: new MediaSequenceSync(),\n vtt: new MediaSequenceSync()\n };\n this.logger_ = logger('SyncController');\n }\n /**\n *\n * @param {string} loaderType\n * @return {MediaSequenceSync|null}\n */\n\n getMediaSequenceSync(loaderType) {\n return this.mediaSequenceStorage_[loaderType] || null;\n }\n /**\n * Find a sync-point for the playlist specified\n *\n * A sync-point is defined as a known mapping from display-time to\n * a segment-index in the current playlist.\n *\n * @param {Playlist} playlist\n * The playlist that needs a sync-point\n * @param {number} duration\n * Duration of the MediaSource (Infinite if playing a live source)\n * @param {number} currentTimeline\n * The last timeline from which a segment was loaded\n * @param {number} currentTime\n * Current player's time\n * @param {string} type\n * Segment loader type\n * @return {Object}\n * A sync-point object\n */\n\n getSyncPoint(playlist, duration, currentTimeline, currentTime, type) {\n // Always use VOD sync point for VOD\n if (duration !== Infinity) {\n const vodSyncPointStrategy = syncPointStrategies.find(({\n name\n }) => name === 'VOD');\n return vodSyncPointStrategy.run(this, playlist, duration);\n }\n const syncPoints = this.runStrategies_(playlist, duration, currentTimeline, currentTime, type);\n if (!syncPoints.length) {\n // Signal that we need to attempt to get a sync-point manually\n // by fetching a segment in the playlist and constructing\n // a sync-point from that information\n return null;\n } // If we have exact match just return it instead of finding the nearest distance\n\n for (const syncPointInfo of syncPoints) {\n const {\n syncPoint,\n strategy\n } = syncPointInfo;\n const {\n segmentIndex,\n time\n } = syncPoint;\n if (segmentIndex < 0) {\n continue;\n }\n const selectedSegment = playlist.segments[segmentIndex];\n const start = time;\n const end = start + selectedSegment.duration;\n this.logger_(`Strategy: ${strategy}. Current time: ${currentTime}. selected segment: ${segmentIndex}. 
Time: [${start} -> ${end}]}`);\n if (currentTime >= start && currentTime < end) {\n this.logger_('Found sync point with exact match: ', syncPoint);\n return syncPoint;\n }\n } // Now find the sync-point that is closest to the currentTime because\n // that should result in the most accurate guess about which segment\n // to fetch\n\n return this.selectSyncPoint_(syncPoints, {\n key: 'time',\n value: currentTime\n });\n }\n /**\n * Calculate the amount of time that has expired off the playlist during playback\n *\n * @param {Playlist} playlist\n * Playlist object to calculate expired from\n * @param {number} duration\n * Duration of the MediaSource (Infinity if playling a live source)\n * @return {number|null}\n * The amount of time that has expired off the playlist during playback. Null\n * if no sync-points for the playlist can be found.\n */\n\n getExpiredTime(playlist, duration) {\n if (!playlist || !playlist.segments) {\n return null;\n }\n const syncPoints = this.runStrategies_(playlist, duration, playlist.discontinuitySequence, 0); // Without sync-points, there is not enough information to determine the expired time\n\n if (!syncPoints.length) {\n return null;\n }\n const syncPoint = this.selectSyncPoint_(syncPoints, {\n key: 'segmentIndex',\n value: 0\n }); // If the sync-point is beyond the start of the playlist, we want to subtract the\n // duration from index 0 to syncPoint.segmentIndex instead of adding.\n\n if (syncPoint.segmentIndex > 0) {\n syncPoint.time *= -1;\n }\n return Math.abs(syncPoint.time + sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: playlist.segments,\n startIndex: syncPoint.segmentIndex,\n endIndex: 0\n }));\n }\n /**\n * Runs each sync-point strategy and returns a list of sync-points returned by the\n * strategies\n *\n * @private\n * @param {Playlist} playlist\n * The playlist that needs a sync-point\n * @param {number} duration\n * Duration of the MediaSource (Infinity if playing a live source)\n * @param {number} currentTimeline\n * The last timeline from which a segment was loaded\n * @param {number} currentTime\n * Current player's time\n * @param {string} type\n * Segment loader type\n * @return {Array}\n * A list of sync-point objects\n */\n\n runStrategies_(playlist, duration, currentTimeline, currentTime, type) {\n const syncPoints = []; // Try to find a sync-point in by utilizing various strategies...\n\n for (let i = 0; i < syncPointStrategies.length; i++) {\n const strategy = syncPointStrategies[i];\n const syncPoint = strategy.run(this, playlist, duration, currentTimeline, currentTime, type);\n if (syncPoint) {\n syncPoint.strategy = strategy.name;\n syncPoints.push({\n strategy: strategy.name,\n syncPoint\n });\n }\n }\n return syncPoints;\n }\n /**\n * Selects the sync-point nearest the specified target\n *\n * @private\n * @param {Array} syncPoints\n * List of sync-points to select from\n * @param {Object} target\n * Object specifying the property and value we are targeting\n * @param {string} target.key\n * Specifies the property to target. 
Must be either 'time' or 'segmentIndex'\n * @param {number} target.value\n * The value to target for the specified key.\n * @return {Object}\n * The sync-point nearest the target\n */\n\n selectSyncPoint_(syncPoints, target) {\n let bestSyncPoint = syncPoints[0].syncPoint;\n let bestDistance = Math.abs(syncPoints[0].syncPoint[target.key] - target.value);\n let bestStrategy = syncPoints[0].strategy;\n for (let i = 1; i < syncPoints.length; i++) {\n const newDistance = Math.abs(syncPoints[i].syncPoint[target.key] - target.value);\n if (newDistance < bestDistance) {\n bestDistance = newDistance;\n bestSyncPoint = syncPoints[i].syncPoint;\n bestStrategy = syncPoints[i].strategy;\n }\n }\n this.logger_(`syncPoint for [${target.key}: ${target.value}] chosen with strategy` + ` [${bestStrategy}]: [time:${bestSyncPoint.time},` + ` segmentIndex:${bestSyncPoint.segmentIndex}` + (typeof bestSyncPoint.partIndex === 'number' ? `,partIndex:${bestSyncPoint.partIndex}` : '') + ']');\n return bestSyncPoint;\n }\n /**\n * Save any meta-data present on the segments when segments leave\n * the live window to the playlist to allow for synchronization at the\n * playlist level later.\n *\n * @param {Playlist} oldPlaylist - The previous active playlist\n * @param {Playlist} newPlaylist - The updated and most current playlist\n */\n\n saveExpiredSegmentInfo(oldPlaylist, newPlaylist) {\n const mediaSequenceDiff = newPlaylist.mediaSequence - oldPlaylist.mediaSequence; // Ignore large media sequence gaps\n\n if (mediaSequenceDiff > MAX_MEDIA_SEQUENCE_DIFF_FOR_SYNC) {\n videojs.log.warn(`Not saving expired segment info. Media sequence gap ${mediaSequenceDiff} is too large.`);\n return;\n } // When a segment expires from the playlist and it has a start time\n // save that information as a possible sync-point reference in future\n\n for (let i = mediaSequenceDiff - 1; i >= 0; i--) {\n const lastRemovedSegment = oldPlaylist.segments[i];\n if (lastRemovedSegment && typeof lastRemovedSegment.start !== 'undefined') {\n newPlaylist.syncInfo = {\n mediaSequence: oldPlaylist.mediaSequence + i,\n time: lastRemovedSegment.start\n };\n this.logger_(`playlist refresh sync: [time:${newPlaylist.syncInfo.time},` + ` mediaSequence: ${newPlaylist.syncInfo.mediaSequence}]`);\n this.trigger('syncinfoupdate');\n break;\n }\n }\n }\n /**\n * Save the mapping from playlist's ProgramDateTime to display. This should only happen\n * before segments start to load.\n *\n * @param {Playlist} playlist - The currently active playlist\n */\n\n setDateTimeMappingForStart(playlist) {\n // It's possible for the playlist to be updated before playback starts, meaning time\n // zero is not yet set. 
If, during these playlist refreshes, a discontinuity is\n // crossed, then the old time zero mapping (for the prior timeline) would be retained\n // unless the mappings are cleared.\n this.timelineToDatetimeMappings = {};\n if (playlist.segments && playlist.segments.length && playlist.segments[0].dateTimeObject) {\n const firstSegment = playlist.segments[0];\n const playlistTimestamp = firstSegment.dateTimeObject.getTime() / 1000;\n this.timelineToDatetimeMappings[firstSegment.timeline] = -playlistTimestamp;\n }\n }\n /**\n * Calculates and saves timeline mappings, playlist sync info, and segment timing values\n * based on the latest timing information.\n *\n * @param {Object} options\n * Options object\n * @param {SegmentInfo} options.segmentInfo\n * The current active request information\n * @param {boolean} options.shouldSaveTimelineMapping\n * If there's a timeline change, determines if the timeline mapping should be\n * saved for timeline mapping and program date time mappings.\n */\n\n saveSegmentTimingInfo({\n segmentInfo,\n shouldSaveTimelineMapping\n }) {\n const didCalculateSegmentTimeMapping = this.calculateSegmentTimeMapping_(segmentInfo, segmentInfo.timingInfo, shouldSaveTimelineMapping);\n const segment = segmentInfo.segment;\n if (didCalculateSegmentTimeMapping) {\n this.saveDiscontinuitySyncInfo_(segmentInfo); // If the playlist does not have sync information yet, record that information\n // now with segment timing information\n\n if (!segmentInfo.playlist.syncInfo) {\n segmentInfo.playlist.syncInfo = {\n mediaSequence: segmentInfo.playlist.mediaSequence + segmentInfo.mediaIndex,\n time: segment.start\n };\n }\n }\n const dateTime = segment.dateTimeObject;\n if (segment.discontinuity && shouldSaveTimelineMapping && dateTime) {\n this.timelineToDatetimeMappings[segment.timeline] = -(dateTime.getTime() / 1000);\n }\n }\n timestampOffsetForTimeline(timeline) {\n if (typeof this.timelines[timeline] === 'undefined') {\n return null;\n }\n return this.timelines[timeline].time;\n }\n mappingForTimeline(timeline) {\n if (typeof this.timelines[timeline] === 'undefined') {\n return null;\n }\n return this.timelines[timeline].mapping;\n }\n /**\n * Use the \"media time\" for a segment to generate a mapping to \"display time\" and\n * save that display time to the segment.\n *\n * @private\n * @param {SegmentInfo} segmentInfo\n * The current active request information\n * @param {Object} timingInfo\n * The start and end time of the current segment in \"media time\"\n * @param {boolean} shouldSaveTimelineMapping\n * If there's a timeline change, determines if the timeline mapping should be\n * saved in timelines.\n * @return {boolean}\n * Returns false if segment time mapping could not be calculated\n */\n\n calculateSegmentTimeMapping_(segmentInfo, timingInfo, shouldSaveTimelineMapping) {\n // TODO: remove side effects\n const segment = segmentInfo.segment;\n const part = segmentInfo.part;\n let mappingObj = this.timelines[segmentInfo.timeline];\n let start;\n let end;\n if (typeof segmentInfo.timestampOffset === 'number') {\n mappingObj = {\n time: segmentInfo.startOfSegment,\n mapping: segmentInfo.startOfSegment - timingInfo.start\n };\n if (shouldSaveTimelineMapping) {\n this.timelines[segmentInfo.timeline] = mappingObj;\n this.trigger('timestampoffset');\n this.logger_(`time mapping for timeline ${segmentInfo.timeline}: ` + `[time: ${mappingObj.time}] [mapping: ${mappingObj.mapping}]`);\n }\n start = segmentInfo.startOfSegment;\n end = timingInfo.end + mappingObj.mapping;\n } else 
if (mappingObj) {\n start = timingInfo.start + mappingObj.mapping;\n end = timingInfo.end + mappingObj.mapping;\n } else {\n return false;\n }\n if (part) {\n part.start = start;\n part.end = end;\n } // If we don't have a segment start yet or the start value we got\n // is less than our current segment.start value, save a new start value.\n // We have to do this because parts will have segment timing info saved\n // multiple times and we want segment start to be the earliest part start\n // value for that segment.\n\n if (!segment.start || start < segment.start) {\n segment.start = start;\n }\n segment.end = end;\n return true;\n }\n /**\n * Each time we have discontinuity in the playlist, attempt to calculate the location\n * in display of the start of the discontinuity and save that. We also save an accuracy\n * value so that we save values with the most accuracy (closest to 0.)\n *\n * @private\n * @param {SegmentInfo} segmentInfo - The current active request information\n */\n\n saveDiscontinuitySyncInfo_(segmentInfo) {\n const playlist = segmentInfo.playlist;\n const segment = segmentInfo.segment; // If the current segment is a discontinuity then we know exactly where\n // the start of the range and it's accuracy is 0 (greater accuracy values\n // mean more approximation)\n\n if (segment.discontinuity) {\n this.discontinuities[segment.timeline] = {\n time: segment.start,\n accuracy: 0\n };\n } else if (playlist.discontinuityStarts && playlist.discontinuityStarts.length) {\n // Search for future discontinuities that we can provide better timing\n // information for and save that information for sync purposes\n for (let i = 0; i < playlist.discontinuityStarts.length; i++) {\n const segmentIndex = playlist.discontinuityStarts[i];\n const discontinuity = playlist.discontinuitySequence + i + 1;\n const mediaIndexDiff = segmentIndex - segmentInfo.mediaIndex;\n const accuracy = Math.abs(mediaIndexDiff);\n if (!this.discontinuities[discontinuity] || this.discontinuities[discontinuity].accuracy > accuracy) {\n let time;\n if (mediaIndexDiff < 0) {\n time = segment.start - sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: playlist.segments,\n startIndex: segmentInfo.mediaIndex,\n endIndex: segmentIndex\n });\n } else {\n time = segment.end + sumDurations({\n defaultDuration: playlist.targetDuration,\n durationList: playlist.segments,\n startIndex: segmentInfo.mediaIndex + 1,\n endIndex: segmentIndex\n });\n }\n this.discontinuities[discontinuity] = {\n time,\n accuracy\n };\n }\n }\n }\n }\n dispose() {\n this.trigger('dispose');\n this.off();\n }\n}\n\n/**\n * The TimelineChangeController acts as a source for segment loaders to listen for and\n * keep track of latest and pending timeline changes. 
This is useful to ensure proper\n * sync, as each loader may need to make a consideration for what timeline the other\n * loader is on before making changes which could impact the other loader's media.\n *\n * @class TimelineChangeController\n * @extends videojs.EventTarget\n */\n\nclass TimelineChangeController extends videojs.EventTarget {\n constructor() {\n super();\n this.pendingTimelineChanges_ = {};\n this.lastTimelineChanges_ = {};\n }\n clearPendingTimelineChange(type) {\n this.pendingTimelineChanges_[type] = null;\n this.trigger('pendingtimelinechange');\n }\n pendingTimelineChange({\n type,\n from,\n to\n }) {\n if (typeof from === 'number' && typeof to === 'number') {\n this.pendingTimelineChanges_[type] = {\n type,\n from,\n to\n };\n this.trigger('pendingtimelinechange');\n }\n return this.pendingTimelineChanges_[type];\n }\n lastTimelineChange({\n type,\n from,\n to\n }) {\n if (typeof from === 'number' && typeof to === 'number') {\n this.lastTimelineChanges_[type] = {\n type,\n from,\n to\n };\n delete this.pendingTimelineChanges_[type];\n this.trigger('timelinechange');\n }\n return this.lastTimelineChanges_[type];\n }\n dispose() {\n this.trigger('dispose');\n this.pendingTimelineChanges_ = {};\n this.lastTimelineChanges_ = {};\n this.off();\n }\n}\n\n/* rollup-plugin-worker-factory start for worker!/home/runner/work/http-streaming/http-streaming/src/decrypter-worker.js */\nconst workerCode = transform(getWorkerString(function () {\n /**\n * @file stream.js\n */\n\n /**\n * A lightweight readable stream implementation that handles event dispatching.\n *\n * @class Stream\n */\n\n var Stream = /*#__PURE__*/function () {\n function Stream() {\n this.listeners = {};\n }\n /**\n * Add a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener the callback to be invoked when an event of\n * the specified type occurs\n */\n\n var _proto = Stream.prototype;\n _proto.on = function on(type, listener) {\n if (!this.listeners[type]) {\n this.listeners[type] = [];\n }\n this.listeners[type].push(listener);\n }\n /**\n * Remove a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener a function previously registered for this\n * type of event through `on`\n * @return {boolean} if we could turn it off or not\n */;\n\n _proto.off = function off(type, listener) {\n if (!this.listeners[type]) {\n return false;\n }\n var index = this.listeners[type].indexOf(listener); // TODO: which is better?\n // In Video.js we slice listener functions\n // on trigger so that it does not mess up the order\n // while we loop through.\n //\n // Here we slice on off so that the loop in trigger\n // can continue using its old reference to loop without\n // messing up the order.\n\n this.listeners[type] = this.listeners[type].slice(0);\n this.listeners[type].splice(index, 1);\n return index > -1;\n }\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n *\n * @param {string} type the event name\n */;\n\n _proto.trigger = function trigger(type) {\n var callbacks = this.listeners[type];\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. 
Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n if (arguments.length === 2) {\n var length = callbacks.length;\n for (var i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n var args = Array.prototype.slice.call(arguments, 1);\n var _length = callbacks.length;\n for (var _i = 0; _i < _length; ++_i) {\n callbacks[_i].apply(this, args);\n }\n }\n }\n /**\n * Destroys the stream and cleans up.\n */;\n\n _proto.dispose = function dispose() {\n this.listeners = {};\n }\n /**\n * Forwards all `data` events on this stream to the destination stream. The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n *\n * @param {Stream} destination the stream that will receive all `data` events\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */;\n\n _proto.pipe = function pipe(destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n };\n return Stream;\n }();\n /*! @name pkcs7 @version 1.0.4 @license Apache-2.0 */\n\n /**\n * Returns the subarray of a Uint8Array without PKCS#7 padding.\n *\n * @param padded {Uint8Array} unencrypted bytes that have been padded\n * @return {Uint8Array} the unpadded bytes\n * @see http://tools.ietf.org/html/rfc5652\n */\n\n function unpad(padded) {\n return padded.subarray(0, padded.byteLength - padded[padded.byteLength - 1]);\n }\n /*! @name aes-decrypter @version 4.0.1 @license Apache-2.0 */\n\n /**\n * @file aes.js\n *\n * This file contains an adaptation of the AES decryption algorithm\n * from the Standford Javascript Cryptography Library. That work is\n * covered by the following copyright and permissions notice:\n *\n * Copyright 2009-2010 Emily Stark, Mike Hamburg, Dan Boneh.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n *\n * 2. Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following\n * disclaimer in the documentation and/or other materials provided\n * with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. 
IN NO EVENT SHALL OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\n * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\n * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\n * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n *\n * The views and conclusions contained in the software and documentation\n * are those of the authors and should not be interpreted as representing\n * official policies, either expressed or implied, of the authors.\n */\n\n /**\n * Expand the S-box tables.\n *\n * @private\n */\n\n const precompute = function () {\n const tables = [[[], [], [], [], []], [[], [], [], [], []]];\n const encTable = tables[0];\n const decTable = tables[1];\n const sbox = encTable[4];\n const sboxInv = decTable[4];\n let i;\n let x;\n let xInv;\n const d = [];\n const th = [];\n let x2;\n let x4;\n let x8;\n let s;\n let tEnc;\n let tDec; // Compute double and third tables\n\n for (i = 0; i < 256; i++) {\n th[(d[i] = i << 1 ^ (i >> 7) * 283) ^ i] = i;\n }\n for (x = xInv = 0; !sbox[x]; x ^= x2 || 1, xInv = th[xInv] || 1) {\n // Compute sbox\n s = xInv ^ xInv << 1 ^ xInv << 2 ^ xInv << 3 ^ xInv << 4;\n s = s >> 8 ^ s & 255 ^ 99;\n sbox[x] = s;\n sboxInv[s] = x; // Compute MixColumns\n\n x8 = d[x4 = d[x2 = d[x]]];\n tDec = x8 * 0x1010101 ^ x4 * 0x10001 ^ x2 * 0x101 ^ x * 0x1010100;\n tEnc = d[s] * 0x101 ^ s * 0x1010100;\n for (i = 0; i < 4; i++) {\n encTable[i][x] = tEnc = tEnc << 24 ^ tEnc >>> 8;\n decTable[i][s] = tDec = tDec << 24 ^ tDec >>> 8;\n }\n } // Compactify. Considerable speedup on Firefox.\n\n for (i = 0; i < 5; i++) {\n encTable[i] = encTable[i].slice(0);\n decTable[i] = decTable[i].slice(0);\n }\n return tables;\n };\n let aesTables = null;\n /**\n * Schedule out an AES key for both encryption and decryption. This\n * is a low-level class. Use a cipher mode to do bulk encryption.\n *\n * @class AES\n * @param key {Array} The key as an array of 4, 6 or 8 words.\n */\n\n class AES {\n constructor(key) {\n /**\n * The expanded S-box and inverse S-box tables. These will be computed\n * on the client so that we don't have to send them down the wire.\n *\n * There are two tables, _tables[0] is for encryption and\n * _tables[1] is for decryption.\n *\n * The first 4 sub-tables are the expanded S-box with MixColumns. 
The\n * last (_tables[01][4]) is the S-box itself.\n *\n * @private\n */\n // if we have yet to precompute the S-box tables\n // do so now\n if (!aesTables) {\n aesTables = precompute();\n } // then make a copy of that object for use\n\n this._tables = [[aesTables[0][0].slice(), aesTables[0][1].slice(), aesTables[0][2].slice(), aesTables[0][3].slice(), aesTables[0][4].slice()], [aesTables[1][0].slice(), aesTables[1][1].slice(), aesTables[1][2].slice(), aesTables[1][3].slice(), aesTables[1][4].slice()]];\n let i;\n let j;\n let tmp;\n const sbox = this._tables[0][4];\n const decTable = this._tables[1];\n const keyLen = key.length;\n let rcon = 1;\n if (keyLen !== 4 && keyLen !== 6 && keyLen !== 8) {\n throw new Error('Invalid aes key size');\n }\n const encKey = key.slice(0);\n const decKey = [];\n this._key = [encKey, decKey]; // schedule encryption keys\n\n for (i = keyLen; i < 4 * keyLen + 28; i++) {\n tmp = encKey[i - 1]; // apply sbox\n\n if (i % keyLen === 0 || keyLen === 8 && i % keyLen === 4) {\n tmp = sbox[tmp >>> 24] << 24 ^ sbox[tmp >> 16 & 255] << 16 ^ sbox[tmp >> 8 & 255] << 8 ^ sbox[tmp & 255]; // shift rows and add rcon\n\n if (i % keyLen === 0) {\n tmp = tmp << 8 ^ tmp >>> 24 ^ rcon << 24;\n rcon = rcon << 1 ^ (rcon >> 7) * 283;\n }\n }\n encKey[i] = encKey[i - keyLen] ^ tmp;\n } // schedule decryption keys\n\n for (j = 0; i; j++, i--) {\n tmp = encKey[j & 3 ? i : i - 4];\n if (i <= 4 || j < 4) {\n decKey[j] = tmp;\n } else {\n decKey[j] = decTable[0][sbox[tmp >>> 24]] ^ decTable[1][sbox[tmp >> 16 & 255]] ^ decTable[2][sbox[tmp >> 8 & 255]] ^ decTable[3][sbox[tmp & 255]];\n }\n }\n }\n /**\n * Decrypt 16 bytes, specified as four 32-bit words.\n *\n * @param {number} encrypted0 the first word to decrypt\n * @param {number} encrypted1 the second word to decrypt\n * @param {number} encrypted2 the third word to decrypt\n * @param {number} encrypted3 the fourth word to decrypt\n * @param {Int32Array} out the array to write the decrypted words\n * into\n * @param {number} offset the offset into the output array to start\n * writing results\n * @return {Array} The plaintext.\n */\n\n decrypt(encrypted0, encrypted1, encrypted2, encrypted3, out, offset) {\n const key = this._key[1]; // state variables a,b,c,d are loaded with pre-whitened data\n\n let a = encrypted0 ^ key[0];\n let b = encrypted3 ^ key[1];\n let c = encrypted2 ^ key[2];\n let d = encrypted1 ^ key[3];\n let a2;\n let b2;\n let c2; // key.length === 2 ?\n\n const nInnerRounds = key.length / 4 - 2;\n let i;\n let kIndex = 4;\n const table = this._tables[1]; // load up the tables\n\n const table0 = table[0];\n const table1 = table[1];\n const table2 = table[2];\n const table3 = table[3];\n const sbox = table[4]; // Inner rounds. 
Cribbed from OpenSSL.\n\n for (i = 0; i < nInnerRounds; i++) {\n a2 = table0[a >>> 24] ^ table1[b >> 16 & 255] ^ table2[c >> 8 & 255] ^ table3[d & 255] ^ key[kIndex];\n b2 = table0[b >>> 24] ^ table1[c >> 16 & 255] ^ table2[d >> 8 & 255] ^ table3[a & 255] ^ key[kIndex + 1];\n c2 = table0[c >>> 24] ^ table1[d >> 16 & 255] ^ table2[a >> 8 & 255] ^ table3[b & 255] ^ key[kIndex + 2];\n d = table0[d >>> 24] ^ table1[a >> 16 & 255] ^ table2[b >> 8 & 255] ^ table3[c & 255] ^ key[kIndex + 3];\n kIndex += 4;\n a = a2;\n b = b2;\n c = c2;\n } // Last round.\n\n for (i = 0; i < 4; i++) {\n out[(3 & -i) + offset] = sbox[a >>> 24] << 24 ^ sbox[b >> 16 & 255] << 16 ^ sbox[c >> 8 & 255] << 8 ^ sbox[d & 255] ^ key[kIndex++];\n a2 = a;\n a = b;\n b = c;\n c = d;\n d = a2;\n }\n }\n }\n /**\n * @file async-stream.js\n */\n\n /**\n * A wrapper around the Stream class to use setTimeout\n * and run stream \"jobs\" Asynchronously\n *\n * @class AsyncStream\n * @extends Stream\n */\n\n class AsyncStream extends Stream {\n constructor() {\n super(Stream);\n this.jobs = [];\n this.delay = 1;\n this.timeout_ = null;\n }\n /**\n * process an async job\n *\n * @private\n */\n\n processJob_() {\n this.jobs.shift()();\n if (this.jobs.length) {\n this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);\n } else {\n this.timeout_ = null;\n }\n }\n /**\n * push a job into the stream\n *\n * @param {Function} job the job to push into the stream\n */\n\n push(job) {\n this.jobs.push(job);\n if (!this.timeout_) {\n this.timeout_ = setTimeout(this.processJob_.bind(this), this.delay);\n }\n }\n }\n /**\n * @file decrypter.js\n *\n * An asynchronous implementation of AES-128 CBC decryption with\n * PKCS#7 padding.\n */\n\n /**\n * Convert network-order (big-endian) bytes into their little-endian\n * representation.\n */\n\n const ntoh = function (word) {\n return word << 24 | (word & 0xff00) << 8 | (word & 0xff0000) >> 8 | word >>> 24;\n };\n /**\n * Decrypt bytes using AES-128 with CBC and PKCS#7 padding.\n *\n * @param {Uint8Array} encrypted the encrypted bytes\n * @param {Uint32Array} key the bytes of the decryption key\n * @param {Uint32Array} initVector the initialization vector (IV) to\n * use for the first round of CBC.\n * @return {Uint8Array} the decrypted bytes\n *\n * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard\n * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Block_Chaining_.28CBC.29\n * @see https://tools.ietf.org/html/rfc2315\n */\n\n const decrypt = function (encrypted, key, initVector) {\n // word-level access to the encrypted bytes\n const encrypted32 = new Int32Array(encrypted.buffer, encrypted.byteOffset, encrypted.byteLength >> 2);\n const decipher = new AES(Array.prototype.slice.call(key)); // byte and word-level access for the decrypted output\n\n const decrypted = new Uint8Array(encrypted.byteLength);\n const decrypted32 = new Int32Array(decrypted.buffer); // temporary variables for working with the IV, encrypted, and\n // decrypted data\n\n let init0;\n let init1;\n let init2;\n let init3;\n let encrypted0;\n let encrypted1;\n let encrypted2;\n let encrypted3; // iteration variable\n\n let wordIx; // pull out the words of the IV to ensure we don't modify the\n // passed-in reference and easier access\n\n init0 = initVector[0];\n init1 = initVector[1];\n init2 = initVector[2];\n init3 = initVector[3]; // decrypt four word sequences, applying cipher-block chaining (CBC)\n // to each decrypted block\n\n for (wordIx = 0; wordIx < encrypted32.length; 
wordIx += 4) {\n // convert big-endian (network order) words into little-endian\n // (javascript order)\n encrypted0 = ntoh(encrypted32[wordIx]);\n encrypted1 = ntoh(encrypted32[wordIx + 1]);\n encrypted2 = ntoh(encrypted32[wordIx + 2]);\n encrypted3 = ntoh(encrypted32[wordIx + 3]); // decrypt the block\n\n decipher.decrypt(encrypted0, encrypted1, encrypted2, encrypted3, decrypted32, wordIx); // XOR with the IV, and restore network byte-order to obtain the\n // plaintext\n\n decrypted32[wordIx] = ntoh(decrypted32[wordIx] ^ init0);\n decrypted32[wordIx + 1] = ntoh(decrypted32[wordIx + 1] ^ init1);\n decrypted32[wordIx + 2] = ntoh(decrypted32[wordIx + 2] ^ init2);\n decrypted32[wordIx + 3] = ntoh(decrypted32[wordIx + 3] ^ init3); // setup the IV for the next round\n\n init0 = encrypted0;\n init1 = encrypted1;\n init2 = encrypted2;\n init3 = encrypted3;\n }\n return decrypted;\n };\n /**\n * The `Decrypter` class that manages decryption of AES\n * data through `AsyncStream` objects and the `decrypt`\n * function\n *\n * @param {Uint8Array} encrypted the encrypted bytes\n * @param {Uint32Array} key the bytes of the decryption key\n * @param {Uint32Array} initVector the initialization vector (IV) to\n * @param {Function} done the function to run when done\n * @class Decrypter\n */\n\n class Decrypter {\n constructor(encrypted, key, initVector, done) {\n const step = Decrypter.STEP;\n const encrypted32 = new Int32Array(encrypted.buffer);\n const decrypted = new Uint8Array(encrypted.byteLength);\n let i = 0;\n this.asyncStream_ = new AsyncStream(); // split up the encryption job and do the individual chunks asynchronously\n\n this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));\n for (i = step; i < encrypted32.length; i += step) {\n initVector = new Uint32Array([ntoh(encrypted32[i - 4]), ntoh(encrypted32[i - 3]), ntoh(encrypted32[i - 2]), ntoh(encrypted32[i - 1])]);\n this.asyncStream_.push(this.decryptChunk_(encrypted32.subarray(i, i + step), key, initVector, decrypted));\n } // invoke the done() callback when everything is finished\n\n this.asyncStream_.push(function () {\n // remove pkcs#7 padding from the decrypted bytes\n done(null, unpad(decrypted));\n });\n }\n /**\n * a getter for step the maximum number of bytes to process at one time\n *\n * @return {number} the value of step 32000\n */\n\n static get STEP() {\n // 4 * 8000;\n return 32000;\n }\n /**\n * @private\n */\n\n decryptChunk_(encrypted, key, initVector, decrypted) {\n return function () {\n const bytes = decrypt(encrypted, key, initVector);\n decrypted.set(bytes, encrypted.byteOffset);\n };\n }\n }\n var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? 
self : {};\n var win;\n if (typeof window !== \"undefined\") {\n win = window;\n } else if (typeof commonjsGlobal !== \"undefined\") {\n win = commonjsGlobal;\n } else if (typeof self !== \"undefined\") {\n win = self;\n } else {\n win = {};\n }\n var window_1 = win;\n var isArrayBufferView = function isArrayBufferView(obj) {\n if (typeof ArrayBuffer.isView === 'function') {\n return ArrayBuffer.isView(obj);\n }\n return obj && obj.buffer instanceof ArrayBuffer;\n };\n var BigInt = window_1.BigInt || Number;\n [BigInt('0x1'), BigInt('0x100'), BigInt('0x10000'), BigInt('0x1000000'), BigInt('0x100000000'), BigInt('0x10000000000'), BigInt('0x1000000000000'), BigInt('0x100000000000000'), BigInt('0x10000000000000000')];\n (function () {\n var a = new Uint16Array([0xFFCC]);\n var b = new Uint8Array(a.buffer, a.byteOffset, a.byteLength);\n if (b[0] === 0xFF) {\n return 'big';\n }\n if (b[0] === 0xCC) {\n return 'little';\n }\n return 'unknown';\n })();\n /**\n * Creates an object for sending to a web worker modifying properties that are TypedArrays\n * into a new object with separated properties for the buffer, byteOffset, and byteLength.\n *\n * @param {Object} message\n * Object of properties and values to send to the web worker\n * @return {Object}\n * Modified message with TypedArray values expanded\n * @function createTransferableMessage\n */\n\n const createTransferableMessage = function (message) {\n const transferable = {};\n Object.keys(message).forEach(key => {\n const value = message[key];\n if (isArrayBufferView(value)) {\n transferable[key] = {\n bytes: value.buffer,\n byteOffset: value.byteOffset,\n byteLength: value.byteLength\n };\n } else {\n transferable[key] = value;\n }\n });\n return transferable;\n };\n /* global self */\n\n /**\n * Our web worker interface so that things can talk to aes-decrypter\n * that will be running in a web worker. the scope is passed to this by\n * webworkify.\n */\n\n self.onmessage = function (event) {\n const data = event.data;\n const encrypted = new Uint8Array(data.encrypted.bytes, data.encrypted.byteOffset, data.encrypted.byteLength);\n const key = new Uint32Array(data.key.bytes, data.key.byteOffset, data.key.byteLength / 4);\n const iv = new Uint32Array(data.iv.bytes, data.iv.byteOffset, data.iv.byteLength / 4);\n /* eslint-disable no-new, handle-callback-err */\n\n new Decrypter(encrypted, key, iv, function (err, bytes) {\n self.postMessage(createTransferableMessage({\n source: data.source,\n decrypted: bytes\n }), [bytes.buffer]);\n });\n /* eslint-enable */\n };\n}));\n\nvar Decrypter = factory(workerCode);\n/* rollup-plugin-worker-factory end for worker!/home/runner/work/http-streaming/http-streaming/src/decrypter-worker.js */\n\n/**\n * Convert the properties of an HLS track into an audioTrackKind.\n *\n * @private\n */\n\nconst audioTrackKind_ = properties => {\n let kind = properties.default ? 
'main' : 'alternative';\n if (properties.characteristics && properties.characteristics.indexOf('public.accessibility.describes-video') >= 0) {\n kind = 'main-desc';\n }\n return kind;\n};\n/**\n * Pause provided segment loader and playlist loader if active\n *\n * @param {SegmentLoader} segmentLoader\n * SegmentLoader to pause\n * @param {Object} mediaType\n * Active media type\n * @function stopLoaders\n */\n\nconst stopLoaders = (segmentLoader, mediaType) => {\n segmentLoader.abort();\n segmentLoader.pause();\n if (mediaType && mediaType.activePlaylistLoader) {\n mediaType.activePlaylistLoader.pause();\n mediaType.activePlaylistLoader = null;\n }\n};\n/**\n * Start loading provided segment loader and playlist loader\n *\n * @param {PlaylistLoader} playlistLoader\n * PlaylistLoader to start loading\n * @param {Object} mediaType\n * Active media type\n * @function startLoaders\n */\n\nconst startLoaders = (playlistLoader, mediaType) => {\n // Segment loader will be started after `loadedmetadata` or `loadedplaylist` from the\n // playlist loader\n mediaType.activePlaylistLoader = playlistLoader;\n playlistLoader.load();\n};\n/**\n * Returns a function to be called when the media group changes. It performs a\n * non-destructive (preserve the buffer) resync of the SegmentLoader. This is because a\n * change of group is merely a rendition switch of the same content at another encoding,\n * rather than a change of content, such as switching audio from English to Spanish.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Handler for a non-destructive resync of SegmentLoader when the active media\n * group changes.\n * @function onGroupChanged\n */\n\nconst onGroupChanged = (type, settings) => () => {\n const {\n segmentLoaders: {\n [type]: segmentLoader,\n main: mainSegmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n const activeTrack = mediaType.activeTrack();\n const activeGroup = mediaType.getActiveGroup();\n const previousActiveLoader = mediaType.activePlaylistLoader;\n const lastGroup = mediaType.lastGroup_; // the group did not change do nothing\n\n if (activeGroup && lastGroup && activeGroup.id === lastGroup.id) {\n return;\n }\n mediaType.lastGroup_ = activeGroup;\n mediaType.lastTrack_ = activeTrack;\n stopLoaders(segmentLoader, mediaType);\n if (!activeGroup || activeGroup.isMainPlaylist) {\n // there is no group active or active group is a main playlist and won't change\n return;\n }\n if (!activeGroup.playlistLoader) {\n if (previousActiveLoader) {\n // The previous group had a playlist loader but the new active group does not\n // this means we are switching from demuxed to muxed audio. In this case we want to\n // do a destructive reset of the main segment loader and not restart the audio\n // loaders.\n mainSegmentLoader.resetEverything();\n }\n return;\n } // Non-destructive resync\n\n segmentLoader.resyncLoader();\n startLoaders(activeGroup.playlistLoader, mediaType);\n};\nconst onGroupChanging = (type, settings) => () => {\n const {\n segmentLoaders: {\n [type]: segmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n mediaType.lastGroup_ = null;\n segmentLoader.abort();\n segmentLoader.pause();\n};\n/**\n * Returns a function to be called when the media track changes. 
It performs a\n * destructive reset of the SegmentLoader to ensure we start loading as close to\n * currentTime as possible.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Handler for a destructive reset of SegmentLoader when the active media\n * track changes.\n * @function onTrackChanged\n */\n\nconst onTrackChanged = (type, settings) => () => {\n const {\n mainPlaylistLoader,\n segmentLoaders: {\n [type]: segmentLoader,\n main: mainSegmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n const activeTrack = mediaType.activeTrack();\n const activeGroup = mediaType.getActiveGroup();\n const previousActiveLoader = mediaType.activePlaylistLoader;\n const lastTrack = mediaType.lastTrack_; // track did not change, do nothing\n\n if (lastTrack && activeTrack && lastTrack.id === activeTrack.id) {\n return;\n }\n mediaType.lastGroup_ = activeGroup;\n mediaType.lastTrack_ = activeTrack;\n stopLoaders(segmentLoader, mediaType);\n if (!activeGroup) {\n // there is no group active so we do not want to restart loaders\n return;\n }\n if (activeGroup.isMainPlaylist) {\n // track did not change, do nothing\n if (!activeTrack || !lastTrack || activeTrack.id === lastTrack.id) {\n return;\n }\n const pc = settings.vhs.playlistController_;\n const newPlaylist = pc.selectPlaylist(); // media will not change do nothing\n\n if (pc.media() === newPlaylist) {\n return;\n }\n mediaType.logger_(`track change. Switching main audio from ${lastTrack.id} to ${activeTrack.id}`);\n mainPlaylistLoader.pause();\n mainSegmentLoader.resetEverything();\n pc.fastQualityChange_(newPlaylist);\n return;\n }\n if (type === 'AUDIO') {\n if (!activeGroup.playlistLoader) {\n // when switching from demuxed audio/video to muxed audio/video (noted by no\n // playlist loader for the audio group), we want to do a destructive reset of the\n // main segment loader and not restart the audio loaders\n mainSegmentLoader.setAudio(true); // don't have to worry about disabling the audio of the audio segment loader since\n // it should be stopped\n\n mainSegmentLoader.resetEverything();\n return;\n } // although the segment loader is an audio segment loader, call the setAudio\n // function to ensure it is prepared to re-append the init segment (or handle other\n // config changes)\n\n segmentLoader.setAudio(true);\n mainSegmentLoader.setAudio(false);\n }\n if (previousActiveLoader === activeGroup.playlistLoader) {\n // Nothing has actually changed. This can happen because track change events can fire\n // multiple times for a \"single\" change. One for enabling the new active track, and\n // one for disabling the track that was active\n startLoaders(activeGroup.playlistLoader, mediaType);\n return;\n }\n if (segmentLoader.track) {\n // For WebVTT, set the new text track in the segmentloader\n segmentLoader.track(activeTrack);\n } // destructive reset\n\n segmentLoader.resetEverything();\n startLoaders(activeGroup.playlistLoader, mediaType);\n};\nconst onError = {\n /**\n * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters\n * an error.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Error handler. 
Logs warning (or error if the playlist is excluded) to\n * console and switches back to default audio track.\n * @function onError.AUDIO\n */\n AUDIO: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: mediaType\n },\n excludePlaylist\n } = settings; // switch back to default audio track\n\n const activeTrack = mediaType.activeTrack();\n const activeGroup = mediaType.activeGroup();\n const id = (activeGroup.filter(group => group.default)[0] || activeGroup[0]).id;\n const defaultTrack = mediaType.tracks[id];\n if (activeTrack === defaultTrack) {\n // Default track encountered an error. All we can do now is exclude the current\n // rendition and hope another will switch audio groups\n excludePlaylist({\n error: {\n message: 'Problem encountered loading the default audio track.'\n }\n });\n return;\n }\n videojs.log.warn('Problem encountered loading the alternate audio track. ' + 'Switching back to default.');\n for (const trackId in mediaType.tracks) {\n mediaType.tracks[trackId].enabled = mediaType.tracks[trackId] === defaultTrack;\n }\n mediaType.onTrackChanged();\n },\n /**\n * Returns a function to be called when a SegmentLoader or PlaylistLoader encounters\n * an error.\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Error handler. Logs warning to console and disables the active subtitle track\n * @function onError.SUBTITLES\n */\n SUBTITLES: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n videojs.log.warn('Problem encountered loading the subtitle track. ' + 'Disabling subtitle track.');\n const track = mediaType.activeTrack();\n if (track) {\n track.mode = 'disabled';\n }\n mediaType.onTrackChanged();\n }\n};\nconst setupListeners = {\n /**\n * Setup event listeners for audio playlist loader\n *\n * @param {string} type\n * MediaGroup type\n * @param {PlaylistLoader|null} playlistLoader\n * PlaylistLoader to register listeners on\n * @param {Object} settings\n * Object containing required information for media groups\n * @function setupListeners.AUDIO\n */\n AUDIO: (type, playlistLoader, settings) => {\n if (!playlistLoader) {\n // no playlist loader means audio will be muxed with the video\n return;\n }\n const {\n tech,\n requestOptions,\n segmentLoaders: {\n [type]: segmentLoader\n }\n } = settings;\n playlistLoader.on('loadedmetadata', () => {\n const media = playlistLoader.media();\n segmentLoader.playlist(media, requestOptions); // if the video is already playing, or if this isn't a live video and preload\n // permits, start downloading segments\n\n if (!tech.paused() || media.endList && tech.preload() !== 'none') {\n segmentLoader.load();\n }\n });\n playlistLoader.on('loadedplaylist', () => {\n segmentLoader.playlist(playlistLoader.media(), requestOptions); // If the player isn't paused, ensure that the segment loader is running\n\n if (!tech.paused()) {\n segmentLoader.load();\n }\n });\n playlistLoader.on('error', onError[type](type, settings));\n },\n /**\n * Setup event listeners for subtitle playlist loader\n *\n * @param {string} type\n * MediaGroup type\n * @param {PlaylistLoader|null} playlistLoader\n * PlaylistLoader to register listeners on\n * @param {Object} settings\n * Object containing required information for media groups\n * @function setupListeners.SUBTITLES\n */\n SUBTITLES: (type, playlistLoader, settings) => {\n const {\n tech,\n requestOptions,\n segmentLoaders: {\n [type]: 
segmentLoader\n },\n mediaTypes: {\n [type]: mediaType\n }\n } = settings;\n playlistLoader.on('loadedmetadata', () => {\n const media = playlistLoader.media();\n segmentLoader.playlist(media, requestOptions);\n segmentLoader.track(mediaType.activeTrack()); // if the video is already playing, or if this isn't a live video and preload\n // permits, start downloading segments\n\n if (!tech.paused() || media.endList && tech.preload() !== 'none') {\n segmentLoader.load();\n }\n });\n playlistLoader.on('loadedplaylist', () => {\n segmentLoader.playlist(playlistLoader.media(), requestOptions); // If the player isn't paused, ensure that the segment loader is running\n\n if (!tech.paused()) {\n segmentLoader.load();\n }\n });\n playlistLoader.on('error', onError[type](type, settings));\n }\n};\nconst initialize = {\n /**\n * Setup PlaylistLoaders and AudioTracks for the audio groups\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @function initialize.AUDIO\n */\n 'AUDIO': (type, settings) => {\n const {\n vhs,\n sourceType,\n segmentLoaders: {\n [type]: segmentLoader\n },\n requestOptions,\n main: {\n mediaGroups\n },\n mediaTypes: {\n [type]: {\n groups,\n tracks,\n logger_\n }\n },\n mainPlaylistLoader\n } = settings;\n const audioOnlyMain = isAudioOnly(mainPlaylistLoader.main); // force a default if we have none\n\n if (!mediaGroups[type] || Object.keys(mediaGroups[type]).length === 0) {\n mediaGroups[type] = {\n main: {\n default: {\n default: true\n }\n }\n };\n if (audioOnlyMain) {\n mediaGroups[type].main.default.playlists = mainPlaylistLoader.main.playlists;\n }\n }\n for (const groupId in mediaGroups[type]) {\n if (!groups[groupId]) {\n groups[groupId] = [];\n }\n for (const variantLabel in mediaGroups[type][groupId]) {\n let properties = mediaGroups[type][groupId][variantLabel];\n let playlistLoader;\n if (audioOnlyMain) {\n logger_(`AUDIO group '${groupId}' label '${variantLabel}' is a main playlist`);\n properties.isMainPlaylist = true;\n playlistLoader = null; // if vhs-json was provided as the source, and the media playlist was resolved,\n // use the resolved media playlist object\n } else if (sourceType === 'vhs-json' && properties.playlists) {\n playlistLoader = new PlaylistLoader(properties.playlists[0], vhs, requestOptions);\n } else if (properties.resolvedUri) {\n playlistLoader = new PlaylistLoader(properties.resolvedUri, vhs, requestOptions); // TODO: dash isn't the only type with properties.playlists\n // should we even have properties.playlists in this check.\n } else if (properties.playlists && sourceType === 'dash') {\n playlistLoader = new DashPlaylistLoader(properties.playlists[0], vhs, requestOptions, mainPlaylistLoader);\n } else {\n // no resolvedUri means the audio is muxed with the video when using this\n // audio track\n playlistLoader = null;\n }\n properties = merge({\n id: variantLabel,\n playlistLoader\n }, properties);\n setupListeners[type](type, properties.playlistLoader, settings);\n groups[groupId].push(properties);\n if (typeof tracks[variantLabel] === 'undefined') {\n const track = new videojs.AudioTrack({\n id: variantLabel,\n kind: audioTrackKind_(properties),\n enabled: false,\n language: properties.language,\n default: properties.default,\n label: variantLabel\n });\n tracks[variantLabel] = track;\n }\n }\n } // setup single error event handler for the segment loader\n\n segmentLoader.on('error', onError[type](type, settings));\n },\n /**\n * Setup 
PlaylistLoaders and TextTracks for the subtitle groups\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @function initialize.SUBTITLES\n */\n 'SUBTITLES': (type, settings) => {\n const {\n tech,\n vhs,\n sourceType,\n segmentLoaders: {\n [type]: segmentLoader\n },\n requestOptions,\n main: {\n mediaGroups\n },\n mediaTypes: {\n [type]: {\n groups,\n tracks\n }\n },\n mainPlaylistLoader\n } = settings;\n for (const groupId in mediaGroups[type]) {\n if (!groups[groupId]) {\n groups[groupId] = [];\n }\n for (const variantLabel in mediaGroups[type][groupId]) {\n if (!vhs.options_.useForcedSubtitles && mediaGroups[type][groupId][variantLabel].forced) {\n // Subtitle playlists with the forced attribute are not selectable in Safari.\n // According to Apple's HLS Authoring Specification:\n // If content has forced subtitles and regular subtitles in a given language,\n // the regular subtitles track in that language MUST contain both the forced\n // subtitles and the regular subtitles for that language.\n // Because of this requirement and that Safari does not add forced subtitles,\n // forced subtitles are skipped here to maintain consistent experience across\n // all platforms\n continue;\n }\n let properties = mediaGroups[type][groupId][variantLabel];\n let playlistLoader;\n if (sourceType === 'hls') {\n playlistLoader = new PlaylistLoader(properties.resolvedUri, vhs, requestOptions);\n } else if (sourceType === 'dash') {\n const playlists = properties.playlists.filter(p => p.excludeUntil !== Infinity);\n if (!playlists.length) {\n return;\n }\n playlistLoader = new DashPlaylistLoader(properties.playlists[0], vhs, requestOptions, mainPlaylistLoader);\n } else if (sourceType === 'vhs-json') {\n playlistLoader = new PlaylistLoader(\n // if the vhs-json object included the media playlist, use the media playlist\n // as provided, otherwise use the resolved URI to load the playlist\n properties.playlists ? 
properties.playlists[0] : properties.resolvedUri, vhs, requestOptions);\n }\n properties = merge({\n id: variantLabel,\n playlistLoader\n }, properties);\n setupListeners[type](type, properties.playlistLoader, settings);\n groups[groupId].push(properties);\n if (typeof tracks[variantLabel] === 'undefined') {\n const track = tech.addRemoteTextTrack({\n id: variantLabel,\n kind: 'subtitles',\n default: properties.default && properties.autoselect,\n language: properties.language,\n label: variantLabel\n }, false).track;\n tracks[variantLabel] = track;\n }\n }\n } // setup single error event handler for the segment loader\n\n segmentLoader.on('error', onError[type](type, settings));\n },\n /**\n * Setup TextTracks for the closed-caption groups\n *\n * @param {String} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @function initialize['CLOSED-CAPTIONS']\n */\n 'CLOSED-CAPTIONS': (type, settings) => {\n const {\n tech,\n main: {\n mediaGroups\n },\n mediaTypes: {\n [type]: {\n groups,\n tracks\n }\n }\n } = settings;\n for (const groupId in mediaGroups[type]) {\n if (!groups[groupId]) {\n groups[groupId] = [];\n }\n for (const variantLabel in mediaGroups[type][groupId]) {\n const properties = mediaGroups[type][groupId][variantLabel]; // Look for either 608 (CCn) or 708 (SERVICEn) caption services\n\n if (!/^(?:CC|SERVICE)/.test(properties.instreamId)) {\n continue;\n }\n const captionServices = tech.options_.vhs && tech.options_.vhs.captionServices || {};\n let newProps = {\n label: variantLabel,\n language: properties.language,\n instreamId: properties.instreamId,\n default: properties.default && properties.autoselect\n };\n if (captionServices[newProps.instreamId]) {\n newProps = merge(newProps, captionServices[newProps.instreamId]);\n }\n if (newProps.default === undefined) {\n delete newProps.default;\n } // No PlaylistLoader is required for Closed-Captions because the captions are\n // embedded within the video stream\n\n groups[groupId].push(merge({\n id: variantLabel\n }, properties));\n if (typeof tracks[variantLabel] === 'undefined') {\n const track = tech.addRemoteTextTrack({\n id: newProps.instreamId,\n kind: 'captions',\n default: newProps.default,\n language: newProps.language,\n label: newProps.label\n }, false).track;\n tracks[variantLabel] = track;\n }\n }\n }\n }\n};\nconst groupMatch = (list, media) => {\n for (let i = 0; i < list.length; i++) {\n if (playlistMatch(media, list[i])) {\n return true;\n }\n if (list[i].playlists && groupMatch(list[i].playlists, media)) {\n return true;\n }\n }\n return false;\n};\n/**\n * Returns a function used to get the active group of the provided type\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Function that returns the active media group for the provided type. Takes an\n * optional parameter {TextTrack} track. 
If no track is provided, a list of all\n * variants in the group, otherwise the variant corresponding to the provided\n * track is returned.\n * @function activeGroup\n */\n\nconst activeGroup = (type, settings) => track => {\n const {\n mainPlaylistLoader,\n mediaTypes: {\n [type]: {\n groups\n }\n }\n } = settings;\n const media = mainPlaylistLoader.media();\n if (!media) {\n return null;\n }\n let variants = null; // set to variants to main media active group\n\n if (media.attributes[type]) {\n variants = groups[media.attributes[type]];\n }\n const groupKeys = Object.keys(groups);\n if (!variants) {\n // find the mainPlaylistLoader media\n // that is in a media group if we are dealing\n // with audio only\n if (type === 'AUDIO' && groupKeys.length > 1 && isAudioOnly(settings.main)) {\n for (let i = 0; i < groupKeys.length; i++) {\n const groupPropertyList = groups[groupKeys[i]];\n if (groupMatch(groupPropertyList, media)) {\n variants = groupPropertyList;\n break;\n }\n } // use the main group if it exists\n } else if (groups.main) {\n variants = groups.main; // only one group, use that one\n } else if (groupKeys.length === 1) {\n variants = groups[groupKeys[0]];\n }\n }\n if (typeof track === 'undefined') {\n return variants;\n }\n if (track === null || !variants) {\n // An active track was specified so a corresponding group is expected. track === null\n // means no track is currently active so there is no corresponding group\n return null;\n }\n return variants.filter(props => props.id === track.id)[0] || null;\n};\nconst activeTrack = {\n /**\n * Returns a function used to get the active track of type provided\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Function that returns the active media track for the provided type. Returns\n * null if no track is active\n * @function activeTrack.AUDIO\n */\n AUDIO: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: {\n tracks\n }\n }\n } = settings;\n for (const id in tracks) {\n if (tracks[id].enabled) {\n return tracks[id];\n }\n }\n return null;\n },\n /**\n * Returns a function used to get the active track of type provided\n *\n * @param {string} type\n * MediaGroup type\n * @param {Object} settings\n * Object containing required information for media groups\n * @return {Function}\n * Function that returns the active media track for the provided type. 
Returns\n * null if no track is active\n * @function activeTrack.SUBTITLES\n */\n SUBTITLES: (type, settings) => () => {\n const {\n mediaTypes: {\n [type]: {\n tracks\n }\n }\n } = settings;\n for (const id in tracks) {\n if (tracks[id].mode === 'showing' || tracks[id].mode === 'hidden') {\n return tracks[id];\n }\n }\n return null;\n }\n};\nconst getActiveGroup = (type, {\n mediaTypes\n}) => () => {\n const activeTrack_ = mediaTypes[type].activeTrack();\n if (!activeTrack_) {\n return null;\n }\n return mediaTypes[type].activeGroup(activeTrack_);\n};\n/**\n * Setup PlaylistLoaders and Tracks for media groups (Audio, Subtitles,\n * Closed-Captions) specified in the main manifest.\n *\n * @param {Object} settings\n * Object containing required information for setting up the media groups\n * @param {Tech} settings.tech\n * The tech of the player\n * @param {Object} settings.requestOptions\n * XHR request options used by the segment loaders\n * @param {PlaylistLoader} settings.mainPlaylistLoader\n * PlaylistLoader for the main source\n * @param {VhsHandler} settings.vhs\n * VHS SourceHandler\n * @param {Object} settings.main\n * The parsed main manifest\n * @param {Object} settings.mediaTypes\n * Object to store the loaders, tracks, and utility methods for each media type\n * @param {Function} settings.excludePlaylist\n * Excludes the current rendition and forces a rendition switch.\n * @function setupMediaGroups\n */\n\nconst setupMediaGroups = settings => {\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(type => {\n initialize[type](type, settings);\n });\n const {\n mediaTypes,\n mainPlaylistLoader,\n tech,\n vhs,\n segmentLoaders: {\n ['AUDIO']: audioSegmentLoader,\n main: mainSegmentLoader\n }\n } = settings; // setup active group and track getters and change event handlers\n\n ['AUDIO', 'SUBTITLES'].forEach(type => {\n mediaTypes[type].activeGroup = activeGroup(type, settings);\n mediaTypes[type].activeTrack = activeTrack[type](type, settings);\n mediaTypes[type].onGroupChanged = onGroupChanged(type, settings);\n mediaTypes[type].onGroupChanging = onGroupChanging(type, settings);\n mediaTypes[type].onTrackChanged = onTrackChanged(type, settings);\n mediaTypes[type].getActiveGroup = getActiveGroup(type, settings);\n }); // DO NOT enable the default subtitle or caption track.\n // DO enable the default audio track\n\n const audioGroup = mediaTypes.AUDIO.activeGroup();\n if (audioGroup) {\n const groupId = (audioGroup.filter(group => group.default)[0] || audioGroup[0]).id;\n mediaTypes.AUDIO.tracks[groupId].enabled = true;\n mediaTypes.AUDIO.onGroupChanged();\n mediaTypes.AUDIO.onTrackChanged();\n const activeAudioGroup = mediaTypes.AUDIO.getActiveGroup(); // a similar check for handling setAudio on each loader is run again each time the\n // track is changed, but needs to be handled here since the track may not be considered\n // changed on the first call to onTrackChanged\n\n if (!activeAudioGroup.playlistLoader) {\n // either audio is muxed with video or the stream is audio only\n mainSegmentLoader.setAudio(true);\n } else {\n // audio is demuxed\n mainSegmentLoader.setAudio(false);\n audioSegmentLoader.setAudio(true);\n }\n }\n mainPlaylistLoader.on('mediachange', () => {\n ['AUDIO', 'SUBTITLES'].forEach(type => mediaTypes[type].onGroupChanged());\n });\n mainPlaylistLoader.on('mediachanging', () => {\n ['AUDIO', 'SUBTITLES'].forEach(type => mediaTypes[type].onGroupChanging());\n }); // custom audio track change event handler for usage event\n\n const onAudioTrackChanged = () => 
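/* run the shared AUDIO track-change handler, then report a 'vhs-audio-change' usage event */ 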
{\n mediaTypes.AUDIO.onTrackChanged();\n tech.trigger({\n type: 'usage',\n name: 'vhs-audio-change'\n });\n };\n tech.audioTracks().addEventListener('change', onAudioTrackChanged);\n tech.remoteTextTracks().addEventListener('change', mediaTypes.SUBTITLES.onTrackChanged);\n vhs.on('dispose', () => {\n tech.audioTracks().removeEventListener('change', onAudioTrackChanged);\n tech.remoteTextTracks().removeEventListener('change', mediaTypes.SUBTITLES.onTrackChanged);\n }); // clear existing audio tracks and add the ones we just created\n\n tech.clearTracks('audio');\n for (const id in mediaTypes.AUDIO.tracks) {\n tech.audioTracks().addTrack(mediaTypes.AUDIO.tracks[id]);\n }\n};\n/**\n * Creates skeleton object used to store the loaders, tracks, and utility methods for each\n * media type\n *\n * @return {Object}\n * Object to store the loaders, tracks, and utility methods for each media type\n * @function createMediaTypes\n */\n\nconst createMediaTypes = () => {\n const mediaTypes = {};\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(type => {\n mediaTypes[type] = {\n groups: {},\n tracks: {},\n activePlaylistLoader: null,\n activeGroup: noop,\n activeTrack: noop,\n getActiveGroup: noop,\n onGroupChanged: noop,\n onTrackChanged: noop,\n lastTrack_: null,\n logger_: logger(`MediaGroups[${type}]`)\n };\n });\n return mediaTypes;\n};\n\n/**\n * A utility class for setting properties and maintaining the state of the content steering manifest.\n *\n * Content Steering manifest format:\n * VERSION: number (required) currently only version 1 is supported.\n * TTL: number in seconds (optional) until the next content steering manifest reload.\n * RELOAD-URI: string (optional) uri to fetch the next content steering manifest.\n * SERVICE-LOCATION-PRIORITY or PATHWAY-PRIORITY a non empty array of unique string values.\n * PATHWAY-CLONES: array (optional) (HLS only) pathway clone objects to copy from other playlists.\n */\n\nclass SteeringManifest {\n constructor() {\n this.priority_ = [];\n this.pathwayClones_ = new Map();\n }\n set version(number) {\n // Only version 1 is currently supported for both DASH and HLS.\n if (number === 1) {\n this.version_ = number;\n }\n }\n set ttl(seconds) {\n // TTL = time-to-live, default = 300 seconds.\n this.ttl_ = seconds || 300;\n }\n set reloadUri(uri) {\n if (uri) {\n // reload URI can be relative to the previous reloadUri.\n this.reloadUri_ = resolveUrl(this.reloadUri_, uri);\n }\n }\n set priority(array) {\n // priority must be non-empty and unique values.\n if (array && array.length) {\n this.priority_ = array;\n }\n }\n set pathwayClones(array) {\n // pathwayClones must be non-empty.\n if (array && array.length) {\n this.pathwayClones_ = new Map(array.map(clone => [clone.ID, clone]));\n }\n }\n get version() {\n return this.version_;\n }\n get ttl() {\n return this.ttl_;\n }\n get reloadUri() {\n return this.reloadUri_;\n }\n get priority() {\n return this.priority_;\n }\n get pathwayClones() {\n return this.pathwayClones_;\n }\n}\n/**\n * This class represents a content steering manifest and associated state. 
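It requests the steering manifest, tracks the available and current pathways, and re-requests the manifest on a TTL interval. 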
See both HLS and DASH specifications.\n * HLS: https://developer.apple.com/streaming/HLSContentSteeringSpecification.pdf and\n * https://datatracker.ietf.org/doc/draft-pantos-hls-rfc8216bis/ section 4.4.6.6.\n * DASH: https://dashif.org/docs/DASH-IF-CTS-00XX-Content-Steering-Community-Review.pdf\n *\n * @param {function} xhr for making a network request from the browser.\n * @param {function} bandwidth for fetching the current bandwidth from the main segment loader.\n */\n\nclass ContentSteeringController extends videojs.EventTarget {\n constructor(xhr, bandwidth) {\n super();\n this.currentPathway = null;\n this.defaultPathway = null;\n this.queryBeforeStart = false;\n this.availablePathways_ = new Set();\n this.steeringManifest = new SteeringManifest();\n this.proxyServerUrl_ = null;\n this.manifestType_ = null;\n this.ttlTimeout_ = null;\n this.request_ = null;\n this.currentPathwayClones = new Map();\n this.nextPathwayClones = new Map();\n this.excludedSteeringManifestURLs = new Set();\n this.logger_ = logger('Content Steering');\n this.xhr_ = xhr;\n this.getBandwidth_ = bandwidth;\n }\n /**\n * Assigns the content steering tag properties to the steering controller\n *\n * @param {string} baseUrl the baseURL from the main manifest for resolving the steering manifest url\n * @param {Object} steeringTag the content steering tag from the main manifest\n */\n\n assignTagProperties(baseUrl, steeringTag) {\n this.manifestType_ = steeringTag.serverUri ? 'HLS' : 'DASH'; // serverUri is HLS serverURL is DASH\n\n const steeringUri = steeringTag.serverUri || steeringTag.serverURL;\n if (!steeringUri) {\n this.logger_(`steering manifest URL is ${steeringUri}, cannot request steering manifest.`);\n this.trigger('error');\n return;\n } // Content steering manifests can be encoded as a data URI. We can decode, parse and return early if that's the case.\n\n if (steeringUri.startsWith('data:')) {\n this.decodeDataUriManifest_(steeringUri.substring(steeringUri.indexOf(',') + 1));\n return;\n } // reloadUri is the resolution of the main manifest URL and steering URL.\n\n this.steeringManifest.reloadUri = resolveUrl(baseUrl, steeringUri); // pathwayId is HLS defaultServiceLocation is DASH\n\n this.defaultPathway = steeringTag.pathwayId || steeringTag.defaultServiceLocation; // currently only DASH supports the following properties on tags.\n\n this.queryBeforeStart = steeringTag.queryBeforeStart;\n this.proxyServerUrl_ = steeringTag.proxyServerURL; // trigger a steering event if we have a pathway from the content steering tag.\n // this tells VHS which segment pathway to start with.\n // If queryBeforeStart is true we need to wait for the steering manifest response.\n\n if (this.defaultPathway && !this.queryBeforeStart) {\n this.trigger('content-steering');\n }\n }\n /**\n * Requests the content steering manifest and parses the response. This should only be called after\n * assignTagProperties was called with a content steering tag.\n *\n * @param {boolean} initial Whether this is the initial request for the steering manifest.\n * If true, the request is made with the reload URI as-is, without exclusion checks or added query parameters.\n * This scenario should only happen once on initialization.\n */\n\n requestSteeringManifest(initial) {\n const reloadUri = this.steeringManifest.reloadUri;\n if (!reloadUri) {\n return;\n } // We currently don't support passing MPD query parameters directly to the content steering URL as this requires\n // ExtUrlQueryInfo tag support. 
See the DASH content steering spec section 8.1.\n // This request URI accounts for manifest URIs that have been excluded.\n\n const uri = initial ? reloadUri : this.getRequestURI(reloadUri); // If there are no valid manifest URIs, we should stop content steering.\n\n if (!uri) {\n this.logger_('No valid content steering manifest URIs. Stopping content steering.');\n this.trigger('error');\n this.dispose();\n return;\n }\n this.request_ = this.xhr_({\n uri,\n requestType: 'content-steering-manifest'\n }, (error, errorInfo) => {\n if (error) {\n // If the client receives HTTP 410 Gone in response to a manifest request,\n // it MUST NOT issue another request for that URI for the remainder of the\n // playback session. It MAY continue to use the most-recently obtained set\n // of Pathways.\n if (errorInfo.status === 410) {\n this.logger_(`manifest request 410 ${error}.`);\n this.logger_(`There will be no more content steering requests to ${uri} this session.`);\n this.excludedSteeringManifestURLs.add(uri);\n return;\n } // If the client receives HTTP 429 Too Many Requests with a Retry-After\n // header in response to a manifest request, it SHOULD wait until the time\n // specified by the Retry-After header to reissue the request.\n\n if (errorInfo.status === 429) {\n const retrySeconds = errorInfo.responseHeaders['retry-after'];\n this.logger_(`manifest request 429 ${error}.`);\n this.logger_(`content steering will retry in ${retrySeconds} seconds.`);\n this.startTTLTimeout_(parseInt(retrySeconds, 10));\n return;\n } // If the Steering Manifest cannot be loaded and parsed correctly, the\n // client SHOULD continue to use the previous values and attempt to reload\n // it after waiting for the previously-specified TTL (or 5 minutes if\n // none).\n\n this.logger_(`manifest failed to load ${error}.`);\n this.startTTLTimeout_();\n return;\n }\n const steeringManifestJson = JSON.parse(this.request_.responseText);\n this.assignSteeringProperties_(steeringManifestJson);\n this.startTTLTimeout_();\n });\n }\n /**\n * Set the proxy server URL and add the steering manifest url as a URI encoded parameter.\n *\n * @param {string} steeringUrl the steering manifest url\n * @return the steering manifest url to a proxy server with all parameters set\n */\n\n setProxyServerUrl_(steeringUrl) {\n const steeringUrlObject = new window$1.URL(steeringUrl);\n const proxyServerUrlObject = new window$1.URL(this.proxyServerUrl_);\n proxyServerUrlObject.searchParams.set('url', encodeURI(steeringUrlObject.toString()));\n return this.setSteeringParams_(proxyServerUrlObject.toString());\n }\n /**\n * Decodes and parses the data uri encoded steering manifest\n *\n * @param {string} dataUri the data uri to be decoded and parsed.\n */\n\n decodeDataUriManifest_(dataUri) {\n const steeringManifestJson = JSON.parse(window$1.atob(dataUri));\n this.assignSteeringProperties_(steeringManifestJson);\n }\n /**\n * Set the HLS or DASH content steering manifest request query parameters. 
For example:\n * _HLS_pathway=\"\" and _HLS_throughput=\n * _DASH_pathway and _DASH_throughput\n *\n * @param {string} uri to add content steering server parameters to.\n * @return a new uri as a string with the added steering query parameters.\n */\n\n setSteeringParams_(url) {\n const urlObject = new window$1.URL(url);\n const path = this.getPathway();\n const networkThroughput = this.getBandwidth_();\n if (path) {\n const pathwayKey = `_${this.manifestType_}_pathway`;\n urlObject.searchParams.set(pathwayKey, path);\n }\n if (networkThroughput) {\n const throughputKey = `_${this.manifestType_}_throughput`;\n urlObject.searchParams.set(throughputKey, networkThroughput);\n }\n return urlObject.toString();\n }\n /**\n * Assigns the current steering manifest properties and to the SteeringManifest object\n *\n * @param {Object} steeringJson the raw JSON steering manifest\n */\n\n assignSteeringProperties_(steeringJson) {\n this.steeringManifest.version = steeringJson.VERSION;\n if (!this.steeringManifest.version) {\n this.logger_(`manifest version is ${steeringJson.VERSION}, which is not supported.`);\n this.trigger('error');\n return;\n }\n this.steeringManifest.ttl = steeringJson.TTL;\n this.steeringManifest.reloadUri = steeringJson['RELOAD-URI']; // HLS = PATHWAY-PRIORITY required. DASH = SERVICE-LOCATION-PRIORITY optional\n\n this.steeringManifest.priority = steeringJson['PATHWAY-PRIORITY'] || steeringJson['SERVICE-LOCATION-PRIORITY']; // Pathway clones to be created/updated in HLS.\n // See section 7.2 https://datatracker.ietf.org/doc/draft-pantos-hls-rfc8216bis/\n\n this.steeringManifest.pathwayClones = steeringJson['PATHWAY-CLONES'];\n this.nextPathwayClones = this.steeringManifest.pathwayClones; // 1. apply first pathway from the array.\n // 2. if first pathway doesn't exist in manifest, try next pathway.\n // a. if all pathways are exhausted, ignore the steering manifest priority.\n // 3. if segments fail from an established pathway, try all variants/renditions, then exclude the failed pathway.\n // a. exclude a pathway for a minimum of the last TTL duration. Meaning, from the next steering response,\n // the excluded pathway will be ignored.\n // See excludePathway usage in excludePlaylist().\n // If there are no available pathways, we need to stop content steering.\n\n if (!this.availablePathways_.size) {\n this.logger_('There are no available pathways for content steering. 
Ending content steering.');\n this.trigger('error');\n this.dispose();\n }\n const chooseNextPathway = pathwaysByPriority => {\n for (const path of pathwaysByPriority) {\n if (this.availablePathways_.has(path)) {\n return path;\n }\n } // If no pathway matches, ignore the manifest and choose the first available.\n\n return [...this.availablePathways_][0];\n };\n const nextPathway = chooseNextPathway(this.steeringManifest.priority);\n if (this.currentPathway !== nextPathway) {\n this.currentPathway = nextPathway;\n this.trigger('content-steering');\n }\n }\n /**\n * Returns the pathway to use for steering decisions\n *\n * @return {string} returns the current pathway or the default\n */\n\n getPathway() {\n return this.currentPathway || this.defaultPathway;\n }\n /**\n * Chooses the manifest request URI based on proxy URIs and server URLs.\n * Also accounts for exclusion on certain manifest URIs.\n *\n * @param {string} reloadUri the base uri before parameters\n *\n * @return {string} the final URI for the request to the manifest server.\n */\n\n getRequestURI(reloadUri) {\n if (!reloadUri) {\n return null;\n }\n const isExcluded = uri => this.excludedSteeringManifestURLs.has(uri);\n if (this.proxyServerUrl_) {\n const proxyURI = this.setProxyServerUrl_(reloadUri);\n if (!isExcluded(proxyURI)) {\n return proxyURI;\n }\n }\n const steeringURI = this.setSteeringParams_(reloadUri);\n if (!isExcluded(steeringURI)) {\n return steeringURI;\n } // Return nothing if all valid manifest URIs are excluded.\n\n return null;\n }\n /**\n * Start the timeout for re-requesting the steering manifest at the TTL interval.\n *\n * @param {number} ttl time in seconds of the timeout. Defaults to the\n * ttl interval in the steering manifest\n */\n\n startTTLTimeout_(ttl = this.steeringManifest.ttl) {\n // 300 (5 minutes) is the default value.\n const ttlMS = ttl * 1000;\n this.ttlTimeout_ = window$1.setTimeout(() => {\n this.requestSteeringManifest();\n }, ttlMS);\n }\n /**\n * Clear the TTL timeout if necessary.\n */\n\n clearTTLTimeout_() {\n window$1.clearTimeout(this.ttlTimeout_);\n this.ttlTimeout_ = null;\n }\n /**\n * aborts any current steering xhr and sets the current request object to null\n */\n\n abort() {\n if (this.request_) {\n this.request_.abort();\n }\n this.request_ = null;\n }\n /**\n * aborts steering requests clears the ttl timeout and resets all properties.\n */\n\n dispose() {\n this.off('content-steering');\n this.off('error');\n this.abort();\n this.clearTTLTimeout_();\n this.currentPathway = null;\n this.defaultPathway = null;\n this.queryBeforeStart = null;\n this.proxyServerUrl_ = null;\n this.manifestType_ = null;\n this.ttlTimeout_ = null;\n this.request_ = null;\n this.excludedSteeringManifestURLs = new Set();\n this.availablePathways_ = new Set();\n this.steeringManifest = new SteeringManifest();\n }\n /**\n * adds a pathway to the available pathways set\n *\n * @param {string} pathway the pathway string to add\n */\n\n addAvailablePathway(pathway) {\n if (pathway) {\n this.availablePathways_.add(pathway);\n }\n }\n /**\n * Clears all pathways from the available pathways set\n */\n\n clearAvailablePathways() {\n this.availablePathways_.clear();\n }\n /**\n * Removes a pathway from the available pathways set.\n */\n\n excludePathway(pathway) {\n return this.availablePathways_.delete(pathway);\n }\n /**\n * Checks the refreshed DASH manifest content steering tag for changes.\n *\n * @param {string} baseURL new steering tag on DASH manifest refresh\n * @param {Object} newTag the new 
tag to check for changes\n * @return a true or false whether the new tag has different values\n */\n\n didDASHTagChange(baseURL, newTag) {\n return !newTag && this.steeringManifest.reloadUri || newTag && (resolveUrl(baseURL, newTag.serverURL) !== this.steeringManifest.reloadUri || newTag.defaultServiceLocation !== this.defaultPathway || newTag.queryBeforeStart !== this.queryBeforeStart || newTag.proxyServerURL !== this.proxyServerUrl_);\n }\n getAvailablePathways() {\n return this.availablePathways_;\n }\n}\n\n/**\n * @file playlist-controller.js\n */\nconst ABORT_EARLY_EXCLUSION_SECONDS = 10;\nlet Vhs$1; // SegmentLoader stats that need to have each loader's\n// values summed to calculate the final value\n\nconst loaderStats = ['mediaRequests', 'mediaRequestsAborted', 'mediaRequestsTimedout', 'mediaRequestsErrored', 'mediaTransferDuration', 'mediaBytesTransferred', 'mediaAppends'];\nconst sumLoaderStat = function (stat) {\n return this.audioSegmentLoader_[stat] + this.mainSegmentLoader_[stat];\n};\nconst shouldSwitchToMedia = function ({\n currentPlaylist,\n buffered,\n currentTime,\n nextPlaylist,\n bufferLowWaterLine,\n bufferHighWaterLine,\n duration,\n bufferBasedABR,\n log\n}) {\n // we have no other playlist to switch to\n if (!nextPlaylist) {\n videojs.log.warn('We received no playlist to switch to. Please check your stream.');\n return false;\n }\n const sharedLogLine = `allowing switch ${currentPlaylist && currentPlaylist.id || 'null'} -> ${nextPlaylist.id}`;\n if (!currentPlaylist) {\n log(`${sharedLogLine} as current playlist is not set`);\n return true;\n } // no need to switch if playlist is the same\n\n if (nextPlaylist.id === currentPlaylist.id) {\n return false;\n } // determine if current time is in a buffered range.\n\n const isBuffered = Boolean(findRange(buffered, currentTime).length); // If the playlist is live, then we want to not take low water line into account.\n // This is because in LIVE, the player plays 3 segments from the end of the\n // playlist, and if `BUFFER_LOW_WATER_LINE` is greater than the duration availble\n // in those segments, a viewer will never experience a rendition upswitch.\n\n if (!currentPlaylist.endList) {\n // For LLHLS live streams, don't switch renditions before playback has started, as it almost\n // doubles the time to first playback.\n if (!isBuffered && typeof currentPlaylist.partTargetDuration === 'number') {\n log(`not ${sharedLogLine} as current playlist is live llhls, but currentTime isn't in buffered.`);\n return false;\n }\n log(`${sharedLogLine} as current playlist is live`);\n return true;\n }\n const forwardBuffer = timeAheadOf(buffered, currentTime);\n const maxBufferLowWaterLine = bufferBasedABR ? 
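/*
 * Illustrative aside (not part of the library): under the buffer-based ABR flag, the
 * water-line checks that follow reduce to two rules. A minimal sketch with hypothetical
 * thresholds (low = 16s, high = 30s) rather than the real Config values:
 *
 *   const canSwitchDown = (nextBw, currBw, forwardBuffer) =>
 *     nextBw < currBw && forwardBuffer < 30;  // downswitch only while the buffer is below the high water line
 *   const canSwitchUp = (nextBw, currBw, forwardBuffer) =>
 *     nextBw > currBw && forwardBuffer >= 16; // upswitch only once the buffer clears the low water line
 *
 *   canSwitchDown(1e6, 2e6, 10); // true  -> thin buffer, drop to the cheaper rendition
 *   canSwitchUp(2e6, 1e6, 20);   // true  -> healthy buffer, try the better rendition
 */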
Config.EXPERIMENTAL_MAX_BUFFER_LOW_WATER_LINE : Config.MAX_BUFFER_LOW_WATER_LINE; // For the same reason as LIVE, we ignore the low water line when the VOD\n // duration is below the max potential low water line\n\n if (duration < maxBufferLowWaterLine) {\n log(`${sharedLogLine} as duration < max low water line (${duration} < ${maxBufferLowWaterLine})`);\n return true;\n }\n const nextBandwidth = nextPlaylist.attributes.BANDWIDTH;\n const currBandwidth = currentPlaylist.attributes.BANDWIDTH; // when switching down, if our buffer is lower than the high water line,\n // we can switch down\n\n if (nextBandwidth < currBandwidth && (!bufferBasedABR || forwardBuffer < bufferHighWaterLine)) {\n let logLine = `${sharedLogLine} as next bandwidth < current bandwidth (${nextBandwidth} < ${currBandwidth})`;\n if (bufferBasedABR) {\n logLine += ` and forwardBuffer < bufferHighWaterLine (${forwardBuffer} < ${bufferHighWaterLine})`;\n }\n log(logLine);\n return true;\n } // and if our buffer is higher than the low water line,\n // we can switch up\n\n if ((!bufferBasedABR || nextBandwidth > currBandwidth) && forwardBuffer >= bufferLowWaterLine) {\n let logLine = `${sharedLogLine} as forwardBuffer >= bufferLowWaterLine (${forwardBuffer} >= ${bufferLowWaterLine})`;\n if (bufferBasedABR) {\n logLine += ` and next bandwidth > current bandwidth (${nextBandwidth} > ${currBandwidth})`;\n }\n log(logLine);\n return true;\n }\n log(`not ${sharedLogLine} as no switching criteria met`);\n return false;\n};\n/**\n * the main playlist controller controller all interactons\n * between playlists and segmentloaders. At this time this mainly\n * involves a main playlist and a series of audio playlists\n * if they are available\n *\n * @class PlaylistController\n * @extends videojs.EventTarget\n */\n\nclass PlaylistController extends videojs.EventTarget {\n constructor(options) {\n super();\n const {\n src,\n withCredentials,\n tech,\n bandwidth,\n externVhs,\n useCueTags,\n playlistExclusionDuration,\n enableLowInitialPlaylist,\n sourceType,\n cacheEncryptionKeys,\n bufferBasedABR,\n leastPixelDiffSelector,\n captionServices\n } = options;\n if (!src) {\n throw new Error('A non-empty playlist URL or JSON manifest string is required');\n }\n let {\n maxPlaylistRetries\n } = options;\n if (maxPlaylistRetries === null || typeof maxPlaylistRetries === 'undefined') {\n maxPlaylistRetries = Infinity;\n }\n Vhs$1 = externVhs;\n this.bufferBasedABR = Boolean(bufferBasedABR);\n this.leastPixelDiffSelector = Boolean(leastPixelDiffSelector);\n this.withCredentials = withCredentials;\n this.tech_ = tech;\n this.vhs_ = tech.vhs;\n this.sourceType_ = sourceType;\n this.useCueTags_ = useCueTags;\n this.playlistExclusionDuration = playlistExclusionDuration;\n this.maxPlaylistRetries = maxPlaylistRetries;\n this.enableLowInitialPlaylist = enableLowInitialPlaylist;\n if (this.useCueTags_) {\n this.cueTagsTrack_ = this.tech_.addTextTrack('metadata', 'ad-cues');\n this.cueTagsTrack_.inBandMetadataTrackDispatchType = '';\n }\n this.requestOptions_ = {\n withCredentials,\n maxPlaylistRetries,\n timeout: null\n };\n this.on('error', this.pauseLoading);\n this.mediaTypes_ = createMediaTypes();\n this.mediaSource = new window$1.MediaSource();\n this.handleDurationChange_ = this.handleDurationChange_.bind(this);\n this.handleSourceOpen_ = this.handleSourceOpen_.bind(this);\n this.handleSourceEnded_ = this.handleSourceEnded_.bind(this);\n this.mediaSource.addEventListener('durationchange', this.handleDurationChange_); // load the media source into 
the player\n\n this.mediaSource.addEventListener('sourceopen', this.handleSourceOpen_);\n this.mediaSource.addEventListener('sourceended', this.handleSourceEnded_); // we don't have to handle sourceclose since dispose will handle termination of\n // everything, and the MediaSource should not be detached without a proper disposal\n\n this.seekable_ = createTimeRanges();\n this.hasPlayed_ = false;\n this.syncController_ = new SyncController(options);\n this.segmentMetadataTrack_ = tech.addRemoteTextTrack({\n kind: 'metadata',\n label: 'segment-metadata'\n }, false).track;\n this.decrypter_ = new Decrypter();\n this.sourceUpdater_ = new SourceUpdater(this.mediaSource);\n this.inbandTextTracks_ = {};\n this.timelineChangeController_ = new TimelineChangeController();\n this.keyStatusMap_ = new Map();\n const segmentLoaderSettings = {\n vhs: this.vhs_,\n parse708captions: options.parse708captions,\n useDtsForTimestampOffset: options.useDtsForTimestampOffset,\n captionServices,\n mediaSource: this.mediaSource,\n currentTime: this.tech_.currentTime.bind(this.tech_),\n seekable: () => this.seekable(),\n seeking: () => this.tech_.seeking(),\n duration: () => this.duration(),\n hasPlayed: () => this.hasPlayed_,\n goalBufferLength: () => this.goalBufferLength(),\n bandwidth,\n syncController: this.syncController_,\n decrypter: this.decrypter_,\n sourceType: this.sourceType_,\n inbandTextTracks: this.inbandTextTracks_,\n cacheEncryptionKeys,\n sourceUpdater: this.sourceUpdater_,\n timelineChangeController: this.timelineChangeController_,\n exactManifestTimings: options.exactManifestTimings,\n addMetadataToTextTrack: this.addMetadataToTextTrack.bind(this)\n }; // The source type check not only determines whether a special DASH playlist loader\n // should be used, but also covers the case where the provided src is a vhs-json\n // manifest object (instead of a URL). In the case of vhs-json, the default\n // PlaylistLoader should be used.\n\n this.mainPlaylistLoader_ = this.sourceType_ === 'dash' ? 
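/*
 * Usage sketch (illustrative only): the ternary below selects a DashPlaylistLoader for
 * 'dash' sources and the default PlaylistLoader otherwise, which also covers the
 * 'vhs-json' case where an already-parsed manifest object is supplied as the source.
 * A hypothetical caller could hand such a manifest to the player like this (the
 * 'application/vnd.videojs.vhs+json' type is assumed here, not verified against this bundle):
 *
 *   const player = videojs('my-player');            // assumed element id
 *   player.src({
 *     src: JSON.stringify(parsedManifestObject),    // pre-parsed main manifest object
 *     type: 'application/vnd.videojs.vhs+json'
 *   });
 */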
new DashPlaylistLoader(src, this.vhs_, merge(this.requestOptions_, {\n addMetadataToTextTrack: this.addMetadataToTextTrack.bind(this)\n })) : new PlaylistLoader(src, this.vhs_, merge(this.requestOptions_, {\n addDateRangesToTextTrack: this.addDateRangesToTextTrack_.bind(this)\n }));\n this.setupMainPlaylistLoaderListeners_(); // setup segment loaders\n // combined audio/video or just video when alternate audio track is selected\n\n this.mainSegmentLoader_ = new SegmentLoader(merge(segmentLoaderSettings, {\n segmentMetadataTrack: this.segmentMetadataTrack_,\n loaderType: 'main'\n }), options); // alternate audio track\n\n this.audioSegmentLoader_ = new SegmentLoader(merge(segmentLoaderSettings, {\n loaderType: 'audio'\n }), options);\n this.subtitleSegmentLoader_ = new VTTSegmentLoader(merge(segmentLoaderSettings, {\n loaderType: 'vtt',\n featuresNativeTextTracks: this.tech_.featuresNativeTextTracks,\n loadVttJs: () => new Promise((resolve, reject) => {\n function onLoad() {\n tech.off('vttjserror', onError);\n resolve();\n }\n function onError() {\n tech.off('vttjsloaded', onLoad);\n reject();\n }\n tech.one('vttjsloaded', onLoad);\n tech.one('vttjserror', onError); // safe to call multiple times, script will be loaded only once:\n\n tech.addWebVttScript_();\n })\n }), options);\n const getBandwidth = () => {\n return this.mainSegmentLoader_.bandwidth;\n };\n this.contentSteeringController_ = new ContentSteeringController(this.vhs_.xhr, getBandwidth);\n this.setupSegmentLoaderListeners_();\n if (this.bufferBasedABR) {\n this.mainPlaylistLoader_.one('loadedplaylist', () => this.startABRTimer_());\n this.tech_.on('pause', () => this.stopABRTimer_());\n this.tech_.on('play', () => this.startABRTimer_());\n } // Create SegmentLoader stat-getters\n // mediaRequests_\n // mediaRequestsAborted_\n // mediaRequestsTimedout_\n // mediaRequestsErrored_\n // mediaTransferDuration_\n // mediaBytesTransferred_\n // mediaAppends_\n\n loaderStats.forEach(stat => {\n this[stat + '_'] = sumLoaderStat.bind(this, stat);\n });\n this.logger_ = logger('pc');\n this.triggeredFmp4Usage = false;\n if (this.tech_.preload() === 'none') {\n this.loadOnPlay_ = () => {\n this.loadOnPlay_ = null;\n this.mainPlaylistLoader_.load();\n };\n this.tech_.one('play', this.loadOnPlay_);\n } else {\n this.mainPlaylistLoader_.load();\n }\n this.timeToLoadedData__ = -1;\n this.mainAppendsToLoadedData__ = -1;\n this.audioAppendsToLoadedData__ = -1;\n const event = this.tech_.preload() === 'none' ? 
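/*
 * Illustrative sketch of the loaderStats.forEach(...) wiring above: each stat name
 * becomes a bound getter that sums the main and audio segment loader values. Standalone
 * equivalent with made-up numbers:
 *
 *   const controller = {
 *     mainSegmentLoader_: { mediaRequests: 3 },
 *     audioSegmentLoader_: { mediaRequests: 2 }
 *   };
 *   const sumLoaderStat = function (stat) {
 *     return this.audioSegmentLoader_[stat] + this.mainSegmentLoader_[stat];
 *   };
 *   controller.mediaRequests_ = sumLoaderStat.bind(controller, 'mediaRequests');
 *   controller.mediaRequests_(); // 5
 */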
'play' : 'loadstart'; // start the first frame timer on loadstart or play (for preload none)\n\n this.tech_.one(event, () => {\n const timeToLoadedDataStart = Date.now();\n this.tech_.one('loadeddata', () => {\n this.timeToLoadedData__ = Date.now() - timeToLoadedDataStart;\n this.mainAppendsToLoadedData__ = this.mainSegmentLoader_.mediaAppends;\n this.audioAppendsToLoadedData__ = this.audioSegmentLoader_.mediaAppends;\n });\n });\n }\n mainAppendsToLoadedData_() {\n return this.mainAppendsToLoadedData__;\n }\n audioAppendsToLoadedData_() {\n return this.audioAppendsToLoadedData__;\n }\n appendsToLoadedData_() {\n const main = this.mainAppendsToLoadedData_();\n const audio = this.audioAppendsToLoadedData_();\n if (main === -1 || audio === -1) {\n return -1;\n }\n return main + audio;\n }\n timeToLoadedData_() {\n return this.timeToLoadedData__;\n }\n /**\n * Run selectPlaylist and switch to the new playlist if we should\n *\n * @param {string} [reason=abr] a reason for why the ABR check is made\n * @private\n */\n\n checkABR_(reason = 'abr') {\n const nextPlaylist = this.selectPlaylist();\n if (nextPlaylist && this.shouldSwitchToMedia_(nextPlaylist)) {\n this.switchMedia_(nextPlaylist, reason);\n }\n }\n switchMedia_(playlist, cause, delay) {\n const oldMedia = this.media();\n const oldId = oldMedia && (oldMedia.id || oldMedia.uri);\n const newId = playlist && (playlist.id || playlist.uri);\n if (oldId && oldId !== newId) {\n this.logger_(`switch media ${oldId} -> ${newId} from ${cause}`);\n this.tech_.trigger({\n type: 'usage',\n name: `vhs-rendition-change-${cause}`\n });\n }\n this.mainPlaylistLoader_.media(playlist, delay);\n }\n /**\n * A function that ensures we switch our playlists inside of `mediaTypes`\n * to match the current `serviceLocation` provided by the contentSteering controller.\n * We want to check media types of `AUDIO`, `SUBTITLES`, and `CLOSED-CAPTIONS`.\n *\n * This should only be called on a DASH playback scenario while using content steering.\n * This is necessary due to differences in how media in HLS manifests are generally tied to\n * a video playlist, where in DASH that is not always the case.\n */\n\n switchMediaForDASHContentSteering_() {\n ['AUDIO', 'SUBTITLES', 'CLOSED-CAPTIONS'].forEach(type => {\n const mediaType = this.mediaTypes_[type];\n const activeGroup = mediaType ? mediaType.activeGroup() : null;\n const pathway = this.contentSteeringController_.getPathway();\n if (activeGroup && pathway) {\n // activeGroup can be an array or a single group\n const mediaPlaylists = activeGroup.length ? 
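/*
 * Illustrative sketch (hypothetical data) of the serviceLocation filter applied just
 * below: only the media playlists hosted on the currently selected pathway survive.
 *
 *   const pathway = 'cdn-b';
 *   const mediaPlaylists = [
 *     { id: 'audio-en-a', attributes: { serviceLocation: 'cdn-a' } },
 *     { id: 'audio-en-b', attributes: { serviceLocation: 'cdn-b' } }
 *   ];
 *   mediaPlaylists.filter(p => p.attributes.serviceLocation === pathway);
 *   // -> [{ id: 'audio-en-b', ... }]
 */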
activeGroup[0].playlists : activeGroup.playlists;\n const dashMediaPlaylists = mediaPlaylists.filter(p => p.attributes.serviceLocation === pathway); // Switch the current active playlist to the correct CDN\n\n if (dashMediaPlaylists.length) {\n this.mediaTypes_[type].activePlaylistLoader.media(dashMediaPlaylists[0]);\n }\n }\n });\n }\n /**\n * Start a timer that periodically calls checkABR_\n *\n * @private\n */\n\n startABRTimer_() {\n this.stopABRTimer_();\n this.abrTimer_ = window$1.setInterval(() => this.checkABR_(), 250);\n }\n /**\n * Stop the timer that periodically calls checkABR_\n *\n * @private\n */\n\n stopABRTimer_() {\n // if we're scrubbing, we don't need to pause.\n // This getter will be added to Video.js in version 7.11.\n if (this.tech_.scrubbing && this.tech_.scrubbing()) {\n return;\n }\n window$1.clearInterval(this.abrTimer_);\n this.abrTimer_ = null;\n }\n /**\n * Get a list of playlists for the currently selected audio playlist\n *\n * @return {Array} the array of audio playlists\n */\n\n getAudioTrackPlaylists_() {\n const main = this.main();\n const defaultPlaylists = main && main.playlists || []; // if we don't have any audio groups then we can only\n // assume that the audio tracks are contained in main\n // playlist array, use that or an empty array.\n\n if (!main || !main.mediaGroups || !main.mediaGroups.AUDIO) {\n return defaultPlaylists;\n }\n const AUDIO = main.mediaGroups.AUDIO;\n const groupKeys = Object.keys(AUDIO);\n let track; // get the current active track\n\n if (Object.keys(this.mediaTypes_.AUDIO.groups).length) {\n track = this.mediaTypes_.AUDIO.activeTrack(); // or get the default track from main if mediaTypes_ isn't setup yet\n } else {\n // default group is `main` or just the first group.\n const defaultGroup = AUDIO.main || groupKeys.length && AUDIO[groupKeys[0]];\n for (const label in defaultGroup) {\n if (defaultGroup[label].default) {\n track = {\n label\n };\n break;\n }\n }\n } // no active track no playlists.\n\n if (!track) {\n return defaultPlaylists;\n }\n const playlists = []; // get all of the playlists that are possible for the\n // active track.\n\n for (const group in AUDIO) {\n if (AUDIO[group][track.label]) {\n const properties = AUDIO[group][track.label];\n if (properties.playlists && properties.playlists.length) {\n playlists.push.apply(playlists, properties.playlists);\n } else if (properties.uri) {\n playlists.push(properties);\n } else if (main.playlists.length) {\n // if an audio group does not have a uri\n // see if we have main playlists that use it as a group.\n // if we do then add those to the playlists list.\n for (let i = 0; i < main.playlists.length; i++) {\n const playlist = main.playlists[i];\n if (playlist.attributes && playlist.attributes.AUDIO && playlist.attributes.AUDIO === group) {\n playlists.push(playlist);\n }\n }\n }\n }\n }\n if (!playlists.length) {\n return defaultPlaylists;\n }\n return playlists;\n }\n /**\n * Register event handlers on the main playlist loader. 
A helper\n * function for construction time.\n *\n * @private\n */\n\n setupMainPlaylistLoaderListeners_() {\n this.mainPlaylistLoader_.on('loadedmetadata', () => {\n const media = this.mainPlaylistLoader_.media();\n const requestTimeout = media.targetDuration * 1.5 * 1000; // If we don't have any more available playlists, we don't want to\n // timeout the request.\n\n if (isLowestEnabledRendition(this.mainPlaylistLoader_.main, this.mainPlaylistLoader_.media())) {\n this.requestOptions_.timeout = 0;\n } else {\n this.requestOptions_.timeout = requestTimeout;\n } // if this isn't a live video and preload permits, start\n // downloading segments\n\n if (media.endList && this.tech_.preload() !== 'none') {\n this.mainSegmentLoader_.playlist(media, this.requestOptions_);\n this.mainSegmentLoader_.load();\n }\n setupMediaGroups({\n sourceType: this.sourceType_,\n segmentLoaders: {\n AUDIO: this.audioSegmentLoader_,\n SUBTITLES: this.subtitleSegmentLoader_,\n main: this.mainSegmentLoader_\n },\n tech: this.tech_,\n requestOptions: this.requestOptions_,\n mainPlaylistLoader: this.mainPlaylistLoader_,\n vhs: this.vhs_,\n main: this.main(),\n mediaTypes: this.mediaTypes_,\n excludePlaylist: this.excludePlaylist.bind(this)\n });\n this.triggerPresenceUsage_(this.main(), media);\n this.setupFirstPlay();\n if (!this.mediaTypes_.AUDIO.activePlaylistLoader || this.mediaTypes_.AUDIO.activePlaylistLoader.media()) {\n this.trigger('selectedinitialmedia');\n } else {\n // We must wait for the active audio playlist loader to\n // finish setting up before triggering this event so the\n // representations API and EME setup is correct\n this.mediaTypes_.AUDIO.activePlaylistLoader.one('loadedmetadata', () => {\n this.trigger('selectedinitialmedia');\n });\n }\n });\n this.mainPlaylistLoader_.on('loadedplaylist', () => {\n if (this.loadOnPlay_) {\n this.tech_.off('play', this.loadOnPlay_);\n }\n let updatedPlaylist = this.mainPlaylistLoader_.media();\n if (!updatedPlaylist) {\n // Add content steering listeners on first load and init.\n this.attachContentSteeringListeners_();\n this.initContentSteeringController_(); // exclude any variants that are not supported by the browser before selecting\n // an initial media as the playlist selectors do not consider browser support\n\n this.excludeUnsupportedVariants_();\n let selectedMedia;\n if (this.enableLowInitialPlaylist) {\n selectedMedia = this.selectInitialPlaylist();\n }\n if (!selectedMedia) {\n selectedMedia = this.selectPlaylist();\n }\n if (!selectedMedia || !this.shouldSwitchToMedia_(selectedMedia)) {\n return;\n }\n this.initialMedia_ = selectedMedia;\n this.switchMedia_(this.initialMedia_, 'initial'); // Under the standard case where a source URL is provided, loadedplaylist will\n // fire again since the playlist will be requested. 
In the case of vhs-json\n // (where the manifest object is provided as the source), when the media\n // playlist's `segments` list is already available, a media playlist won't be\n // requested, and loadedplaylist won't fire again, so the playlist handler must be\n // called on its own here.\n\n const haveJsonSource = this.sourceType_ === 'vhs-json' && this.initialMedia_.segments;\n if (!haveJsonSource) {\n return;\n }\n updatedPlaylist = this.initialMedia_;\n }\n this.handleUpdatedMediaPlaylist(updatedPlaylist);\n });\n this.mainPlaylistLoader_.on('error', () => {\n const error = this.mainPlaylistLoader_.error;\n this.excludePlaylist({\n playlistToExclude: error.playlist,\n error\n });\n });\n this.mainPlaylistLoader_.on('mediachanging', () => {\n this.mainSegmentLoader_.abort();\n this.mainSegmentLoader_.pause();\n });\n this.mainPlaylistLoader_.on('mediachange', () => {\n const media = this.mainPlaylistLoader_.media();\n const requestTimeout = media.targetDuration * 1.5 * 1000; // If we don't have any more available playlists, we don't want to\n // timeout the request.\n\n if (isLowestEnabledRendition(this.mainPlaylistLoader_.main, this.mainPlaylistLoader_.media())) {\n this.requestOptions_.timeout = 0;\n } else {\n this.requestOptions_.timeout = requestTimeout;\n }\n if (this.sourceType_ === 'dash') {\n // we don't want to re-request the same hls playlist right after it was changed\n this.mainPlaylistLoader_.load();\n } // TODO: Create a new event on the PlaylistLoader that signals\n // that the segments have changed in some way and use that to\n // update the SegmentLoader instead of doing it twice here and\n // on `loadedplaylist`\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.playlist(media, this.requestOptions_);\n if (this.waitingForFastQualityPlaylistReceived_) {\n this.runFastQualitySwitch_();\n } else {\n this.mainSegmentLoader_.load();\n }\n this.tech_.trigger({\n type: 'mediachange',\n bubbles: true\n });\n });\n this.mainPlaylistLoader_.on('playlistunchanged', () => {\n const updatedPlaylist = this.mainPlaylistLoader_.media(); // ignore unchanged playlists that have already been\n // excluded for not-changing. We likely just have a really slowly updating\n // playlist.\n\n if (updatedPlaylist.lastExcludeReason_ === 'playlist-unchanged') {\n return;\n }\n const playlistOutdated = this.stuckAtPlaylistEnd_(updatedPlaylist);\n if (playlistOutdated) {\n // Playlist has stopped updating and we're stuck at its end. 
Try to\n // exclude it and switch to another playlist in the hope that that\n // one is updating (and give the player a chance to re-adjust to the\n // safe live point).\n this.excludePlaylist({\n error: {\n message: 'Playlist no longer updating.',\n reason: 'playlist-unchanged'\n }\n }); // useful for monitoring QoS\n\n this.tech_.trigger('playliststuck');\n }\n });\n this.mainPlaylistLoader_.on('renditiondisabled', () => {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-rendition-disabled'\n });\n });\n this.mainPlaylistLoader_.on('renditionenabled', () => {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-rendition-enabled'\n });\n });\n }\n /**\n * Given an updated media playlist (whether it was loaded for the first time, or\n * refreshed for live playlists), update any relevant properties and state to reflect\n * changes in the media that should be accounted for (e.g., cues and duration).\n *\n * @param {Object} updatedPlaylist the updated media playlist object\n *\n * @private\n */\n\n handleUpdatedMediaPlaylist(updatedPlaylist) {\n if (this.useCueTags_) {\n this.updateAdCues_(updatedPlaylist);\n } // TODO: Create a new event on the PlaylistLoader that signals\n // that the segments have changed in some way and use that to\n // update the SegmentLoader instead of doing it twice here and\n // on `mediachange`\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.playlist(updatedPlaylist, this.requestOptions_);\n if (this.waitingForFastQualityPlaylistReceived_) {\n this.runFastQualitySwitch_();\n }\n this.updateDuration(!updatedPlaylist.endList); // If the player isn't paused, ensure that the segment loader is running,\n // as it is possible that it was temporarily stopped while waiting for\n // a playlist (e.g., in case the playlist errored and we re-requested it).\n\n if (!this.tech_.paused()) {\n this.mainSegmentLoader_.load();\n if (this.audioSegmentLoader_) {\n this.audioSegmentLoader_.load();\n }\n }\n }\n /**\n * A helper function for triggerring presence usage events once per source\n *\n * @private\n */\n\n triggerPresenceUsage_(main, media) {\n const mediaGroups = main.mediaGroups || {};\n let defaultDemuxed = true;\n const audioGroupKeys = Object.keys(mediaGroups.AUDIO);\n for (const mediaGroup in mediaGroups.AUDIO) {\n for (const label in mediaGroups.AUDIO[mediaGroup]) {\n const properties = mediaGroups.AUDIO[mediaGroup][label];\n if (!properties.uri) {\n defaultDemuxed = false;\n }\n }\n }\n if (defaultDemuxed) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-demuxed'\n });\n }\n if (Object.keys(mediaGroups.SUBTITLES).length) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-webvtt'\n });\n }\n if (Vhs$1.Playlist.isAes(media)) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-aes'\n });\n }\n if (audioGroupKeys.length && Object.keys(mediaGroups.AUDIO[audioGroupKeys[0]]).length > 1) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-alternate-audio'\n });\n }\n if (this.useCueTags_) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-playlist-cue-tags'\n });\n }\n }\n shouldSwitchToMedia_(nextPlaylist) {\n const currentPlaylist = this.mainPlaylistLoader_.media() || this.mainPlaylistLoader_.pendingMedia_;\n const currentTime = this.tech_.currentTime();\n const bufferLowWaterLine = this.bufferLowWaterLine();\n const bufferHighWaterLine = this.bufferHighWaterLine();\n const buffered = this.tech_.buffered();\n return shouldSwitchToMedia({\n buffered,\n currentTime,\n currentPlaylist,\n nextPlaylist,\n bufferLowWaterLine,\n 
bufferHighWaterLine,\n duration: this.duration(),\n bufferBasedABR: this.bufferBasedABR,\n log: this.logger_\n });\n }\n /**\n * Register event handlers on the segment loaders. A helper function\n * for construction time.\n *\n * @private\n */\n\n setupSegmentLoaderListeners_() {\n this.mainSegmentLoader_.on('bandwidthupdate', () => {\n // Whether or not buffer based ABR or another ABR is used, on a bandwidth change it's\n // useful to check to see if a rendition switch should be made.\n this.checkABR_('bandwidthupdate');\n this.tech_.trigger('bandwidthupdate');\n });\n this.mainSegmentLoader_.on('timeout', () => {\n if (this.bufferBasedABR) {\n // If a rendition change is needed, then it would've be done on `bandwidthupdate`.\n // Here the only consideration is that for buffer based ABR there's no guarantee\n // of an immediate switch (since the bandwidth is averaged with a timeout\n // bandwidth value of 1), so force a load on the segment loader to keep it going.\n this.mainSegmentLoader_.load();\n }\n }); // `progress` events are not reliable enough of a bandwidth measure to trigger buffer\n // based ABR.\n\n if (!this.bufferBasedABR) {\n this.mainSegmentLoader_.on('progress', () => {\n this.trigger('progress');\n });\n }\n this.mainSegmentLoader_.on('error', () => {\n const error = this.mainSegmentLoader_.error();\n this.excludePlaylist({\n playlistToExclude: error.playlist,\n error\n });\n });\n this.mainSegmentLoader_.on('appenderror', () => {\n this.error = this.mainSegmentLoader_.error_;\n this.trigger('error');\n });\n this.mainSegmentLoader_.on('syncinfoupdate', () => {\n this.onSyncInfoUpdate_();\n });\n this.mainSegmentLoader_.on('timestampoffset', () => {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-timestamp-offset'\n });\n });\n this.audioSegmentLoader_.on('syncinfoupdate', () => {\n this.onSyncInfoUpdate_();\n });\n this.audioSegmentLoader_.on('appenderror', () => {\n this.error = this.audioSegmentLoader_.error_;\n this.trigger('error');\n });\n this.mainSegmentLoader_.on('ended', () => {\n this.logger_('main segment loader ended');\n this.onEndOfStream();\n });\n this.mainSegmentLoader_.on('earlyabort', event => {\n // never try to early abort with the new ABR algorithm\n if (this.bufferBasedABR) {\n return;\n }\n this.delegateLoaders_('all', ['abort']);\n this.excludePlaylist({\n error: {\n message: 'Aborted early because there isn\\'t enough bandwidth to complete ' + 'the request without rebuffering.'\n },\n playlistExclusionDuration: ABORT_EARLY_EXCLUSION_SECONDS\n });\n });\n const updateCodecs = () => {\n if (!this.sourceUpdater_.hasCreatedSourceBuffers()) {\n return this.tryToCreateSourceBuffers_();\n }\n const codecs = this.getCodecsOrExclude_(); // no codecs means that the playlist was excluded\n\n if (!codecs) {\n return;\n }\n this.sourceUpdater_.addOrChangeSourceBuffers(codecs);\n };\n this.mainSegmentLoader_.on('trackinfo', updateCodecs);\n this.audioSegmentLoader_.on('trackinfo', updateCodecs);\n this.mainSegmentLoader_.on('fmp4', () => {\n if (!this.triggeredFmp4Usage) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-fmp4'\n });\n this.triggeredFmp4Usage = true;\n }\n });\n this.audioSegmentLoader_.on('fmp4', () => {\n if (!this.triggeredFmp4Usage) {\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-fmp4'\n });\n this.triggeredFmp4Usage = true;\n }\n });\n this.audioSegmentLoader_.on('ended', () => {\n this.logger_('audioSegmentLoader ended');\n this.onEndOfStream();\n });\n }\n mediaSecondsLoaded_() {\n return 
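/*
 * Illustrative sketch (hypothetical helper names) of the 'trackinfo' handling above:
 * source buffers are created once, and later track info only changes codecs when any
 * were resolved, since getCodecsOrExclude_ returns nothing if the playlist got excluded.
 *
 *   const makeTrackInfoHandler = (sourceUpdater, getCodecsOrExclude, createBuffers) => () => {
 *     if (!sourceUpdater.hasCreatedSourceBuffers()) {
 *       return createBuffers();              // first track info: create the source buffers
 *     }
 *     const codecs = getCodecsOrExclude();   // undefined when the playlist was excluded
 *     if (codecs) {
 *       sourceUpdater.addOrChangeSourceBuffers(codecs);
 *     }
 *   };
 */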
Math.max(this.audioSegmentLoader_.mediaSecondsLoaded + this.mainSegmentLoader_.mediaSecondsLoaded);\n }\n /**\n * Call load on our SegmentLoaders\n */\n\n load() {\n this.mainSegmentLoader_.load();\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n this.audioSegmentLoader_.load();\n }\n if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {\n this.subtitleSegmentLoader_.load();\n }\n }\n /**\n * Re-tune playback quality level for the current player\n * conditions. This method will perform destructive actions like removing\n * already buffered content in order to readjust the currently active\n * playlist quickly. This is good for manual quality changes\n *\n * @private\n */\n\n fastQualityChange_(media = this.selectPlaylist()) {\n if (media && media === this.mainPlaylistLoader_.media()) {\n this.logger_('skipping fastQualityChange because new media is same as old');\n return;\n }\n this.switchMedia_(media, 'fast-quality'); // we would like to avoid race condition when we call fastQuality,\n // reset everything and start loading segments from prev segments instead of new because new playlist is not received yet\n\n this.waitingForFastQualityPlaylistReceived_ = true;\n }\n runFastQualitySwitch_() {\n this.waitingForFastQualityPlaylistReceived_ = false; // Delete all buffered data to allow an immediate quality switch, then seek to give\n // the browser a kick to remove any cached frames from the previous rendtion (.04 seconds\n // ahead was roughly the minimum that will accomplish this across a variety of content\n // in IE and Edge, but seeking in place is sufficient on all other browsers)\n // Edge/IE bug: https://developer.microsoft.com/en-us/microsoft-edge/platform/issues/14600375/\n // Chrome bug: https://bugs.chromium.org/p/chromium/issues/detail?id=651904\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.resetEverything(() => {\n this.tech_.setCurrentTime(this.tech_.currentTime());\n }); // don't need to reset audio as it is reset when media changes\n }\n /**\n * Begin playback.\n */\n\n play() {\n if (this.setupFirstPlay()) {\n return;\n }\n if (this.tech_.ended()) {\n this.tech_.setCurrentTime(0);\n }\n if (this.hasPlayed_) {\n this.load();\n }\n const seekable = this.tech_.seekable(); // if the viewer has paused and we fell out of the live window,\n // seek forward to the live point\n\n if (this.tech_.duration() === Infinity) {\n if (this.tech_.currentTime() < seekable.start(0)) {\n return this.tech_.setCurrentTime(seekable.end(seekable.length - 1));\n }\n }\n }\n /**\n * Seek to the latest media position if this is a live video and the\n * player and video are loaded and initialized.\n */\n\n setupFirstPlay() {\n const media = this.mainPlaylistLoader_.media(); // Check that everything is ready to begin buffering for the first call to play\n // If 1) there is no active media\n // 2) the player is paused\n // 3) the first play has already been setup\n // then exit early\n\n if (!media || this.tech_.paused() || this.hasPlayed_) {\n return false;\n } // when the video is a live stream and/or has a start time\n\n if (!media.endList || media.start) {\n const seekable = this.seekable();\n if (!seekable.length) {\n // without a seekable range, the player cannot seek to begin buffering at the\n // live or start point\n return false;\n }\n const seekableEnd = seekable.end(0);\n let startPoint = seekableEnd;\n if (media.start) {\n const offset = media.start.timeOffset;\n if (offset < 0) {\n startPoint = Math.max(seekableEnd + offset, seekable.start(0));\n } else {\n 
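/*
 * Worked example (hypothetical numbers) for the start-offset branches around this point,
 * assuming a seekable window of [100, 160]:
 *   offset = -10  -> Math.max(160 - 10, 100)  = 150  // 10s back from the live point
 *   offset = -120 -> Math.max(160 - 120, 100) = 100  // clamped to the seekable start
 *   offset = 130  -> Math.min(160, 130)       = 130  // absolute offset, capped at the live point
 */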
startPoint = Math.min(seekableEnd, offset);\n }\n } // trigger firstplay to inform the source handler to ignore the next seek event\n\n this.trigger('firstplay'); // seek to the live point\n\n this.tech_.setCurrentTime(startPoint);\n }\n this.hasPlayed_ = true; // we can begin loading now that everything is ready\n\n this.load();\n return true;\n }\n /**\n * handle the sourceopen event on the MediaSource\n *\n * @private\n */\n\n handleSourceOpen_() {\n // Only attempt to create the source buffer if none already exist.\n // handleSourceOpen is also called when we are \"re-opening\" a source buffer\n // after `endOfStream` has been called (in response to a seek for instance)\n this.tryToCreateSourceBuffers_(); // if autoplay is enabled, begin playback. This is duplicative of\n // code in video.js but is required because play() must be invoked\n // *after* the media source has opened.\n\n if (this.tech_.autoplay()) {\n const playPromise = this.tech_.play(); // Catch/silence error when a pause interrupts a play request\n // on browsers which return a promise\n\n if (typeof playPromise !== 'undefined' && typeof playPromise.then === 'function') {\n playPromise.then(null, e => {});\n }\n }\n this.trigger('sourceopen');\n }\n /**\n * handle the sourceended event on the MediaSource\n *\n * @private\n */\n\n handleSourceEnded_() {\n if (!this.inbandTextTracks_.metadataTrack_) {\n return;\n }\n const cues = this.inbandTextTracks_.metadataTrack_.cues;\n if (!cues || !cues.length) {\n return;\n }\n const duration = this.duration();\n cues[cues.length - 1].endTime = isNaN(duration) || Math.abs(duration) === Infinity ? Number.MAX_VALUE : duration;\n }\n /**\n * handle the durationchange event on the MediaSource\n *\n * @private\n */\n\n handleDurationChange_() {\n this.tech_.trigger('durationchange');\n }\n /**\n * Calls endOfStream on the media source when all active stream types have called\n * endOfStream\n *\n * @param {string} streamType\n * Stream type of the segment loader that called endOfStream\n * @private\n */\n\n onEndOfStream() {\n let isEndOfStream = this.mainSegmentLoader_.ended_;\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n const mainMediaInfo = this.mainSegmentLoader_.getCurrentMediaInfo_(); // if the audio playlist loader exists, then alternate audio is active\n\n if (!mainMediaInfo || mainMediaInfo.hasVideo) {\n // if we do not know if the main segment loader contains video yet or if we\n // definitively know the main segment loader contains video, then we need to wait\n // for both main and audio segment loaders to call endOfStream\n isEndOfStream = isEndOfStream && this.audioSegmentLoader_.ended_;\n } else {\n // otherwise just rely on the audio loader\n isEndOfStream = this.audioSegmentLoader_.ended_;\n }\n }\n if (!isEndOfStream) {\n return;\n }\n this.stopABRTimer_();\n this.sourceUpdater_.endOfStream();\n }\n /**\n * Check if a playlist has stopped being updated\n *\n * @param {Object} playlist the media playlist object\n * @return {boolean} whether the playlist has stopped being updated or not\n */\n\n stuckAtPlaylistEnd_(playlist) {\n const seekable = this.seekable();\n if (!seekable.length) {\n // playlist doesn't have enough information to determine whether we are stuck\n return false;\n }\n const expired = this.syncController_.getExpiredTime(playlist, this.duration());\n if (expired === null) {\n return false;\n } // does not use the safe live end to calculate playlist end, since we\n // don't want to say we are stuck while there is still content\n\n const 
absolutePlaylistEnd = Vhs$1.Playlist.playlistEnd(playlist, expired);\n const currentTime = this.tech_.currentTime();\n const buffered = this.tech_.buffered();\n if (!buffered.length) {\n // return true if the playhead reached the absolute end of the playlist\n return absolutePlaylistEnd - currentTime <= SAFE_TIME_DELTA;\n }\n const bufferedEnd = buffered.end(buffered.length - 1); // return true if there is too little buffer left and buffer has reached absolute\n // end of playlist\n\n return bufferedEnd - currentTime <= SAFE_TIME_DELTA && absolutePlaylistEnd - bufferedEnd <= SAFE_TIME_DELTA;\n }\n /**\n * Exclude a playlist for a set amount of time, making it unavailable for selection by\n * the rendition selection algorithm, then force a new playlist (rendition) selection.\n *\n * @param {Object=} playlistToExclude\n * the playlist to exclude, defaults to the currently selected playlist\n * @param {Object=} error\n * an optional error\n * @param {number=} playlistExclusionDuration\n * an optional number of seconds to exclude the playlist\n */\n\n excludePlaylist({\n playlistToExclude = this.mainPlaylistLoader_.media(),\n error = {},\n playlistExclusionDuration\n }) {\n // If the `error` was generated by the playlist loader, it will contain\n // the playlist we were trying to load (but failed) and that should be\n // excluded instead of the currently selected playlist which is likely\n // out-of-date in this scenario\n playlistToExclude = playlistToExclude || this.mainPlaylistLoader_.media();\n playlistExclusionDuration = playlistExclusionDuration || error.playlistExclusionDuration || this.playlistExclusionDuration; // If there is no current playlist, then an error occurred while we were\n // trying to load the main OR while we were disposing of the tech\n\n if (!playlistToExclude) {\n this.error = error;\n if (this.mediaSource.readyState !== 'open') {\n this.trigger('error');\n } else {\n this.sourceUpdater_.endOfStream('network');\n }\n return;\n }\n playlistToExclude.playlistErrors_++;\n const playlists = this.mainPlaylistLoader_.main.playlists;\n const enabledPlaylists = playlists.filter(isEnabled);\n const isFinalRendition = enabledPlaylists.length === 1 && enabledPlaylists[0] === playlistToExclude; // Don't exclude the only playlist unless it was excluded\n // forever\n\n if (playlists.length === 1 && playlistExclusionDuration !== Infinity) {\n videojs.log.warn(`Problem encountered with playlist ${playlistToExclude.id}. ` + 'Trying again since it is the only playlist.');\n this.tech_.trigger('retryplaylist'); // if this is a final rendition, we should delay\n\n return this.mainPlaylistLoader_.load(isFinalRendition);\n }\n if (isFinalRendition) {\n // If we're content steering, try other pathways.\n if (this.main().contentSteering) {\n const pathway = this.pathwayAttribute_(playlistToExclude); // Ignore at least 1 steering manifest refresh.\n\n const reIncludeDelay = this.contentSteeringController_.steeringManifest.ttl * 1000;\n this.contentSteeringController_.excludePathway(pathway);\n this.excludeThenChangePathway_();\n setTimeout(() => {\n this.contentSteeringController_.addAvailablePathway(pathway);\n }, reIncludeDelay);\n return;\n } // Since we're on the final non-excluded playlist, and we're about to exclude\n // it, instead of erring the player or retrying this playlist, clear out the current\n // exclusion list. 
This allows other playlists to be attempted in case any have been\n // fixed.\n\n let reincluded = false;\n playlists.forEach(playlist => {\n // skip current playlist which is about to be excluded\n if (playlist === playlistToExclude) {\n return;\n }\n const excludeUntil = playlist.excludeUntil; // a playlist cannot be reincluded if it wasn't excluded to begin with.\n\n if (typeof excludeUntil !== 'undefined' && excludeUntil !== Infinity) {\n reincluded = true;\n delete playlist.excludeUntil;\n }\n });\n if (reincluded) {\n videojs.log.warn('Removing other playlists from the exclusion list because the last ' + 'rendition is about to be excluded.'); // Technically we are retrying a playlist, in that we are simply retrying a previous\n // playlist. This is needed for users relying on the retryplaylist event to catch a\n // case where the player might be stuck and looping through \"dead\" playlists.\n\n this.tech_.trigger('retryplaylist');\n }\n } // Exclude this playlist\n\n let excludeUntil;\n if (playlistToExclude.playlistErrors_ > this.maxPlaylistRetries) {\n excludeUntil = Infinity;\n } else {\n excludeUntil = Date.now() + playlistExclusionDuration * 1000;\n }\n playlistToExclude.excludeUntil = excludeUntil;\n if (error.reason) {\n playlistToExclude.lastExcludeReason_ = error.reason;\n }\n this.tech_.trigger('excludeplaylist');\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-rendition-excluded'\n }); // TODO: only load a new playlist if we're excluding the current playlist\n // If this function was called with a playlist that's not the current active playlist\n // (e.g., media().id !== playlistToExclude.id),\n // then a new playlist should not be selected and loaded, as there's nothing wrong with the current playlist.\n\n const nextPlaylist = this.selectPlaylist();\n if (!nextPlaylist) {\n this.error = 'Playback cannot continue. No available working or supported playlists.';\n this.trigger('error');\n return;\n }\n const logFn = error.internal ? this.logger_ : videojs.log.warn;\n const errorMessage = error.message ? ' ' + error.message : '';\n logFn(`${error.internal ? 'Internal problem' : 'Problem'} encountered with playlist ${playlistToExclude.id}.` + `${errorMessage} Switching to playlist ${nextPlaylist.id}.`); // if audio group changed reset audio loaders\n\n if (nextPlaylist.attributes.AUDIO !== playlistToExclude.attributes.AUDIO) {\n this.delegateLoaders_('audio', ['abort', 'pause']);\n } // if subtitle group changed reset subtitle loaders\n\n if (nextPlaylist.attributes.SUBTITLES !== playlistToExclude.attributes.SUBTITLES) {\n this.delegateLoaders_('subtitle', ['abort', 'pause']);\n }\n this.delegateLoaders_('main', ['abort', 'pause']);\n const delayDuration = nextPlaylist.targetDuration / 2 * 1000 || 5 * 1000;\n const shouldDelay = typeof nextPlaylist.lastRequest === 'number' && Date.now() - nextPlaylist.lastRequest <= delayDuration; // delay if it's a final rendition or if the last refresh is sooner than half targetDuration\n\n return this.switchMedia_(nextPlaylist, 'exclude', isFinalRendition || shouldDelay);\n }\n /**\n * Pause all segment/playlist loaders\n */\n\n pauseLoading() {\n this.delegateLoaders_('all', ['abort', 'pause']);\n this.stopABRTimer_();\n }\n /**\n * Call a set of functions in order on playlist loaders, segment loaders,\n * or both types of loaders.\n *\n * @param {string} filter\n * Filter loaders that should call fnNames using a string. 
Can be:\n * * all - run on all loaders\n * * audio - run on all audio loaders\n * * subtitle - run on all subtitle loaders\n * * main - run on the main loaders\n *\n * @param {Array|string} fnNames\n * A string or array of function names to call.\n */\n\n delegateLoaders_(filter, fnNames) {\n const loaders = [];\n const dontFilterPlaylist = filter === 'all';\n if (dontFilterPlaylist || filter === 'main') {\n loaders.push(this.mainPlaylistLoader_);\n }\n const mediaTypes = [];\n if (dontFilterPlaylist || filter === 'audio') {\n mediaTypes.push('AUDIO');\n }\n if (dontFilterPlaylist || filter === 'subtitle') {\n mediaTypes.push('CLOSED-CAPTIONS');\n mediaTypes.push('SUBTITLES');\n }\n mediaTypes.forEach(mediaType => {\n const loader = this.mediaTypes_[mediaType] && this.mediaTypes_[mediaType].activePlaylistLoader;\n if (loader) {\n loaders.push(loader);\n }\n });\n ['main', 'audio', 'subtitle'].forEach(name => {\n const loader = this[`${name}SegmentLoader_`];\n if (loader && (filter === name || filter === 'all')) {\n loaders.push(loader);\n }\n });\n loaders.forEach(loader => fnNames.forEach(fnName => {\n if (typeof loader[fnName] === 'function') {\n loader[fnName]();\n }\n }));\n }\n /**\n * set the current time on all segment loaders\n *\n * @param {TimeRange} currentTime the current time to set\n * @return {TimeRange} the current time\n */\n\n setCurrentTime(currentTime) {\n const buffered = findRange(this.tech_.buffered(), currentTime);\n if (!(this.mainPlaylistLoader_ && this.mainPlaylistLoader_.media())) {\n // return immediately if the metadata is not ready yet\n return 0;\n } // it's clearly an edge-case but don't thrown an error if asked to\n // seek within an empty playlist\n\n if (!this.mainPlaylistLoader_.media().segments) {\n return 0;\n } // if the seek location is already buffered, continue buffering as usual\n\n if (buffered && buffered.length) {\n return currentTime;\n } // cancel outstanding requests so we begin buffering at the new\n // location\n\n this.mainSegmentLoader_.pause();\n this.mainSegmentLoader_.resetEverything();\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n this.audioSegmentLoader_.pause();\n this.audioSegmentLoader_.resetEverything();\n }\n if (this.mediaTypes_.SUBTITLES.activePlaylistLoader) {\n this.subtitleSegmentLoader_.pause();\n this.subtitleSegmentLoader_.resetEverything();\n } // start segment loader loading in case they are paused\n\n this.load();\n }\n /**\n * get the current duration\n *\n * @return {TimeRange} the duration\n */\n\n duration() {\n if (!this.mainPlaylistLoader_) {\n return 0;\n }\n const media = this.mainPlaylistLoader_.media();\n if (!media) {\n // no playlists loaded yet, so can't determine a duration\n return 0;\n } // Don't rely on the media source for duration in the case of a live playlist since\n // setting the native MediaSource's duration to infinity ends up with consequences to\n // seekable behavior. See https://github.com/w3c/media-source/issues/5 for details.\n //\n // This is resolved in the spec by https://github.com/w3c/media-source/pull/92,\n // however, few browsers have support for setLiveSeekableRange()\n // https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/setLiveSeekableRange\n //\n // Until a time when the duration of the media source can be set to infinity, and a\n // seekable range specified across browsers, just return Infinity.\n\n if (!media.endList) {\n return Infinity;\n } // Since this is a VOD video, it is safe to rely on the media source's duration (if\n // available). 
If it's not available, fall back to a playlist-calculated estimate.\n\n if (this.mediaSource) {\n return this.mediaSource.duration;\n }\n return Vhs$1.Playlist.duration(media);\n }\n /**\n * check the seekable range\n *\n * @return {TimeRange} the seekable range\n */\n\n seekable() {\n return this.seekable_;\n }\n onSyncInfoUpdate_() {\n let audioSeekable; // TODO check for creation of both source buffers before updating seekable\n //\n // A fix was made to this function where a check for\n // this.sourceUpdater_.hasCreatedSourceBuffers\n // was added to ensure that both source buffers were created before seekable was\n // updated. However, it originally had a bug where it was checking for a true and\n // returning early instead of checking for false. Setting it to check for false to\n // return early though created other issues. A call to play() would check for seekable\n // end without verifying that a seekable range was present. In addition, even checking\n // for that didn't solve some issues, as handleFirstPlay is sometimes worked around\n // due to a media update calling load on the segment loaders, skipping a seek to live,\n // thereby starting live streams at the beginning of the stream rather than at the end.\n //\n // This conditional should be fixed to wait for the creation of two source buffers at\n // the same time as the other sections of code are fixed to properly seek to live and\n // not throw an error due to checking for a seekable end when no seekable range exists.\n //\n // For now, fall back to the older behavior, with the understanding that the seekable\n // range may not be completely correct, leading to a suboptimal initial live point.\n\n if (!this.mainPlaylistLoader_) {\n return;\n }\n let media = this.mainPlaylistLoader_.media();\n if (!media) {\n return;\n }\n let expired = this.syncController_.getExpiredTime(media, this.duration());\n if (expired === null) {\n // not enough information to update seekable\n return;\n }\n const main = this.mainPlaylistLoader_.main;\n const mainSeekable = Vhs$1.Playlist.seekable(media, expired, Vhs$1.Playlist.liveEdgeDelay(main, media));\n if (mainSeekable.length === 0) {\n return;\n }\n if (this.mediaTypes_.AUDIO.activePlaylistLoader) {\n media = this.mediaTypes_.AUDIO.activePlaylistLoader.media();\n expired = this.syncController_.getExpiredTime(media, this.duration());\n if (expired === null) {\n return;\n }\n audioSeekable = Vhs$1.Playlist.seekable(media, expired, Vhs$1.Playlist.liveEdgeDelay(main, media));\n if (audioSeekable.length === 0) {\n return;\n }\n }\n let oldEnd;\n let oldStart;\n if (this.seekable_ && this.seekable_.length) {\n oldEnd = this.seekable_.end(0);\n oldStart = this.seekable_.start(0);\n }\n if (!audioSeekable) {\n // seekable has been calculated based on buffering video data so it\n // can be returned directly\n this.seekable_ = mainSeekable;\n } else if (audioSeekable.start(0) > mainSeekable.end(0) || mainSeekable.start(0) > audioSeekable.end(0)) {\n // seekables are pretty far off, rely on main\n this.seekable_ = mainSeekable;\n } else {\n this.seekable_ = createTimeRanges([[audioSeekable.start(0) > mainSeekable.start(0) ? audioSeekable.start(0) : mainSeekable.start(0), audioSeekable.end(0) < mainSeekable.end(0) ? 
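/*
 * Worked example (hypothetical ranges) of the combined seekable range assembled here:
 *   main seekable  = [ 10, 100 ]
 *   audio seekable = [ 12,  98 ]
 *   combined       = [ max(10, 12), min(100, 98) ] = [ 12, 98 ]
 * Disjoint ranges never reach this branch; the earlier check falls back to the main
 * seekable range when audio and main do not overlap.
 */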
audioSeekable.end(0) : mainSeekable.end(0)]]);\n } // seekable is the same as last time\n\n if (this.seekable_ && this.seekable_.length) {\n if (this.seekable_.end(0) === oldEnd && this.seekable_.start(0) === oldStart) {\n return;\n }\n }\n this.logger_(`seekable updated [${printableRange(this.seekable_)}]`);\n this.tech_.trigger('seekablechanged');\n }\n /**\n * Update the player duration\n */\n\n updateDuration(isLive) {\n if (this.updateDuration_) {\n this.mediaSource.removeEventListener('sourceopen', this.updateDuration_);\n this.updateDuration_ = null;\n }\n if (this.mediaSource.readyState !== 'open') {\n this.updateDuration_ = this.updateDuration.bind(this, isLive);\n this.mediaSource.addEventListener('sourceopen', this.updateDuration_);\n return;\n }\n if (isLive) {\n const seekable = this.seekable();\n if (!seekable.length) {\n return;\n } // Even in the case of a live playlist, the native MediaSource's duration should not\n // be set to Infinity (even though this would be expected for a live playlist), since\n // setting the native MediaSource's duration to infinity ends up with consequences to\n // seekable behavior. See https://github.com/w3c/media-source/issues/5 for details.\n //\n // This is resolved in the spec by https://github.com/w3c/media-source/pull/92,\n // however, few browsers have support for setLiveSeekableRange()\n // https://developer.mozilla.org/en-US/docs/Web/API/MediaSource/setLiveSeekableRange\n //\n // Until a time when the duration of the media source can be set to infinity, and a\n // seekable range specified across browsers, the duration should be greater than or\n // equal to the last possible seekable value.\n // MediaSource duration starts as NaN\n // It is possible (and probable) that this case will never be reached for many\n // sources, since the MediaSource reports duration as the highest value without\n // accounting for timestamp offset. For example, if the timestamp offset is -100 and\n // we buffered times 0 to 100 with real times of 100 to 200, even though current\n // time will be between 0 and 100, the native media source may report the duration\n // as 200. However, since we report duration separate from the media source (as\n // Infinity), and as long as the native media source duration value is greater than\n // our reported seekable range, seeks will work as expected. 
The large number as\n // duration for live is actually a strategy used by some players to work around the\n // issue of live seekable ranges cited above.\n\n if (isNaN(this.mediaSource.duration) || this.mediaSource.duration < seekable.end(seekable.length - 1)) {\n this.sourceUpdater_.setDuration(seekable.end(seekable.length - 1));\n }\n return;\n }\n const buffered = this.tech_.buffered();\n let duration = Vhs$1.Playlist.duration(this.mainPlaylistLoader_.media());\n if (buffered.length > 0) {\n duration = Math.max(duration, buffered.end(buffered.length - 1));\n }\n if (this.mediaSource.duration !== duration) {\n this.sourceUpdater_.setDuration(duration);\n }\n }\n /**\n * dispose of the PlaylistController and everything\n * that it controls\n */\n\n dispose() {\n this.trigger('dispose');\n this.decrypter_.terminate();\n this.mainPlaylistLoader_.dispose();\n this.mainSegmentLoader_.dispose();\n this.contentSteeringController_.dispose();\n this.keyStatusMap_.clear();\n if (this.loadOnPlay_) {\n this.tech_.off('play', this.loadOnPlay_);\n }\n ['AUDIO', 'SUBTITLES'].forEach(type => {\n const groups = this.mediaTypes_[type].groups;\n for (const id in groups) {\n groups[id].forEach(group => {\n if (group.playlistLoader) {\n group.playlistLoader.dispose();\n }\n });\n }\n });\n this.audioSegmentLoader_.dispose();\n this.subtitleSegmentLoader_.dispose();\n this.sourceUpdater_.dispose();\n this.timelineChangeController_.dispose();\n this.stopABRTimer_();\n if (this.updateDuration_) {\n this.mediaSource.removeEventListener('sourceopen', this.updateDuration_);\n }\n this.mediaSource.removeEventListener('durationchange', this.handleDurationChange_); // load the media source into the player\n\n this.mediaSource.removeEventListener('sourceopen', this.handleSourceOpen_);\n this.mediaSource.removeEventListener('sourceended', this.handleSourceEnded_);\n this.off();\n }\n /**\n * return the main playlist object if we have one\n *\n * @return {Object} the main playlist object that we parsed\n */\n\n main() {\n return this.mainPlaylistLoader_.main;\n }\n /**\n * return the currently selected playlist\n *\n * @return {Object} the currently selected playlist object that we parsed\n */\n\n media() {\n // playlist loader will not return media if it has not been fully loaded\n return this.mainPlaylistLoader_.media() || this.initialMedia_;\n }\n areMediaTypesKnown_() {\n const usingAudioLoader = !!this.mediaTypes_.AUDIO.activePlaylistLoader;\n const hasMainMediaInfo = !!this.mainSegmentLoader_.getCurrentMediaInfo_(); // if we are not using an audio loader, then we have audio media info\n // otherwise check on the segment loader.\n\n const hasAudioMediaInfo = !usingAudioLoader ? 
true : !!this.audioSegmentLoader_.getCurrentMediaInfo_(); // one or both loaders have not loaded sufficiently to get codecs\n\n if (!hasMainMediaInfo || !hasAudioMediaInfo) {\n return false;\n }\n return true;\n }\n getCodecsOrExclude_() {\n const media = {\n main: this.mainSegmentLoader_.getCurrentMediaInfo_() || {},\n audio: this.audioSegmentLoader_.getCurrentMediaInfo_() || {}\n };\n const playlist = this.mainSegmentLoader_.getPendingSegmentPlaylist() || this.media(); // set \"main\" media equal to video\n\n media.video = media.main;\n const playlistCodecs = codecsForPlaylist(this.main(), playlist);\n const codecs = {};\n const usingAudioLoader = !!this.mediaTypes_.AUDIO.activePlaylistLoader;\n if (media.main.hasVideo) {\n codecs.video = playlistCodecs.video || media.main.videoCodec || DEFAULT_VIDEO_CODEC;\n }\n if (media.main.isMuxed) {\n codecs.video += `,${playlistCodecs.audio || media.main.audioCodec || DEFAULT_AUDIO_CODEC}`;\n }\n if (media.main.hasAudio && !media.main.isMuxed || media.audio.hasAudio || usingAudioLoader) {\n codecs.audio = playlistCodecs.audio || media.main.audioCodec || media.audio.audioCodec || DEFAULT_AUDIO_CODEC; // set audio isFmp4 so we use the correct \"supports\" function below\n\n media.audio.isFmp4 = media.main.hasAudio && !media.main.isMuxed ? media.main.isFmp4 : media.audio.isFmp4;\n } // no codecs, no playback.\n\n if (!codecs.audio && !codecs.video) {\n this.excludePlaylist({\n playlistToExclude: playlist,\n error: {\n message: 'Could not determine codecs for playlist.'\n },\n playlistExclusionDuration: Infinity\n });\n return;\n } // fmp4 relies on browser support, while ts relies on muxer support\n\n const supportFunction = (isFmp4, codec) => isFmp4 ? browserSupportsCodec(codec) : muxerSupportsCodec(codec);\n const unsupportedCodecs = {};\n let unsupportedAudio;\n ['video', 'audio'].forEach(function (type) {\n if (codecs.hasOwnProperty(type) && !supportFunction(media[type].isFmp4, codecs[type])) {\n const supporter = media[type].isFmp4 ? 
'browser' : 'muxer';\n unsupportedCodecs[supporter] = unsupportedCodecs[supporter] || [];\n unsupportedCodecs[supporter].push(codecs[type]);\n if (type === 'audio') {\n unsupportedAudio = supporter;\n }\n }\n });\n if (usingAudioLoader && unsupportedAudio && playlist.attributes.AUDIO) {\n const audioGroup = playlist.attributes.AUDIO;\n this.main().playlists.forEach(variant => {\n const variantAudioGroup = variant.attributes && variant.attributes.AUDIO;\n if (variantAudioGroup === audioGroup && variant !== playlist) {\n variant.excludeUntil = Infinity;\n }\n });\n this.logger_(`excluding audio group ${audioGroup} as ${unsupportedAudio} does not support codec(s): \"${codecs.audio}\"`);\n } // if we have any unsupported codecs exclude this playlist.\n\n if (Object.keys(unsupportedCodecs).length) {\n const message = Object.keys(unsupportedCodecs).reduce((acc, supporter) => {\n if (acc) {\n acc += ', ';\n }\n acc += `${supporter} does not support codec(s): \"${unsupportedCodecs[supporter].join(',')}\"`;\n return acc;\n }, '') + '.';\n this.excludePlaylist({\n playlistToExclude: playlist,\n error: {\n internal: true,\n message\n },\n playlistExclusionDuration: Infinity\n });\n return;\n } // check if codec switching is happening\n\n if (this.sourceUpdater_.hasCreatedSourceBuffers() && !this.sourceUpdater_.canChangeType()) {\n const switchMessages = [];\n ['video', 'audio'].forEach(type => {\n const newCodec = (parseCodecs(this.sourceUpdater_.codecs[type] || '')[0] || {}).type;\n const oldCodec = (parseCodecs(codecs[type] || '')[0] || {}).type;\n if (newCodec && oldCodec && newCodec.toLowerCase() !== oldCodec.toLowerCase()) {\n switchMessages.push(`\"${this.sourceUpdater_.codecs[type]}\" -> \"${codecs[type]}\"`);\n }\n });\n if (switchMessages.length) {\n this.excludePlaylist({\n playlistToExclude: playlist,\n error: {\n message: `Codec switching not supported: ${switchMessages.join(', ')}.`,\n internal: true\n },\n playlistExclusionDuration: Infinity\n });\n return;\n }\n } // TODO: when using the muxer shouldn't we just return\n // the codecs that the muxer outputs?\n\n return codecs;\n }\n /**\n * Create source buffers and exclude any incompatible renditions.\n *\n * @private\n */\n\n tryToCreateSourceBuffers_() {\n // media source is not ready yet or sourceBuffers are already\n // created.\n if (this.mediaSource.readyState !== 'open' || this.sourceUpdater_.hasCreatedSourceBuffers()) {\n return;\n }\n if (!this.areMediaTypesKnown_()) {\n return;\n }\n const codecs = this.getCodecsOrExclude_(); // no codecs means that the playlist was excluded\n\n if (!codecs) {\n return;\n }\n this.sourceUpdater_.createSourceBuffers(codecs);\n const codecString = [codecs.video, codecs.audio].filter(Boolean).join(',');\n this.excludeIncompatibleVariants_(codecString);\n }\n /**\n * Excludes playlists with codecs that are unsupported by the muxer and browser.\n */\n\n excludeUnsupportedVariants_() {\n const playlists = this.main().playlists;\n const ids = []; // TODO: why don't we have a property to loop through all\n // playlists? 
Why did we ever mix indexes and keys?\n\n Object.keys(playlists).forEach(key => {\n const variant = playlists[key]; // check if we already processed this playlist.\n\n if (ids.indexOf(variant.id) !== -1) {\n return;\n }\n ids.push(variant.id);\n const codecs = codecsForPlaylist(this.main, variant);\n const unsupported = [];\n if (codecs.audio && !muxerSupportsCodec(codecs.audio) && !browserSupportsCodec(codecs.audio)) {\n unsupported.push(`audio codec ${codecs.audio}`);\n }\n if (codecs.video && !muxerSupportsCodec(codecs.video) && !browserSupportsCodec(codecs.video)) {\n unsupported.push(`video codec ${codecs.video}`);\n }\n if (codecs.text && codecs.text === 'stpp.ttml.im1t') {\n unsupported.push(`text codec ${codecs.text}`);\n }\n if (unsupported.length) {\n variant.excludeUntil = Infinity;\n this.logger_(`excluding ${variant.id} for unsupported: ${unsupported.join(', ')}`);\n }\n });\n }\n /**\n * Exclude playlists that are known to be codec or\n * stream-incompatible with the SourceBuffer configuration. For\n * instance, Media Source Extensions would cause the video element to\n * stall waiting for video data if you switched from a variant with\n * video and audio to an audio-only one.\n *\n * @param {Object} media a media playlist compatible with the current\n * set of SourceBuffers. Variants in the current main playlist that\n * do not appear to have compatible codec or stream configurations\n * will be excluded from the default playlist selection algorithm\n * indefinitely.\n * @private\n */\n\n excludeIncompatibleVariants_(codecString) {\n const ids = [];\n const playlists = this.main().playlists;\n const codecs = unwrapCodecList(parseCodecs(codecString));\n const codecCount_ = codecCount(codecs);\n const videoDetails = codecs.video && parseCodecs(codecs.video)[0] || null;\n const audioDetails = codecs.audio && parseCodecs(codecs.audio)[0] || null;\n Object.keys(playlists).forEach(key => {\n const variant = playlists[key]; // check if we already processed this playlist.\n // or it if it is already excluded forever.\n\n if (ids.indexOf(variant.id) !== -1 || variant.excludeUntil === Infinity) {\n return;\n }\n ids.push(variant.id);\n const exclusionReasons = []; // get codecs from the playlist for this variant\n\n const variantCodecs = codecsForPlaylist(this.mainPlaylistLoader_.main, variant);\n const variantCodecCount = codecCount(variantCodecs); // if no codecs are listed, we cannot determine that this\n // variant is incompatible. 
Wait for mux.js to probe\n\n if (!variantCodecs.audio && !variantCodecs.video) {\n return;\n } // TODO: we can support this by removing the\n // old media source and creating a new one, but it will take some work.\n // The number of streams cannot change\n\n if (variantCodecCount !== codecCount_) {\n exclusionReasons.push(`codec count \"${variantCodecCount}\" !== \"${codecCount_}\"`);\n } // only exclude playlists by codec change, if codecs cannot switch\n // during playback.\n\n if (!this.sourceUpdater_.canChangeType()) {\n const variantVideoDetails = variantCodecs.video && parseCodecs(variantCodecs.video)[0] || null;\n const variantAudioDetails = variantCodecs.audio && parseCodecs(variantCodecs.audio)[0] || null; // the video codec cannot change\n\n if (variantVideoDetails && videoDetails && variantVideoDetails.type.toLowerCase() !== videoDetails.type.toLowerCase()) {\n exclusionReasons.push(`video codec \"${variantVideoDetails.type}\" !== \"${videoDetails.type}\"`);\n } // the audio codec cannot change\n\n if (variantAudioDetails && audioDetails && variantAudioDetails.type.toLowerCase() !== audioDetails.type.toLowerCase()) {\n exclusionReasons.push(`audio codec \"${variantAudioDetails.type}\" !== \"${audioDetails.type}\"`);\n }\n }\n if (exclusionReasons.length) {\n variant.excludeUntil = Infinity;\n this.logger_(`excluding ${variant.id}: ${exclusionReasons.join(' && ')}`);\n }\n });\n }\n updateAdCues_(media) {\n let offset = 0;\n const seekable = this.seekable();\n if (seekable.length) {\n offset = seekable.start(0);\n }\n updateAdCues(media, this.cueTagsTrack_, offset);\n }\n /**\n * Calculates the desired forward buffer length based on current time\n *\n * @return {number} Desired forward buffer length in seconds\n */\n\n goalBufferLength() {\n const currentTime = this.tech_.currentTime();\n const initial = Config.GOAL_BUFFER_LENGTH;\n const rate = Config.GOAL_BUFFER_LENGTH_RATE;\n const max = Math.max(initial, Config.MAX_GOAL_BUFFER_LENGTH);\n return Math.min(initial + currentTime * rate, max);\n }\n /**\n * Calculates the desired buffer low water line based on current time\n *\n * @return {number} Desired buffer low water line in seconds\n */\n\n bufferLowWaterLine() {\n const currentTime = this.tech_.currentTime();\n const initial = Config.BUFFER_LOW_WATER_LINE;\n const rate = Config.BUFFER_LOW_WATER_LINE_RATE;\n const max = Math.max(initial, Config.MAX_BUFFER_LOW_WATER_LINE);\n const newMax = Math.max(initial, Config.EXPERIMENTAL_MAX_BUFFER_LOW_WATER_LINE);\n return Math.min(initial + currentTime * rate, this.bufferBasedABR ? newMax : max);\n }\n bufferHighWaterLine() {\n return Config.BUFFER_HIGH_WATER_LINE;\n }\n addDateRangesToTextTrack_(dateRanges) {\n createMetadataTrackIfNotExists(this.inbandTextTracks_, 'com.apple.streaming', this.tech_);\n addDateRangeMetadata({\n inbandTextTracks: this.inbandTextTracks_,\n dateRanges\n });\n }\n addMetadataToTextTrack(dispatchType, metadataArray, videoDuration) {\n const timestampOffset = this.sourceUpdater_.videoBuffer ? 
this.sourceUpdater_.videoTimestampOffset() : this.sourceUpdater_.audioTimestampOffset(); // There's potentially an issue where we could double add metadata if there's a muxed\n // audio/video source with a metadata track, and an alt audio with a metadata track.\n // However, this probably won't happen, and if it does it can be handled then.\n\n createMetadataTrackIfNotExists(this.inbandTextTracks_, dispatchType, this.tech_);\n addMetadata({\n inbandTextTracks: this.inbandTextTracks_,\n metadataArray,\n timestampOffset,\n videoDuration\n });\n }\n /**\n * Utility for getting the pathway or service location from an HLS or DASH playlist.\n *\n * @param {Object} playlist for getting pathway from.\n * @return the pathway attribute of a playlist\n */\n\n pathwayAttribute_(playlist) {\n return playlist.attributes['PATHWAY-ID'] || playlist.attributes.serviceLocation;\n }\n /**\n * Initialize available pathways and apply the tag properties.\n */\n\n initContentSteeringController_() {\n const main = this.main();\n if (!main.contentSteering) {\n return;\n }\n for (const playlist of main.playlists) {\n this.contentSteeringController_.addAvailablePathway(this.pathwayAttribute_(playlist));\n }\n this.contentSteeringController_.assignTagProperties(main.uri, main.contentSteering); // request the steering manifest immediately if queryBeforeStart is set.\n\n if (this.contentSteeringController_.queryBeforeStart) {\n // When queryBeforeStart is true, initial request should omit steering parameters.\n this.contentSteeringController_.requestSteeringManifest(true);\n return;\n } // otherwise start content steering after playback starts\n\n this.tech_.one('canplay', () => {\n this.contentSteeringController_.requestSteeringManifest();\n });\n }\n /**\n * Reset the content steering controller and re-init.\n */\n\n resetContentSteeringController_() {\n this.contentSteeringController_.clearAvailablePathways();\n this.contentSteeringController_.dispose();\n this.initContentSteeringController_();\n }\n /**\n * Attaches the listeners for content steering.\n */\n\n attachContentSteeringListeners_() {\n this.contentSteeringController_.on('content-steering', this.excludeThenChangePathway_.bind(this));\n if (this.sourceType_ === 'dash') {\n this.mainPlaylistLoader_.on('loadedplaylist', () => {\n const main = this.main(); // check if steering tag or pathways changed.\n\n const didDashTagChange = this.contentSteeringController_.didDASHTagChange(main.uri, main.contentSteering);\n const didPathwaysChange = () => {\n const availablePathways = this.contentSteeringController_.getAvailablePathways();\n const newPathways = [];\n for (const playlist of main.playlists) {\n const serviceLocation = playlist.attributes.serviceLocation;\n if (serviceLocation) {\n newPathways.push(serviceLocation);\n if (!availablePathways.has(serviceLocation)) {\n return true;\n }\n }\n } // If we have no new serviceLocations and previously had availablePathways\n\n if (!newPathways.length && availablePathways.size) {\n return true;\n }\n return false;\n };\n if (didDashTagChange || didPathwaysChange()) {\n this.resetContentSteeringController_();\n }\n });\n }\n }\n /**\n * Simple exclude and change playlist logic for content steering.\n */\n\n excludeThenChangePathway_() {\n const currentPathway = this.contentSteeringController_.getPathway();\n if (!currentPathway) {\n return;\n }\n this.handlePathwayClones_();\n const main = this.main();\n const playlists = main.playlists;\n const ids = new Set();\n let didEnablePlaylists = false;\n 
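// Re-enable playlists previously excluded for 'content-steering' that no longer point to a different\n // pathway, and exclude not-yet-excluded playlists whose pathway differs from the currently selected one.\n 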
Object.keys(playlists).forEach(key => {\n const variant = playlists[key];\n const pathwayId = this.pathwayAttribute_(variant);\n const differentPathwayId = pathwayId && currentPathway !== pathwayId;\n const steeringExclusion = variant.excludeUntil === Infinity && variant.lastExcludeReason_ === 'content-steering';\n if (steeringExclusion && !differentPathwayId) {\n delete variant.excludeUntil;\n delete variant.lastExcludeReason_;\n didEnablePlaylists = true;\n }\n const noExcludeUntil = !variant.excludeUntil && variant.excludeUntil !== Infinity;\n const shouldExclude = !ids.has(variant.id) && differentPathwayId && noExcludeUntil;\n if (!shouldExclude) {\n return;\n }\n ids.add(variant.id);\n variant.excludeUntil = Infinity;\n variant.lastExcludeReason_ = 'content-steering'; // TODO: kind of spammy, maybe move this.\n\n this.logger_(`excluding ${variant.id} for ${variant.lastExcludeReason_}`);\n });\n if (this.contentSteeringController_.manifestType_ === 'DASH') {\n Object.keys(this.mediaTypes_).forEach(key => {\n const type = this.mediaTypes_[key];\n if (type.activePlaylistLoader) {\n const currentPlaylist = type.activePlaylistLoader.media_; // Check if the current media playlist matches the current CDN\n\n if (currentPlaylist && currentPlaylist.attributes.serviceLocation !== currentPathway) {\n didEnablePlaylists = true;\n }\n }\n });\n }\n if (didEnablePlaylists) {\n this.changeSegmentPathway_();\n }\n }\n /**\n * Add, update, or delete playlists and media groups for\n * the pathway clones for HLS Content Steering.\n *\n * See https://datatracker.ietf.org/doc/draft-pantos-hls-rfc8216bis/\n *\n * NOTE: Pathway cloning does not currently support the `PER_VARIANT_URIS` and\n * `PER_RENDITION_URIS` as we do not handle `STABLE-VARIANT-ID` or\n * `STABLE-RENDITION-ID` values.\n */\n\n handlePathwayClones_() {\n const main = this.main();\n const playlists = main.playlists;\n const currentPathwayClones = this.contentSteeringController_.currentPathwayClones;\n const nextPathwayClones = this.contentSteeringController_.nextPathwayClones;\n const hasClones = currentPathwayClones && currentPathwayClones.size || nextPathwayClones && nextPathwayClones.size;\n if (!hasClones) {\n return;\n }\n for (const [id, clone] of currentPathwayClones.entries()) {\n const newClone = nextPathwayClones.get(id); // Delete the old pathway clone.\n\n if (!newClone) {\n this.mainPlaylistLoader_.updateOrDeleteClone(clone);\n this.contentSteeringController_.excludePathway(id);\n }\n }\n for (const [id, clone] of nextPathwayClones.entries()) {\n const oldClone = currentPathwayClones.get(id); // Create a new pathway if it is a new pathway clone object.\n\n if (!oldClone) {\n const playlistsToClone = playlists.filter(p => {\n return p.attributes['PATHWAY-ID'] === clone['BASE-ID'];\n });\n playlistsToClone.forEach(p => {\n this.mainPlaylistLoader_.addClonePathway(clone, p);\n });\n this.contentSteeringController_.addAvailablePathway(id);\n continue;\n } // There have not been changes to the pathway clone object, so skip.\n\n if (this.equalPathwayClones_(oldClone, clone)) {\n continue;\n } // Update a preexisting cloned pathway.\n // True is set for the update flag.\n\n this.mainPlaylistLoader_.updateOrDeleteClone(clone, true);\n this.contentSteeringController_.addAvailablePathway(id);\n } // Deep copy contents of next to current pathways.\n\n this.contentSteeringController_.currentPathwayClones = new Map(JSON.parse(JSON.stringify([...nextPathwayClones])));\n }\n /**\n * Determines whether two pathway clone objects are 
equivalent.\n *\n * @param {Object} a The first pathway clone object.\n * @param {Object} b The second pathway clone object.\n * @return {boolean} True if the pathway clone objects are equal, false otherwise.\n */\n\n equalPathwayClones_(a, b) {\n if (a['BASE-ID'] !== b['BASE-ID'] || a.ID !== b.ID || a['URI-REPLACEMENT'].HOST !== b['URI-REPLACEMENT'].HOST) {\n return false;\n }\n const aParams = a['URI-REPLACEMENT'].PARAMS;\n const bParams = b['URI-REPLACEMENT'].PARAMS; // We need to iterate through both lists of params because one could be\n // missing a parameter that the other has.\n\n for (const p in aParams) {\n if (aParams[p] !== bParams[p]) {\n return false;\n }\n }\n for (const p in bParams) {\n if (aParams[p] !== bParams[p]) {\n return false;\n }\n }\n return true;\n }\n /**\n * Changes the current playlists for audio, video and subtitles after a new pathway\n * is chosen from content steering.\n */\n\n changeSegmentPathway_() {\n const nextPlaylist = this.selectPlaylist();\n this.pauseLoading(); // Switch audio and text track playlists if necessary in DASH\n\n if (this.contentSteeringController_.manifestType_ === 'DASH') {\n this.switchMediaForDASHContentSteering_();\n }\n this.switchMedia_(nextPlaylist, 'content-steering');\n }\n /**\n * Iterates through playlists and check their keyId set and compare with the\n * keyStatusMap, only enable playlists that have a usable key. If the playlist\n * has no keyId leave it enabled by default.\n */\n\n excludeNonUsablePlaylistsByKeyId_() {\n if (!this.mainPlaylistLoader_ || !this.mainPlaylistLoader_.main) {\n return;\n }\n let nonUsableKeyStatusCount = 0;\n const NON_USABLE = 'non-usable';\n this.mainPlaylistLoader_.main.playlists.forEach(playlist => {\n const keyIdSet = this.mainPlaylistLoader_.getKeyIdSet(playlist); // If the playlist doesn't have keyIDs lets not exclude it.\n\n if (!keyIdSet || !keyIdSet.size) {\n return;\n }\n keyIdSet.forEach(key => {\n const USABLE = 'usable';\n const hasUsableKeyStatus = this.keyStatusMap_.has(key) && this.keyStatusMap_.get(key) === USABLE;\n const nonUsableExclusion = playlist.lastExcludeReason_ === NON_USABLE && playlist.excludeUntil === Infinity;\n if (!hasUsableKeyStatus) {\n // Only exclude playlists that haven't already been excluded as non-usable.\n if (playlist.excludeUntil !== Infinity && playlist.lastExcludeReason_ !== NON_USABLE) {\n playlist.excludeUntil = Infinity;\n playlist.lastExcludeReason_ = NON_USABLE;\n this.logger_(`excluding playlist ${playlist.id} because the key ID ${key} doesn't exist in the keyStatusMap or is not ${USABLE}`);\n } // count all nonUsableKeyStatus\n\n nonUsableKeyStatusCount++;\n } else if (hasUsableKeyStatus && nonUsableExclusion) {\n delete playlist.excludeUntil;\n delete playlist.lastExcludeReason_;\n this.logger_(`enabling playlist ${playlist.id} because key ID ${key} is ${USABLE}`);\n }\n });\n }); // If for whatever reason every playlist has a non usable key status. 
Lets try re-including the SD renditions as a failsafe.\n\n if (nonUsableKeyStatusCount >= this.mainPlaylistLoader_.main.playlists.length) {\n this.mainPlaylistLoader_.main.playlists.forEach(playlist => {\n const isNonHD = playlist && playlist.attributes && playlist.attributes.RESOLUTION && playlist.attributes.RESOLUTION.height < 720;\n const excludedForNonUsableKey = playlist.excludeUntil === Infinity && playlist.lastExcludeReason_ === NON_USABLE;\n if (isNonHD && excludedForNonUsableKey) {\n // Only delete the excludeUntil so we don't try and re-exclude these playlists.\n delete playlist.excludeUntil;\n videojs.log.warn(`enabling non-HD playlist ${playlist.id} because all playlists were excluded due to ${NON_USABLE} key IDs`);\n }\n });\n }\n }\n /**\n * Adds a keystatus to the keystatus map, tries to convert to string if necessary.\n *\n * @param {any} keyId the keyId to add a status for\n * @param {string} status the status of the keyId\n */\n\n addKeyStatus_(keyId, status) {\n const isString = typeof keyId === 'string';\n const keyIdHexString = isString ? keyId : bufferToHexString(keyId);\n const formattedKeyIdString = keyIdHexString.slice(0, 32).toLowerCase();\n this.logger_(`KeyStatus '${status}' with key ID ${formattedKeyIdString} added to the keyStatusMap`);\n this.keyStatusMap_.set(formattedKeyIdString, status);\n }\n /**\n * Utility function for adding key status to the keyStatusMap and filtering usable encrypted playlists.\n *\n * @param {any} keyId the keyId from the keystatuschange event\n * @param {string} status the key status string\n */\n\n updatePlaylistByKeyStatus(keyId, status) {\n this.addKeyStatus_(keyId, status);\n if (!this.waitingForFastQualityPlaylistReceived_) {\n this.excludeNonUsableThenChangePlaylist_();\n } // Listen to loadedplaylist with a single listener and check for new contentProtection elements when a playlist is updated.\n\n this.mainPlaylistLoader_.off('loadedplaylist', this.excludeNonUsableThenChangePlaylist_.bind(this));\n this.mainPlaylistLoader_.on('loadedplaylist', this.excludeNonUsableThenChangePlaylist_.bind(this));\n }\n excludeNonUsableThenChangePlaylist_() {\n this.excludeNonUsablePlaylistsByKeyId_();\n this.fastQualityChange_();\n }\n}\n\n/**\n * Returns a function that acts as the Enable/disable playlist function.\n *\n * @param {PlaylistLoader} loader - The main playlist loader\n * @param {string} playlistID - id of the playlist\n * @param {Function} changePlaylistFn - A function to be called after a\n * playlist's enabled-state has been changed. 
Will NOT be called if a\n * playlist's enabled-state is unchanged\n * @param {boolean=} enable - Value to set the playlist enabled-state to\n * or if undefined returns the current enabled-state for the playlist\n * @return {Function} Function for setting/getting enabled\n */\n\nconst enableFunction = (loader, playlistID, changePlaylistFn) => enable => {\n const playlist = loader.main.playlists[playlistID];\n const incompatible = isIncompatible(playlist);\n const currentlyEnabled = isEnabled(playlist);\n if (typeof enable === 'undefined') {\n return currentlyEnabled;\n }\n if (enable) {\n delete playlist.disabled;\n } else {\n playlist.disabled = true;\n }\n if (enable !== currentlyEnabled && !incompatible) {\n // Ensure the outside world knows about our changes\n changePlaylistFn(playlist);\n if (enable) {\n loader.trigger('renditionenabled');\n } else {\n loader.trigger('renditiondisabled');\n }\n }\n return enable;\n};\n/**\n * The representation object encapsulates the publicly visible information\n * in a media playlist along with a setter/getter-type function (enabled)\n * for changing the enabled-state of a particular playlist entry\n *\n * @class Representation\n */\n\nclass Representation {\n constructor(vhsHandler, playlist, id) {\n const {\n playlistController_: pc\n } = vhsHandler;\n const qualityChangeFunction = pc.fastQualityChange_.bind(pc); // some playlist attributes are optional\n\n if (playlist.attributes) {\n const resolution = playlist.attributes.RESOLUTION;\n this.width = resolution && resolution.width;\n this.height = resolution && resolution.height;\n this.bandwidth = playlist.attributes.BANDWIDTH;\n this.frameRate = playlist.attributes['FRAME-RATE'];\n }\n this.codecs = codecsForPlaylist(pc.main(), playlist);\n this.playlist = playlist; // The id is simply the ordinality of the media playlist\n // within the main playlist\n\n this.id = id; // Partially-apply the enableFunction to create a playlist-\n // specific variant\n\n this.enabled = enableFunction(vhsHandler.playlists, playlist.id, qualityChangeFunction);\n }\n}\n/**\n * A mixin function that adds the `representations` api to an instance\n * of the VhsHandler class\n *\n * @param {VhsHandler} vhsHandler - An instance of VhsHandler to add the\n * representation API into\n */\n\nconst renditionSelectionMixin = function (vhsHandler) {\n // Add a single API-specific function to the VhsHandler instance\n vhsHandler.representations = () => {\n const main = vhsHandler.playlistController_.main();\n const playlists = isAudioOnly(main) ? vhsHandler.playlistController_.getAudioTrackPlaylists_() : main.playlists;\n if (!playlists) {\n return [];\n }\n return playlists.filter(media => !isIncompatible(media)).map((e, i) => new Representation(vhsHandler, e, e.id));\n };\n};\n\n/**\n * @file playback-watcher.js\n *\n * Playback starts, and now my watch begins. It shall not end until my death. I shall\n * take no wait, hold no uncleared timeouts, father no bad seeks. I shall wear no crowns\n * and win no glory. I shall live and die at my post. I am the corrector of the underflow.\n * I am the watcher of gaps. I am the shield that guards the realms of seekable. 
I pledge\n * my life and honor to the Playback Watch, for this Player and all the Players to come.\n */\n\nconst timerCancelEvents = ['seeking', 'seeked', 'pause', 'playing', 'error'];\n/**\n * @class PlaybackWatcher\n */\n\nclass PlaybackWatcher {\n /**\n * Represents an PlaybackWatcher object.\n *\n * @class\n * @param {Object} options an object that includes the tech and settings\n */\n constructor(options) {\n this.playlistController_ = options.playlistController;\n this.tech_ = options.tech;\n this.seekable = options.seekable;\n this.allowSeeksWithinUnsafeLiveWindow = options.allowSeeksWithinUnsafeLiveWindow;\n this.liveRangeSafeTimeDelta = options.liveRangeSafeTimeDelta;\n this.media = options.media;\n this.consecutiveUpdates = 0;\n this.lastRecordedTime = null;\n this.checkCurrentTimeTimeout_ = null;\n this.logger_ = logger('PlaybackWatcher');\n this.logger_('initialize');\n const playHandler = () => this.monitorCurrentTime_();\n const canPlayHandler = () => this.monitorCurrentTime_();\n const waitingHandler = () => this.techWaiting_();\n const cancelTimerHandler = () => this.resetTimeUpdate_();\n const pc = this.playlistController_;\n const loaderTypes = ['main', 'subtitle', 'audio'];\n const loaderChecks = {};\n loaderTypes.forEach(type => {\n loaderChecks[type] = {\n reset: () => this.resetSegmentDownloads_(type),\n updateend: () => this.checkSegmentDownloads_(type)\n };\n pc[`${type}SegmentLoader_`].on('appendsdone', loaderChecks[type].updateend); // If a rendition switch happens during a playback stall where the buffer\n // isn't changing we want to reset. We cannot assume that the new rendition\n // will also be stalled, until after new appends.\n\n pc[`${type}SegmentLoader_`].on('playlistupdate', loaderChecks[type].reset); // Playback stalls should not be detected right after seeking.\n // This prevents one segment playlists (single vtt or single segment content)\n // from being detected as stalling. As the buffer will not change in those cases, since\n // the buffer is the entire video duration.\n\n this.tech_.on(['seeked', 'seeking'], loaderChecks[type].reset);\n });\n /**\n * We check if a seek was into a gap through the following steps:\n * 1. We get a seeking event and we do not get a seeked event. This means that\n * a seek was attempted but not completed.\n * 2. We run `fixesBadSeeks_` on segment loader appends. This means that we already\n * removed everything from our buffer and appended a segment, and should be ready\n * to check for gaps.\n */\n\n const setSeekingHandlers = fn => {\n ['main', 'audio'].forEach(type => {\n pc[`${type}SegmentLoader_`][fn]('appended', this.seekingAppendCheck_);\n });\n };\n this.seekingAppendCheck_ = () => {\n if (this.fixesBadSeeks_()) {\n this.consecutiveUpdates = 0;\n this.lastRecordedTime = this.tech_.currentTime();\n setSeekingHandlers('off');\n }\n };\n this.clearSeekingAppendCheck_ = () => setSeekingHandlers('off');\n this.watchForBadSeeking_ = () => {\n this.clearSeekingAppendCheck_();\n setSeekingHandlers('on');\n };\n this.tech_.on('seeked', this.clearSeekingAppendCheck_);\n this.tech_.on('seeking', this.watchForBadSeeking_);\n this.tech_.on('waiting', waitingHandler);\n this.tech_.on(timerCancelEvents, cancelTimerHandler);\n this.tech_.on('canplay', canPlayHandler);\n /*\n An edge case exists that results in gaps not being skipped when they exist at the beginning of a stream. 
This case\n is surfaced in one of two ways:\n 1) The `waiting` event is fired before the player has buffered content, making it impossible\n to find or skip the gap. The `waiting` event is followed by a `play` event. On first play\n we can check if playback is stalled due to a gap, and skip the gap if necessary.\n 2) A source with a gap at the beginning of the stream is loaded programatically while the player\n is in a playing state. To catch this case, it's important that our one-time play listener is setup\n even if the player is in a playing state\n */\n\n this.tech_.one('play', playHandler); // Define the dispose function to clean up our events\n\n this.dispose = () => {\n this.clearSeekingAppendCheck_();\n this.logger_('dispose');\n this.tech_.off('waiting', waitingHandler);\n this.tech_.off(timerCancelEvents, cancelTimerHandler);\n this.tech_.off('canplay', canPlayHandler);\n this.tech_.off('play', playHandler);\n this.tech_.off('seeking', this.watchForBadSeeking_);\n this.tech_.off('seeked', this.clearSeekingAppendCheck_);\n loaderTypes.forEach(type => {\n pc[`${type}SegmentLoader_`].off('appendsdone', loaderChecks[type].updateend);\n pc[`${type}SegmentLoader_`].off('playlistupdate', loaderChecks[type].reset);\n this.tech_.off(['seeked', 'seeking'], loaderChecks[type].reset);\n });\n if (this.checkCurrentTimeTimeout_) {\n window$1.clearTimeout(this.checkCurrentTimeTimeout_);\n }\n this.resetTimeUpdate_();\n };\n }\n /**\n * Periodically check current time to see if playback stopped\n *\n * @private\n */\n\n monitorCurrentTime_() {\n this.checkCurrentTime_();\n if (this.checkCurrentTimeTimeout_) {\n window$1.clearTimeout(this.checkCurrentTimeTimeout_);\n } // 42 = 24 fps // 250 is what Webkit uses // FF uses 15\n\n this.checkCurrentTimeTimeout_ = window$1.setTimeout(this.monitorCurrentTime_.bind(this), 250);\n }\n /**\n * Reset stalled download stats for a specific type of loader\n *\n * @param {string} type\n * The segment loader type to check.\n *\n * @listens SegmentLoader#playlistupdate\n * @listens Tech#seeking\n * @listens Tech#seeked\n */\n\n resetSegmentDownloads_(type) {\n const loader = this.playlistController_[`${type}SegmentLoader_`];\n if (this[`${type}StalledDownloads_`] > 0) {\n this.logger_(`resetting possible stalled download count for ${type} loader`);\n }\n this[`${type}StalledDownloads_`] = 0;\n this[`${type}Buffered_`] = loader.buffered_();\n }\n /**\n * Checks on every segment `appendsdone` to see\n * if segment appends are making progress. If they are not\n * and we are still downloading bytes. 
We exclude the playlist.\n *\n * @param {string} type\n * The segment loader type to check.\n *\n * @listens SegmentLoader#appendsdone\n */\n\n checkSegmentDownloads_(type) {\n const pc = this.playlistController_;\n const loader = pc[`${type}SegmentLoader_`];\n const buffered = loader.buffered_();\n const isBufferedDifferent = isRangeDifferent(this[`${type}Buffered_`], buffered);\n this[`${type}Buffered_`] = buffered; // if another watcher is going to fix the issue or\n // the buffered value for this loader changed\n // appends are working\n\n if (isBufferedDifferent) {\n this.resetSegmentDownloads_(type);\n return;\n }\n this[`${type}StalledDownloads_`]++;\n this.logger_(`found #${this[`${type}StalledDownloads_`]} ${type} appends that did not increase buffer (possible stalled download)`, {\n playlistId: loader.playlist_ && loader.playlist_.id,\n buffered: timeRangesToArray(buffered)\n }); // after 10 possibly stalled appends with no reset, exclude\n\n if (this[`${type}StalledDownloads_`] < 10) {\n return;\n }\n this.logger_(`${type} loader stalled download exclusion`);\n this.resetSegmentDownloads_(type);\n this.tech_.trigger({\n type: 'usage',\n name: `vhs-${type}-download-exclusion`\n });\n if (type === 'subtitle') {\n return;\n } // TODO: should we exclude audio tracks rather than main tracks\n // when type is audio?\n\n pc.excludePlaylist({\n error: {\n message: `Excessive ${type} segment downloading detected.`\n },\n playlistExclusionDuration: Infinity\n });\n }\n /**\n * The purpose of this function is to emulate the \"waiting\" event on\n * browsers that do not emit it when they are waiting for more\n * data to continue playback\n *\n * @private\n */\n\n checkCurrentTime_() {\n if (this.tech_.paused() || this.tech_.seeking()) {\n return;\n }\n const currentTime = this.tech_.currentTime();\n const buffered = this.tech_.buffered();\n if (this.lastRecordedTime === currentTime && (!buffered.length || currentTime + SAFE_TIME_DELTA >= buffered.end(buffered.length - 1))) {\n // If current time is at the end of the final buffered region, then any playback\n // stall is most likely caused by buffering in a low bandwidth environment. The tech\n // should fire a `waiting` event in this scenario, but due to browser and tech\n // inconsistencies. Calling `techWaiting_` here allows us to simulate\n // responding to a native `waiting` event when the tech fails to emit one.\n return this.techWaiting_();\n }\n if (this.consecutiveUpdates >= 5 && currentTime === this.lastRecordedTime) {\n this.consecutiveUpdates++;\n this.waiting_();\n } else if (currentTime === this.lastRecordedTime) {\n this.consecutiveUpdates++;\n } else {\n this.consecutiveUpdates = 0;\n this.lastRecordedTime = currentTime;\n }\n }\n /**\n * Resets the 'timeupdate' mechanism designed to detect that we are stalled\n *\n * @private\n */\n\n resetTimeUpdate_() {\n this.consecutiveUpdates = 0;\n }\n /**\n * Fixes situations where there's a bad seek\n *\n * @return {boolean} whether an action was taken to fix the seek\n * @private\n */\n\n fixesBadSeeks_() {\n const seeking = this.tech_.seeking();\n if (!seeking) {\n return false;\n } // TODO: It's possible that these seekable checks should be moved out of this function\n // and into a function that runs on seekablechange. 
It's also possible that we only need\n // afterSeekableWindow as the buffered check at the bottom is good enough to handle before\n // seekable range.\n\n const seekable = this.seekable();\n const currentTime = this.tech_.currentTime();\n const isAfterSeekableRange = this.afterSeekableWindow_(seekable, currentTime, this.media(), this.allowSeeksWithinUnsafeLiveWindow);\n let seekTo;\n if (isAfterSeekableRange) {\n const seekableEnd = seekable.end(seekable.length - 1); // sync to live point (if VOD, our seekable was updated and we're simply adjusting)\n\n seekTo = seekableEnd;\n }\n if (this.beforeSeekableWindow_(seekable, currentTime)) {\n const seekableStart = seekable.start(0); // sync to the beginning of the live window\n // provide a buffer of .1 seconds to handle rounding/imprecise numbers\n\n seekTo = seekableStart + (\n // if the playlist is too short and the seekable range is an exact time (can\n // happen in live with a 3 segment playlist), then don't use a time delta\n seekableStart === seekable.end(0) ? 0 : SAFE_TIME_DELTA);\n }\n if (typeof seekTo !== 'undefined') {\n this.logger_(`Trying to seek outside of seekable at time ${currentTime} with ` + `seekable range ${printableRange(seekable)}. Seeking to ` + `${seekTo}.`);\n this.tech_.setCurrentTime(seekTo);\n return true;\n }\n const sourceUpdater = this.playlistController_.sourceUpdater_;\n const buffered = this.tech_.buffered();\n const audioBuffered = sourceUpdater.audioBuffer ? sourceUpdater.audioBuffered() : null;\n const videoBuffered = sourceUpdater.videoBuffer ? sourceUpdater.videoBuffered() : null;\n const media = this.media(); // verify that at least two segment durations or one part duration have been\n // appended before checking for a gap.\n\n const minAppendedDuration = media.partTargetDuration ? media.partTargetDuration : (media.targetDuration - TIME_FUDGE_FACTOR) * 2; // verify that at least two segment durations have been\n // appended before checking for a gap.\n\n const bufferedToCheck = [audioBuffered, videoBuffered];\n for (let i = 0; i < bufferedToCheck.length; i++) {\n // skip null buffered\n if (!bufferedToCheck[i]) {\n continue;\n }\n const timeAhead = timeAheadOf(bufferedToCheck[i], currentTime); // if we are less than two video/audio segment durations or one part\n // duration behind we haven't appended enough to call this a bad seek.\n\n if (timeAhead < minAppendedDuration) {\n return false;\n }\n }\n const nextRange = findNextRange(buffered, currentTime); // we have appended enough content, but we don't have anything buffered\n // to seek over the gap\n\n if (nextRange.length === 0) {\n return false;\n }\n seekTo = nextRange.start(0) + SAFE_TIME_DELTA;\n this.logger_(`Buffered region starts (${nextRange.start(0)}) ` + ` just beyond seek point (${currentTime}). Seeking to ${seekTo}.`);\n this.tech_.setCurrentTime(seekTo);\n return true;\n }\n /**\n * Handler for situations when we determine the player is waiting.\n *\n * @private\n */\n\n waiting_() {\n if (this.techWaiting_()) {\n return;\n } // All tech waiting checks failed. Use last resort correction\n\n const currentTime = this.tech_.currentTime();\n const buffered = this.tech_.buffered();\n const currentRange = findRange(buffered, currentTime); // Sometimes the player can stall for unknown reasons within a contiguous buffered\n // region with no indication that anything is amiss (seen in Firefox). Seeking to\n // currentTime is usually enough to kickstart the player. 
This checks that the player\n // is currently within a buffered region before attempting a corrective seek.\n // Chrome does not appear to continue `timeupdate` events after a `waiting` event\n // until there is ~ 3 seconds of forward buffer available. PlaybackWatcher should also\n // make sure there is ~3 seconds of forward buffer before taking any corrective action\n // to avoid triggering an `unknownwaiting` event when the network is slow.\n\n if (currentRange.length && currentTime + 3 <= currentRange.end(0)) {\n this.resetTimeUpdate_();\n this.tech_.setCurrentTime(currentTime);\n this.logger_(`Stopped at ${currentTime} while inside a buffered region ` + `[${currentRange.start(0)} -> ${currentRange.end(0)}]. Attempting to resume ` + 'playback by seeking to the current time.'); // unknown waiting corrections may be useful for monitoring QoS\n\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-unknown-waiting'\n });\n return;\n }\n }\n /**\n * Handler for situations when the tech fires a `waiting` event\n *\n * @return {boolean}\n * True if an action (or none) was needed to correct the waiting. False if no\n * checks passed\n * @private\n */\n\n techWaiting_() {\n const seekable = this.seekable();\n const currentTime = this.tech_.currentTime();\n if (this.tech_.seeking()) {\n // Tech is seeking or already waiting on another action, no action needed\n return true;\n }\n if (this.beforeSeekableWindow_(seekable, currentTime)) {\n const livePoint = seekable.end(seekable.length - 1);\n this.logger_(`Fell out of live window at time ${currentTime}. Seeking to ` + `live point (seekable end) ${livePoint}`);\n this.resetTimeUpdate_();\n this.tech_.setCurrentTime(livePoint); // live window resyncs may be useful for monitoring QoS\n\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-live-resync'\n });\n return true;\n }\n const sourceUpdater = this.tech_.vhs.playlistController_.sourceUpdater_;\n const buffered = this.tech_.buffered();\n const videoUnderflow = this.videoUnderflow_({\n audioBuffered: sourceUpdater.audioBuffered(),\n videoBuffered: sourceUpdater.videoBuffered(),\n currentTime\n });\n if (videoUnderflow) {\n // Even though the video underflowed and was stuck in a gap, the audio overplayed\n // the gap, leading currentTime into a buffered range. Seeking to currentTime\n // allows the video to catch up to the audio position without losing any audio\n // (only suffering ~3 seconds of frozen video and a pause in audio playback).\n this.resetTimeUpdate_();\n this.tech_.setCurrentTime(currentTime); // video underflow may be useful for monitoring QoS\n\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-video-underflow'\n });\n return true;\n }\n const nextRange = findNextRange(buffered, currentTime); // check for gap\n\n if (nextRange.length > 0) {\n this.logger_(`Stopped at ${currentTime} and seeking to ${nextRange.start(0)}`);\n this.resetTimeUpdate_();\n this.skipTheGap_(currentTime);\n return true;\n } // All checks failed. 
Returning false to indicate failure to correct waiting\n\n return false;\n }\n afterSeekableWindow_(seekable, currentTime, playlist, allowSeeksWithinUnsafeLiveWindow = false) {\n if (!seekable.length) {\n // we can't make a solid case if there's no seekable, default to false\n return false;\n }\n let allowedEnd = seekable.end(seekable.length - 1) + SAFE_TIME_DELTA;\n const isLive = !playlist.endList;\n const isLLHLS = typeof playlist.partTargetDuration === 'number';\n if (isLive && (isLLHLS || allowSeeksWithinUnsafeLiveWindow)) {\n allowedEnd = seekable.end(seekable.length - 1) + playlist.targetDuration * 3;\n }\n if (currentTime > allowedEnd) {\n return true;\n }\n return false;\n }\n beforeSeekableWindow_(seekable, currentTime) {\n if (seekable.length &&\n // can't fall before 0 and 0 seekable start identifies VOD stream\n seekable.start(0) > 0 && currentTime < seekable.start(0) - this.liveRangeSafeTimeDelta) {\n return true;\n }\n return false;\n }\n videoUnderflow_({\n videoBuffered,\n audioBuffered,\n currentTime\n }) {\n // audio only content will not have video underflow :)\n if (!videoBuffered) {\n return;\n }\n let gap; // find a gap in demuxed content.\n\n if (videoBuffered.length && audioBuffered.length) {\n // in Chrome audio will continue to play for ~3s when we run out of video\n // so we have to check that the video buffer did have some buffer in the\n // past.\n const lastVideoRange = findRange(videoBuffered, currentTime - 3);\n const videoRange = findRange(videoBuffered, currentTime);\n const audioRange = findRange(audioBuffered, currentTime);\n if (audioRange.length && !videoRange.length && lastVideoRange.length) {\n gap = {\n start: lastVideoRange.end(0),\n end: audioRange.end(0)\n };\n } // find a gap in muxed content.\n } else {\n const nextRange = findNextRange(videoBuffered, currentTime); // Even if there is no available next range, there is still a possibility we are\n // stuck in a gap due to video underflow.\n\n if (!nextRange.length) {\n gap = this.gapFromVideoUnderflow_(videoBuffered, currentTime);\n }\n }\n if (gap) {\n this.logger_(`Encountered a gap in video from ${gap.start} to ${gap.end}. ` + `Seeking to current time ${currentTime}`);\n return true;\n }\n return false;\n }\n /**\n * Timer callback. If playback still has not proceeded, then we seek\n * to the start of the next buffered region.\n *\n * @private\n */\n\n skipTheGap_(scheduledCurrentTime) {\n const buffered = this.tech_.buffered();\n const currentTime = this.tech_.currentTime();\n const nextRange = findNextRange(buffered, currentTime);\n this.resetTimeUpdate_();\n if (nextRange.length === 0 || currentTime !== scheduledCurrentTime) {\n return;\n }\n this.logger_('skipTheGap_:', 'currentTime:', currentTime, 'scheduled currentTime:', scheduledCurrentTime, 'nextRange start:', nextRange.start(0)); // only seek if we still have not played\n\n this.tech_.setCurrentTime(nextRange.start(0) + TIME_FUDGE_FACTOR);\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-gap-skip'\n });\n }\n gapFromVideoUnderflow_(buffered, currentTime) {\n // At least in Chrome, if there is a gap in the video buffer, the audio will continue\n // playing for ~3 seconds after the video gap starts. This is done to account for\n // video buffer underflow/underrun (note that this is not done when there is audio\n // buffer underflow/underrun -- in that case the video will stop as soon as it\n // encounters the gap, as audio stalls are more noticeable/jarring to a user than\n // video stalls). 
The player's time will reflect the playthrough of audio, so the\n // time will appear as if we are in a buffered region, even if we are stuck in a\n // \"gap.\"\n //\n // Example:\n // video buffer: 0 => 10.1, 10.2 => 20\n // audio buffer: 0 => 20\n // overall buffer: 0 => 10.1, 10.2 => 20\n // current time: 13\n //\n // Chrome's video froze at 10 seconds, where the video buffer encountered the gap,\n // however, the audio continued playing until it reached ~3 seconds past the gap\n // (13 seconds), at which point it stops as well. Since current time is past the\n // gap, findNextRange will return no ranges.\n //\n // To check for this issue, we see if there is a gap that starts somewhere within\n // a 3 second range (3 seconds +/- 1 second) back from our current time.\n const gaps = findGaps(buffered);\n for (let i = 0; i < gaps.length; i++) {\n const start = gaps.start(i);\n const end = gaps.end(i); // gap is starts no more than 4 seconds back\n\n if (currentTime - start < 4 && currentTime - start > 2) {\n return {\n start,\n end\n };\n }\n }\n return null;\n }\n}\nconst defaultOptions = {\n errorInterval: 30,\n getSource(next) {\n const tech = this.tech({\n IWillNotUseThisInPlugins: true\n });\n const sourceObj = tech.currentSource_ || this.currentSource();\n return next(sourceObj);\n }\n};\n/**\n * Main entry point for the plugin\n *\n * @param {Player} player a reference to a videojs Player instance\n * @param {Object} [options] an object with plugin options\n * @private\n */\n\nconst initPlugin = function (player, options) {\n let lastCalled = 0;\n let seekTo = 0;\n const localOptions = merge(defaultOptions, options);\n player.ready(() => {\n player.trigger({\n type: 'usage',\n name: 'vhs-error-reload-initialized'\n });\n });\n /**\n * Player modifications to perform that must wait until `loadedmetadata`\n * has been triggered\n *\n * @private\n */\n\n const loadedMetadataHandler = function () {\n if (seekTo) {\n player.currentTime(seekTo);\n }\n };\n /**\n * Set the source on the player element, play, and seek if necessary\n *\n * @param {Object} sourceObj An object specifying the source url and mime-type to play\n * @private\n */\n\n const setSource = function (sourceObj) {\n if (sourceObj === null || sourceObj === undefined) {\n return;\n }\n seekTo = player.duration() !== Infinity && player.currentTime() || 0;\n player.one('loadedmetadata', loadedMetadataHandler);\n player.src(sourceObj);\n player.trigger({\n type: 'usage',\n name: 'vhs-error-reload'\n });\n player.play();\n };\n /**\n * Attempt to get a source from either the built-in getSource function\n * or a custom function provided via the options\n *\n * @private\n */\n\n const errorHandler = function () {\n // Do not attempt to reload the source if a source-reload occurred before\n // 'errorInterval' time has elapsed since the last source-reload\n if (Date.now() - lastCalled < localOptions.errorInterval * 1000) {\n player.trigger({\n type: 'usage',\n name: 'vhs-error-reload-canceled'\n });\n return;\n }\n if (!localOptions.getSource || typeof localOptions.getSource !== 'function') {\n videojs.log.error('ERROR: reloadSourceOnError - The option getSource must be a function!');\n return;\n }\n lastCalled = Date.now();\n return localOptions.getSource.call(player, setSource);\n };\n /**\n * Unbind any event handlers that were bound by the plugin\n *\n * @private\n */\n\n const cleanupEvents = function () {\n player.off('loadedmetadata', loadedMetadataHandler);\n player.off('error', errorHandler);\n player.off('dispose', 
cleanupEvents);\n };\n /**\n * Cleanup before re-initializing the plugin\n *\n * @param {Object} [newOptions] an object with plugin options\n * @private\n */\n\n const reinitPlugin = function (newOptions) {\n cleanupEvents();\n initPlugin(player, newOptions);\n };\n player.on('error', errorHandler);\n player.on('dispose', cleanupEvents); // Overwrite the plugin function so that we can correctly cleanup before\n // initializing the plugin\n\n player.reloadSourceOnError = reinitPlugin;\n};\n/**\n * Reload the source when an error is detected as long as there\n * wasn't an error previously within the last 30 seconds\n *\n * @param {Object} [options] an object with plugin options\n */\n\nconst reloadSourceOnError = function (options) {\n initPlugin(this, options);\n};\nvar version$4 = \"3.12.1\";\nvar version$3 = \"7.0.3\";\nvar version$2 = \"1.3.0\";\nvar version$1 = \"7.1.0\";\nvar version = \"4.0.1\";\n\n/**\n * @file videojs-http-streaming.js\n *\n * The main file for the VHS project.\n * License: https://github.com/videojs/videojs-http-streaming/blob/main/LICENSE\n */\nconst Vhs = {\n PlaylistLoader,\n Playlist,\n utils,\n STANDARD_PLAYLIST_SELECTOR: lastBandwidthSelector,\n INITIAL_PLAYLIST_SELECTOR: lowestBitrateCompatibleVariantSelector,\n lastBandwidthSelector,\n movingAverageBandwidthSelector,\n comparePlaylistBandwidth,\n comparePlaylistResolution,\n xhr: xhrFactory()\n}; // Define getter/setters for config properties\n\nObject.keys(Config).forEach(prop => {\n Object.defineProperty(Vhs, prop, {\n get() {\n videojs.log.warn(`using Vhs.${prop} is UNSAFE be sure you know what you are doing`);\n return Config[prop];\n },\n set(value) {\n videojs.log.warn(`using Vhs.${prop} is UNSAFE be sure you know what you are doing`);\n if (typeof value !== 'number' || value < 0) {\n videojs.log.warn(`value of Vhs.${prop} must be greater than or equal to 0`);\n return;\n }\n Config[prop] = value;\n }\n });\n});\nconst LOCAL_STORAGE_KEY = 'videojs-vhs';\n/**\n * Updates the selectedIndex of the QualityLevelList when a mediachange happens in vhs.\n *\n * @param {QualityLevelList} qualityLevels The QualityLevelList to update.\n * @param {PlaylistLoader} playlistLoader PlaylistLoader containing the new media info.\n * @function handleVhsMediaChange\n */\n\nconst handleVhsMediaChange = function (qualityLevels, playlistLoader) {\n const newPlaylist = playlistLoader.media();\n let selectedIndex = -1;\n for (let i = 0; i < qualityLevels.length; i++) {\n if (qualityLevels[i].id === newPlaylist.id) {\n selectedIndex = i;\n break;\n }\n }\n qualityLevels.selectedIndex_ = selectedIndex;\n qualityLevels.trigger({\n selectedIndex,\n type: 'change'\n });\n};\n/**\n * Adds quality levels to list once playlist metadata is available\n *\n * @param {QualityLevelList} qualityLevels The QualityLevelList to attach events to.\n * @param {Object} vhs Vhs object to listen to for media events.\n * @function handleVhsLoadedMetadata\n */\n\nconst handleVhsLoadedMetadata = function (qualityLevels, vhs) {\n vhs.representations().forEach(rep => {\n qualityLevels.addQualityLevel(rep);\n });\n handleVhsMediaChange(qualityLevels, vhs.playlists);\n}; // VHS is a source handler, not a tech. Make sure attempts to use it\n// as one do not cause exceptions.\n\nVhs.canPlaySource = function () {\n return videojs.log.warn('VHS is no longer a tech. 
Please remove it from ' + 'your player\\'s techOrder.');\n};\nconst emeKeySystems = (keySystemOptions, mainPlaylist, audioPlaylist) => {\n if (!keySystemOptions) {\n return keySystemOptions;\n }\n let codecs = {};\n if (mainPlaylist && mainPlaylist.attributes && mainPlaylist.attributes.CODECS) {\n codecs = unwrapCodecList(parseCodecs(mainPlaylist.attributes.CODECS));\n }\n if (audioPlaylist && audioPlaylist.attributes && audioPlaylist.attributes.CODECS) {\n codecs.audio = audioPlaylist.attributes.CODECS;\n }\n const videoContentType = getMimeForCodec(codecs.video);\n const audioContentType = getMimeForCodec(codecs.audio); // upsert the content types based on the selected playlist\n\n const keySystemContentTypes = {};\n for (const keySystem in keySystemOptions) {\n keySystemContentTypes[keySystem] = {};\n if (audioContentType) {\n keySystemContentTypes[keySystem].audioContentType = audioContentType;\n }\n if (videoContentType) {\n keySystemContentTypes[keySystem].videoContentType = videoContentType;\n } // Default to using the video playlist's PSSH even though they may be different, as\n // videojs-contrib-eme will only accept one in the options.\n //\n // This shouldn't be an issue for most cases as early intialization will handle all\n // unique PSSH values, and if they aren't, then encrypted events should have the\n // specific information needed for the unique license.\n\n if (mainPlaylist.contentProtection && mainPlaylist.contentProtection[keySystem] && mainPlaylist.contentProtection[keySystem].pssh) {\n keySystemContentTypes[keySystem].pssh = mainPlaylist.contentProtection[keySystem].pssh;\n } // videojs-contrib-eme accepts the option of specifying: 'com.some.cdm': 'url'\n // so we need to prevent overwriting the URL entirely\n\n if (typeof keySystemOptions[keySystem] === 'string') {\n keySystemContentTypes[keySystem].url = keySystemOptions[keySystem];\n }\n }\n return merge(keySystemOptions, keySystemContentTypes);\n};\n/**\n * @typedef {Object} KeySystems\n *\n * keySystems configuration for https://github.com/videojs/videojs-contrib-eme\n * Note: not all options are listed here.\n *\n * @property {Uint8Array} [pssh]\n * Protection System Specific Header\n */\n\n/**\n * Goes through all the playlists and collects an array of KeySystems options objects\n * containing each playlist's keySystems and their pssh values, if available.\n *\n * @param {Object[]} playlists\n * The playlists to look through\n * @param {string[]} keySystems\n * The keySystems to collect pssh values for\n *\n * @return {KeySystems[]}\n * An array of KeySystems objects containing available key systems and their\n * pssh values\n */\n\nconst getAllPsshKeySystemsOptions = (playlists, keySystems) => {\n return playlists.reduce((keySystemsArr, playlist) => {\n if (!playlist.contentProtection) {\n return keySystemsArr;\n }\n const keySystemsOptions = keySystems.reduce((keySystemsObj, keySystem) => {\n const keySystemOptions = playlist.contentProtection[keySystem];\n if (keySystemOptions && keySystemOptions.pssh) {\n keySystemsObj[keySystem] = {\n pssh: keySystemOptions.pssh\n };\n }\n return keySystemsObj;\n }, {});\n if (Object.keys(keySystemsOptions).length) {\n keySystemsArr.push(keySystemsOptions);\n }\n return keySystemsArr;\n }, []);\n};\n/**\n * Returns a promise that waits for the\n * [eme plugin](https://github.com/videojs/videojs-contrib-eme) to create a key session.\n *\n * Works around https://bugs.chromium.org/p/chromium/issues/detail?id=895449 in non-IE11\n * browsers.\n *\n * As per the above ticket, 
this is particularly important for Chrome, where, if\n * unencrypted content is appended before encrypted content and the key session has not\n * been created, a MEDIA_ERR_DECODE will be thrown once the encrypted content is reached\n * during playback.\n *\n * @param {Object} player\n * The player instance\n * @param {Object[]} sourceKeySystems\n * The key systems options from the player source\n * @param {Object} [audioMedia]\n * The active audio media playlist (optional)\n * @param {Object[]} mainPlaylists\n * The playlists found on the main playlist object\n *\n * @return {Object}\n * Promise that resolves when the key session has been created\n */\n\nconst waitForKeySessionCreation = ({\n player,\n sourceKeySystems,\n audioMedia,\n mainPlaylists\n}) => {\n if (!player.eme.initializeMediaKeys) {\n return Promise.resolve();\n } // TODO should all audio PSSH values be initialized for DRM?\n //\n // All unique video rendition pssh values are initialized for DRM, but here only\n // the initial audio playlist license is initialized. In theory, an encrypted\n // event should be fired if the user switches to an alternative audio playlist\n // where a license is required, but this case hasn't yet been tested. In addition, there\n // may be many alternate audio playlists unlikely to be used (e.g., multiple different\n // languages).\n\n const playlists = audioMedia ? mainPlaylists.concat([audioMedia]) : mainPlaylists;\n const keySystemsOptionsArr = getAllPsshKeySystemsOptions(playlists, Object.keys(sourceKeySystems));\n const initializationFinishedPromises = [];\n const keySessionCreatedPromises = []; // Since PSSH values are interpreted as initData, EME will dedupe any duplicates. The\n // only place where it should not be deduped is for ms-prefixed APIs, but\n // the existence of modern EME APIs in addition to\n // ms-prefixed APIs on Edge should prevent this from being a concern.\n // initializeMediaKeys also won't use the webkit-prefixed APIs.\n\n keySystemsOptionsArr.forEach(keySystemsOptions => {\n keySessionCreatedPromises.push(new Promise((resolve, reject) => {\n player.tech_.one('keysessioncreated', resolve);\n }));\n initializationFinishedPromises.push(new Promise((resolve, reject) => {\n player.eme.initializeMediaKeys({\n keySystems: keySystemsOptions\n }, err => {\n if (err) {\n reject(err);\n return;\n }\n resolve();\n });\n }));\n }); // The reasons Promise.race is chosen over Promise.any:\n //\n // * Promise.any is only available in Safari 14+.\n // * None of these promises are expected to reject. 
If they do reject, it might be\n // better here for the race to surface the rejection, rather than mask it by using\n // Promise.any.\n\n return Promise.race([\n // If a session was previously created, these will all finish resolving without\n // creating a new session, otherwise it will take until the end of all license\n // requests, which is why the key session check is used (to make setup much faster).\n Promise.all(initializationFinishedPromises),\n // Once a single session is created, the browser knows DRM will be used.\n Promise.race(keySessionCreatedPromises)]);\n};\n/**\n * If the [eme](https://github.com/videojs/videojs-contrib-eme) plugin is available, and\n * there are keySystems on the source, sets up source options to prepare the source for\n * eme.\n *\n * @param {Object} player\n * The player instance\n * @param {Object[]} sourceKeySystems\n * The key systems options from the player source\n * @param {Object} media\n * The active media playlist\n * @param {Object} [audioMedia]\n * The active audio media playlist (optional)\n *\n * @return {boolean}\n * Whether or not options were configured and EME is available\n */\n\nconst setupEmeOptions = ({\n player,\n sourceKeySystems,\n media,\n audioMedia\n}) => {\n const sourceOptions = emeKeySystems(sourceKeySystems, media, audioMedia);\n if (!sourceOptions) {\n return false;\n }\n player.currentSource().keySystems = sourceOptions; // eme handles the rest of the setup, so if it is missing\n // do nothing.\n\n if (sourceOptions && !player.eme) {\n videojs.log.warn('DRM encrypted source cannot be decrypted without a DRM plugin');\n return false;\n }\n return true;\n};\nconst getVhsLocalStorage = () => {\n if (!window$1.localStorage) {\n return null;\n }\n const storedObject = window$1.localStorage.getItem(LOCAL_STORAGE_KEY);\n if (!storedObject) {\n return null;\n }\n try {\n return JSON.parse(storedObject);\n } catch (e) {\n // someone may have tampered with the value\n return null;\n }\n};\nconst updateVhsLocalStorage = options => {\n if (!window$1.localStorage) {\n return false;\n }\n let objectToStore = getVhsLocalStorage();\n objectToStore = objectToStore ? merge(objectToStore, options) : options;\n try {\n window$1.localStorage.setItem(LOCAL_STORAGE_KEY, JSON.stringify(objectToStore));\n } catch (e) {\n // Throws if storage is full (e.g., always on iOS 5+ Safari private mode, where\n // storage is set to 0).\n // https://developer.mozilla.org/en-US/docs/Web/API/Storage/setItem#Exceptions\n // No need to perform any operation.\n return false;\n }\n return objectToStore;\n};\n/**\n * Parses VHS-supported media types from data URIs. 
See\n * https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs\n * for information on data URIs.\n *\n * @param {string} dataUri\n * The data URI\n *\n * @return {string|Object}\n * The parsed object/string, or the original string if no supported media type\n * was found\n */\n\nconst expandDataUri = dataUri => {\n if (dataUri.toLowerCase().indexOf('data:application/vnd.videojs.vhs+json,') === 0) {\n return JSON.parse(dataUri.substring(dataUri.indexOf(',') + 1));\n } // no known case for this data URI, return the string as-is\n\n return dataUri;\n};\n/**\n * Adds a request hook to an xhr object\n *\n * @param {Object} xhr object to add the onRequest hook to\n * @param {function} callback hook function for an xhr request\n */\n\nconst addOnRequestHook = (xhr, callback) => {\n if (!xhr._requestCallbackSet) {\n xhr._requestCallbackSet = new Set();\n }\n xhr._requestCallbackSet.add(callback);\n};\n/**\n * Adds a response hook to an xhr object\n *\n * @param {Object} xhr object to add the onResponse hook to\n * @param {function} callback hook function for an xhr response\n */\n\nconst addOnResponseHook = (xhr, callback) => {\n if (!xhr._responseCallbackSet) {\n xhr._responseCallbackSet = new Set();\n }\n xhr._responseCallbackSet.add(callback);\n};\n/**\n * Removes a request hook on an xhr object, deletes the onRequest set if empty.\n *\n * @param {Object} xhr object to remove the onRequest hook from\n * @param {function} callback hook function to remove\n */\n\nconst removeOnRequestHook = (xhr, callback) => {\n if (!xhr._requestCallbackSet) {\n return;\n }\n xhr._requestCallbackSet.delete(callback);\n if (!xhr._requestCallbackSet.size) {\n delete xhr._requestCallbackSet;\n }\n};\n/**\n * Removes a response hook on an xhr object, deletes the onResponse set if empty.\n *\n * @param {Object} xhr object to remove the onResponse hook from\n * @param {function} callback hook function to remove\n */\n\nconst removeOnResponseHook = (xhr, callback) => {\n if (!xhr._responseCallbackSet) {\n return;\n }\n xhr._responseCallbackSet.delete(callback);\n if (!xhr._responseCallbackSet.size) {\n delete xhr._responseCallbackSet;\n }\n};\n/**\n * Whether the browser has built-in HLS support.\n */\n\nVhs.supportsNativeHls = function () {\n if (!document || !document.createElement) {\n return false;\n }\n const video = document.createElement('video'); // native HLS is definitely not supported if HTML5 video isn't\n\n if (!videojs.getTech('Html5').isSupported()) {\n return false;\n } // HLS manifests can go by many mime-types\n\n const canPlay = [\n // Apple santioned\n 'application/vnd.apple.mpegurl',\n // Apple sanctioned for backwards compatibility\n 'audio/mpegurl',\n // Very common\n 'audio/x-mpegurl',\n // Very common\n 'application/x-mpegurl',\n // Included for completeness\n 'video/x-mpegurl', 'video/mpegurl', 'application/mpegurl'];\n return canPlay.some(function (canItPlay) {\n return /maybe|probably/i.test(video.canPlayType(canItPlay));\n });\n}();\nVhs.supportsNativeDash = function () {\n if (!document || !document.createElement || !videojs.getTech('Html5').isSupported()) {\n return false;\n }\n return /maybe|probably/i.test(document.createElement('video').canPlayType('application/dash+xml'));\n}();\nVhs.supportsTypeNatively = type => {\n if (type === 'hls') {\n return Vhs.supportsNativeHls;\n }\n if (type === 'dash') {\n return Vhs.supportsNativeDash;\n }\n return false;\n};\n/**\n * VHS is a source handler, not a tech. 
Make sure attempts to use it\n * as one do not cause exceptions.\n */\n\nVhs.isSupported = function () {\n return videojs.log.warn('VHS is no longer a tech. Please remove it from ' + 'your player\\'s techOrder.');\n};\n/**\n * A global function for setting an onRequest hook\n *\n * @param {function} callback for request modifiction\n */\n\nVhs.xhr.onRequest = function (callback) {\n addOnRequestHook(Vhs.xhr, callback);\n};\n/**\n * A global function for setting an onResponse hook\n *\n * @param {callback} callback for response data retrieval\n */\n\nVhs.xhr.onResponse = function (callback) {\n addOnResponseHook(Vhs.xhr, callback);\n};\n/**\n * Deletes a global onRequest callback if it exists\n *\n * @param {function} callback to delete from the global set\n */\n\nVhs.xhr.offRequest = function (callback) {\n removeOnRequestHook(Vhs.xhr, callback);\n};\n/**\n * Deletes a global onResponse callback if it exists\n *\n * @param {function} callback to delete from the global set\n */\n\nVhs.xhr.offResponse = function (callback) {\n removeOnResponseHook(Vhs.xhr, callback);\n};\nconst Component = videojs.getComponent('Component');\n/**\n * The Vhs Handler object, where we orchestrate all of the parts\n * of VHS to interact with video.js\n *\n * @class VhsHandler\n * @extends videojs.Component\n * @param {Object} source the soruce object\n * @param {Tech} tech the parent tech object\n * @param {Object} options optional and required options\n */\n\nclass VhsHandler extends Component {\n constructor(source, tech, options) {\n super(tech, options.vhs); // if a tech level `initialBandwidth` option was passed\n // use that over the VHS level `bandwidth` option\n\n if (typeof options.initialBandwidth === 'number') {\n this.options_.bandwidth = options.initialBandwidth;\n }\n this.logger_ = logger('VhsHandler'); // we need access to the player in some cases,\n // so, get it from Video.js via the `playerId`\n\n if (tech.options_ && tech.options_.playerId) {\n const _player = videojs.getPlayer(tech.options_.playerId);\n this.player_ = _player;\n }\n this.tech_ = tech;\n this.source_ = source;\n this.stats = {};\n this.ignoreNextSeekingEvent_ = false;\n this.setOptions_();\n if (this.options_.overrideNative && tech.overrideNativeAudioTracks && tech.overrideNativeVideoTracks) {\n tech.overrideNativeAudioTracks(true);\n tech.overrideNativeVideoTracks(true);\n } else if (this.options_.overrideNative && (tech.featuresNativeVideoTracks || tech.featuresNativeAudioTracks)) {\n // overriding native VHS only works if audio tracks have been emulated\n // error early if we're misconfigured\n throw new Error('Overriding native VHS requires emulated tracks. 
' + 'See https://git.io/vMpjB');\n } // listen for fullscreenchange events for this player so that we\n // can adjust our quality selection quickly\n\n this.on(document, ['fullscreenchange', 'webkitfullscreenchange', 'mozfullscreenchange', 'MSFullscreenChange'], event => {\n const fullscreenElement = document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement;\n if (fullscreenElement && fullscreenElement.contains(this.tech_.el())) {\n this.playlistController_.fastQualityChange_();\n } else {\n // When leaving fullscreen, since the in page pixel dimensions should be smaller\n // than full screen, see if there should be a rendition switch down to preserve\n // bandwidth.\n this.playlistController_.checkABR_();\n }\n });\n this.on(this.tech_, 'seeking', function () {\n if (this.ignoreNextSeekingEvent_) {\n this.ignoreNextSeekingEvent_ = false;\n return;\n }\n this.setCurrentTime(this.tech_.currentTime());\n });\n this.on(this.tech_, 'error', function () {\n // verify that the error was real and we are loaded\n // enough to have pc loaded.\n if (this.tech_.error() && this.playlistController_) {\n this.playlistController_.pauseLoading();\n }\n });\n this.on(this.tech_, 'play', this.play);\n }\n /**\n * Set VHS options based on options from configuration, as well as partial\n * options to be passed at a later time.\n *\n * @param {Object} options A partial chunk of config options\n */\n\n setOptions_(options = {}) {\n this.options_ = merge(this.options_, options); // defaults\n\n this.options_.withCredentials = this.options_.withCredentials || false;\n this.options_.limitRenditionByPlayerDimensions = this.options_.limitRenditionByPlayerDimensions === false ? false : true;\n this.options_.useDevicePixelRatio = this.options_.useDevicePixelRatio || false;\n this.options_.useBandwidthFromLocalStorage = typeof this.source_.useBandwidthFromLocalStorage !== 'undefined' ? this.source_.useBandwidthFromLocalStorage : this.options_.useBandwidthFromLocalStorage || false;\n this.options_.useForcedSubtitles = this.options_.useForcedSubtitles || false;\n this.options_.useNetworkInformationApi = this.options_.useNetworkInformationApi || false;\n this.options_.useDtsForTimestampOffset = this.options_.useDtsForTimestampOffset || false;\n this.options_.customTagParsers = this.options_.customTagParsers || [];\n this.options_.customTagMappers = this.options_.customTagMappers || [];\n this.options_.cacheEncryptionKeys = this.options_.cacheEncryptionKeys || false;\n this.options_.llhls = this.options_.llhls === false ? 
false : true;\n this.options_.bufferBasedABR = this.options_.bufferBasedABR || false;\n if (typeof this.options_.playlistExclusionDuration !== 'number') {\n this.options_.playlistExclusionDuration = 60;\n }\n if (typeof this.options_.bandwidth !== 'number') {\n if (this.options_.useBandwidthFromLocalStorage) {\n const storedObject = getVhsLocalStorage();\n if (storedObject && storedObject.bandwidth) {\n this.options_.bandwidth = storedObject.bandwidth;\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-bandwidth-from-local-storage'\n });\n }\n if (storedObject && storedObject.throughput) {\n this.options_.throughput = storedObject.throughput;\n this.tech_.trigger({\n type: 'usage',\n name: 'vhs-throughput-from-local-storage'\n });\n }\n }\n } // if bandwidth was not set by options or pulled from local storage, start playlist\n // selection at a reasonable bandwidth\n\n if (typeof this.options_.bandwidth !== 'number') {\n this.options_.bandwidth = Config.INITIAL_BANDWIDTH;\n } // If the bandwidth number is unchanged from the initial setting\n // then this takes precedence over the enableLowInitialPlaylist option\n\n this.options_.enableLowInitialPlaylist = this.options_.enableLowInitialPlaylist && this.options_.bandwidth === Config.INITIAL_BANDWIDTH; // grab options passed to player.src\n\n ['withCredentials', 'useDevicePixelRatio', 'customPixelRatio', 'limitRenditionByPlayerDimensions', 'bandwidth', 'customTagParsers', 'customTagMappers', 'cacheEncryptionKeys', 'playlistSelector', 'initialPlaylistSelector', 'bufferBasedABR', 'liveRangeSafeTimeDelta', 'llhls', 'useForcedSubtitles', 'useNetworkInformationApi', 'useDtsForTimestampOffset', 'exactManifestTimings', 'leastPixelDiffSelector'].forEach(option => {\n if (typeof this.source_[option] !== 'undefined') {\n this.options_[option] = this.source_[option];\n }\n });\n this.limitRenditionByPlayerDimensions = this.options_.limitRenditionByPlayerDimensions;\n this.useDevicePixelRatio = this.options_.useDevicePixelRatio;\n const customPixelRatio = this.options_.customPixelRatio; // Ensure the custom pixel ratio is a number greater than or equal to 0\n\n if (typeof customPixelRatio === 'number' && customPixelRatio >= 0) {\n this.customPixelRatio = customPixelRatio;\n }\n } // alias for public method to set options\n\n setOptions(options = {}) {\n this.setOptions_(options);\n }\n /**\n * called when player.src gets called, handle a new source\n *\n * @param {Object} src the source object to handle\n */\n\n src(src, type) {\n // do nothing if the src is falsey\n if (!src) {\n return;\n }\n this.setOptions_(); // add main playlist controller options\n\n this.options_.src = expandDataUri(this.source_.src);\n this.options_.tech = this.tech_;\n this.options_.externVhs = Vhs;\n this.options_.sourceType = simpleTypeFromSourceType(type); // Whenever we seek internally, we should update the tech\n\n this.options_.seekTo = time => {\n this.tech_.setCurrentTime(time);\n };\n this.playlistController_ = new PlaylistController(this.options_);\n const playbackWatcherOptions = merge({\n liveRangeSafeTimeDelta: SAFE_TIME_DELTA\n }, this.options_, {\n seekable: () => this.seekable(),\n media: () => this.playlistController_.media(),\n playlistController: this.playlistController_\n });\n this.playbackWatcher_ = new PlaybackWatcher(playbackWatcherOptions);\n this.playlistController_.on('error', () => {\n const player = videojs.players[this.tech_.options_.playerId];\n let error = this.playlistController_.error;\n if (typeof error === 'object' && !error.code) {\n 
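// MediaError code 3 corresponds to MEDIA_ERR_DECODE; playlist controller errors that arrive\n        // without an explicit code (or as plain strings) are surfaced to video.js as decode errors.\n        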
error.code = 3;\n } else if (typeof error === 'string') {\n error = {\n message: error,\n code: 3\n };\n }\n player.error(error);\n });\n const defaultSelector = this.options_.bufferBasedABR ? Vhs.movingAverageBandwidthSelector(0.55) : Vhs.STANDARD_PLAYLIST_SELECTOR; // `this` in selectPlaylist should be the VhsHandler for backwards\n // compatibility with < v2\n\n this.playlistController_.selectPlaylist = this.selectPlaylist ? this.selectPlaylist.bind(this) : defaultSelector.bind(this);\n this.playlistController_.selectInitialPlaylist = Vhs.INITIAL_PLAYLIST_SELECTOR.bind(this); // re-expose some internal objects for backwards compatibility with < v2\n\n this.playlists = this.playlistController_.mainPlaylistLoader_;\n this.mediaSource = this.playlistController_.mediaSource; // Proxy assignment of some properties to the main playlist\n // controller. Using a custom property for backwards compatibility\n // with < v2\n\n Object.defineProperties(this, {\n selectPlaylist: {\n get() {\n return this.playlistController_.selectPlaylist;\n },\n set(selectPlaylist) {\n this.playlistController_.selectPlaylist = selectPlaylist.bind(this);\n }\n },\n throughput: {\n get() {\n return this.playlistController_.mainSegmentLoader_.throughput.rate;\n },\n set(throughput) {\n this.playlistController_.mainSegmentLoader_.throughput.rate = throughput; // By setting `count` to 1 the throughput value becomes the starting value\n // for the cumulative average\n\n this.playlistController_.mainSegmentLoader_.throughput.count = 1;\n }\n },\n bandwidth: {\n get() {\n let playerBandwidthEst = this.playlistController_.mainSegmentLoader_.bandwidth;\n const networkInformation = window$1.navigator.connection || window$1.navigator.mozConnection || window$1.navigator.webkitConnection;\n const tenMbpsAsBitsPerSecond = 10e6;\n if (this.options_.useNetworkInformationApi && networkInformation) {\n // downlink returns Mbps\n // https://developer.mozilla.org/en-US/docs/Web/API/NetworkInformation/downlink\n const networkInfoBandwidthEstBitsPerSec = networkInformation.downlink * 1000 * 1000; // downlink maxes out at 10 Mbps. In the event that both networkInformationApi and the player\n // estimate a bandwidth greater than 10 Mbps, use the larger of the two estimates to ensure that\n // high quality streams are not filtered out.\n\n if (networkInfoBandwidthEstBitsPerSec >= tenMbpsAsBitsPerSecond && playerBandwidthEst >= tenMbpsAsBitsPerSecond) {\n playerBandwidthEst = Math.max(playerBandwidthEst, networkInfoBandwidthEstBitsPerSec);\n } else {\n playerBandwidthEst = networkInfoBandwidthEstBitsPerSec;\n }\n }\n return playerBandwidthEst;\n },\n set(bandwidth) {\n this.playlistController_.mainSegmentLoader_.bandwidth = bandwidth; // setting the bandwidth manually resets the throughput counter\n // `count` is set to zero that current value of `rate` isn't included\n // in the cumulative average\n\n this.playlistController_.mainSegmentLoader_.throughput = {\n rate: 0,\n count: 0\n };\n }\n },\n /**\n * `systemBandwidth` is a combination of two serial processes bit-rates. 
The first\n * is the network bitrate provided by `bandwidth` and the second is the bitrate of\n * the entire process after that - decryption, transmuxing, and appending - provided\n * by `throughput`.\n *\n * Since the two process are serial, the overall system bandwidth is given by:\n * sysBandwidth = 1 / (1 / bandwidth + 1 / throughput)\n */\n systemBandwidth: {\n get() {\n const invBandwidth = 1 / (this.bandwidth || 1);\n let invThroughput;\n if (this.throughput > 0) {\n invThroughput = 1 / this.throughput;\n } else {\n invThroughput = 0;\n }\n const systemBitrate = Math.floor(1 / (invBandwidth + invThroughput));\n return systemBitrate;\n },\n set() {\n videojs.log.error('The \"systemBandwidth\" property is read-only');\n }\n }\n });\n if (this.options_.bandwidth) {\n this.bandwidth = this.options_.bandwidth;\n }\n if (this.options_.throughput) {\n this.throughput = this.options_.throughput;\n }\n Object.defineProperties(this.stats, {\n bandwidth: {\n get: () => this.bandwidth || 0,\n enumerable: true\n },\n mediaRequests: {\n get: () => this.playlistController_.mediaRequests_() || 0,\n enumerable: true\n },\n mediaRequestsAborted: {\n get: () => this.playlistController_.mediaRequestsAborted_() || 0,\n enumerable: true\n },\n mediaRequestsTimedout: {\n get: () => this.playlistController_.mediaRequestsTimedout_() || 0,\n enumerable: true\n },\n mediaRequestsErrored: {\n get: () => this.playlistController_.mediaRequestsErrored_() || 0,\n enumerable: true\n },\n mediaTransferDuration: {\n get: () => this.playlistController_.mediaTransferDuration_() || 0,\n enumerable: true\n },\n mediaBytesTransferred: {\n get: () => this.playlistController_.mediaBytesTransferred_() || 0,\n enumerable: true\n },\n mediaSecondsLoaded: {\n get: () => this.playlistController_.mediaSecondsLoaded_() || 0,\n enumerable: true\n },\n mediaAppends: {\n get: () => this.playlistController_.mediaAppends_() || 0,\n enumerable: true\n },\n mainAppendsToLoadedData: {\n get: () => this.playlistController_.mainAppendsToLoadedData_() || 0,\n enumerable: true\n },\n audioAppendsToLoadedData: {\n get: () => this.playlistController_.audioAppendsToLoadedData_() || 0,\n enumerable: true\n },\n appendsToLoadedData: {\n get: () => this.playlistController_.appendsToLoadedData_() || 0,\n enumerable: true\n },\n timeToLoadedData: {\n get: () => this.playlistController_.timeToLoadedData_() || 0,\n enumerable: true\n },\n buffered: {\n get: () => timeRangesToArray(this.tech_.buffered()),\n enumerable: true\n },\n currentTime: {\n get: () => this.tech_.currentTime(),\n enumerable: true\n },\n currentSource: {\n get: () => this.tech_.currentSource_,\n enumerable: true\n },\n currentTech: {\n get: () => this.tech_.name_,\n enumerable: true\n },\n duration: {\n get: () => this.tech_.duration(),\n enumerable: true\n },\n main: {\n get: () => this.playlists.main,\n enumerable: true\n },\n playerDimensions: {\n get: () => this.tech_.currentDimensions(),\n enumerable: true\n },\n seekable: {\n get: () => timeRangesToArray(this.tech_.seekable()),\n enumerable: true\n },\n timestamp: {\n get: () => Date.now(),\n enumerable: true\n },\n videoPlaybackQuality: {\n get: () => this.tech_.getVideoPlaybackQuality(),\n enumerable: true\n }\n });\n this.tech_.one('canplay', this.playlistController_.setupFirstPlay.bind(this.playlistController_));\n this.tech_.on('bandwidthupdate', () => {\n if (this.options_.useBandwidthFromLocalStorage) {\n updateVhsLocalStorage({\n bandwidth: this.bandwidth,\n throughput: Math.round(this.throughput)\n });\n }\n });\n 
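// Worked example of the systemBandwidth formula documented above (illustrative figures, not\n    // from the source): with bandwidth = 10e6 bits/s and throughput = 40e6 bits/s,\n    // systemBandwidth = 1 / (1 / 10e6 + 1 / 40e6) = 8e6 bits/s; the serial combination never\n    // exceeds either stage on its own.\n    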
this.playlistController_.on('selectedinitialmedia', () => {\n // Add the manual rendition mix-in to VhsHandler\n renditionSelectionMixin(this);\n });\n this.playlistController_.sourceUpdater_.on('createdsourcebuffers', () => {\n this.setupEme_();\n }); // the bandwidth of the primary segment loader is our best\n // estimate of overall bandwidth\n\n this.on(this.playlistController_, 'progress', function () {\n this.tech_.trigger('progress');\n }); // In the live case, we need to ignore the very first `seeking` event since\n // that will be the result of the seek-to-live behavior\n\n this.on(this.playlistController_, 'firstplay', function () {\n this.ignoreNextSeekingEvent_ = true;\n });\n this.setupQualityLevels_(); // do nothing if the tech has been disposed already\n // this can occur if someone sets the src in player.ready(), for instance\n\n if (!this.tech_.el()) {\n return;\n }\n this.mediaSourceUrl_ = window$1.URL.createObjectURL(this.playlistController_.mediaSource);\n this.tech_.src(this.mediaSourceUrl_);\n }\n createKeySessions_() {\n const audioPlaylistLoader = this.playlistController_.mediaTypes_.AUDIO.activePlaylistLoader;\n this.logger_('waiting for EME key session creation');\n waitForKeySessionCreation({\n player: this.player_,\n sourceKeySystems: this.source_.keySystems,\n audioMedia: audioPlaylistLoader && audioPlaylistLoader.media(),\n mainPlaylists: this.playlists.main.playlists\n }).then(() => {\n this.logger_('created EME key session');\n this.playlistController_.sourceUpdater_.initializedEme();\n }).catch(err => {\n this.logger_('error while creating EME key session', err);\n this.player_.error({\n message: 'Failed to initialize media keys for EME',\n code: 3,\n metadata: {\n errorType: videojs.Error.EMEKeySessionCreationError\n }\n });\n });\n }\n handleWaitingForKey_() {\n // If waitingforkey is fired, it's possible that the data that's necessary to retrieve\n // the key is in the manifest. 
While this should've happened on initial source load, it\n // may happen again in live streams where the keys change, and the manifest info\n // reflects the update.\n //\n // Because videojs-contrib-eme compares the PSSH data we send to that of PSSH data it's\n // already requested keys for, we don't have to worry about this generating extraneous\n // requests.\n this.logger_('waitingforkey fired, attempting to create any new key sessions');\n this.createKeySessions_();\n }\n /**\n * If necessary and EME is available, sets up EME options and waits for key session\n * creation.\n *\n * This function also updates the source updater so taht it can be used, as for some\n * browsers, EME must be configured before content is appended (if appending unencrypted\n * content before encrypted content).\n */\n\n setupEme_() {\n const audioPlaylistLoader = this.playlistController_.mediaTypes_.AUDIO.activePlaylistLoader;\n const didSetupEmeOptions = setupEmeOptions({\n player: this.player_,\n sourceKeySystems: this.source_.keySystems,\n media: this.playlists.media(),\n audioMedia: audioPlaylistLoader && audioPlaylistLoader.media()\n });\n this.player_.tech_.on('keystatuschange', e => {\n this.playlistController_.updatePlaylistByKeyStatus(e.keyId, e.status);\n });\n this.handleWaitingForKey_ = this.handleWaitingForKey_.bind(this);\n this.player_.tech_.on('waitingforkey', this.handleWaitingForKey_);\n if (!didSetupEmeOptions) {\n // If EME options were not set up, we've done all we could to initialize EME.\n this.playlistController_.sourceUpdater_.initializedEme();\n return;\n }\n this.createKeySessions_();\n }\n /**\n * Initializes the quality levels and sets listeners to update them.\n *\n * @method setupQualityLevels_\n * @private\n */\n\n setupQualityLevels_() {\n const player = videojs.players[this.tech_.options_.playerId]; // if there isn't a player or there isn't a qualityLevels plugin\n // or qualityLevels_ listeners have already been setup, do nothing.\n\n if (!player || !player.qualityLevels || this.qualityLevels_) {\n return;\n }\n this.qualityLevels_ = player.qualityLevels();\n this.playlistController_.on('selectedinitialmedia', () => {\n handleVhsLoadedMetadata(this.qualityLevels_, this);\n });\n this.playlists.on('mediachange', () => {\n handleVhsMediaChange(this.qualityLevels_, this.playlists);\n });\n }\n /**\n * return the version\n */\n\n static version() {\n return {\n '@videojs/http-streaming': version$4,\n 'mux.js': version$3,\n 'mpd-parser': version$2,\n 'm3u8-parser': version$1,\n 'aes-decrypter': version\n };\n }\n /**\n * return the version\n */\n\n version() {\n return this.constructor.version();\n }\n canChangeType() {\n return SourceUpdater.canChangeType();\n }\n /**\n * Begin playing the video.\n */\n\n play() {\n this.playlistController_.play();\n }\n /**\n * a wrapper around the function in PlaylistController\n */\n\n setCurrentTime(currentTime) {\n this.playlistController_.setCurrentTime(currentTime);\n }\n /**\n * a wrapper around the function in PlaylistController\n */\n\n duration() {\n return this.playlistController_.duration();\n }\n /**\n * a wrapper around the function in PlaylistController\n */\n\n seekable() {\n return this.playlistController_.seekable();\n }\n /**\n * Abort all outstanding work and cleanup.\n */\n\n dispose() {\n if (this.playbackWatcher_) {\n this.playbackWatcher_.dispose();\n }\n if (this.playlistController_) {\n this.playlistController_.dispose();\n }\n if (this.qualityLevels_) {\n this.qualityLevels_.dispose();\n }\n if (this.tech_ && 
this.tech_.vhs) {\n delete this.tech_.vhs;\n }\n if (this.mediaSourceUrl_ && window$1.URL.revokeObjectURL) {\n window$1.URL.revokeObjectURL(this.mediaSourceUrl_);\n this.mediaSourceUrl_ = null;\n }\n if (this.tech_) {\n this.tech_.off('waitingforkey', this.handleWaitingForKey_);\n }\n super.dispose();\n }\n convertToProgramTime(time, callback) {\n return getProgramTime({\n playlist: this.playlistController_.media(),\n time,\n callback\n });\n } // the player must be playing before calling this\n\n seekToProgramTime(programTime, callback, pauseAfterSeek = true, retryCount = 2) {\n return seekToProgramTime({\n programTime,\n playlist: this.playlistController_.media(),\n retryCount,\n pauseAfterSeek,\n seekTo: this.options_.seekTo,\n tech: this.options_.tech,\n callback\n });\n }\n /**\n * Adds the onRequest, onResponse, offRequest and offResponse functions\n * to the VhsHandler xhr Object.\n */\n\n setupXhrHooks_() {\n /**\n * A player function for setting an onRequest hook\n *\n * @param {function} callback for request modifiction\n */\n this.xhr.onRequest = callback => {\n addOnRequestHook(this.xhr, callback);\n };\n /**\n * A player function for setting an onResponse hook\n *\n * @param {callback} callback for response data retrieval\n */\n\n this.xhr.onResponse = callback => {\n addOnResponseHook(this.xhr, callback);\n };\n /**\n * Deletes a player onRequest callback if it exists\n *\n * @param {function} callback to delete from the player set\n */\n\n this.xhr.offRequest = callback => {\n removeOnRequestHook(this.xhr, callback);\n };\n /**\n * Deletes a player onResponse callback if it exists\n *\n * @param {function} callback to delete from the player set\n */\n\n this.xhr.offResponse = callback => {\n removeOnResponseHook(this.xhr, callback);\n }; // Trigger an event on the player to notify the user that vhs is ready to set xhr hooks.\n // This allows hooks to be set before the source is set to vhs when handleSource is called.\n\n this.player_.trigger('xhr-hooks-ready');\n }\n}\n/**\n * The Source Handler object, which informs video.js what additional\n * MIME types are supported and sets up playback. It is registered\n * automatically to the appropriate tech based on the capabilities of\n * the browser it is running in. It is not necessary to use or modify\n * this object in normal usage.\n */\n\nconst VhsSourceHandler = {\n name: 'videojs-http-streaming',\n VERSION: version$4,\n canHandleSource(srcObj, options = {}) {\n const localOptions = merge(videojs.options, options);\n return VhsSourceHandler.canPlayType(srcObj.type, localOptions);\n },\n handleSource(source, tech, options = {}) {\n const localOptions = merge(videojs.options, options);\n tech.vhs = new VhsHandler(source, tech, localOptions);\n tech.vhs.xhr = xhrFactory();\n tech.vhs.setupXhrHooks_();\n tech.vhs.src(source.src, source.type);\n return tech.vhs;\n },\n canPlayType(type, options) {\n const simpleType = simpleTypeFromSourceType(type);\n if (!simpleType) {\n return '';\n }\n const overrideNative = VhsSourceHandler.getOverrideNative(options);\n const supportsTypeNatively = Vhs.supportsTypeNatively(simpleType);\n const canUseMsePlayback = !supportsTypeNatively || overrideNative;\n return canUseMsePlayback ? 
'maybe' : '';\n },\n getOverrideNative(options = {}) {\n const {\n vhs = {}\n } = options;\n const defaultOverrideNative = !(videojs.browser.IS_ANY_SAFARI || videojs.browser.IS_IOS);\n const {\n overrideNative = defaultOverrideNative\n } = vhs;\n return overrideNative;\n }\n};\n/**\n * Check to see if the native MediaSource object exists and supports\n * an MP4 container with both H.264 video and AAC-LC audio.\n *\n * @return {boolean} if native media sources are supported\n */\n\nconst supportsNativeMediaSources = () => {\n return browserSupportsCodec('avc1.4d400d,mp4a.40.2');\n}; // register source handlers with the appropriate techs\n\nif (supportsNativeMediaSources()) {\n videojs.getTech('Html5').registerSourceHandler(VhsSourceHandler, 0);\n}\nvideojs.VhsHandler = VhsHandler;\nvideojs.VhsSourceHandler = VhsSourceHandler;\nvideojs.Vhs = Vhs;\nif (!videojs.use) {\n videojs.registerComponent('Vhs', Vhs);\n}\nvideojs.options.vhs = videojs.options.vhs || {};\nif (!videojs.getPlugin || !videojs.getPlugin('reloadSourceOnError')) {\n videojs.registerPlugin('reloadSourceOnError', reloadSourceOnError);\n}\n\nexport { videojs as default };\n","export default function _extends() {\n _extends = Object.assign ? Object.assign.bind() : function (target) {\n for (var i = 1; i < arguments.length; i++) {\n var source = arguments[i];\n for (var key in source) {\n if (Object.prototype.hasOwnProperty.call(source, key)) {\n target[key] = source[key];\n }\n }\n }\n return target;\n };\n return _extends.apply(this, arguments);\n}","import URLToolkit from 'url-toolkit';\nimport window from 'global/window';\nvar DEFAULT_LOCATION = 'http://example.com';\n\nvar resolveUrl = function resolveUrl(baseUrl, relativeUrl) {\n // return early if we don't need to resolve\n if (/^[a-z]+:/i.test(relativeUrl)) {\n return relativeUrl;\n } // if baseUrl is a data URI, ignore it and resolve everything relative to window.location\n\n\n if (/^data:/.test(baseUrl)) {\n baseUrl = window.location && window.location.href || '';\n } // IE11 supports URL but not the URL constructor\n // feature detect the behavior we want\n\n\n var nativeURL = typeof window.URL === 'function';\n var protocolLess = /^\\/\\//.test(baseUrl); // remove location if window.location isn't available (i.e. 
we're in node)\n // and if baseUrl isn't an absolute url\n\n var removeLocation = !window.location && !/\\/\\//i.test(baseUrl); // if the base URL is relative then combine with the current location\n\n if (nativeURL) {\n baseUrl = new window.URL(baseUrl, window.location || DEFAULT_LOCATION);\n } else if (!/\\/\\//i.test(baseUrl)) {\n baseUrl = URLToolkit.buildAbsoluteURL(window.location && window.location.href || '', baseUrl);\n }\n\n if (nativeURL) {\n var newUrl = new URL(relativeUrl, baseUrl); // if we're a protocol-less url, remove the protocol\n // and if we're location-less, remove the location\n // otherwise, return the url unmodified\n\n if (removeLocation) {\n return newUrl.href.slice(DEFAULT_LOCATION.length);\n } else if (protocolLess) {\n return newUrl.href.slice(newUrl.protocol.length);\n }\n\n return newUrl.href;\n }\n\n return URLToolkit.buildAbsoluteURL(baseUrl, relativeUrl);\n};\n\nexport default resolveUrl;","/**\n * @file stream.js\n */\n\n/**\n * A lightweight readable stream implemention that handles event dispatching.\n *\n * @class Stream\n */\nvar Stream = /*#__PURE__*/function () {\n function Stream() {\n this.listeners = {};\n }\n /**\n * Add a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener the callback to be invoked when an event of\n * the specified type occurs\n */\n\n\n var _proto = Stream.prototype;\n\n _proto.on = function on(type, listener) {\n if (!this.listeners[type]) {\n this.listeners[type] = [];\n }\n\n this.listeners[type].push(listener);\n }\n /**\n * Remove a listener for a specified event type.\n *\n * @param {string} type the event name\n * @param {Function} listener a function previously registered for this\n * type of event through `on`\n * @return {boolean} if we could turn it off or not\n */\n ;\n\n _proto.off = function off(type, listener) {\n if (!this.listeners[type]) {\n return false;\n }\n\n var index = this.listeners[type].indexOf(listener); // TODO: which is better?\n // In Video.js we slice listener functions\n // on trigger so that it does not mess up the order\n // while we loop through.\n //\n // Here we slice on off so that the loop in trigger\n // can continue using it's old reference to loop without\n // messing up the order.\n\n this.listeners[type] = this.listeners[type].slice(0);\n this.listeners[type].splice(index, 1);\n return index > -1;\n }\n /**\n * Trigger an event of the specified type on this stream. Any additional\n * arguments to this function are passed as parameters to event listeners.\n *\n * @param {string} type the event name\n */\n ;\n\n _proto.trigger = function trigger(type) {\n var callbacks = this.listeners[type];\n\n if (!callbacks) {\n return;\n } // Slicing the arguments on every invocation of this method\n // can add a significant amount of overhead. Avoid the\n // intermediate object creation for the common case of a\n // single callback argument\n\n\n if (arguments.length === 2) {\n var length = callbacks.length;\n\n for (var i = 0; i < length; ++i) {\n callbacks[i].call(this, arguments[1]);\n }\n } else {\n var args = Array.prototype.slice.call(arguments, 1);\n var _length = callbacks.length;\n\n for (var _i = 0; _i < _length; ++_i) {\n callbacks[_i].apply(this, args);\n }\n }\n }\n /**\n * Destroys the stream and cleans up.\n */\n ;\n\n _proto.dispose = function dispose() {\n this.listeners = {};\n }\n /**\n * Forwards all `data` events on this stream to the destination stream. 
The\n * destination stream should provide a method `push` to receive the data\n * events as they arrive.\n *\n * @param {Stream} destination the stream that will receive all `data` events\n * @see http://nodejs.org/api/stream.html#stream_readable_pipe_destination_options\n */\n ;\n\n _proto.pipe = function pipe(destination) {\n this.on('data', function (data) {\n destination.push(data);\n });\n };\n\n return Stream;\n}();\n\nexport { Stream as default };","import window from 'global/window';\n\nvar atob = function atob(s) {\n return window.atob ? window.atob(s) : Buffer.from(s, 'base64').toString('binary');\n};\n\nexport default function decodeB64ToUint8Array(b64Text) {\n var decodedString = atob(b64Text);\n var array = new Uint8Array(decodedString.length);\n\n for (var i = 0; i < decodedString.length; i++) {\n array[i] = decodedString.charCodeAt(i);\n }\n\n return array;\n}","/*! @name m3u8-parser @version 7.1.0 @license Apache-2.0 */\nimport Stream from '@videojs/vhs-utils/es/stream.js';\nimport _extends from '@babel/runtime/helpers/extends';\nimport decodeB64ToUint8Array from '@videojs/vhs-utils/es/decode-b64-to-uint8-array.js';\n\n/**\n * @file m3u8/line-stream.js\n */\n/**\n * A stream that buffers string input and generates a `data` event for each\n * line.\n *\n * @class LineStream\n * @extends Stream\n */\n\nclass LineStream extends Stream {\n constructor() {\n super();\n this.buffer = '';\n }\n /**\n * Add new data to be parsed.\n *\n * @param {string} data the text to process\n */\n\n\n push(data) {\n let nextNewline;\n this.buffer += data;\n nextNewline = this.buffer.indexOf('\\n');\n\n for (; nextNewline > -1; nextNewline = this.buffer.indexOf('\\n')) {\n this.trigger('data', this.buffer.substring(0, nextNewline));\n this.buffer = this.buffer.substring(nextNewline + 1);\n }\n }\n\n}\n\nconst TAB = String.fromCharCode(0x09);\n\nconst parseByterange = function (byterangeString) {\n // optionally match and capture 0+ digits before `@`\n // optionally match and capture 0+ digits after `@`\n const match = /([0-9.]*)?@?([0-9.]*)?/.exec(byterangeString || '');\n const result = {};\n\n if (match[1]) {\n result.length = parseInt(match[1], 10);\n }\n\n if (match[2]) {\n result.offset = parseInt(match[2], 10);\n }\n\n return result;\n};\n/**\n * \"forgiving\" attribute list psuedo-grammar:\n * attributes -> keyvalue (',' keyvalue)*\n * keyvalue -> key '=' value\n * key -> [^=]*\n * value -> '\"' [^\"]* '\"' | [^,]*\n */\n\n\nconst attributeSeparator = function () {\n const key = '[^=]*';\n const value = '\"[^\"]*\"|[^,]*';\n const keyvalue = '(?:' + key + ')=(?:' + value + ')';\n return new RegExp('(?:^|,)(' + keyvalue + ')');\n};\n/**\n * Parse attributes from a line given the separator\n *\n * @param {string} attributes the attribute line to parse\n */\n\n\nconst parseAttributes = function (attributes) {\n const result = {};\n\n if (!attributes) {\n return result;\n } // split the string using attributes as the separator\n\n\n const attrs = attributes.split(attributeSeparator());\n let i = attrs.length;\n let attr;\n\n while (i--) {\n // filter out unmatched portions of the string\n if (attrs[i] === '') {\n continue;\n } // split the key and value\n\n\n attr = /([^=]*)=(.*)/.exec(attrs[i]).slice(1); // trim whitespace and remove optional quotes around the value\n\n attr[0] = attr[0].replace(/^\\s+|\\s+$/g, '');\n attr[1] = attr[1].replace(/^\\s+|\\s+$/g, '');\n attr[1] = attr[1].replace(/^['\"](.*)['\"]$/g, '$1');\n result[attr[0]] = attr[1];\n }\n\n return result;\n};\n/**\n * A 
line-level M3U8 parser event stream. It expects to receive input one\n * line at a time and performs a context-free parse of its contents. A stream\n * interpretation of a manifest can be useful if the manifest is expected to\n * be too large to fit comfortably into memory or the entirety of the input\n * is not immediately available. Otherwise, it's probably much easier to work\n * with a regular `Parser` object.\n *\n * Produces `data` events with an object that captures the parser's\n * interpretation of the input. That object has a property `tag` that is one\n * of `uri`, `comment`, or `tag`. URIs only have a single additional\n * property, `line`, which captures the entirety of the input without\n * interpretation. Comments similarly have a single additional property\n * `text` which is the input without the leading `#`.\n *\n * Tags always have a property `tagType` which is the lower-cased version of\n * the M3U8 directive without the `#EXT` or `#EXT-X-` prefix. For instance,\n * `#EXT-X-MEDIA-SEQUENCE` becomes `media-sequence` when parsed. Unrecognized\n * tags are given the tag type `unknown` and a single additional property\n * `data` with the remainder of the input.\n *\n * @class ParseStream\n * @extends Stream\n */\n\n\nclass ParseStream extends Stream {\n constructor() {\n super();\n this.customParsers = [];\n this.tagMappers = [];\n }\n /**\n * Parses an additional line of input.\n *\n * @param {string} line a single line of an M3U8 file to parse\n */\n\n\n push(line) {\n let match;\n let event; // strip whitespace\n\n line = line.trim();\n\n if (line.length === 0) {\n // ignore empty lines\n return;\n } // URIs\n\n\n if (line[0] !== '#') {\n this.trigger('data', {\n type: 'uri',\n uri: line\n });\n return;\n } // map tags\n\n\n const newLines = this.tagMappers.reduce((acc, mapper) => {\n const mappedLine = mapper(line); // skip if unchanged\n\n if (mappedLine === line) {\n return acc;\n }\n\n return acc.concat([mappedLine]);\n }, [line]);\n newLines.forEach(newLine => {\n for (let i = 0; i < this.customParsers.length; i++) {\n if (this.customParsers[i].call(this, newLine)) {\n return;\n }\n } // Comments\n\n\n if (newLine.indexOf('#EXT') !== 0) {\n this.trigger('data', {\n type: 'comment',\n text: newLine.slice(1)\n });\n return;\n } // strip off any carriage returns here so the regex matching\n // doesn't have to account for them.\n\n\n newLine = newLine.replace('\\r', ''); // Tags\n\n match = /^#EXTM3U/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'm3u'\n });\n return;\n }\n\n match = /^#EXTINF:([0-9\\.]*)?,?(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'inf'\n };\n\n if (match[1]) {\n event.duration = parseFloat(match[1]);\n }\n\n if (match[2]) {\n event.title = match[2];\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-TARGETDURATION:([0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'targetduration'\n };\n\n if (match[1]) {\n event.duration = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-VERSION:([0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'version'\n };\n\n if (match[1]) {\n event.version = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-MEDIA-SEQUENCE:(\\-?[0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'media-sequence'\n };\n\n if (match[1]) {\n event.number = parseInt(match[1], 
10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-DISCONTINUITY-SEQUENCE:(\\-?[0-9.]*)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'discontinuity-sequence'\n };\n\n if (match[1]) {\n event.number = parseInt(match[1], 10);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PLAYLIST-TYPE:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'playlist-type'\n };\n\n if (match[1]) {\n event.playlistType = match[1];\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-BYTERANGE:(.*)?$/.exec(newLine);\n\n if (match) {\n event = _extends(parseByterange(match[1]), {\n type: 'tag',\n tagType: 'byterange'\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-ALLOW-CACHE:(YES|NO)?/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'allow-cache'\n };\n\n if (match[1]) {\n event.allowed = !/NO/.test(match[1]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-MAP:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'map'\n };\n\n if (match[1]) {\n const attributes = parseAttributes(match[1]);\n\n if (attributes.URI) {\n event.uri = attributes.URI;\n }\n\n if (attributes.BYTERANGE) {\n event.byterange = parseByterange(attributes.BYTERANGE);\n }\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-STREAM-INF:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'stream-inf'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]);\n\n if (event.attributes.RESOLUTION) {\n const split = event.attributes.RESOLUTION.split('x');\n const resolution = {};\n\n if (split[0]) {\n resolution.width = parseInt(split[0], 10);\n }\n\n if (split[1]) {\n resolution.height = parseInt(split[1], 10);\n }\n\n event.attributes.RESOLUTION = resolution;\n }\n\n if (event.attributes.BANDWIDTH) {\n event.attributes.BANDWIDTH = parseInt(event.attributes.BANDWIDTH, 10);\n }\n\n if (event.attributes['FRAME-RATE']) {\n event.attributes['FRAME-RATE'] = parseFloat(event.attributes['FRAME-RATE']);\n }\n\n if (event.attributes['PROGRAM-ID']) {\n event.attributes['PROGRAM-ID'] = parseInt(event.attributes['PROGRAM-ID'], 10);\n }\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-MEDIA:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'media'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-ENDLIST/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'endlist'\n });\n return;\n }\n\n match = /^#EXT-X-DISCONTINUITY/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'discontinuity'\n });\n return;\n }\n\n match = /^#EXT-X-PROGRAM-DATE-TIME:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'program-date-time'\n };\n\n if (match[1]) {\n event.dateTimeString = match[1];\n event.dateTimeObject = new Date(match[1]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-KEY:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'key'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]); // parse the IV string into a Uint32Array\n\n if (event.attributes.IV) {\n if (event.attributes.IV.substring(0, 2).toLowerCase() === '0x') {\n event.attributes.IV = event.attributes.IV.substring(2);\n }\n\n 
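// For illustration (hypothetical IV value): given an EXT-X-KEY IV of\n              // "0x000102030405060708090a0b0c0d0e0f", the "0x" prefix is stripped above, the remaining\n              // 32 hex digits are split into four 8-digit chunks by /.{8}/g below, and each chunk is\n              // parsed with parseInt(chunk, 16) into one 32-bit word, yielding\n              // Uint32Array [0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f].\n              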
event.attributes.IV = event.attributes.IV.match(/.{8}/g);\n event.attributes.IV[0] = parseInt(event.attributes.IV[0], 16);\n event.attributes.IV[1] = parseInt(event.attributes.IV[1], 16);\n event.attributes.IV[2] = parseInt(event.attributes.IV[2], 16);\n event.attributes.IV[3] = parseInt(event.attributes.IV[3], 16);\n event.attributes.IV = new Uint32Array(event.attributes.IV);\n }\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-START:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'start'\n };\n\n if (match[1]) {\n event.attributes = parseAttributes(match[1]);\n event.attributes['TIME-OFFSET'] = parseFloat(event.attributes['TIME-OFFSET']);\n event.attributes.PRECISE = /YES/.test(event.attributes.PRECISE);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-CUE-OUT-CONT:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'cue-out-cont'\n };\n\n if (match[1]) {\n event.data = match[1];\n } else {\n event.data = '';\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-CUE-OUT:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'cue-out'\n };\n\n if (match[1]) {\n event.data = match[1];\n } else {\n event.data = '';\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-CUE-IN:(.*)?$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'cue-in'\n };\n\n if (match[1]) {\n event.data = match[1];\n } else {\n event.data = '';\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-SKIP:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'skip'\n };\n event.attributes = parseAttributes(match[1]);\n\n if (event.attributes.hasOwnProperty('SKIPPED-SEGMENTS')) {\n event.attributes['SKIPPED-SEGMENTS'] = parseInt(event.attributes['SKIPPED-SEGMENTS'], 10);\n }\n\n if (event.attributes.hasOwnProperty('RECENTLY-REMOVED-DATERANGES')) {\n event.attributes['RECENTLY-REMOVED-DATERANGES'] = event.attributes['RECENTLY-REMOVED-DATERANGES'].split(TAB);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PART:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'part'\n };\n event.attributes = parseAttributes(match[1]);\n ['DURATION'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n ['INDEPENDENT', 'GAP'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = /YES/.test(event.attributes[key]);\n }\n });\n\n if (event.attributes.hasOwnProperty('BYTERANGE')) {\n event.attributes.byterange = parseByterange(event.attributes.BYTERANGE);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-SERVER-CONTROL:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'server-control'\n };\n event.attributes = parseAttributes(match[1]);\n ['CAN-SKIP-UNTIL', 'PART-HOLD-BACK', 'HOLD-BACK'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n ['CAN-SKIP-DATERANGES', 'CAN-BLOCK-RELOAD'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = /YES/.test(event.attributes[key]);\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PART-INF:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 
'tag',\n tagType: 'part-inf'\n };\n event.attributes = parseAttributes(match[1]);\n ['PART-TARGET'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-PRELOAD-HINT:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'preload-hint'\n };\n event.attributes = parseAttributes(match[1]);\n ['BYTERANGE-START', 'BYTERANGE-LENGTH'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseInt(event.attributes[key], 10);\n const subkey = key === 'BYTERANGE-LENGTH' ? 'length' : 'offset';\n event.attributes.byterange = event.attributes.byterange || {};\n event.attributes.byterange[subkey] = event.attributes[key]; // only keep the parsed byterange object.\n\n delete event.attributes[key];\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-RENDITION-REPORT:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'rendition-report'\n };\n event.attributes = parseAttributes(match[1]);\n ['LAST-MSN', 'LAST-PART'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseInt(event.attributes[key], 10);\n }\n });\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-DATERANGE:(.*)$/.exec(newLine);\n\n if (match && match[1]) {\n event = {\n type: 'tag',\n tagType: 'daterange'\n };\n event.attributes = parseAttributes(match[1]);\n ['ID', 'CLASS'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = String(event.attributes[key]);\n }\n });\n ['START-DATE', 'END-DATE'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = new Date(event.attributes[key]);\n }\n });\n ['DURATION', 'PLANNED-DURATION'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = parseFloat(event.attributes[key]);\n }\n });\n ['END-ON-NEXT'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = /YES/i.test(event.attributes[key]);\n }\n });\n ['SCTE35-CMD', ' SCTE35-OUT', 'SCTE35-IN'].forEach(function (key) {\n if (event.attributes.hasOwnProperty(key)) {\n event.attributes[key] = event.attributes[key].toString(16);\n }\n });\n const clientAttributePattern = /^X-([A-Z]+-)+[A-Z]+$/;\n\n for (const key in event.attributes) {\n if (!clientAttributePattern.test(key)) {\n continue;\n }\n\n const isHexaDecimal = /[0-9A-Fa-f]{6}/g.test(event.attributes[key]);\n const isDecimalFloating = /^\\d+(\\.\\d+)?$/.test(event.attributes[key]);\n event.attributes[key] = isHexaDecimal ? event.attributes[key].toString(16) : isDecimalFloating ? 
parseFloat(event.attributes[key]) : String(event.attributes[key]);\n }\n\n this.trigger('data', event);\n return;\n }\n\n match = /^#EXT-X-INDEPENDENT-SEGMENTS/.exec(newLine);\n\n if (match) {\n this.trigger('data', {\n type: 'tag',\n tagType: 'independent-segments'\n });\n return;\n }\n\n match = /^#EXT-X-CONTENT-STEERING:(.*)$/.exec(newLine);\n\n if (match) {\n event = {\n type: 'tag',\n tagType: 'content-steering'\n };\n event.attributes = parseAttributes(match[1]);\n this.trigger('data', event);\n return;\n } // unknown tag type\n\n\n this.trigger('data', {\n type: 'tag',\n data: newLine.slice(4)\n });\n });\n }\n /**\n * Add a parser for custom headers\n *\n * @param {Object} options a map of options for the added parser\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {string} options.customType the custom type to register to the output\n * @param {Function} [options.dataParser] function to parse the line into an object\n * @param {boolean} [options.segment] should tag data be attached to the segment object\n */\n\n\n addParser({\n expression,\n customType,\n dataParser,\n segment\n }) {\n if (typeof dataParser !== 'function') {\n dataParser = line => line;\n }\n\n this.customParsers.push(line => {\n const match = expression.exec(line);\n\n if (match) {\n this.trigger('data', {\n type: 'custom',\n data: dataParser(line),\n customType,\n segment\n });\n return true;\n }\n });\n }\n /**\n * Add a custom header mapper\n *\n * @param {Object} options\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {Function} options.map function to translate tag into a different tag\n */\n\n\n addTagMapper({\n expression,\n map\n }) {\n const mapFn = line => {\n if (expression.test(line)) {\n return map(line);\n }\n\n return line;\n };\n\n this.tagMappers.push(mapFn);\n }\n\n}\n\nconst camelCase = str => str.toLowerCase().replace(/-(\\w)/g, a => a[1].toUpperCase());\n\nconst camelCaseKeys = function (attributes) {\n const result = {};\n Object.keys(attributes).forEach(function (key) {\n result[camelCase(key)] = attributes[key];\n });\n return result;\n}; // set SERVER-CONTROL hold back based upon targetDuration and partTargetDuration\n// we need this helper because defaults are based upon targetDuration and\n// partTargetDuration being set, but they may not be if SERVER-CONTROL appears before\n// target durations are set.\n\n\nconst setHoldBack = function (manifest) {\n const {\n serverControl,\n targetDuration,\n partTargetDuration\n } = manifest;\n\n if (!serverControl) {\n return;\n }\n\n const tag = '#EXT-X-SERVER-CONTROL';\n const hb = 'holdBack';\n const phb = 'partHoldBack';\n const minTargetDuration = targetDuration && targetDuration * 3;\n const minPartDuration = partTargetDuration && partTargetDuration * 2;\n\n if (targetDuration && !serverControl.hasOwnProperty(hb)) {\n serverControl[hb] = minTargetDuration;\n this.trigger('info', {\n message: `${tag} defaulting HOLD-BACK to targetDuration * 3 (${minTargetDuration}).`\n });\n }\n\n if (minTargetDuration && serverControl[hb] < minTargetDuration) {\n this.trigger('warn', {\n message: `${tag} clamping HOLD-BACK (${serverControl[hb]}) to targetDuration * 3 (${minTargetDuration})`\n });\n serverControl[hb] = minTargetDuration;\n } // default no part hold back to part target duration * 3\n\n\n if (partTargetDuration && !serverControl.hasOwnProperty(phb)) {\n serverControl[phb] = partTargetDuration * 3;\n this.trigger('info', {\n message: `${tag} 
defaulting PART-HOLD-BACK to partTargetDuration * 3 (${serverControl[phb]}).`\n });\n } // if part hold back is too small default it to part target duration * 2\n\n\n if (partTargetDuration && serverControl[phb] < minPartDuration) {\n this.trigger('warn', {\n message: `${tag} clamping PART-HOLD-BACK (${serverControl[phb]}) to partTargetDuration * 2 (${minPartDuration}).`\n });\n serverControl[phb] = minPartDuration;\n }\n};\n/**\n * A parser for M3U8 files. The current interpretation of the input is\n * exposed as a property `manifest` on parser objects. It's just two lines to\n * create and parse a manifest once you have the contents available as a string:\n *\n * ```js\n * var parser = new m3u8.Parser();\n * parser.push(xhr.responseText);\n * ```\n *\n * New input can later be applied to update the manifest object by calling\n * `push` again.\n *\n * The parser attempts to create a usable manifest object even if the\n * underlying input is somewhat nonsensical. It emits `info` and `warning`\n * events during the parse if it encounters input that seems invalid or\n * requires some property of the manifest object to be defaulted.\n *\n * @class Parser\n * @extends Stream\n */\n\n\nclass Parser extends Stream {\n constructor() {\n super();\n this.lineStream = new LineStream();\n this.parseStream = new ParseStream();\n this.lineStream.pipe(this.parseStream);\n this.lastProgramDateTime = null;\n /* eslint-disable consistent-this */\n\n const self = this;\n /* eslint-enable consistent-this */\n\n const uris = [];\n let currentUri = {}; // if specified, the active EXT-X-MAP definition\n\n let currentMap; // if specified, the active decryption key\n\n let key;\n let hasParts = false;\n\n const noop = function () {};\n\n const defaultMediaGroups = {\n 'AUDIO': {},\n 'VIDEO': {},\n 'CLOSED-CAPTIONS': {},\n 'SUBTITLES': {}\n }; // This is the Widevine UUID from DASH IF IOP. 
The same exact string is\n // used in MPDs with Widevine encrypted streams.\n\n const widevineUuid = 'urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed'; // group segments into numbered timelines delineated by discontinuities\n\n let currentTimeline = 0; // the manifest is empty until the parse stream begins delivering data\n\n this.manifest = {\n allowCache: true,\n discontinuityStarts: [],\n dateRanges: [],\n segments: []\n }; // keep track of the last seen segment's byte range end, as segments are not required\n // to provide the offset, in which case it defaults to the next byte after the\n // previous segment\n\n let lastByterangeEnd = 0; // keep track of the last seen part's byte range end.\n\n let lastPartByterangeEnd = 0;\n const dateRangeTags = {};\n this.on('end', () => {\n // only add preloadSegment if we don't yet have a uri for it.\n // and we actually have parts/preloadHints\n if (currentUri.uri || !currentUri.parts && !currentUri.preloadHints) {\n return;\n }\n\n if (!currentUri.map && currentMap) {\n currentUri.map = currentMap;\n }\n\n if (!currentUri.key && key) {\n currentUri.key = key;\n }\n\n if (!currentUri.timeline && typeof currentTimeline === 'number') {\n currentUri.timeline = currentTimeline;\n }\n\n this.manifest.preloadSegment = currentUri;\n }); // update the manifest with the m3u8 entry from the parse stream\n\n this.parseStream.on('data', function (entry) {\n let mediaGroup;\n let rendition;\n ({\n tag() {\n // switch based on the tag type\n (({\n version() {\n if (entry.version) {\n this.manifest.version = entry.version;\n }\n },\n\n 'allow-cache'() {\n this.manifest.allowCache = entry.allowed;\n\n if (!('allowed' in entry)) {\n this.trigger('info', {\n message: 'defaulting allowCache to YES'\n });\n this.manifest.allowCache = true;\n }\n },\n\n byterange() {\n const byterange = {};\n\n if ('length' in entry) {\n currentUri.byterange = byterange;\n byterange.length = entry.length;\n\n if (!('offset' in entry)) {\n /*\n * From the latest spec (as of this writing):\n * https://tools.ietf.org/html/draft-pantos-http-live-streaming-23#section-4.3.2.2\n *\n * Same text since EXT-X-BYTERANGE's introduction in draft 7:\n * https://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.1)\n *\n * \"If o [offset] is not present, the sub-range begins at the next byte\n * following the sub-range of the previous media segment.\"\n */\n entry.offset = lastByterangeEnd;\n }\n }\n\n if ('offset' in entry) {\n currentUri.byterange = byterange;\n byterange.offset = entry.offset;\n }\n\n lastByterangeEnd = byterange.offset + byterange.length;\n },\n\n endlist() {\n this.manifest.endList = true;\n },\n\n inf() {\n if (!('mediaSequence' in this.manifest)) {\n this.manifest.mediaSequence = 0;\n this.trigger('info', {\n message: 'defaulting media sequence to zero'\n });\n }\n\n if (!('discontinuitySequence' in this.manifest)) {\n this.manifest.discontinuitySequence = 0;\n this.trigger('info', {\n message: 'defaulting discontinuity sequence to zero'\n });\n }\n\n if (entry.title) {\n currentUri.title = entry.title;\n }\n\n if (entry.duration > 0) {\n currentUri.duration = entry.duration;\n }\n\n if (entry.duration === 0) {\n currentUri.duration = 0.01;\n this.trigger('info', {\n message: 'updating zero segment duration to a small value'\n });\n }\n\n this.manifest.segments = uris;\n },\n\n key() {\n if (!entry.attributes) {\n this.trigger('warn', {\n message: 'ignoring key declaration without attribute list'\n });\n return;\n } // clear the active encryption key\n\n\n if 
(entry.attributes.METHOD === 'NONE') {\n key = null;\n return;\n }\n\n if (!entry.attributes.URI) {\n this.trigger('warn', {\n message: 'ignoring key declaration without URI'\n });\n return;\n }\n\n if (entry.attributes.KEYFORMAT === 'com.apple.streamingkeydelivery') {\n this.manifest.contentProtection = this.manifest.contentProtection || {}; // TODO: add full support for this.\n\n this.manifest.contentProtection['com.apple.fps.1_0'] = {\n attributes: entry.attributes\n };\n return;\n }\n\n if (entry.attributes.KEYFORMAT === 'com.microsoft.playready') {\n this.manifest.contentProtection = this.manifest.contentProtection || {}; // TODO: add full support for this.\n\n this.manifest.contentProtection['com.microsoft.playready'] = {\n uri: entry.attributes.URI\n };\n return;\n } // check if the content is encrypted for Widevine\n // Widevine/HLS spec: https://storage.googleapis.com/wvdocs/Widevine_DRM_HLS.pdf\n\n\n if (entry.attributes.KEYFORMAT === widevineUuid) {\n const VALID_METHODS = ['SAMPLE-AES', 'SAMPLE-AES-CTR', 'SAMPLE-AES-CENC'];\n\n if (VALID_METHODS.indexOf(entry.attributes.METHOD) === -1) {\n this.trigger('warn', {\n message: 'invalid key method provided for Widevine'\n });\n return;\n }\n\n if (entry.attributes.METHOD === 'SAMPLE-AES-CENC') {\n this.trigger('warn', {\n message: 'SAMPLE-AES-CENC is deprecated, please use SAMPLE-AES-CTR instead'\n });\n }\n\n if (entry.attributes.URI.substring(0, 23) !== 'data:text/plain;base64,') {\n this.trigger('warn', {\n message: 'invalid key URI provided for Widevine'\n });\n return;\n }\n\n if (!(entry.attributes.KEYID && entry.attributes.KEYID.substring(0, 2) === '0x')) {\n this.trigger('warn', {\n message: 'invalid key ID provided for Widevine'\n });\n return;\n } // if Widevine key attributes are valid, store them as `contentProtection`\n // on the manifest to emulate Widevine tag structure in a DASH mpd\n\n\n this.manifest.contentProtection = this.manifest.contentProtection || {};\n this.manifest.contentProtection['com.widevine.alpha'] = {\n attributes: {\n schemeIdUri: entry.attributes.KEYFORMAT,\n // remove '0x' from the key id string\n keyId: entry.attributes.KEYID.substring(2)\n },\n // decode the base64-encoded PSSH box\n pssh: decodeB64ToUint8Array(entry.attributes.URI.split(',')[1])\n };\n return;\n }\n\n if (!entry.attributes.METHOD) {\n this.trigger('warn', {\n message: 'defaulting key method to AES-128'\n });\n } // setup an encryption key for upcoming segments\n\n\n key = {\n method: entry.attributes.METHOD || 'AES-128',\n uri: entry.attributes.URI\n };\n\n if (typeof entry.attributes.IV !== 'undefined') {\n key.iv = entry.attributes.IV;\n }\n },\n\n 'media-sequence'() {\n if (!isFinite(entry.number)) {\n this.trigger('warn', {\n message: 'ignoring invalid media sequence: ' + entry.number\n });\n return;\n }\n\n this.manifest.mediaSequence = entry.number;\n },\n\n 'discontinuity-sequence'() {\n if (!isFinite(entry.number)) {\n this.trigger('warn', {\n message: 'ignoring invalid discontinuity sequence: ' + entry.number\n });\n return;\n }\n\n this.manifest.discontinuitySequence = entry.number;\n currentTimeline = entry.number;\n },\n\n 'playlist-type'() {\n if (!/VOD|EVENT/.test(entry.playlistType)) {\n this.trigger('warn', {\n message: 'ignoring unknown playlist type: ' + entry.playlist\n });\n return;\n }\n\n this.manifest.playlistType = entry.playlistType;\n },\n\n map() {\n currentMap = {};\n\n if (entry.uri) {\n currentMap.uri = entry.uri;\n }\n\n if (entry.byterange) {\n currentMap.byterange = entry.byterange;\n }\n\n if 
(key) {\n currentMap.key = key;\n }\n },\n\n 'stream-inf'() {\n this.manifest.playlists = uris;\n this.manifest.mediaGroups = this.manifest.mediaGroups || defaultMediaGroups;\n\n if (!entry.attributes) {\n this.trigger('warn', {\n message: 'ignoring empty stream-inf attributes'\n });\n return;\n }\n\n if (!currentUri.attributes) {\n currentUri.attributes = {};\n }\n\n _extends(currentUri.attributes, entry.attributes);\n },\n\n media() {\n this.manifest.mediaGroups = this.manifest.mediaGroups || defaultMediaGroups;\n\n if (!(entry.attributes && entry.attributes.TYPE && entry.attributes['GROUP-ID'] && entry.attributes.NAME)) {\n this.trigger('warn', {\n message: 'ignoring incomplete or missing media group'\n });\n return;\n } // find the media group, creating defaults as necessary\n\n\n const mediaGroupType = this.manifest.mediaGroups[entry.attributes.TYPE];\n mediaGroupType[entry.attributes['GROUP-ID']] = mediaGroupType[entry.attributes['GROUP-ID']] || {};\n mediaGroup = mediaGroupType[entry.attributes['GROUP-ID']]; // collect the rendition metadata\n\n rendition = {\n default: /yes/i.test(entry.attributes.DEFAULT)\n };\n\n if (rendition.default) {\n rendition.autoselect = true;\n } else {\n rendition.autoselect = /yes/i.test(entry.attributes.AUTOSELECT);\n }\n\n if (entry.attributes.LANGUAGE) {\n rendition.language = entry.attributes.LANGUAGE;\n }\n\n if (entry.attributes.URI) {\n rendition.uri = entry.attributes.URI;\n }\n\n if (entry.attributes['INSTREAM-ID']) {\n rendition.instreamId = entry.attributes['INSTREAM-ID'];\n }\n\n if (entry.attributes.CHARACTERISTICS) {\n rendition.characteristics = entry.attributes.CHARACTERISTICS;\n }\n\n if (entry.attributes.FORCED) {\n rendition.forced = /yes/i.test(entry.attributes.FORCED);\n } // insert the new rendition\n\n\n mediaGroup[entry.attributes.NAME] = rendition;\n },\n\n discontinuity() {\n currentTimeline += 1;\n currentUri.discontinuity = true;\n this.manifest.discontinuityStarts.push(uris.length);\n },\n\n 'program-date-time'() {\n if (typeof this.manifest.dateTimeString === 'undefined') {\n // PROGRAM-DATE-TIME is a media-segment tag, but for backwards\n // compatibility, we add the first occurence of the PROGRAM-DATE-TIME tag\n // to the manifest object\n // TODO: Consider removing this in future major version\n this.manifest.dateTimeString = entry.dateTimeString;\n this.manifest.dateTimeObject = entry.dateTimeObject;\n }\n\n currentUri.dateTimeString = entry.dateTimeString;\n currentUri.dateTimeObject = entry.dateTimeObject;\n const {\n lastProgramDateTime\n } = this;\n this.lastProgramDateTime = new Date(entry.dateTimeString).getTime(); // We should extrapolate Program Date Time backward only during first program date time occurrence.\n // Once we have at least one program date time point, we can always extrapolate it forward using lastProgramDateTime reference.\n\n if (lastProgramDateTime === null) {\n // Extrapolate Program Date Time backward\n // Since it is first program date time occurrence we're assuming that\n // all this.manifest.segments have no program date time info\n this.manifest.segments.reduceRight((programDateTime, segment) => {\n segment.programDateTime = programDateTime - segment.duration * 1000;\n return segment.programDateTime;\n }, this.lastProgramDateTime);\n }\n },\n\n targetduration() {\n if (!isFinite(entry.duration) || entry.duration < 0) {\n this.trigger('warn', {\n message: 'ignoring invalid target duration: ' + entry.duration\n });\n return;\n }\n\n this.manifest.targetDuration = entry.duration;\n 
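// note (descriptive comment, not part of the original source): TARGETDURATION may appear\n // after EXT-X-SERVER-CONTROL, so the hold back defaults that depend on it are re-derived\n // here via setHoldBack above (HOLD-BACK is defaulted to, and clamped at, 3x the target duration).\n 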
setHoldBack.call(this, this.manifest);\n },\n\n start() {\n if (!entry.attributes || isNaN(entry.attributes['TIME-OFFSET'])) {\n this.trigger('warn', {\n message: 'ignoring start declaration without appropriate attribute list'\n });\n return;\n }\n\n this.manifest.start = {\n timeOffset: entry.attributes['TIME-OFFSET'],\n precise: entry.attributes.PRECISE\n };\n },\n\n 'cue-out'() {\n currentUri.cueOut = entry.data;\n },\n\n 'cue-out-cont'() {\n currentUri.cueOutCont = entry.data;\n },\n\n 'cue-in'() {\n currentUri.cueIn = entry.data;\n },\n\n 'skip'() {\n this.manifest.skip = camelCaseKeys(entry.attributes);\n this.warnOnMissingAttributes_('#EXT-X-SKIP', entry.attributes, ['SKIPPED-SEGMENTS']);\n },\n\n 'part'() {\n hasParts = true; // parts are always specifed before a segment\n\n const segmentIndex = this.manifest.segments.length;\n const part = camelCaseKeys(entry.attributes);\n currentUri.parts = currentUri.parts || [];\n currentUri.parts.push(part);\n\n if (part.byterange) {\n if (!part.byterange.hasOwnProperty('offset')) {\n part.byterange.offset = lastPartByterangeEnd;\n }\n\n lastPartByterangeEnd = part.byterange.offset + part.byterange.length;\n }\n\n const partIndex = currentUri.parts.length - 1;\n this.warnOnMissingAttributes_(`#EXT-X-PART #${partIndex} for segment #${segmentIndex}`, entry.attributes, ['URI', 'DURATION']);\n\n if (this.manifest.renditionReports) {\n this.manifest.renditionReports.forEach((r, i) => {\n if (!r.hasOwnProperty('lastPart')) {\n this.trigger('warn', {\n message: `#EXT-X-RENDITION-REPORT #${i} lacks required attribute(s): LAST-PART`\n });\n }\n });\n }\n },\n\n 'server-control'() {\n const attrs = this.manifest.serverControl = camelCaseKeys(entry.attributes);\n\n if (!attrs.hasOwnProperty('canBlockReload')) {\n attrs.canBlockReload = false;\n this.trigger('info', {\n message: '#EXT-X-SERVER-CONTROL defaulting CAN-BLOCK-RELOAD to false'\n });\n }\n\n setHoldBack.call(this, this.manifest);\n\n if (attrs.canSkipDateranges && !attrs.hasOwnProperty('canSkipUntil')) {\n this.trigger('warn', {\n message: '#EXT-X-SERVER-CONTROL lacks required attribute CAN-SKIP-UNTIL which is required when CAN-SKIP-DATERANGES is set'\n });\n }\n },\n\n 'preload-hint'() {\n // parts are always specifed before a segment\n const segmentIndex = this.manifest.segments.length;\n const hint = camelCaseKeys(entry.attributes);\n const isPart = hint.type && hint.type === 'PART';\n currentUri.preloadHints = currentUri.preloadHints || [];\n currentUri.preloadHints.push(hint);\n\n if (hint.byterange) {\n if (!hint.byterange.hasOwnProperty('offset')) {\n // use last part byterange end or zero if not a part.\n hint.byterange.offset = isPart ? 
lastPartByterangeEnd : 0;\n\n if (isPart) {\n lastPartByterangeEnd = hint.byterange.offset + hint.byterange.length;\n }\n }\n }\n\n const index = currentUri.preloadHints.length - 1;\n this.warnOnMissingAttributes_(`#EXT-X-PRELOAD-HINT #${index} for segment #${segmentIndex}`, entry.attributes, ['TYPE', 'URI']);\n\n if (!hint.type) {\n return;\n } // search through all preload hints except for the current one for\n // a duplicate type.\n\n\n for (let i = 0; i < currentUri.preloadHints.length - 1; i++) {\n const otherHint = currentUri.preloadHints[i];\n\n if (!otherHint.type) {\n continue;\n }\n\n if (otherHint.type === hint.type) {\n this.trigger('warn', {\n message: `#EXT-X-PRELOAD-HINT #${index} for segment #${segmentIndex} has the same TYPE ${hint.type} as preload hint #${i}`\n });\n }\n }\n },\n\n 'rendition-report'() {\n const report = camelCaseKeys(entry.attributes);\n this.manifest.renditionReports = this.manifest.renditionReports || [];\n this.manifest.renditionReports.push(report);\n const index = this.manifest.renditionReports.length - 1;\n const required = ['LAST-MSN', 'URI'];\n\n if (hasParts) {\n required.push('LAST-PART');\n }\n\n this.warnOnMissingAttributes_(`#EXT-X-RENDITION-REPORT #${index}`, entry.attributes, required);\n },\n\n 'part-inf'() {\n this.manifest.partInf = camelCaseKeys(entry.attributes);\n this.warnOnMissingAttributes_('#EXT-X-PART-INF', entry.attributes, ['PART-TARGET']);\n\n if (this.manifest.partInf.partTarget) {\n this.manifest.partTargetDuration = this.manifest.partInf.partTarget;\n }\n\n setHoldBack.call(this, this.manifest);\n },\n\n 'daterange'() {\n this.manifest.dateRanges.push(camelCaseKeys(entry.attributes));\n const index = this.manifest.dateRanges.length - 1;\n this.warnOnMissingAttributes_(`#EXT-X-DATERANGE #${index}`, entry.attributes, ['ID', 'START-DATE']);\n const dateRange = this.manifest.dateRanges[index];\n\n if (dateRange.endDate && dateRange.startDate && new Date(dateRange.endDate) < new Date(dateRange.startDate)) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE END-DATE must be equal to or later than the value of the START-DATE'\n });\n }\n\n if (dateRange.duration && dateRange.duration < 0) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE DURATION must not be negative'\n });\n }\n\n if (dateRange.plannedDuration && dateRange.plannedDuration < 0) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE PLANNED-DURATION must not be negative'\n });\n }\n\n const endOnNextYes = !!dateRange.endOnNext;\n\n if (endOnNextYes && !dateRange.class) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE with an END-ON-NEXT=YES attribute must have a CLASS attribute'\n });\n }\n\n if (endOnNextYes && (dateRange.duration || dateRange.endDate)) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE with an END-ON-NEXT=YES attribute must not contain DURATION or END-DATE attributes'\n });\n }\n\n if (dateRange.duration && dateRange.endDate) {\n const startDate = dateRange.startDate;\n const newDateInSeconds = startDate.getTime() + dateRange.duration * 1000;\n this.manifest.dateRanges[index].endDate = new Date(newDateInSeconds);\n }\n\n if (!dateRangeTags[dateRange.id]) {\n dateRangeTags[dateRange.id] = dateRange;\n } else {\n for (const attribute in dateRangeTags[dateRange.id]) {\n if (!!dateRange[attribute] && JSON.stringify(dateRangeTags[dateRange.id][attribute]) !== JSON.stringify(dateRange[attribute])) {\n this.trigger('warn', {\n message: 'EXT-X-DATERANGE tags with the same ID in a playlist must have the same attributes values'\n });\n 
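// note (descriptive comment, not part of the original source): warn once for the first\n // conflicting attribute of this duplicate ID, then stop comparing the rest.\n 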
break;\n }\n } // if tags with the same ID do not have conflicting attributes, merge them\n\n\n const dateRangeWithSameId = this.manifest.dateRanges.findIndex(dateRangeToFind => dateRangeToFind.id === dateRange.id);\n this.manifest.dateRanges[dateRangeWithSameId] = _extends(this.manifest.dateRanges[dateRangeWithSameId], dateRange);\n dateRangeTags[dateRange.id] = _extends(dateRangeTags[dateRange.id], dateRange); // after merging, delete the duplicate dateRange that was added last\n\n this.manifest.dateRanges.pop();\n }\n },\n\n 'independent-segments'() {\n this.manifest.independentSegments = true;\n },\n\n 'content-steering'() {\n this.manifest.contentSteering = camelCaseKeys(entry.attributes);\n this.warnOnMissingAttributes_('#EXT-X-CONTENT-STEERING', entry.attributes, ['SERVER-URI']);\n }\n\n })[entry.tagType] || noop).call(self);\n },\n\n uri() {\n currentUri.uri = entry.uri;\n uris.push(currentUri); // if no explicit duration was declared, use the target duration\n\n if (this.manifest.targetDuration && !('duration' in currentUri)) {\n this.trigger('warn', {\n message: 'defaulting segment duration to the target duration'\n });\n currentUri.duration = this.manifest.targetDuration;\n } // annotate with encryption information, if necessary\n\n\n if (key) {\n currentUri.key = key;\n }\n\n currentUri.timeline = currentTimeline; // annotate with initialization segment information, if necessary\n\n if (currentMap) {\n currentUri.map = currentMap;\n } // reset the last byterange end as it needs to be 0 between parts\n\n\n lastPartByterangeEnd = 0; // Once we have at least one program date time we can always extrapolate it forward\n\n if (this.lastProgramDateTime !== null) {\n currentUri.programDateTime = this.lastProgramDateTime;\n this.lastProgramDateTime += currentUri.duration * 1000;\n } // prepare for the next URI\n\n\n currentUri = {};\n },\n\n comment() {// comments are not important for playback\n },\n\n custom() {\n // if this is segment-level data attach the output to the segment\n if (entry.segment) {\n currentUri.custom = currentUri.custom || {};\n currentUri.custom[entry.customType] = entry.data; // if this is manifest-level data attach to the top level manifest object\n } else {\n this.manifest.custom = this.manifest.custom || {};\n this.manifest.custom[entry.customType] = entry.data;\n }\n }\n\n })[entry.type].call(self);\n });\n }\n\n warnOnMissingAttributes_(identifier, attributes, required) {\n const missing = [];\n required.forEach(function (key) {\n if (!attributes.hasOwnProperty(key)) {\n missing.push(key);\n }\n });\n\n if (missing.length) {\n this.trigger('warn', {\n message: `${identifier} lacks required attribute(s): ${missing.join(', ')}`\n });\n }\n }\n /**\n * Parse the input string and update the manifest object.\n *\n * @param {string} chunk a potentially incomplete portion of the manifest\n */\n\n\n push(chunk) {\n this.lineStream.push(chunk);\n }\n /**\n * Flush any remaining input. 
This can be handy if the last line of an M3U8\n * manifest did not contain a trailing newline but the file has been\n * completely received.\n */\n\n\n end() {\n // flush any buffered input\n this.lineStream.push('\\n');\n\n if (this.manifest.dateRanges.length && this.lastProgramDateTime === null) {\n this.trigger('warn', {\n message: 'A playlist with EXT-X-DATERANGE tag must contain atleast one EXT-X-PROGRAM-DATE-TIME tag'\n });\n }\n\n this.lastProgramDateTime = null;\n this.trigger('end');\n }\n /**\n * Add an additional parser for non-standard tags\n *\n * @param {Object} options a map of options for the added parser\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {string} options.customType the custom type to register to the output\n * @param {Function} [options.dataParser] function to parse the line into an object\n * @param {boolean} [options.segment] should tag data be attached to the segment object\n */\n\n\n addParser(options) {\n this.parseStream.addParser(options);\n }\n /**\n * Add a custom header mapper\n *\n * @param {Object} options\n * @param {RegExp} options.expression a regular expression to match the custom header\n * @param {Function} options.map function to translate tag into a different tag\n */\n\n\n addTagMapper(options) {\n this.parseStream.addTagMapper(options);\n }\n\n}\n\nexport { LineStream, ParseStream, Parser };\n","import window from 'global/window';\nvar regexs = {\n // to determine mime types\n mp4: /^(av0?1|avc0?[1234]|vp0?9|flac|opus|mp3|mp4a|mp4v|stpp.ttml.im1t)/,\n webm: /^(vp0?[89]|av0?1|opus|vorbis)/,\n ogg: /^(vp0?[89]|theora|flac|opus|vorbis)/,\n // to determine if a codec is audio or video\n video: /^(av0?1|avc0?[1234]|vp0?[89]|hvc1|hev1|theora|mp4v)/,\n audio: /^(mp4a|flac|vorbis|opus|ac-[34]|ec-3|alac|mp3|speex|aac)/,\n text: /^(stpp.ttml.im1t)/,\n // mux.js support regex\n muxerVideo: /^(avc0?1)/,\n muxerAudio: /^(mp4a)/,\n // match nothing as muxer does not support text right now.\n // there cannot never be a character before the start of a string\n // so this matches nothing.\n muxerText: /a^/\n};\nvar mediaTypes = ['video', 'audio', 'text'];\nvar upperMediaTypes = ['Video', 'Audio', 'Text'];\n/**\n * Replace the old apple-style `avc1..` codec string with the standard\n * `avc1.`\n *\n * @param {string} codec\n * Codec string to translate\n * @return {string}\n * The translated codec string\n */\n\nexport var translateLegacyCodec = function translateLegacyCodec(codec) {\n if (!codec) {\n return codec;\n }\n\n return codec.replace(/avc1\\.(\\d+)\\.(\\d+)/i, function (orig, profile, avcLevel) {\n var profileHex = ('00' + Number(profile).toString(16)).slice(-2);\n var avcLevelHex = ('00' + Number(avcLevel).toString(16)).slice(-2);\n return 'avc1.' 
+ profileHex + '00' + avcLevelHex;\n });\n};\n/**\n * Replace the old apple-style `avc1..` codec strings with the standard\n * `avc1.`\n *\n * @param {string[]} codecs\n * An array of codec strings to translate\n * @return {string[]}\n * The translated array of codec strings\n */\n\nexport var translateLegacyCodecs = function translateLegacyCodecs(codecs) {\n return codecs.map(translateLegacyCodec);\n};\n/**\n * Replace codecs in the codec string with the old apple-style `avc1..` to the\n * standard `avc1.`.\n *\n * @param {string} codecString\n * The codec string\n * @return {string}\n * The codec string with old apple-style codecs replaced\n *\n * @private\n */\n\nexport var mapLegacyAvcCodecs = function mapLegacyAvcCodecs(codecString) {\n return codecString.replace(/avc1\\.(\\d+)\\.(\\d+)/i, function (match) {\n return translateLegacyCodecs([match])[0];\n });\n};\n/**\n * @typedef {Object} ParsedCodecInfo\n * @property {number} codecCount\n * Number of codecs parsed\n * @property {string} [videoCodec]\n * Parsed video codec (if found)\n * @property {string} [videoObjectTypeIndicator]\n * Video object type indicator (if found)\n * @property {string|null} audioProfile\n * Audio profile\n */\n\n/**\n * Parses a codec string to retrieve the number of codecs specified, the video codec and\n * object type indicator, and the audio profile.\n *\n * @param {string} [codecString]\n * The codec string to parse\n * @return {ParsedCodecInfo}\n * Parsed codec info\n */\n\nexport var parseCodecs = function parseCodecs(codecString) {\n if (codecString === void 0) {\n codecString = '';\n }\n\n var codecs = codecString.split(',');\n var result = [];\n codecs.forEach(function (codec) {\n codec = codec.trim();\n var codecType;\n mediaTypes.forEach(function (name) {\n var match = regexs[name].exec(codec.toLowerCase());\n\n if (!match || match.length <= 1) {\n return;\n }\n\n codecType = name; // maintain codec case\n\n var type = codec.substring(0, match[1].length);\n var details = codec.replace(type, '');\n result.push({\n type: type,\n details: details,\n mediaType: name\n });\n });\n\n if (!codecType) {\n result.push({\n type: codec,\n details: '',\n mediaType: 'unknown'\n });\n }\n });\n return result;\n};\n/**\n * Returns a ParsedCodecInfo object for the default alternate audio playlist if there is\n * a default alternate audio playlist for the provided audio group.\n *\n * @param {Object} master\n * The master playlist\n * @param {string} audioGroupId\n * ID of the audio group for which to find the default codec info\n * @return {ParsedCodecInfo}\n * Parsed codec info\n */\n\nexport var codecsFromDefault = function codecsFromDefault(master, audioGroupId) {\n if (!master.mediaGroups.AUDIO || !audioGroupId) {\n return null;\n }\n\n var audioGroup = master.mediaGroups.AUDIO[audioGroupId];\n\n if (!audioGroup) {\n return null;\n }\n\n for (var name in audioGroup) {\n var audioType = audioGroup[name];\n\n if (audioType.default && audioType.playlists) {\n // codec should be the same for all playlists within the audio type\n return parseCodecs(audioType.playlists[0].attributes.CODECS);\n }\n }\n\n return null;\n};\nexport var isVideoCodec = function isVideoCodec(codec) {\n if (codec === void 0) {\n codec = '';\n }\n\n return regexs.video.test(codec.trim().toLowerCase());\n};\nexport var isAudioCodec = function isAudioCodec(codec) {\n if (codec === void 0) {\n codec = '';\n }\n\n return regexs.audio.test(codec.trim().toLowerCase());\n};\nexport var isTextCodec = function isTextCodec(codec) {\n if (codec === 
void 0) {\n codec = '';\n }\n\n return regexs.text.test(codec.trim().toLowerCase());\n};\nexport var getMimeForCodec = function getMimeForCodec(codecString) {\n if (!codecString || typeof codecString !== 'string') {\n return;\n }\n\n var codecs = codecString.toLowerCase().split(',').map(function (c) {\n return translateLegacyCodec(c.trim());\n }); // default to video type\n\n var type = 'video'; // only change to audio type if the only codec we have is\n // audio\n\n if (codecs.length === 1 && isAudioCodec(codecs[0])) {\n type = 'audio';\n } else if (codecs.length === 1 && isTextCodec(codecs[0])) {\n // text uses application/ for now\n type = 'application';\n } // default the container to mp4\n\n\n var container = 'mp4'; // every codec must be able to go into the container\n // for that container to be the correct one\n\n if (codecs.every(function (c) {\n return regexs.mp4.test(c);\n })) {\n container = 'mp4';\n } else if (codecs.every(function (c) {\n return regexs.webm.test(c);\n })) {\n container = 'webm';\n } else if (codecs.every(function (c) {\n return regexs.ogg.test(c);\n })) {\n container = 'ogg';\n }\n\n return type + \"/\" + container + \";codecs=\\\"\" + codecString + \"\\\"\";\n};\nexport var browserSupportsCodec = function browserSupportsCodec(codecString) {\n if (codecString === void 0) {\n codecString = '';\n }\n\n return window.MediaSource && window.MediaSource.isTypeSupported && window.MediaSource.isTypeSupported(getMimeForCodec(codecString)) || false;\n};\nexport var muxerSupportsCodec = function muxerSupportsCodec(codecString) {\n if (codecString === void 0) {\n codecString = '';\n }\n\n return codecString.toLowerCase().split(',').every(function (codec) {\n codec = codec.trim(); // any match is supported.\n\n for (var i = 0; i < upperMediaTypes.length; i++) {\n var type = upperMediaTypes[i];\n\n if (regexs[\"muxer\" + type].test(codec)) {\n return true;\n }\n }\n\n return false;\n });\n};\nexport var DEFAULT_AUDIO_CODEC = 'mp4a.40.2';\nexport var DEFAULT_VIDEO_CODEC = 'avc1.4d400d';","var MPEGURL_REGEX = /^(audio|video|application)\\/(x-|vnd\\.apple\\.)?mpegurl/i;\nvar DASH_REGEX = /^application\\/dash\\+xml/i;\n/**\n * Returns a string that describes the type of source based on a video source object's\n * media type.\n *\n * @see {@link https://dev.w3.org/html5/pf-summary/video.html#dom-source-type|Source Type}\n *\n * @param {string} type\n * Video source object media type\n * @return {('hls'|'dash'|'vhs-json'|null)}\n * VHS source type string\n */\n\nexport var simpleTypeFromSourceType = function simpleTypeFromSourceType(type) {\n if (MPEGURL_REGEX.test(type)) {\n return 'hls';\n }\n\n if (DASH_REGEX.test(type)) {\n return 'dash';\n } // Denotes the special case of a manifest object passed to http-streaming instead of a\n // source URL.\n //\n // See https://en.wikipedia.org/wiki/Media_type for details on specifying media types.\n //\n // In this case, vnd stands for vendor, video.js for the organization, VHS for this\n // project, and the +json suffix identifies the structure of the media type.\n\n\n if (type === 'application/vnd.videojs.vhs+json') {\n return 'vhs-json';\n }\n\n return null;\n};","import window from 'global/window'; // const log2 = Math.log2 ? 
Math.log2 : (x) => (Math.log(x) / Math.log(2));\n\nvar repeat = function repeat(str, len) {\n var acc = '';\n\n while (len--) {\n acc += str;\n }\n\n return acc;\n}; // count the number of bits it would take to represent a number\n// we used to do this with log2 but BigInt does not support builtin math\n// Math.ceil(log2(x));\n\n\nexport var countBits = function countBits(x) {\n return x.toString(2).length;\n}; // count the number of whole bytes it would take to represent a number\n\nexport var countBytes = function countBytes(x) {\n return Math.ceil(countBits(x) / 8);\n};\nexport var padStart = function padStart(b, len, str) {\n if (str === void 0) {\n str = ' ';\n }\n\n return (repeat(str, len) + b.toString()).slice(-len);\n};\nexport var isArrayBufferView = function isArrayBufferView(obj) {\n if (ArrayBuffer.isView === 'function') {\n return ArrayBuffer.isView(obj);\n }\n\n return obj && obj.buffer instanceof ArrayBuffer;\n};\nexport var isTypedArray = function isTypedArray(obj) {\n return isArrayBufferView(obj);\n};\nexport var toUint8 = function toUint8(bytes) {\n if (bytes instanceof Uint8Array) {\n return bytes;\n }\n\n if (!Array.isArray(bytes) && !isTypedArray(bytes) && !(bytes instanceof ArrayBuffer)) {\n // any non-number or NaN leads to empty uint8array\n // eslint-disable-next-line\n if (typeof bytes !== 'number' || typeof bytes === 'number' && bytes !== bytes) {\n bytes = 0;\n } else {\n bytes = [bytes];\n }\n }\n\n return new Uint8Array(bytes && bytes.buffer || bytes, bytes && bytes.byteOffset || 0, bytes && bytes.byteLength || 0);\n};\nexport var toHexString = function toHexString(bytes) {\n bytes = toUint8(bytes);\n var str = '';\n\n for (var i = 0; i < bytes.length; i++) {\n str += padStart(bytes[i].toString(16), 2, '0');\n }\n\n return str;\n};\nexport var toBinaryString = function toBinaryString(bytes) {\n bytes = toUint8(bytes);\n var str = '';\n\n for (var i = 0; i < bytes.length; i++) {\n str += padStart(bytes[i].toString(2), 8, '0');\n }\n\n return str;\n};\nvar BigInt = window.BigInt || Number;\nvar BYTE_TABLE = [BigInt('0x1'), BigInt('0x100'), BigInt('0x10000'), BigInt('0x1000000'), BigInt('0x100000000'), BigInt('0x10000000000'), BigInt('0x1000000000000'), BigInt('0x100000000000000'), BigInt('0x10000000000000000')];\nexport var ENDIANNESS = function () {\n var a = new Uint16Array([0xFFCC]);\n var b = new Uint8Array(a.buffer, a.byteOffset, a.byteLength);\n\n if (b[0] === 0xFF) {\n return 'big';\n }\n\n if (b[0] === 0xCC) {\n return 'little';\n }\n\n return 'unknown';\n}();\nexport var IS_BIG_ENDIAN = ENDIANNESS === 'big';\nexport var IS_LITTLE_ENDIAN = ENDIANNESS === 'little';\nexport var bytesToNumber = function bytesToNumber(bytes, _temp) {\n var _ref = _temp === void 0 ? {} : _temp,\n _ref$signed = _ref.signed,\n signed = _ref$signed === void 0 ? false : _ref$signed,\n _ref$le = _ref.le,\n le = _ref$le === void 0 ? false : _ref$le;\n\n bytes = toUint8(bytes);\n var fn = le ? 'reduce' : 'reduceRight';\n var obj = bytes[fn] ? bytes[fn] : Array.prototype[fn];\n var number = obj.call(bytes, function (total, byte, i) {\n var exponent = le ? 
i : Math.abs(i + 1 - bytes.length);\n return total + BigInt(byte) * BYTE_TABLE[exponent];\n }, BigInt(0));\n\n if (signed) {\n var max = BYTE_TABLE[bytes.length] / BigInt(2) - BigInt(1);\n number = BigInt(number);\n\n if (number > max) {\n number -= max;\n number -= max;\n number -= BigInt(2);\n }\n }\n\n return Number(number);\n};\nexport var numberToBytes = function numberToBytes(number, _temp2) {\n var _ref2 = _temp2 === void 0 ? {} : _temp2,\n _ref2$le = _ref2.le,\n le = _ref2$le === void 0 ? false : _ref2$le;\n\n // eslint-disable-next-line\n if (typeof number !== 'bigint' && typeof number !== 'number' || typeof number === 'number' && number !== number) {\n number = 0;\n }\n\n number = BigInt(number);\n var byteCount = countBytes(number);\n var bytes = new Uint8Array(new ArrayBuffer(byteCount));\n\n for (var i = 0; i < byteCount; i++) {\n var byteIndex = le ? i : Math.abs(i + 1 - bytes.length);\n bytes[byteIndex] = Number(number / BYTE_TABLE[i] & BigInt(0xFF));\n\n if (number < 0) {\n bytes[byteIndex] = Math.abs(~bytes[byteIndex]);\n bytes[byteIndex] -= i === 0 ? 1 : 2;\n }\n }\n\n return bytes;\n};\nexport var bytesToString = function bytesToString(bytes) {\n if (!bytes) {\n return '';\n } // TODO: should toUint8 handle cases where we only have 8 bytes\n // but report more since this is a Uint16+ Array?\n\n\n bytes = Array.prototype.slice.call(bytes);\n var string = String.fromCharCode.apply(null, toUint8(bytes));\n\n try {\n return decodeURIComponent(escape(string));\n } catch (e) {// if decodeURIComponent/escape fails, we are dealing with partial\n // or full non string data. Just return the potentially garbled string.\n }\n\n return string;\n};\nexport var stringToBytes = function stringToBytes(string, stringIsBytes) {\n if (typeof string !== 'string' && string && typeof string.toString === 'function') {\n string = string.toString();\n }\n\n if (typeof string !== 'string') {\n return new Uint8Array();\n } // If the string already is bytes, we don't have to do this\n // otherwise we do this so that we split multi length characters\n // into individual bytes\n\n\n if (!stringIsBytes) {\n string = unescape(encodeURIComponent(string));\n }\n\n var view = new Uint8Array(string.length);\n\n for (var i = 0; i < string.length; i++) {\n view[i] = string.charCodeAt(i);\n }\n\n return view;\n};\nexport var concatTypedArrays = function concatTypedArrays() {\n for (var _len = arguments.length, buffers = new Array(_len), _key = 0; _key < _len; _key++) {\n buffers[_key] = arguments[_key];\n }\n\n buffers = buffers.filter(function (b) {\n return b && (b.byteLength || b.length) && typeof b !== 'string';\n });\n\n if (buffers.length <= 1) {\n // for 0 length we will return empty uint8\n // for 1 length we return the first uint8\n return toUint8(buffers[0]);\n }\n\n var totalLen = buffers.reduce(function (total, buf, i) {\n return total + (buf.byteLength || buf.length);\n }, 0);\n var tempBuffer = new Uint8Array(totalLen);\n var offset = 0;\n buffers.forEach(function (buf) {\n buf = toUint8(buf);\n tempBuffer.set(buf, offset);\n offset += buf.byteLength;\n });\n return tempBuffer;\n};\n/**\n * Check if the bytes \"b\" are contained within bytes \"a\".\n *\n * @param {Uint8Array|Array} a\n * Bytes to check in\n *\n * @param {Uint8Array|Array} b\n * Bytes to check for\n *\n * @param {Object} options\n * options\n *\n * @param {Array|Uint8Array} [offset=0]\n * offset to use when looking at bytes in a\n *\n * @param {Array|Uint8Array} [mask=[]]\n * mask to use on bytes before comparison.\n *\n * @return 
{boolean}\n * If all bytes in b are inside of a, taking into account\n * bit masks.\n */\n\nexport var bytesMatch = function bytesMatch(a, b, _temp3) {\n var _ref3 = _temp3 === void 0 ? {} : _temp3,\n _ref3$offset = _ref3.offset,\n offset = _ref3$offset === void 0 ? 0 : _ref3$offset,\n _ref3$mask = _ref3.mask,\n mask = _ref3$mask === void 0 ? [] : _ref3$mask;\n\n a = toUint8(a);\n b = toUint8(b); // ie 11 does not support uint8 every\n\n var fn = b.every ? b.every : Array.prototype.every;\n return b.length && a.length - offset >= b.length && // ie 11 doesn't support every on uin8\n fn.call(b, function (bByte, i) {\n var aByte = mask[i] ? mask[i] & a[offset + i] : a[offset + i];\n return bByte === aByte;\n });\n};\nexport var sliceBytes = function sliceBytes(src, start, end) {\n if (Uint8Array.prototype.slice) {\n return Uint8Array.prototype.slice.call(src, start, end);\n }\n\n return new Uint8Array(Array.prototype.slice.call(src, start, end));\n};\nexport var reverseBytes = function reverseBytes(src) {\n if (src.reverse) {\n return src.reverse();\n }\n\n return Array.prototype.reverse.call(src);\n};","/*! @name mpd-parser @version 1.3.0 @license Apache-2.0 */\nimport resolveUrl from '@videojs/vhs-utils/es/resolve-url';\nimport window from 'global/window';\nimport { forEachMediaGroup } from '@videojs/vhs-utils/es/media-groups';\nimport decodeB64ToUint8Array from '@videojs/vhs-utils/es/decode-b64-to-uint8-array';\nimport { DOMParser } from '@xmldom/xmldom';\n\nvar version = \"1.3.0\";\n\nconst isObject = obj => {\n return !!obj && typeof obj === 'object';\n};\n\nconst merge = (...objects) => {\n return objects.reduce((result, source) => {\n if (typeof source !== 'object') {\n return result;\n }\n\n Object.keys(source).forEach(key => {\n if (Array.isArray(result[key]) && Array.isArray(source[key])) {\n result[key] = result[key].concat(source[key]);\n } else if (isObject(result[key]) && isObject(source[key])) {\n result[key] = merge(result[key], source[key]);\n } else {\n result[key] = source[key];\n }\n });\n return result;\n }, {});\n};\nconst values = o => Object.keys(o).map(k => o[k]);\n\nconst range = (start, end) => {\n const result = [];\n\n for (let i = start; i < end; i++) {\n result.push(i);\n }\n\n return result;\n};\nconst flatten = lists => lists.reduce((x, y) => x.concat(y), []);\nconst from = list => {\n if (!list.length) {\n return [];\n }\n\n const result = [];\n\n for (let i = 0; i < list.length; i++) {\n result.push(list[i]);\n }\n\n return result;\n};\nconst findIndexes = (l, key) => l.reduce((a, e, i) => {\n if (e[key]) {\n a.push(i);\n }\n\n return a;\n}, []);\n/**\n * Returns a union of the included lists provided each element can be identified by a key.\n *\n * @param {Array} list - list of lists to get the union of\n * @param {Function} keyFunction - the function to use as a key for each element\n *\n * @return {Array} the union of the arrays\n */\n\nconst union = (lists, keyFunction) => {\n return values(lists.reduce((acc, list) => {\n list.forEach(el => {\n acc[keyFunction(el)] = el;\n });\n return acc;\n }, {}));\n};\n\nvar errors = {\n INVALID_NUMBER_OF_PERIOD: 'INVALID_NUMBER_OF_PERIOD',\n INVALID_NUMBER_OF_CONTENT_STEERING: 'INVALID_NUMBER_OF_CONTENT_STEERING',\n DASH_EMPTY_MANIFEST: 'DASH_EMPTY_MANIFEST',\n DASH_INVALID_XML: 'DASH_INVALID_XML',\n NO_BASE_URL: 'NO_BASE_URL',\n MISSING_SEGMENT_INFORMATION: 'MISSING_SEGMENT_INFORMATION',\n SEGMENT_TIME_UNSPECIFIED: 'SEGMENT_TIME_UNSPECIFIED',\n UNSUPPORTED_UTC_TIMING_SCHEME: 
'UNSUPPORTED_UTC_TIMING_SCHEME'\n};\n\n/**\n * @typedef {Object} SingleUri\n * @property {string} uri - relative location of segment\n * @property {string} resolvedUri - resolved location of segment\n * @property {Object} byterange - Object containing information on how to make byte range\n * requests following byte-range-spec per RFC2616.\n * @property {String} byterange.length - length of range request\n * @property {String} byterange.offset - byte offset of range request\n *\n * @see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1\n */\n\n/**\n * Converts a URLType node (5.3.9.2.3 Table 13) to a segment object\n * that conforms to how m3u8-parser is structured\n *\n * @see https://github.com/videojs/m3u8-parser\n *\n * @param {string} baseUrl - baseUrl provided by nodes\n * @param {string} source - source url for segment\n * @param {string} range - optional range used for range calls,\n * follows RFC 2616, Clause 14.35.1\n * @return {SingleUri} full segment information transformed into a format similar\n * to m3u8-parser\n */\n\nconst urlTypeToSegment = ({\n baseUrl = '',\n source = '',\n range = '',\n indexRange = ''\n}) => {\n const segment = {\n uri: source,\n resolvedUri: resolveUrl(baseUrl || '', source)\n };\n\n if (range || indexRange) {\n const rangeStr = range ? range : indexRange;\n const ranges = rangeStr.split('-'); // default to parsing this as a BigInt if possible\n\n let startRange = window.BigInt ? window.BigInt(ranges[0]) : parseInt(ranges[0], 10);\n let endRange = window.BigInt ? window.BigInt(ranges[1]) : parseInt(ranges[1], 10); // convert back to a number if less than MAX_SAFE_INTEGER\n\n if (startRange < Number.MAX_SAFE_INTEGER && typeof startRange === 'bigint') {\n startRange = Number(startRange);\n }\n\n if (endRange < Number.MAX_SAFE_INTEGER && typeof endRange === 'bigint') {\n endRange = Number(endRange);\n }\n\n let length;\n\n if (typeof endRange === 'bigint' || typeof startRange === 'bigint') {\n length = window.BigInt(endRange) - window.BigInt(startRange) + window.BigInt(1);\n } else {\n length = endRange - startRange + 1;\n }\n\n if (typeof length === 'bigint' && length < Number.MAX_SAFE_INTEGER) {\n length = Number(length);\n } // byterange should be inclusive according to\n // RFC 2616, Clause 14.35.1\n\n\n segment.byterange = {\n length,\n offset: startRange\n };\n }\n\n return segment;\n};\nconst byteRangeToString = byterange => {\n // `endRange` is one less than `offset + length` because the HTTP range\n // header uses inclusive ranges\n let endRange;\n\n if (typeof byterange.offset === 'bigint' || typeof byterange.length === 'bigint') {\n endRange = window.BigInt(byterange.offset) + window.BigInt(byterange.length) - window.BigInt(1);\n } else {\n endRange = byterange.offset + byterange.length - 1;\n }\n\n return `${byterange.offset}-${endRange}`;\n};\n\n/**\n * parse the end number attribue that can be a string\n * number, or undefined.\n *\n * @param {string|number|undefined} endNumber\n * The end number attribute.\n *\n * @return {number|null}\n * The result of parsing the end number.\n */\n\nconst parseEndNumber = endNumber => {\n if (endNumber && typeof endNumber !== 'number') {\n endNumber = parseInt(endNumber, 10);\n }\n\n if (isNaN(endNumber)) {\n return null;\n }\n\n return endNumber;\n};\n/**\n * Functions for calculating the range of available segments in static and dynamic\n * manifests.\n */\n\n\nconst segmentRange = {\n /**\n * Returns the entire range of available segments for a static MPD\n *\n * @param {Object} 
attributes\n * Inheritied MPD attributes\n * @return {{ start: number, end: number }}\n * The start and end numbers for available segments\n */\n static(attributes) {\n const {\n duration,\n timescale = 1,\n sourceDuration,\n periodDuration\n } = attributes;\n const endNumber = parseEndNumber(attributes.endNumber);\n const segmentDuration = duration / timescale;\n\n if (typeof endNumber === 'number') {\n return {\n start: 0,\n end: endNumber\n };\n }\n\n if (typeof periodDuration === 'number') {\n return {\n start: 0,\n end: periodDuration / segmentDuration\n };\n }\n\n return {\n start: 0,\n end: sourceDuration / segmentDuration\n };\n },\n\n /**\n * Returns the current live window range of available segments for a dynamic MPD\n *\n * @param {Object} attributes\n * Inheritied MPD attributes\n * @return {{ start: number, end: number }}\n * The start and end numbers for available segments\n */\n dynamic(attributes) {\n const {\n NOW,\n clientOffset,\n availabilityStartTime,\n timescale = 1,\n duration,\n periodStart = 0,\n minimumUpdatePeriod = 0,\n timeShiftBufferDepth = Infinity\n } = attributes;\n const endNumber = parseEndNumber(attributes.endNumber); // clientOffset is passed in at the top level of mpd-parser and is an offset calculated\n // after retrieving UTC server time.\n\n const now = (NOW + clientOffset) / 1000; // WC stands for Wall Clock.\n // Convert the period start time to EPOCH.\n\n const periodStartWC = availabilityStartTime + periodStart; // Period end in EPOCH is manifest's retrieval time + time until next update.\n\n const periodEndWC = now + minimumUpdatePeriod;\n const periodDuration = periodEndWC - periodStartWC;\n const segmentCount = Math.ceil(periodDuration * timescale / duration);\n const availableStart = Math.floor((now - periodStartWC - timeShiftBufferDepth) * timescale / duration);\n const availableEnd = Math.floor((now - periodStartWC) * timescale / duration);\n return {\n start: Math.max(0, availableStart),\n end: typeof endNumber === 'number' ? endNumber : Math.min(segmentCount, availableEnd)\n };\n }\n\n};\n/**\n * Maps a range of numbers to objects with information needed to build the corresponding\n * segment list\n *\n * @name toSegmentsCallback\n * @function\n * @param {number} number\n * Number of the segment\n * @param {number} index\n * Index of the number in the range list\n * @return {{ number: Number, duration: Number, timeline: Number, time: Number }}\n * Object with segment timing and duration info\n */\n\n/**\n * Returns a callback for Array.prototype.map for mapping a range of numbers to\n * information needed to build the segment list.\n *\n * @param {Object} attributes\n * Inherited MPD attributes\n * @return {toSegmentsCallback}\n * Callback map function\n */\n\nconst toSegments = attributes => number => {\n const {\n duration,\n timescale = 1,\n periodStart,\n startNumber = 1\n } = attributes;\n return {\n number: startNumber + number,\n duration: duration / timescale,\n timeline: periodStart,\n time: number * duration\n };\n};\n/**\n * Returns a list of objects containing segment timing and duration info used for\n * building the list of segments. 
This uses the @duration attribute specified\n * in the MPD manifest to derive the range of segments.\n *\n * @param {Object} attributes\n * Inherited MPD attributes\n * @return {{number: number, duration: number, time: number, timeline: number}[]}\n * List of Objects with segment timing and duration info\n */\n\nconst parseByDuration = attributes => {\n const {\n type,\n duration,\n timescale = 1,\n periodDuration,\n sourceDuration\n } = attributes;\n const {\n start,\n end\n } = segmentRange[type](attributes);\n const segments = range(start, end).map(toSegments(attributes));\n\n if (type === 'static') {\n const index = segments.length - 1; // section is either a period or the full source\n\n const sectionDuration = typeof periodDuration === 'number' ? periodDuration : sourceDuration; // final segment may be less than full segment duration\n\n segments[index].duration = sectionDuration - duration / timescale * index;\n }\n\n return segments;\n};\n\n/**\n * Translates SegmentBase into a set of segments.\n * (DASH SPEC Section 5.3.9.3.2) contains a set of nodes. Each\n * node should be translated into segment.\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @return {Object.} list of segments\n */\n\nconst segmentsFromBase = attributes => {\n const {\n baseUrl,\n initialization = {},\n sourceDuration,\n indexRange = '',\n periodStart,\n presentationTime,\n number = 0,\n duration\n } = attributes; // base url is required for SegmentBase to work, per spec (Section 5.3.9.2.1)\n\n if (!baseUrl) {\n throw new Error(errors.NO_BASE_URL);\n }\n\n const initSegment = urlTypeToSegment({\n baseUrl,\n source: initialization.sourceURL,\n range: initialization.range\n });\n const segment = urlTypeToSegment({\n baseUrl,\n source: baseUrl,\n indexRange\n });\n segment.map = initSegment; // If there is a duration, use it, otherwise use the given duration of the source\n // (since SegmentBase is only for one total segment)\n\n if (duration) {\n const segmentTimeInfo = parseByDuration(attributes);\n\n if (segmentTimeInfo.length) {\n segment.duration = segmentTimeInfo[0].duration;\n segment.timeline = segmentTimeInfo[0].timeline;\n }\n } else if (sourceDuration) {\n segment.duration = sourceDuration;\n segment.timeline = periodStart;\n } // If presentation time is provided, these segments are being generated by SIDX\n // references, and should use the time provided. For the general case of SegmentBase,\n // there should only be one segment in the period, so its presentation time is the same\n // as its period start.\n\n\n segment.presentationTime = presentationTime || periodStart;\n segment.number = number;\n return [segment];\n};\n/**\n * Given a playlist, a sidx box, and a baseUrl, update the segment list of the playlist\n * according to the sidx information given.\n *\n * playlist.sidx has metadadata about the sidx where-as the sidx param\n * is the parsed sidx box itself.\n *\n * @param {Object} playlist the playlist to update the sidx information for\n * @param {Object} sidx the parsed sidx box\n * @return {Object} the playlist object with the updated sidx information\n */\n\nconst addSidxSegmentsToPlaylist$1 = (playlist, sidx, baseUrl) => {\n // Retain init segment information\n const initSegment = playlist.sidx.map ? 
playlist.sidx.map : null; // Retain source duration from initial main manifest parsing\n\n const sourceDuration = playlist.sidx.duration; // Retain source timeline\n\n const timeline = playlist.timeline || 0;\n const sidxByteRange = playlist.sidx.byterange;\n const sidxEnd = sidxByteRange.offset + sidxByteRange.length; // Retain timescale of the parsed sidx\n\n const timescale = sidx.timescale; // referenceType 1 refers to other sidx boxes\n\n const mediaReferences = sidx.references.filter(r => r.referenceType !== 1);\n const segments = [];\n const type = playlist.endList ? 'static' : 'dynamic';\n const periodStart = playlist.sidx.timeline;\n let presentationTime = periodStart;\n let number = playlist.mediaSequence || 0; // firstOffset is the offset from the end of the sidx box\n\n let startIndex; // eslint-disable-next-line\n\n if (typeof sidx.firstOffset === 'bigint') {\n startIndex = window.BigInt(sidxEnd) + sidx.firstOffset;\n } else {\n startIndex = sidxEnd + sidx.firstOffset;\n }\n\n for (let i = 0; i < mediaReferences.length; i++) {\n const reference = sidx.references[i]; // size of the referenced (sub)segment\n\n const size = reference.referencedSize; // duration of the referenced (sub)segment, in the timescale\n // this will be converted to seconds when generating segments\n\n const duration = reference.subsegmentDuration; // should be an inclusive range\n\n let endIndex; // eslint-disable-next-line\n\n if (typeof startIndex === 'bigint') {\n endIndex = startIndex + window.BigInt(size) - window.BigInt(1);\n } else {\n endIndex = startIndex + size - 1;\n }\n\n const indexRange = `${startIndex}-${endIndex}`;\n const attributes = {\n baseUrl,\n timescale,\n timeline,\n periodStart,\n presentationTime,\n number,\n duration,\n sourceDuration,\n indexRange,\n type\n };\n const segment = segmentsFromBase(attributes)[0];\n\n if (initSegment) {\n segment.map = initSegment;\n }\n\n segments.push(segment);\n\n if (typeof startIndex === 'bigint') {\n startIndex += window.BigInt(size);\n } else {\n startIndex += size;\n }\n\n presentationTime += duration / timescale;\n number++;\n }\n\n playlist.segments = segments;\n return playlist;\n};\n\nconst SUPPORTED_MEDIA_TYPES = ['AUDIO', 'SUBTITLES']; // allow one 60fps frame as leniency (arbitrarily chosen)\n\nconst TIME_FUDGE = 1 / 60;\n/**\n * Given a list of timelineStarts, combines, dedupes, and sorts them.\n *\n * @param {TimelineStart[]} timelineStarts - list of timeline starts\n *\n * @return {TimelineStart[]} the combined and deduped timeline starts\n */\n\nconst getUniqueTimelineStarts = timelineStarts => {\n return union(timelineStarts, ({\n timeline\n }) => timeline).sort((a, b) => a.timeline > b.timeline ? 
1 : -1);\n};\n/**\n * Finds the playlist with the matching NAME attribute.\n *\n * @param {Array} playlists - playlists to search through\n * @param {string} name - the NAME attribute to search for\n *\n * @return {Object|null} the matching playlist object, or null\n */\n\nconst findPlaylistWithName = (playlists, name) => {\n for (let i = 0; i < playlists.length; i++) {\n if (playlists[i].attributes.NAME === name) {\n return playlists[i];\n }\n }\n\n return null;\n};\n/**\n * Gets a flattened array of media group playlists.\n *\n * @param {Object} manifest - the main manifest object\n *\n * @return {Array} the media group playlists\n */\n\nconst getMediaGroupPlaylists = manifest => {\n let mediaGroupPlaylists = [];\n forEachMediaGroup(manifest, SUPPORTED_MEDIA_TYPES, (properties, type, group, label) => {\n mediaGroupPlaylists = mediaGroupPlaylists.concat(properties.playlists || []);\n });\n return mediaGroupPlaylists;\n};\n/**\n * Updates the playlist's media sequence numbers.\n *\n * @param {Object} config - options object\n * @param {Object} config.playlist - the playlist to update\n * @param {number} config.mediaSequence - the mediaSequence number to start with\n */\n\nconst updateMediaSequenceForPlaylist = ({\n playlist,\n mediaSequence\n}) => {\n playlist.mediaSequence = mediaSequence;\n playlist.segments.forEach((segment, index) => {\n segment.number = playlist.mediaSequence + index;\n });\n};\n/**\n * Updates the media and discontinuity sequence numbers of newPlaylists given oldPlaylists\n * and a complete list of timeline starts.\n *\n * If no matching playlist is found, only the discontinuity sequence number of the playlist\n * will be updated.\n *\n * Since early available timelines are not supported, at least one segment must be present.\n *\n * @param {Object} config - options object\n * @param {Object[]} oldPlaylists - the old playlists to use as a reference\n * @param {Object[]} newPlaylists - the new playlists to update\n * @param {Object} timelineStarts - all timelineStarts seen in the stream to this point\n */\n\nconst updateSequenceNumbers = ({\n oldPlaylists,\n newPlaylists,\n timelineStarts\n}) => {\n newPlaylists.forEach(playlist => {\n playlist.discontinuitySequence = timelineStarts.findIndex(function ({\n timeline\n }) {\n return timeline === playlist.timeline;\n }); // Playlists NAMEs come from DASH Representation IDs, which are mandatory\n // (see ISO_23009-1-2012 5.3.5.2).\n //\n // If the same Representation existed in a prior Period, it will retain the same NAME.\n\n const oldPlaylist = findPlaylistWithName(oldPlaylists, playlist.attributes.NAME);\n\n if (!oldPlaylist) {\n // Since this is a new playlist, the media sequence values can start from 0 without\n // consequence.\n return;\n } // TODO better support for live SIDX\n //\n // As of this writing, mpd-parser does not support multiperiod SIDX (in live or VOD).\n // This is evident by a playlist only having a single SIDX reference. In a multiperiod\n // playlist there would need to be multiple SIDX references. In addition, live SIDX is\n // not supported when the SIDX properties change on refreshes.\n //\n // In the future, if support needs to be added, the merging logic here can be called\n // after SIDX references are resolved. 
For now, exit early to prevent exceptions being\n // thrown due to undefined references.\n\n\n if (playlist.sidx) {\n return;\n } // Since we don't yet support early available timelines, we don't need to support\n // playlists with no segments.\n\n\n const firstNewSegment = playlist.segments[0];\n const oldMatchingSegmentIndex = oldPlaylist.segments.findIndex(function (oldSegment) {\n return Math.abs(oldSegment.presentationTime - firstNewSegment.presentationTime) < TIME_FUDGE;\n }); // No matching segment from the old playlist means the entire playlist was refreshed.\n // In this case the media sequence should account for this update, and the new segments\n // should be marked as discontinuous from the prior content, since the last prior\n // timeline was removed.\n\n if (oldMatchingSegmentIndex === -1) {\n updateMediaSequenceForPlaylist({\n playlist,\n mediaSequence: oldPlaylist.mediaSequence + oldPlaylist.segments.length\n });\n playlist.segments[0].discontinuity = true;\n playlist.discontinuityStarts.unshift(0); // No matching segment does not necessarily mean there's missing content.\n //\n // If the new playlist's timeline is the same as the last seen segment's timeline,\n // then a discontinuity can be added to identify that there's potentially missing\n // content. If there's no missing content, the discontinuity should still be rather\n // harmless. It's possible that if segment durations are accurate enough, that the\n // existence of a gap can be determined using the presentation times and durations,\n // but if the segment timing info is off, it may introduce more problems than simply\n // adding the discontinuity.\n //\n // If the new playlist's timeline is different from the last seen segment's timeline,\n // then a discontinuity can be added to identify that this is the first seen segment\n // of a new timeline. 
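\n //\n // (Concretely: with timelineStarts of [{ timeline: 0 }, { timeline: 10 }] and a refreshed playlist whose\n // segments now all belong to the new timeline 10, the findIndex at the top of this callback returns 1\n // even though the timeline-10 discontinuity is still present in the playlist.)\n //\n // 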
However, the logic at the start of this function that\n // determined the disconinuity sequence by timeline index is now off by one (the\n // discontinuity of the newest timeline hasn't yet fallen off the manifest...since\n // we added it), so the disconinuity sequence must be decremented.\n //\n // A period may also have a duration of zero, so the case of no segments is handled\n // here even though we don't yet support early available periods.\n\n if (!oldPlaylist.segments.length && playlist.timeline > oldPlaylist.timeline || oldPlaylist.segments.length && playlist.timeline > oldPlaylist.segments[oldPlaylist.segments.length - 1].timeline) {\n playlist.discontinuitySequence--;\n }\n\n return;\n } // If the first segment matched with a prior segment on a discontinuity (it's matching\n // on the first segment of a period), then the discontinuitySequence shouldn't be the\n // timeline's matching one, but instead should be the one prior, and the first segment\n // of the new manifest should be marked with a discontinuity.\n //\n // The reason for this special case is that discontinuity sequence shows how many\n // discontinuities have fallen off of the playlist, and discontinuities are marked on\n // the first segment of a new \"timeline.\" Because of this, while DASH will retain that\n // Period while the \"timeline\" exists, HLS keeps track of it via the discontinuity\n // sequence, and that first segment is an indicator, but can be removed before that\n // timeline is gone.\n\n\n const oldMatchingSegment = oldPlaylist.segments[oldMatchingSegmentIndex];\n\n if (oldMatchingSegment.discontinuity && !firstNewSegment.discontinuity) {\n firstNewSegment.discontinuity = true;\n playlist.discontinuityStarts.unshift(0);\n playlist.discontinuitySequence--;\n }\n\n updateMediaSequenceForPlaylist({\n playlist,\n mediaSequence: oldPlaylist.segments[oldMatchingSegmentIndex].number\n });\n });\n};\n/**\n * Given an old parsed manifest object and a new parsed manifest object, updates the\n * sequence and timing values within the new manifest to ensure that it lines up with the\n * old.\n *\n * @param {Array} oldManifest - the old main manifest object\n * @param {Array} newManifest - the new main manifest object\n *\n * @return {Object} the updated new manifest object\n */\n\nconst positionManifestOnTimeline = ({\n oldManifest,\n newManifest\n}) => {\n // Starting from v4.1.2 of the IOP, section 4.4.3.3 states:\n //\n // \"MPD@availabilityStartTime and Period@start shall not be changed over MPD updates.\"\n //\n // This was added from https://github.com/Dash-Industry-Forum/DASH-IF-IOP/issues/160\n //\n // Because of this change, and the difficulty of supporting periods with changing start\n // times, periods with changing start times are not supported. This makes the logic much\n // simpler, since periods with the same start time can be considerred the same period\n // across refreshes.\n //\n // To give an example as to the difficulty of handling periods where the start time may\n // change, if a single period manifest is refreshed with another manifest with a single\n // period, and both the start and end times are increased, then the only way to determine\n // if it's a new period or an old one that has changed is to look through the segments of\n // each playlist and determine the presentation time bounds to find a match. 
In addition,\n // if the period start changed to exceed the old period end, then there would be no\n // match, and it would not be possible to determine whether the refreshed period is a new\n // one or the old one.\n const oldPlaylists = oldManifest.playlists.concat(getMediaGroupPlaylists(oldManifest));\n const newPlaylists = newManifest.playlists.concat(getMediaGroupPlaylists(newManifest)); // Save all seen timelineStarts to the new manifest. Although this potentially means that\n // there's a \"memory leak\" in that it will never stop growing, in reality, only a couple\n // of properties are saved for each seen Period. Even long running live streams won't\n // generate too many Periods, unless the stream is watched for decades. In the future,\n // this can be optimized by mapping to discontinuity sequence numbers for each timeline,\n // but it may not become an issue, and the additional info can be useful for debugging.\n\n newManifest.timelineStarts = getUniqueTimelineStarts([oldManifest.timelineStarts, newManifest.timelineStarts]);\n updateSequenceNumbers({\n oldPlaylists,\n newPlaylists,\n timelineStarts: newManifest.timelineStarts\n });\n return newManifest;\n};\n\nconst generateSidxKey = sidx => sidx && sidx.uri + '-' + byteRangeToString(sidx.byterange);\n\nconst mergeDiscontiguousPlaylists = playlists => {\n // Break out playlists into groups based on their baseUrl\n const playlistsByBaseUrl = playlists.reduce(function (acc, cur) {\n if (!acc[cur.attributes.baseUrl]) {\n acc[cur.attributes.baseUrl] = [];\n }\n\n acc[cur.attributes.baseUrl].push(cur);\n return acc;\n }, {});\n let allPlaylists = [];\n Object.values(playlistsByBaseUrl).forEach(playlistGroup => {\n const mergedPlaylists = values(playlistGroup.reduce((acc, playlist) => {\n // assuming playlist IDs are the same across periods\n // TODO: handle multiperiod where representation sets are not the same\n // across periods\n const name = playlist.attributes.id + (playlist.attributes.lang || '');\n\n if (!acc[name]) {\n // First Period\n acc[name] = playlist;\n acc[name].attributes.timelineStarts = [];\n } else {\n // Subsequent Periods\n if (playlist.segments) {\n // first segment of subsequent periods signal a discontinuity\n if (playlist.segments[0]) {\n playlist.segments[0].discontinuity = true;\n }\n\n acc[name].segments.push(...playlist.segments);\n } // bubble up contentProtection, this assumes all DRM content\n // has the same contentProtection\n\n\n if (playlist.attributes.contentProtection) {\n acc[name].attributes.contentProtection = playlist.attributes.contentProtection;\n }\n }\n\n acc[name].attributes.timelineStarts.push({\n // Although they represent the same number, it's important to have both to make it\n // compatible with HLS potentially having a similar attribute.\n start: playlist.attributes.periodStart,\n timeline: playlist.attributes.periodStart\n });\n return acc;\n }, {}));\n allPlaylists = allPlaylists.concat(mergedPlaylists);\n });\n return allPlaylists.map(playlist => {\n playlist.discontinuityStarts = findIndexes(playlist.segments || [], 'discontinuity');\n return playlist;\n });\n};\n\nconst addSidxSegmentsToPlaylist = (playlist, sidxMapping) => {\n const sidxKey = generateSidxKey(playlist.sidx);\n const sidxMatch = sidxKey && sidxMapping[sidxKey] && sidxMapping[sidxKey].sidx;\n\n if (sidxMatch) {\n addSidxSegmentsToPlaylist$1(playlist, sidxMatch, playlist.sidx.resolvedUri);\n }\n\n return playlist;\n};\nconst addSidxSegmentsToPlaylists = (playlists, sidxMapping = {}) => {\n if 
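\n // an empty sidxMapping means no sidx boxes have been parsed, so there are no segments to add\n 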
(!Object.keys(sidxMapping).length) {\n return playlists;\n }\n\n for (const i in playlists) {\n playlists[i] = addSidxSegmentsToPlaylist(playlists[i], sidxMapping);\n }\n\n return playlists;\n};\nconst formatAudioPlaylist = ({\n attributes,\n segments,\n sidx,\n mediaSequence,\n discontinuitySequence,\n discontinuityStarts\n}, isAudioOnly) => {\n const playlist = {\n attributes: {\n NAME: attributes.id,\n BANDWIDTH: attributes.bandwidth,\n CODECS: attributes.codecs,\n ['PROGRAM-ID']: 1\n },\n uri: '',\n endList: attributes.type === 'static',\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n targetDuration: attributes.duration,\n discontinuitySequence,\n discontinuityStarts,\n timelineStarts: attributes.timelineStarts,\n mediaSequence,\n segments\n };\n\n if (attributes.contentProtection) {\n playlist.contentProtection = attributes.contentProtection;\n }\n\n if (attributes.serviceLocation) {\n playlist.attributes.serviceLocation = attributes.serviceLocation;\n }\n\n if (sidx) {\n playlist.sidx = sidx;\n }\n\n if (isAudioOnly) {\n playlist.attributes.AUDIO = 'audio';\n playlist.attributes.SUBTITLES = 'subs';\n }\n\n return playlist;\n};\nconst formatVttPlaylist = ({\n attributes,\n segments,\n mediaSequence,\n discontinuityStarts,\n discontinuitySequence\n}) => {\n if (typeof segments === 'undefined') {\n // vtt tracks may use single file in BaseURL\n segments = [{\n uri: attributes.baseUrl,\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n duration: attributes.sourceDuration,\n number: 0\n }]; // targetDuration should be the same duration as the only segment\n\n attributes.duration = attributes.sourceDuration;\n }\n\n const m3u8Attributes = {\n NAME: attributes.id,\n BANDWIDTH: attributes.bandwidth,\n ['PROGRAM-ID']: 1\n };\n\n if (attributes.codecs) {\n m3u8Attributes.CODECS = attributes.codecs;\n }\n\n const vttPlaylist = {\n attributes: m3u8Attributes,\n uri: '',\n endList: attributes.type === 'static',\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n targetDuration: attributes.duration,\n timelineStarts: attributes.timelineStarts,\n discontinuityStarts,\n discontinuitySequence,\n mediaSequence,\n segments\n };\n\n if (attributes.serviceLocation) {\n vttPlaylist.attributes.serviceLocation = attributes.serviceLocation;\n }\n\n return vttPlaylist;\n};\nconst organizeAudioPlaylists = (playlists, sidxMapping = {}, isAudioOnly = false) => {\n let mainPlaylist;\n const formattedPlaylists = playlists.reduce((a, playlist) => {\n const role = playlist.attributes.role && playlist.attributes.role.value || '';\n const language = playlist.attributes.lang || '';\n let label = playlist.attributes.label || 'main';\n\n if (language && !playlist.attributes.label) {\n const roleLabel = role ? 
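\n // e.g. a lang of 'en' with role 'alternate' yields the label 'en (alternate)'\n 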
` (${role})` : '';\n label = `${playlist.attributes.lang}${roleLabel}`;\n }\n\n if (!a[label]) {\n a[label] = {\n language,\n autoselect: true,\n default: role === 'main',\n playlists: [],\n uri: ''\n };\n }\n\n const formatted = addSidxSegmentsToPlaylist(formatAudioPlaylist(playlist, isAudioOnly), sidxMapping);\n a[label].playlists.push(formatted);\n\n if (typeof mainPlaylist === 'undefined' && role === 'main') {\n mainPlaylist = playlist;\n mainPlaylist.default = true;\n }\n\n return a;\n }, {}); // if no playlists have role \"main\", mark the first as main\n\n if (!mainPlaylist) {\n const firstLabel = Object.keys(formattedPlaylists)[0];\n formattedPlaylists[firstLabel].default = true;\n }\n\n return formattedPlaylists;\n};\nconst organizeVttPlaylists = (playlists, sidxMapping = {}) => {\n return playlists.reduce((a, playlist) => {\n const label = playlist.attributes.label || playlist.attributes.lang || 'text';\n\n if (!a[label]) {\n a[label] = {\n language: label,\n default: false,\n autoselect: false,\n playlists: [],\n uri: ''\n };\n }\n\n a[label].playlists.push(addSidxSegmentsToPlaylist(formatVttPlaylist(playlist), sidxMapping));\n return a;\n }, {});\n};\n\nconst organizeCaptionServices = captionServices => captionServices.reduce((svcObj, svc) => {\n if (!svc) {\n return svcObj;\n }\n\n svc.forEach(service => {\n const {\n channel,\n language\n } = service;\n svcObj[language] = {\n autoselect: false,\n default: false,\n instreamId: channel,\n language\n };\n\n if (service.hasOwnProperty('aspectRatio')) {\n svcObj[language].aspectRatio = service.aspectRatio;\n }\n\n if (service.hasOwnProperty('easyReader')) {\n svcObj[language].easyReader = service.easyReader;\n }\n\n if (service.hasOwnProperty('3D')) {\n svcObj[language]['3D'] = service['3D'];\n }\n });\n return svcObj;\n}, {});\n\nconst formatVideoPlaylist = ({\n attributes,\n segments,\n sidx,\n discontinuityStarts\n}) => {\n const playlist = {\n attributes: {\n NAME: attributes.id,\n AUDIO: 'audio',\n SUBTITLES: 'subs',\n RESOLUTION: {\n width: attributes.width,\n height: attributes.height\n },\n CODECS: attributes.codecs,\n BANDWIDTH: attributes.bandwidth,\n ['PROGRAM-ID']: 1\n },\n uri: '',\n endList: attributes.type === 'static',\n timeline: attributes.periodStart,\n resolvedUri: attributes.baseUrl || '',\n targetDuration: attributes.duration,\n discontinuityStarts,\n timelineStarts: attributes.timelineStarts,\n segments\n };\n\n if (attributes.frameRate) {\n playlist.attributes['FRAME-RATE'] = attributes.frameRate;\n }\n\n if (attributes.contentProtection) {\n playlist.contentProtection = attributes.contentProtection;\n }\n\n if (attributes.serviceLocation) {\n playlist.attributes.serviceLocation = attributes.serviceLocation;\n }\n\n if (sidx) {\n playlist.sidx = sidx;\n }\n\n return playlist;\n};\n\nconst videoOnly = ({\n attributes\n}) => attributes.mimeType === 'video/mp4' || attributes.mimeType === 'video/webm' || attributes.contentType === 'video';\n\nconst audioOnly = ({\n attributes\n}) => attributes.mimeType === 'audio/mp4' || attributes.mimeType === 'audio/webm' || attributes.contentType === 'audio';\n\nconst vttOnly = ({\n attributes\n}) => attributes.mimeType === 'text/vtt' || attributes.contentType === 'text';\n/**\n * Contains start and timeline properties denoting a timeline start. 
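\n * (For example, a Period that starts 10 seconds into the presentation yields { start: 10, timeline: 10 }.)\n * 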
For DASH, these will\n * be the same number.\n *\n * @typedef {Object} TimelineStart\n * @property {number} start - the start time of the timeline\n * @property {number} timeline - the timeline number\n */\n\n/**\n * Adds appropriate media and discontinuity sequence values to the segments and playlists.\n *\n * Throughout mpd-parser, the `number` attribute is used in relation to `startNumber`, a\n * DASH specific attribute used in constructing segment URI's from templates. However, from\n * an HLS perspective, the `number` attribute on a segment would be its `mediaSequence`\n * value, which should start at the original media sequence value (or 0) and increment by 1\n * for each segment thereafter. Since DASH's `startNumber` values are independent per\n * period, it doesn't make sense to use it for `number`. Instead, assume everything starts\n * from a 0 mediaSequence value and increment from there.\n *\n * Note that VHS currently doesn't use the `number` property, but it can be helpful for\n * debugging and making sense of the manifest.\n *\n * For live playlists, to account for values increasing in manifests when periods are\n * removed on refreshes, merging logic should be used to update the numbers to their\n * appropriate values (to ensure they're sequential and increasing).\n *\n * @param {Object[]} playlists - the playlists to update\n * @param {TimelineStart[]} timelineStarts - the timeline starts for the manifest\n */\n\n\nconst addMediaSequenceValues = (playlists, timelineStarts) => {\n // increment all segments sequentially\n playlists.forEach(playlist => {\n playlist.mediaSequence = 0;\n playlist.discontinuitySequence = timelineStarts.findIndex(function ({\n timeline\n }) {\n return timeline === playlist.timeline;\n });\n\n if (!playlist.segments) {\n return;\n }\n\n playlist.segments.forEach((segment, index) => {\n segment.number = index;\n });\n });\n};\n/**\n * Given a media group object, flattens all playlists within the media group into a single\n * array.\n *\n * @param {Object} mediaGroupObject - the media group object\n *\n * @return {Object[]}\n * The media group playlists\n */\n\nconst flattenMediaGroupPlaylists = mediaGroupObject => {\n if (!mediaGroupObject) {\n return [];\n }\n\n return Object.keys(mediaGroupObject).reduce((acc, label) => {\n const labelContents = mediaGroupObject[label];\n return acc.concat(labelContents.playlists);\n }, []);\n};\nconst toM3u8 = ({\n dashPlaylists,\n locations,\n contentSteering,\n sidxMapping = {},\n previousManifest,\n eventStream\n}) => {\n if (!dashPlaylists.length) {\n return {};\n } // grab all main manifest attributes\n\n\n const {\n sourceDuration: duration,\n type,\n suggestedPresentationDelay,\n minimumUpdatePeriod\n } = dashPlaylists[0].attributes;\n const videoPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(videoOnly)).map(formatVideoPlaylist);\n const audioPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(audioOnly));\n const vttPlaylists = mergeDiscontiguousPlaylists(dashPlaylists.filter(vttOnly));\n const captions = dashPlaylists.map(playlist => playlist.attributes.captionServices).filter(Boolean);\n const manifest = {\n allowCache: true,\n discontinuityStarts: [],\n segments: [],\n endList: true,\n mediaGroups: {\n AUDIO: {},\n VIDEO: {},\n ['CLOSED-CAPTIONS']: {},\n SUBTITLES: {}\n },\n uri: '',\n duration,\n playlists: addSidxSegmentsToPlaylists(videoPlaylists, sidxMapping)\n };\n\n if (minimumUpdatePeriod >= 0) {\n manifest.minimumUpdatePeriod = minimumUpdatePeriod * 1000;\n }\n\n if 
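\n // carry through any MPD-level Location elements (alternative URLs from which the manifest may be fetched)\n 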
(locations) {\n manifest.locations = locations;\n }\n\n if (contentSteering) {\n manifest.contentSteering = contentSteering;\n }\n\n if (type === 'dynamic') {\n manifest.suggestedPresentationDelay = suggestedPresentationDelay;\n }\n\n if (eventStream && eventStream.length > 0) {\n manifest.eventStream = eventStream;\n }\n\n const isAudioOnly = manifest.playlists.length === 0;\n const organizedAudioGroup = audioPlaylists.length ? organizeAudioPlaylists(audioPlaylists, sidxMapping, isAudioOnly) : null;\n const organizedVttGroup = vttPlaylists.length ? organizeVttPlaylists(vttPlaylists, sidxMapping) : null;\n const formattedPlaylists = videoPlaylists.concat(flattenMediaGroupPlaylists(organizedAudioGroup), flattenMediaGroupPlaylists(organizedVttGroup));\n const playlistTimelineStarts = formattedPlaylists.map(({\n timelineStarts\n }) => timelineStarts);\n manifest.timelineStarts = getUniqueTimelineStarts(playlistTimelineStarts);\n addMediaSequenceValues(formattedPlaylists, manifest.timelineStarts);\n\n if (organizedAudioGroup) {\n manifest.mediaGroups.AUDIO.audio = organizedAudioGroup;\n }\n\n if (organizedVttGroup) {\n manifest.mediaGroups.SUBTITLES.subs = organizedVttGroup;\n }\n\n if (captions.length) {\n manifest.mediaGroups['CLOSED-CAPTIONS'].cc = organizeCaptionServices(captions);\n }\n\n if (previousManifest) {\n return positionManifestOnTimeline({\n oldManifest: previousManifest,\n newManifest: manifest\n });\n }\n\n return manifest;\n};\n\n/**\n * Calculates the R (repetition) value for a live stream (for the final segment\n * in a manifest where the r value is negative 1)\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {number} time\n * current time (typically the total time up until the final segment)\n * @param {number} duration\n * duration property for the given \n *\n * @return {number}\n * R value to reach the end of the given period\n */\nconst getLiveRValue = (attributes, time, duration) => {\n const {\n NOW,\n clientOffset,\n availabilityStartTime,\n timescale = 1,\n periodStart = 0,\n minimumUpdatePeriod = 0\n } = attributes;\n const now = (NOW + clientOffset) / 1000;\n const periodStartWC = availabilityStartTime + periodStart;\n const periodEndWC = now + minimumUpdatePeriod;\n const periodDuration = periodEndWC - periodStartWC;\n return Math.ceil((periodDuration * timescale - time) / duration);\n};\n/**\n * Uses information provided by SegmentTemplate.SegmentTimeline to determine segment\n * timing and duration\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]} segmentTimeline\n * List of objects representing the attributes of each S element contained within\n *\n * @return {{number: number, duration: number, time: number, timeline: number}[]}\n * List of Objects with segment timing and duration info\n */\n\n\nconst parseByTimeline = (attributes, segmentTimeline) => {\n const {\n type,\n minimumUpdatePeriod = 0,\n media = '',\n sourceDuration,\n timescale = 1,\n startNumber = 1,\n periodStart: timeline\n } = attributes;\n const segments = [];\n let time = -1;\n\n for (let sIndex = 0; sIndex < segmentTimeline.length; sIndex++) {\n const S = segmentTimeline[sIndex];\n const duration = S.d;\n const repeat = S.r || 0;\n const segmentTime = S.t || 0;\n\n if (time < 0) {\n // first segment\n time = segmentTime;\n }\n\n if (segmentTime && segmentTime > time) {\n // 
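the S@t start time jumped ahead of the accumulated segment end time, i.e. a gap or\n // 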
discontinuity\n // TODO: How to handle this type of discontinuity\n // timeline++ here would treat it like HLS discontuity and content would\n // get appended without gap\n // E.G.\n // \n // \n // \n // \n // would have $Time$ values of [0, 1, 2, 5]\n // should this be appened at time positions [0, 1, 2, 3],(#EXT-X-DISCONTINUITY)\n // or [0, 1, 2, gap, gap, 5]? (#EXT-X-GAP)\n // does the value of sourceDuration consider this when calculating arbitrary\n // negative @r repeat value?\n // E.G. Same elements as above with this added at the end\n // \n // with a sourceDuration of 10\n // Would the 2 gaps be included in the time duration calculations resulting in\n // 8 segments with $Time$ values of [0, 1, 2, 5, 6, 7, 8, 9] or 10 segments\n // with $Time$ values of [0, 1, 2, 5, 6, 7, 8, 9, 10, 11] ?\n time = segmentTime;\n }\n\n let count;\n\n if (repeat < 0) {\n const nextS = sIndex + 1;\n\n if (nextS === segmentTimeline.length) {\n // last segment\n if (type === 'dynamic' && minimumUpdatePeriod > 0 && media.indexOf('$Number$') > 0) {\n count = getLiveRValue(attributes, time, duration);\n } else {\n // TODO: This may be incorrect depending on conclusion of TODO above\n count = (sourceDuration * timescale - time) / duration;\n }\n } else {\n count = (segmentTimeline[nextS].t - time) / duration;\n }\n } else {\n count = repeat + 1;\n }\n\n const end = startNumber + segments.length + count;\n let number = startNumber + segments.length;\n\n while (number < end) {\n segments.push({\n number,\n duration: duration / timescale,\n time,\n timeline\n });\n time += duration;\n number++;\n }\n }\n\n return segments;\n};\n\nconst identifierPattern = /\\$([A-z]*)(?:(%0)([0-9]+)d)?\\$/g;\n/**\n * Replaces template identifiers with corresponding values. To be used as the callback\n * for String.prototype.replace\n *\n * @name replaceCallback\n * @function\n * @param {string} match\n * Entire match of identifier\n * @param {string} identifier\n * Name of matched identifier\n * @param {string} format\n * Format tag string. Its presence indicates that padding is expected\n * @param {string} width\n * Desired length of the replaced value. 
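\n * (For example, '$Number%05d$' with a Number value of 7 is replaced with '00007'.)\n * 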
Values less than this width shall be left\n * zero padded\n * @return {string}\n * Replacement for the matched identifier\n */\n\n/**\n * Returns a function to be used as a callback for String.prototype.replace to replace\n * template identifiers\n *\n * @param {Obect} values\n * Object containing values that shall be used to replace known identifiers\n * @param {number} values.RepresentationID\n * Value of the Representation@id attribute\n * @param {number} values.Number\n * Number of the corresponding segment\n * @param {number} values.Bandwidth\n * Value of the Representation@bandwidth attribute.\n * @param {number} values.Time\n * Timestamp value of the corresponding segment\n * @return {replaceCallback}\n * Callback to be used with String.prototype.replace to replace identifiers\n */\n\nconst identifierReplacement = values => (match, identifier, format, width) => {\n if (match === '$$') {\n // escape sequence\n return '$';\n }\n\n if (typeof values[identifier] === 'undefined') {\n return match;\n }\n\n const value = '' + values[identifier];\n\n if (identifier === 'RepresentationID') {\n // Format tag shall not be present with RepresentationID\n return value;\n }\n\n if (!format) {\n width = 1;\n } else {\n width = parseInt(width, 10);\n }\n\n if (value.length >= width) {\n return value;\n }\n\n return `${new Array(width - value.length + 1).join('0')}${value}`;\n};\n/**\n * Constructs a segment url from a template string\n *\n * @param {string} url\n * Template string to construct url from\n * @param {Obect} values\n * Object containing values that shall be used to replace known identifiers\n * @param {number} values.RepresentationID\n * Value of the Representation@id attribute\n * @param {number} values.Number\n * Number of the corresponding segment\n * @param {number} values.Bandwidth\n * Value of the Representation@bandwidth attribute.\n * @param {number} values.Time\n * Timestamp value of the corresponding segment\n * @return {string}\n * Segment url with identifiers replaced\n */\n\nconst constructTemplateUrl = (url, values) => url.replace(identifierPattern, identifierReplacement(values));\n/**\n * Generates a list of objects containing timing and duration information about each\n * segment needed to generate segment uris and the complete segment object\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]|undefined} segmentTimeline\n * List of objects representing the attributes of each S element contained within\n * the SegmentTimeline element\n * @return {{number: number, duration: number, time: number, timeline: number}[]}\n * List of Objects with segment timing and duration info\n */\n\nconst parseTemplateInfo = (attributes, segmentTimeline) => {\n if (!attributes.duration && !segmentTimeline) {\n // if neither @duration or SegmentTimeline are present, then there shall be exactly\n // one media segment\n return [{\n number: attributes.startNumber || 1,\n duration: attributes.sourceDuration,\n time: 0,\n timeline: attributes.periodStart\n }];\n }\n\n if (attributes.duration) {\n return parseByDuration(attributes);\n }\n\n return parseByTimeline(attributes, segmentTimeline);\n};\n/**\n * Generates a list of segments using information provided by the SegmentTemplate element\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]|undefined} segmentTimeline\n * List of objects 
representing the attributes of each S element contained within\n * the SegmentTimeline element\n * @return {Object[]}\n * List of segment objects\n */\n\nconst segmentsFromTemplate = (attributes, segmentTimeline) => {\n const templateValues = {\n RepresentationID: attributes.id,\n Bandwidth: attributes.bandwidth || 0\n };\n const {\n initialization = {\n sourceURL: '',\n range: ''\n }\n } = attributes;\n const mapSegment = urlTypeToSegment({\n baseUrl: attributes.baseUrl,\n source: constructTemplateUrl(initialization.sourceURL, templateValues),\n range: initialization.range\n });\n const segments = parseTemplateInfo(attributes, segmentTimeline);\n return segments.map(segment => {\n templateValues.Number = segment.number;\n templateValues.Time = segment.time;\n const uri = constructTemplateUrl(attributes.media || '', templateValues); // See DASH spec section 5.3.9.2.2\n // - if timescale isn't present on any level, default to 1.\n\n const timescale = attributes.timescale || 1; // - if presentationTimeOffset isn't present on any level, default to 0\n\n const presentationTimeOffset = attributes.presentationTimeOffset || 0;\n const presentationTime = // Even if the @t attribute is not specified for the segment, segment.time is\n // calculated in mpd-parser prior to this, so it's assumed to be available.\n attributes.periodStart + (segment.time - presentationTimeOffset) / timescale;\n const map = {\n uri,\n timeline: segment.timeline,\n duration: segment.duration,\n resolvedUri: resolveUrl(attributes.baseUrl || '', uri),\n map: mapSegment,\n number: segment.number,\n presentationTime\n };\n return map;\n });\n};\n\n/**\n * Converts a (of type URLType from the DASH spec 5.3.9.2 Table 14)\n * to an object that matches the output of a segment in videojs/mpd-parser\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object} segmentUrl\n * node to translate into a segment object\n * @return {Object} translated segment object\n */\n\nconst SegmentURLToSegmentObject = (attributes, segmentUrl) => {\n const {\n baseUrl,\n initialization = {}\n } = attributes;\n const initSegment = urlTypeToSegment({\n baseUrl,\n source: initialization.sourceURL,\n range: initialization.range\n });\n const segment = urlTypeToSegment({\n baseUrl,\n source: segmentUrl.media,\n range: segmentUrl.mediaRange\n });\n segment.map = initSegment;\n return segment;\n};\n/**\n * Generates a list of segments using information provided by the SegmentList element\n * SegmentList (DASH SPEC Section 5.3.9.3.2) contains a set of nodes. 
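\n * These are the SegmentURL children, whose @media and @mediaRange attributes are read by\n * SegmentURLToSegmentObject above.\n * 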
Each\n * node should be translated into segment.\n *\n * @param {Object} attributes\n * Object containing all inherited attributes from parent elements with attribute\n * names as keys\n * @param {Object[]|undefined} segmentTimeline\n * List of objects representing the attributes of each S element contained within\n * the SegmentTimeline element\n * @return {Object.} list of segments\n */\n\n\nconst segmentsFromList = (attributes, segmentTimeline) => {\n const {\n duration,\n segmentUrls = [],\n periodStart\n } = attributes; // Per spec (5.3.9.2.1) no way to determine segment duration OR\n // if both SegmentTimeline and @duration are defined, it is outside of spec.\n\n if (!duration && !segmentTimeline || duration && segmentTimeline) {\n throw new Error(errors.SEGMENT_TIME_UNSPECIFIED);\n }\n\n const segmentUrlMap = segmentUrls.map(segmentUrlObject => SegmentURLToSegmentObject(attributes, segmentUrlObject));\n let segmentTimeInfo;\n\n if (duration) {\n segmentTimeInfo = parseByDuration(attributes);\n }\n\n if (segmentTimeline) {\n segmentTimeInfo = parseByTimeline(attributes, segmentTimeline);\n }\n\n const segments = segmentTimeInfo.map((segmentTime, index) => {\n if (segmentUrlMap[index]) {\n const segment = segmentUrlMap[index]; // See DASH spec section 5.3.9.2.2\n // - if timescale isn't present on any level, default to 1.\n\n const timescale = attributes.timescale || 1; // - if presentationTimeOffset isn't present on any level, default to 0\n\n const presentationTimeOffset = attributes.presentationTimeOffset || 0;\n segment.timeline = segmentTime.timeline;\n segment.duration = segmentTime.duration;\n segment.number = segmentTime.number;\n segment.presentationTime = periodStart + (segmentTime.time - presentationTimeOffset) / timescale;\n return segment;\n } // Since we're mapping we should get rid of any blank segments (in case\n // the given SegmentTimeline is handling for more elements than we have\n // SegmentURLs for).\n\n }).filter(segment => segment);\n return segments;\n};\n\nconst generateSegments = ({\n attributes,\n segmentInfo\n}) => {\n let segmentAttributes;\n let segmentsFn;\n\n if (segmentInfo.template) {\n segmentsFn = segmentsFromTemplate;\n segmentAttributes = merge(attributes, segmentInfo.template);\n } else if (segmentInfo.base) {\n segmentsFn = segmentsFromBase;\n segmentAttributes = merge(attributes, segmentInfo.base);\n } else if (segmentInfo.list) {\n segmentsFn = segmentsFromList;\n segmentAttributes = merge(attributes, segmentInfo.list);\n }\n\n const segmentsInfo = {\n attributes\n };\n\n if (!segmentsFn) {\n return segmentsInfo;\n }\n\n const segments = segmentsFn(segmentAttributes, segmentInfo.segmentTimeline); // The @duration attribute will be used to determin the playlist's targetDuration which\n // must be in seconds. 
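\n // (For example, an @duration of 180000 with a @timescale of 90000 becomes a 2 second target duration.)\n // 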
Since we've generated the segment list, we no longer need\n // @duration to be in @timescale units, so we can convert it here.\n\n if (segmentAttributes.duration) {\n const {\n duration,\n timescale = 1\n } = segmentAttributes;\n segmentAttributes.duration = duration / timescale;\n } else if (segments.length) {\n // if there is no @duration attribute, use the largest segment duration as\n // as target duration\n segmentAttributes.duration = segments.reduce((max, segment) => {\n return Math.max(max, Math.ceil(segment.duration));\n }, 0);\n } else {\n segmentAttributes.duration = 0;\n }\n\n segmentsInfo.attributes = segmentAttributes;\n segmentsInfo.segments = segments; // This is a sidx box without actual segment information\n\n if (segmentInfo.base && segmentAttributes.indexRange) {\n segmentsInfo.sidx = segments[0];\n segmentsInfo.segments = [];\n }\n\n return segmentsInfo;\n};\nconst toPlaylists = representations => representations.map(generateSegments);\n\nconst findChildren = (element, name) => from(element.childNodes).filter(({\n tagName\n}) => tagName === name);\nconst getContent = element => element.textContent.trim();\n\n/**\n * Converts the provided string that may contain a division operation to a number.\n *\n * @param {string} value - the provided string value\n *\n * @return {number} the parsed string value\n */\nconst parseDivisionValue = value => {\n return parseFloat(value.split('/').reduce((prev, current) => prev / current));\n};\n\nconst parseDuration = str => {\n const SECONDS_IN_YEAR = 365 * 24 * 60 * 60;\n const SECONDS_IN_MONTH = 30 * 24 * 60 * 60;\n const SECONDS_IN_DAY = 24 * 60 * 60;\n const SECONDS_IN_HOUR = 60 * 60;\n const SECONDS_IN_MIN = 60; // P10Y10M10DT10H10M10.1S\n\n const durationRegex = /P(?:(\\d*)Y)?(?:(\\d*)M)?(?:(\\d*)D)?(?:T(?:(\\d*)H)?(?:(\\d*)M)?(?:([\\d.]*)S)?)?/;\n const match = durationRegex.exec(str);\n\n if (!match) {\n return 0;\n }\n\n const [year, month, day, hour, minute, second] = match.slice(1);\n return parseFloat(year || 0) * SECONDS_IN_YEAR + parseFloat(month || 0) * SECONDS_IN_MONTH + parseFloat(day || 0) * SECONDS_IN_DAY + parseFloat(hour || 0) * SECONDS_IN_HOUR + parseFloat(minute || 0) * SECONDS_IN_MIN + parseFloat(second || 0);\n};\nconst parseDate = str => {\n // Date format without timezone according to ISO 8601\n // YYY-MM-DDThh:mm:ss.ssssss\n const dateRegex = /^\\d+-\\d+-\\d+T\\d+:\\d+:\\d+(\\.\\d+)?$/; // If the date string does not specifiy a timezone, we must specifiy UTC. This is\n // expressed by ending with 'Z'\n\n if (dateRegex.test(str)) {\n str += 'Z';\n }\n\n return Date.parse(str);\n};\n\nconst parsers = {\n /**\n * Specifies the duration of the entire Media Presentation. Format is a duration string\n * as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n mediaPresentationDuration(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the Segment availability start time for all Segments referred to in this\n * MPD. For a dynamic manifest, it specifies the anchor for the earliest availability\n * time. Format is a date string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The date as seconds from unix epoch\n */\n availabilityStartTime(value) {\n return parseDate(value) / 1000;\n },\n\n /**\n * Specifies the smallest period between potential changes to the MPD. 
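\n * (For example, minimumUpdatePeriod=\"PT30S\" parses to 30 here; toM3u8 above then exposes it as 30000 milliseconds.)\n * 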
Format is a\n * duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n minimumUpdatePeriod(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the suggested presentation delay. Format is a\n * duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n suggestedPresentationDelay(value) {\n return parseDuration(value);\n },\n\n /**\n * specifices the type of mpd. Can be either \"static\" or \"dynamic\"\n *\n * @param {string} value\n * value of attribute as a string\n *\n * @return {string}\n * The type as a string\n */\n type(value) {\n return value;\n },\n\n /**\n * Specifies the duration of the smallest time shifting buffer for any Representation\n * in the MPD. Format is a duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n timeShiftBufferDepth(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the PeriodStart time of the Period relative to the availabilityStarttime.\n * Format is a duration string as specified in ISO 8601\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The duration in seconds\n */\n start(value) {\n return parseDuration(value);\n },\n\n /**\n * Specifies the width of the visual presentation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed width\n */\n width(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the height of the visual presentation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed height\n */\n height(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the bitrate of the representation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed bandwidth\n */\n bandwidth(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the frame rate of the representation\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed frame rate\n */\n frameRate(value) {\n return parseDivisionValue(value);\n },\n\n /**\n * Specifies the number of the first Media Segment in this Representation in the Period\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed number\n */\n startNumber(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the timescale in units per seconds\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed timescale\n */\n timescale(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the presentationTimeOffset.\n *\n * @param {string} value\n * value of the attribute as a string\n *\n * @return {number}\n * The parsed presentationTimeOffset\n */\n presentationTimeOffset(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the constant approximate Segment duration\n * NOTE: The element also contains an @duration attribute. This duration\n * specifies the duration of the Period. 
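\n * (A plain integer such as \"90000\" is returned as-is, in timescale units, while an ISO 8601 value such\n * as \"PT30S\" fails parseInt and falls back to parseDuration, yielding 30 seconds.)\n * 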
This attribute is currently not\n * supported by the rest of the parser, however we still check for it to prevent\n * errors.\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed duration\n */\n duration(value) {\n const parsedValue = parseInt(value, 10);\n\n if (isNaN(parsedValue)) {\n return parseDuration(value);\n }\n\n return parsedValue;\n },\n\n /**\n * Specifies the Segment duration, in units of the value of the @timescale.\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed duration\n */\n d(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the MPD start time, in @timescale units, the first Segment in the series\n * starts relative to the beginning of the Period\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed time\n */\n t(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the repeat count of the number of following contiguous Segments with the\n * same duration expressed by the value of @d\n *\n * @param {string} value\n * value of attribute as a string\n * @return {number}\n * The parsed number\n */\n r(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Specifies the presentationTime.\n *\n * @param {string} value\n * value of the attribute as a string\n *\n * @return {number}\n * The parsed presentationTime\n */\n presentationTime(value) {\n return parseInt(value, 10);\n },\n\n /**\n * Default parser for all other attributes. Acts as a no-op and just returns the value\n * as a string\n *\n * @param {string} value\n * value of attribute as a string\n * @return {string}\n * Unparsed value\n */\n DEFAULT(value) {\n return value;\n }\n\n};\n/**\n * Gets all the attributes and values of the provided node, parses attributes with known\n * types, and returns an object with attribute names mapped to values.\n *\n * @param {Node} el\n * The node to parse attributes from\n * @return {Object}\n * Object with all attributes of el parsed\n */\n\nconst parseAttributes = el => {\n if (!(el && el.attributes)) {\n return {};\n }\n\n return from(el.attributes).reduce((a, e) => {\n const parseFn = parsers[e.name] || parsers.DEFAULT;\n a[e.name] = parseFn(e.value);\n return a;\n }, {});\n};\n\nconst keySystemsMap = {\n 'urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b': 'org.w3.clearkey',\n 'urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed': 'com.widevine.alpha',\n 'urn:uuid:9a04f079-9840-4286-ab92-e65be0885f95': 'com.microsoft.playready',\n 'urn:uuid:f239e769-efa3-4850-9c16-a903c6932efb': 'com.adobe.primetime',\n // ISO_IEC 23009-1_2022 5.8.5.2.2 The mp4 Protection Scheme\n 'urn:mpeg:dash:mp4protection:2011': 'mp4protection'\n};\n/**\n * Builds a list of urls that is the product of the reference urls and BaseURL values\n *\n * @param {Object[]} references\n * List of objects containing the reference URL as well as its attributes\n * @param {Node[]} baseUrlElements\n * List of BaseURL nodes from the mpd\n * @return {Object[]}\n * List of objects with resolved urls and attributes\n */\n\nconst buildBaseUrls = (references, baseUrlElements) => {\n if (!baseUrlElements.length) {\n return references;\n }\n\n return flatten(references.map(function (reference) {\n return baseUrlElements.map(function (baseUrlElement) {\n const initialBaseUrl = getContent(baseUrlElement);\n const resolvedBaseUrl = resolveUrl(reference.baseUrl, initialBaseUrl);\n const finalBaseUrl = merge(parseAttributes(baseUrlElement), {\n baseUrl: 
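\n // the BaseURL text content resolved against the parent reference's baseUrl\n 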
resolvedBaseUrl\n }); // If the URL is resolved, we want to get the serviceLocation from the reference\n // assuming there is no serviceLocation on the initialBaseUrl\n\n if (resolvedBaseUrl !== initialBaseUrl && !finalBaseUrl.serviceLocation && reference.serviceLocation) {\n finalBaseUrl.serviceLocation = reference.serviceLocation;\n }\n\n return finalBaseUrl;\n });\n }));\n};\n/**\n * Contains all Segment information for its containing AdaptationSet\n *\n * @typedef {Object} SegmentInformation\n * @property {Object|undefined} template\n * Contains the attributes for the SegmentTemplate node\n * @property {Object[]|undefined} segmentTimeline\n * Contains a list of atrributes for each S node within the SegmentTimeline node\n * @property {Object|undefined} list\n * Contains the attributes for the SegmentList node\n * @property {Object|undefined} base\n * Contains the attributes for the SegmentBase node\n */\n\n/**\n * Returns all available Segment information contained within the AdaptationSet node\n *\n * @param {Node} adaptationSet\n * The AdaptationSet node to get Segment information from\n * @return {SegmentInformation}\n * The Segment information contained within the provided AdaptationSet\n */\n\nconst getSegmentInformation = adaptationSet => {\n const segmentTemplate = findChildren(adaptationSet, 'SegmentTemplate')[0];\n const segmentList = findChildren(adaptationSet, 'SegmentList')[0];\n const segmentUrls = segmentList && findChildren(segmentList, 'SegmentURL').map(s => merge({\n tag: 'SegmentURL'\n }, parseAttributes(s)));\n const segmentBase = findChildren(adaptationSet, 'SegmentBase')[0];\n const segmentTimelineParentNode = segmentList || segmentTemplate;\n const segmentTimeline = segmentTimelineParentNode && findChildren(segmentTimelineParentNode, 'SegmentTimeline')[0];\n const segmentInitializationParentNode = segmentList || segmentBase || segmentTemplate;\n const segmentInitialization = segmentInitializationParentNode && findChildren(segmentInitializationParentNode, 'Initialization')[0]; // SegmentTemplate is handled slightly differently, since it can have both\n // @initialization and an node. @initialization can be templated,\n // while the node can have a url and range specified. If the has\n // both @initialization and an subelement we opt to override with\n // the node, as this interaction is not defined in the spec.\n\n const template = segmentTemplate && parseAttributes(segmentTemplate);\n\n if (template && segmentInitialization) {\n template.initialization = segmentInitialization && parseAttributes(segmentInitialization);\n } else if (template && template.initialization) {\n // If it is @initialization we convert it to an object since this is the format that\n // later functions will rely on for the initialization segment. 
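\n // (For example, an @initialization of \"$RepresentationID$-init.mp4\" becomes { sourceURL: '$RepresentationID$-init.mp4' }.)\n // 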
This is only valid\n // for \n template.initialization = {\n sourceURL: template.initialization\n };\n }\n\n const segmentInfo = {\n template,\n segmentTimeline: segmentTimeline && findChildren(segmentTimeline, 'S').map(s => parseAttributes(s)),\n list: segmentList && merge(parseAttributes(segmentList), {\n segmentUrls,\n initialization: parseAttributes(segmentInitialization)\n }),\n base: segmentBase && merge(parseAttributes(segmentBase), {\n initialization: parseAttributes(segmentInitialization)\n })\n };\n Object.keys(segmentInfo).forEach(key => {\n if (!segmentInfo[key]) {\n delete segmentInfo[key];\n }\n });\n return segmentInfo;\n};\n/**\n * Contains Segment information and attributes needed to construct a Playlist object\n * from a Representation\n *\n * @typedef {Object} RepresentationInformation\n * @property {SegmentInformation} segmentInfo\n * Segment information for this Representation\n * @property {Object} attributes\n * Inherited attributes for this Representation\n */\n\n/**\n * Maps a Representation node to an object containing Segment information and attributes\n *\n * @name inheritBaseUrlsCallback\n * @function\n * @param {Node} representation\n * Representation node from the mpd\n * @return {RepresentationInformation}\n * Representation information needed to construct a Playlist object\n */\n\n/**\n * Returns a callback for Array.prototype.map for mapping Representation nodes to\n * Segment information and attributes using inherited BaseURL nodes.\n *\n * @param {Object} adaptationSetAttributes\n * Contains attributes inherited by the AdaptationSet\n * @param {Object[]} adaptationSetBaseUrls\n * List of objects containing resolved base URLs and attributes\n * inherited by the AdaptationSet\n * @param {SegmentInformation} adaptationSetSegmentInfo\n * Contains Segment information for the AdaptationSet\n * @return {inheritBaseUrlsCallback}\n * Callback map function\n */\n\nconst inheritBaseUrls = (adaptationSetAttributes, adaptationSetBaseUrls, adaptationSetSegmentInfo) => representation => {\n const repBaseUrlElements = findChildren(representation, 'BaseURL');\n const repBaseUrls = buildBaseUrls(adaptationSetBaseUrls, repBaseUrlElements);\n const attributes = merge(adaptationSetAttributes, parseAttributes(representation));\n const representationSegmentInfo = getSegmentInformation(representation);\n return repBaseUrls.map(baseUrl => {\n return {\n segmentInfo: merge(adaptationSetSegmentInfo, representationSegmentInfo),\n attributes: merge(attributes, baseUrl)\n };\n });\n};\n/**\n * Tranforms a series of content protection nodes to\n * an object containing pssh data by key system\n *\n * @param {Node[]} contentProtectionNodes\n * Content protection nodes\n * @return {Object}\n * Object containing pssh data by key system\n */\n\nconst generateKeySystemInformation = contentProtectionNodes => {\n return contentProtectionNodes.reduce((acc, node) => {\n const attributes = parseAttributes(node); // Although it could be argued that according to the UUID RFC spec the UUID string (a-f chars) should be generated\n // as a lowercase string it also mentions it should be treated as case-insensitive on input. 
Since the key system\n // UUIDs in the keySystemsMap are hardcoded as lowercase in the codebase there isn't any reason not to do\n // .toLowerCase() on the input UUID string from the manifest (at least I could not think of one).\n\n if (attributes.schemeIdUri) {\n attributes.schemeIdUri = attributes.schemeIdUri.toLowerCase();\n }\n\n const keySystem = keySystemsMap[attributes.schemeIdUri];\n\n if (keySystem) {\n acc[keySystem] = {\n attributes\n };\n const psshNode = findChildren(node, 'cenc:pssh')[0];\n\n if (psshNode) {\n const pssh = getContent(psshNode);\n acc[keySystem].pssh = pssh && decodeB64ToUint8Array(pssh);\n }\n }\n\n return acc;\n }, {});\n}; // defined in ANSI_SCTE 214-1 2016\n\n\nconst parseCaptionServiceMetadata = service => {\n // 608 captions\n if (service.schemeIdUri === 'urn:scte:dash:cc:cea-608:2015') {\n const values = typeof service.value !== 'string' ? [] : service.value.split(';');\n return values.map(value => {\n let channel;\n let language; // default language to value\n\n language = value;\n\n if (/^CC\\d=/.test(value)) {\n [channel, language] = value.split('=');\n } else if (/^CC\\d$/.test(value)) {\n channel = value;\n }\n\n return {\n channel,\n language\n };\n });\n } else if (service.schemeIdUri === 'urn:scte:dash:cc:cea-708:2015') {\n const values = typeof service.value !== 'string' ? [] : service.value.split(';');\n return values.map(value => {\n const flags = {\n // service or channel number 1-63\n 'channel': undefined,\n // language is a 3ALPHA per ISO 639.2/B\n // field is required\n 'language': undefined,\n // BIT 1/0 or ?\n // default value is 1, meaning 16:9 aspect ratio, 0 is 4:3, ? is unknown\n 'aspectRatio': 1,\n // BIT 1/0\n // easy reader flag indicated the text is tailed to the needs of beginning readers\n // default 0, or off\n 'easyReader': 0,\n // BIT 1/0\n // If 3d metadata is present (CEA-708.1) then 1\n // default 0\n '3D': 0\n };\n\n if (/=/.test(value)) {\n const [channel, opts = ''] = value.split('=');\n flags.channel = channel;\n flags.language = value;\n opts.split(',').forEach(opt => {\n const [name, val] = opt.split(':');\n\n if (name === 'lang') {\n flags.language = val; // er for easyReadery\n } else if (name === 'er') {\n flags.easyReader = Number(val); // war for wide aspect ratio\n } else if (name === 'war') {\n flags.aspectRatio = Number(val);\n } else if (name === '3D') {\n flags['3D'] = Number(val);\n }\n });\n } else {\n flags.language = value;\n }\n\n if (flags.channel) {\n flags.channel = 'SERVICE' + flags.channel;\n }\n\n return flags;\n });\n }\n};\n/**\n * A map callback that will parse all event stream data for a collection of periods\n * DASH ISO_IEC_23009 5.10.2.2\n * https://dashif-documents.azurewebsites.net/Events/master/event.html#mpd-event-timing\n *\n * @param {PeriodInformation} period object containing necessary period information\n * @return a collection of parsed eventstream event objects\n */\n\nconst toEventStream = period => {\n // get and flatten all EventStreams tags and parse attributes and children\n return flatten(findChildren(period.node, 'EventStream').map(eventStream => {\n const eventStreamAttributes = parseAttributes(eventStream);\n const schemeIdUri = eventStreamAttributes.schemeIdUri; // find all Events per EventStream tag and map to return objects\n\n return findChildren(eventStream, 'Event').map(event => {\n const eventAttributes = parseAttributes(event);\n const presentationTime = eventAttributes.presentationTime || 0;\n const timescale = eventStreamAttributes.timescale || 1;\n const 
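\n // Event@presentationTime and @duration are in EventStream@timescale units and are converted to seconds below\n 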
duration = eventAttributes.duration || 0;\n const start = presentationTime / timescale + period.attributes.start;\n return {\n schemeIdUri,\n value: eventStreamAttributes.value,\n id: eventAttributes.id,\n start,\n end: start + duration / timescale,\n messageData: getContent(event) || eventAttributes.messageData,\n contentEncoding: eventStreamAttributes.contentEncoding,\n presentationTimeOffset: eventStreamAttributes.presentationTimeOffset || 0\n };\n });\n }));\n};\n/**\n * Maps an AdaptationSet node to a list of Representation information objects\n *\n * @name toRepresentationsCallback\n * @function\n * @param {Node} adaptationSet\n * AdaptationSet node from the mpd\n * @return {RepresentationInformation[]}\n * List of objects containing Representaion information\n */\n\n/**\n * Returns a callback for Array.prototype.map for mapping AdaptationSet nodes to a list of\n * Representation information objects\n *\n * @param {Object} periodAttributes\n * Contains attributes inherited by the Period\n * @param {Object[]} periodBaseUrls\n * Contains list of objects with resolved base urls and attributes\n * inherited by the Period\n * @param {string[]} periodSegmentInfo\n * Contains Segment Information at the period level\n * @return {toRepresentationsCallback}\n * Callback map function\n */\n\nconst toRepresentations = (periodAttributes, periodBaseUrls, periodSegmentInfo) => adaptationSet => {\n const adaptationSetAttributes = parseAttributes(adaptationSet);\n const adaptationSetBaseUrls = buildBaseUrls(periodBaseUrls, findChildren(adaptationSet, 'BaseURL'));\n const role = findChildren(adaptationSet, 'Role')[0];\n const roleAttributes = {\n role: parseAttributes(role)\n };\n let attrs = merge(periodAttributes, adaptationSetAttributes, roleAttributes);\n const accessibility = findChildren(adaptationSet, 'Accessibility')[0];\n const captionServices = parseCaptionServiceMetadata(parseAttributes(accessibility));\n\n if (captionServices) {\n attrs = merge(attrs, {\n captionServices\n });\n }\n\n const label = findChildren(adaptationSet, 'Label')[0];\n\n if (label && label.childNodes.length) {\n const labelVal = label.childNodes[0].nodeValue.trim();\n attrs = merge(attrs, {\n label: labelVal\n });\n }\n\n const contentProtection = generateKeySystemInformation(findChildren(adaptationSet, 'ContentProtection'));\n\n if (Object.keys(contentProtection).length) {\n attrs = merge(attrs, {\n contentProtection\n });\n }\n\n const segmentInfo = getSegmentInformation(adaptationSet);\n const representations = findChildren(adaptationSet, 'Representation');\n const adaptationSetSegmentInfo = merge(periodSegmentInfo, segmentInfo);\n return flatten(representations.map(inheritBaseUrls(attrs, adaptationSetBaseUrls, adaptationSetSegmentInfo)));\n};\n/**\n * Contains all period information for mapping nodes onto adaptation sets.\n *\n * @typedef {Object} PeriodInformation\n * @property {Node} period.node\n * Period node from the mpd\n * @property {Object} period.attributes\n * Parsed period attributes from node plus any added\n */\n\n/**\n * Maps a PeriodInformation object to a list of Representation information objects for all\n * AdaptationSet nodes contained within the Period.\n *\n * @name toAdaptationSetsCallback\n * @function\n * @param {PeriodInformation} period\n * Period object containing necessary period information\n * @param {number} periodStart\n * Start time of the Period within the mpd\n * @return {RepresentationInformation[]}\n * List of objects containing Representaion information\n */\n\n/**\n * Returns 
a callback for Array.prototype.map for mapping Period nodes to a list of\n * Representation information objects\n *\n * @param {Object} mpdAttributes\n * Contains attributes inherited by the mpd\n * @param {Object[]} mpdBaseUrls\n * Contains list of objects with resolved base urls and attributes\n * inherited by the mpd\n * @return {toAdaptationSetsCallback}\n * Callback map function\n */\n\nconst toAdaptationSets = (mpdAttributes, mpdBaseUrls) => (period, index) => {\n const periodBaseUrls = buildBaseUrls(mpdBaseUrls, findChildren(period.node, 'BaseURL'));\n const periodAttributes = merge(mpdAttributes, {\n periodStart: period.attributes.start\n });\n\n if (typeof period.attributes.duration === 'number') {\n periodAttributes.periodDuration = period.attributes.duration;\n }\n\n const adaptationSets = findChildren(period.node, 'AdaptationSet');\n const periodSegmentInfo = getSegmentInformation(period.node);\n return flatten(adaptationSets.map(toRepresentations(periodAttributes, periodBaseUrls, periodSegmentInfo)));\n};\n/**\n * Tranforms an array of content steering nodes into an object\n * containing CDN content steering information from the MPD manifest.\n *\n * For more information on the DASH spec for Content Steering parsing, see:\n * https://dashif.org/docs/DASH-IF-CTS-00XX-Content-Steering-Community-Review.pdf\n *\n * @param {Node[]} contentSteeringNodes\n * Content steering nodes\n * @param {Function} eventHandler\n * The event handler passed into the parser options to handle warnings\n * @return {Object}\n * Object containing content steering data\n */\n\nconst generateContentSteeringInformation = (contentSteeringNodes, eventHandler) => {\n // If there are more than one ContentSteering tags, throw an error\n if (contentSteeringNodes.length > 1) {\n eventHandler({\n type: 'warn',\n message: 'The MPD manifest should contain no more than one ContentSteering tag'\n });\n } // Return a null value if there are no ContentSteering tags\n\n\n if (!contentSteeringNodes.length) {\n return null;\n }\n\n const infoFromContentSteeringTag = merge({\n serverURL: getContent(contentSteeringNodes[0])\n }, parseAttributes(contentSteeringNodes[0])); // Converts `queryBeforeStart` to a boolean, as well as setting the default value\n // to `false` if it doesn't exist\n\n infoFromContentSteeringTag.queryBeforeStart = infoFromContentSteeringTag.queryBeforeStart === 'true';\n return infoFromContentSteeringTag;\n};\n/**\n * Gets Period@start property for a given period.\n *\n * @param {Object} options\n * Options object\n * @param {Object} options.attributes\n * Period attributes\n * @param {Object} [options.priorPeriodAttributes]\n * Prior period attributes (if prior period is available)\n * @param {string} options.mpdType\n * The MPD@type these periods came from\n * @return {number|null}\n * The period start, or null if it's an early available period or error\n */\n\nconst getPeriodStart = ({\n attributes,\n priorPeriodAttributes,\n mpdType\n}) => {\n // Summary of period start time calculation from DASH spec section 5.3.2.1\n //\n // A period's start is the first period's start + time elapsed after playing all\n // prior periods to this one. Periods continue one after the other in time (without\n // gaps) until the end of the presentation.\n //\n // The value of Period@start should be:\n // 1. if Period@start is present: value of Period@start\n // 2. if previous period exists and it has @duration: previous Period@start +\n // previous Period@duration\n // 3. 
if this is first period and MPD@type is 'static': 0\n // 4. in all other cases, consider the period an \"early available period\" (note: not\n // currently supported)\n // (1)\n if (typeof attributes.start === 'number') {\n return attributes.start;\n } // (2)\n\n\n if (priorPeriodAttributes && typeof priorPeriodAttributes.start === 'number' && typeof priorPeriodAttributes.duration === 'number') {\n return priorPeriodAttributes.start + priorPeriodAttributes.duration;\n } // (3)\n\n\n if (!priorPeriodAttributes && mpdType === 'static') {\n return 0;\n } // (4)\n // There is currently no logic for calculating the Period@start value if there is\n // no Period@start or prior Period@start and Period@duration available. This is not made\n // explicit by the DASH interop guidelines or the DASH spec, however, since there's\n // nothing about any other resolution strategies, it's implied. Thus, this case should\n // be considered an early available period, or error, and null should suffice for both\n // of those cases.\n\n\n return null;\n};\n/**\n * Traverses the mpd xml tree to generate a list of Representation information objects\n * that have inherited attributes from parent nodes\n *\n * @param {Node} mpd\n * The root node of the mpd\n * @param {Object} options\n * Available options for inheritAttributes\n * @param {string} options.manifestUri\n * The uri source of the mpd\n * @param {number} options.NOW\n * Current time per DASH IOP. Default is current time in ms since epoch\n * @param {number} options.clientOffset\n * Client time difference from NOW (in milliseconds)\n * @return {RepresentationInformation[]}\n * List of objects containing Representation information\n */\n\nconst inheritAttributes = (mpd, options = {}) => {\n const {\n manifestUri = '',\n NOW = Date.now(),\n clientOffset = 0,\n // TODO: For now, we are expecting an eventHandler callback function\n // to be passed into the mpd parser as an option.\n // In the future, we should enable stream parsing by using the Stream class from vhs-utils.\n // This will support new features including a standardized event handler.\n // See the m3u8 parser for examples of how stream parsing is currently used for HLS parsing.\n // https://github.com/videojs/vhs-utils/blob/88d6e10c631e57a5af02c5a62bc7376cd456b4f5/src/stream.js#L9\n eventHandler = function () {}\n } = options;\n const periodNodes = findChildren(mpd, 'Period');\n\n if (!periodNodes.length) {\n throw new Error(errors.INVALID_NUMBER_OF_PERIOD);\n }\n\n const locations = findChildren(mpd, 'Location');\n const mpdAttributes = parseAttributes(mpd);\n const mpdBaseUrls = buildBaseUrls([{\n baseUrl: manifestUri\n }], findChildren(mpd, 'BaseURL'));\n const contentSteeringNodes = findChildren(mpd, 'ContentSteering'); // See DASH spec section 5.3.1.2, Semantics of MPD element. Default type to 'static'.\n\n mpdAttributes.type = mpdAttributes.type || 'static';\n mpdAttributes.sourceDuration = mpdAttributes.mediaPresentationDuration || 0;\n mpdAttributes.NOW = NOW;\n mpdAttributes.clientOffset = clientOffset;\n\n if (locations.length) {\n mpdAttributes.locations = locations.map(getContent);\n }\n\n const periods = []; // Since toAdaptationSets acts on individual periods right now, the simplest approach to\n // adding properties that require looking at prior periods is to parse attributes and add\n // missing ones before toAdaptationSets is called. 
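// --- Illustrative aside (not part of mpd-parser) ----------------------------
// getPeriodStart above resolves Period@start with a cascade: (1) an explicit
// Period@start wins; (2) otherwise prior Period@start + prior Period@duration;
// (3) otherwise 0 for the first period of a static MPD; (4) otherwise null,
// i.e. an "early available period". The sketch below restates that cascade;
// `periodStart`, `resolveStarts` and their input shape are hypothetical.
type PeriodAttrs = { start?: number; duration?: number };

const periodStart = (
  attrs: PeriodAttrs,
  prior: PeriodAttrs | null,
  mpdType: 'static' | 'dynamic'
): number | null => {
  if (typeof attrs.start === 'number') return attrs.start;                    // (1)
  if (prior && typeof prior.start === 'number' && typeof prior.duration === 'number') {
    return prior.start + prior.duration;                                      // (2)
  }
  if (!prior && mpdType === 'static') return 0;                               // (3)
  return null;                                                                // (4)
};

// Starts chain left to right, the same way the periods.forEach loop below
// hands each period the previously resolved attributes.
const resolveStarts = (periods: PeriodAttrs[], mpdType: 'static' | 'dynamic'): (number | null)[] => {
  const resolved: PeriodAttrs[] = [];
  return periods.map((p, i) => {
    const start = periodStart(p, i > 0 ? resolved[i - 1] : null, mpdType);
    resolved.push({ ...p, start: start === null ? undefined : start });
    return start;
  });
};

// resolveStarts([{ duration: 30 }, { duration: 30 }, {}], 'static') -> [0, 30, 60]
// -----------------------------------------------------------------------------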
If more such properties are added, it\n // may be better to refactor toAdaptationSets.\n\n periodNodes.forEach((node, index) => {\n const attributes = parseAttributes(node); // Use the last modified prior period, as it may contain added information necessary\n // for this period.\n\n const priorPeriod = periods[index - 1];\n attributes.start = getPeriodStart({\n attributes,\n priorPeriodAttributes: priorPeriod ? priorPeriod.attributes : null,\n mpdType: mpdAttributes.type\n });\n periods.push({\n node,\n attributes\n });\n });\n return {\n locations: mpdAttributes.locations,\n contentSteeringInfo: generateContentSteeringInformation(contentSteeringNodes, eventHandler),\n // TODO: There are occurences where this `representationInfo` array contains undesired\n // duplicates. This generally occurs when there are multiple BaseURL nodes that are\n // direct children of the MPD node. When we attempt to resolve URLs from a combination of the\n // parent BaseURL and a child BaseURL, and the value does not resolve,\n // we end up returning the child BaseURL multiple times.\n // We need to determine a way to remove these duplicates in a safe way.\n // See: https://github.com/videojs/mpd-parser/pull/17#discussion_r162750527\n representationInfo: flatten(periods.map(toAdaptationSets(mpdAttributes, mpdBaseUrls))),\n eventStream: flatten(periods.map(toEventStream))\n };\n};\n\nconst stringToMpdXml = manifestString => {\n if (manifestString === '') {\n throw new Error(errors.DASH_EMPTY_MANIFEST);\n }\n\n const parser = new DOMParser();\n let xml;\n let mpd;\n\n try {\n xml = parser.parseFromString(manifestString, 'application/xml');\n mpd = xml && xml.documentElement.tagName === 'MPD' ? xml.documentElement : null;\n } catch (e) {// ie 11 throws on invalid xml\n }\n\n if (!mpd || mpd && mpd.getElementsByTagName('parsererror').length > 0) {\n throw new Error(errors.DASH_INVALID_XML);\n }\n\n return mpd;\n};\n\n/**\n * Parses the manifest for a UTCTiming node, returning the nodes attributes if found\n *\n * @param {string} mpd\n * XML string of the MPD manifest\n * @return {Object|null}\n * Attributes of UTCTiming node specified in the manifest. 
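// --- Usage sketch (assumptions labelled, not part of mpd-parser) -------------
// stringToMpdXml above throws for an empty manifest string and for XML whose
// root element is not <MPD> (or that contains a parsererror node), while
// inheritAttributes throws when the MPD has no Period children. The snippet
// below is a hedged example of wiring the two together with the optional
// eventHandler described in inheritAttributes; the manifest string and URL
// are placeholders, and mpd-parser ships plain JavaScript, so a strict TS
// project may need its own module declaration for the import.
import { stringToMpdXml, inheritAttributes } from 'mpd-parser';

const manifestString =
  '<MPD type="static" mediaPresentationDuration="PT30S"><Period></Period></MPD>';

try {
  const mpd = stringToMpdXml(manifestString);
  const { representationInfo, eventStream, contentSteeringInfo } = inheritAttributes(mpd, {
    manifestUri: 'https://example.com/dash/manifest.mpd',
    // receives { type: 'warn', message } objects, e.g. when more than one
    // ContentSteering tag is present in the manifest
    eventHandler: ({ type, message }: { type: string; message: string }) =>
      console.log(`[${type}] ${message}`)
  });
  console.log(representationInfo.length, 'representations,', eventStream.length, 'event stream entries');
  console.log('content steering:', contentSteeringInfo);
} catch (e) {
  console.error('failed to parse manifest:', (e as Error).message);
}
// -----------------------------------------------------------------------------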
Null if none found\n */\n\nconst parseUTCTimingScheme = mpd => {\n const UTCTimingNode = findChildren(mpd, 'UTCTiming')[0];\n\n if (!UTCTimingNode) {\n return null;\n }\n\n const attributes = parseAttributes(UTCTimingNode);\n\n switch (attributes.schemeIdUri) {\n case 'urn:mpeg:dash:utc:http-head:2014':\n case 'urn:mpeg:dash:utc:http-head:2012':\n attributes.method = 'HEAD';\n break;\n\n case 'urn:mpeg:dash:utc:http-xsdate:2014':\n case 'urn:mpeg:dash:utc:http-iso:2014':\n case 'urn:mpeg:dash:utc:http-xsdate:2012':\n case 'urn:mpeg:dash:utc:http-iso:2012':\n attributes.method = 'GET';\n break;\n\n case 'urn:mpeg:dash:utc:direct:2014':\n case 'urn:mpeg:dash:utc:direct:2012':\n attributes.method = 'DIRECT';\n attributes.value = Date.parse(attributes.value);\n break;\n\n case 'urn:mpeg:dash:utc:http-ntp:2014':\n case 'urn:mpeg:dash:utc:ntp:2014':\n case 'urn:mpeg:dash:utc:sntp:2014':\n default:\n throw new Error(errors.UNSUPPORTED_UTC_TIMING_SCHEME);\n }\n\n return attributes;\n};\n\nconst VERSION = version;\n/*\n * Given a DASH manifest string and options, parses the DASH manifest into an object in the\n * form outputed by m3u8-parser and accepted by videojs/http-streaming.\n *\n * For live DASH manifests, if `previousManifest` is provided in options, then the newly\n * parsed DASH manifest will have its media sequence and discontinuity sequence values\n * updated to reflect its position relative to the prior manifest.\n *\n * @param {string} manifestString - the DASH manifest as a string\n * @param {options} [options] - any options\n *\n * @return {Object} the manifest object\n */\n\nconst parse = (manifestString, options = {}) => {\n const parsedManifestInfo = inheritAttributes(stringToMpdXml(manifestString), options);\n const playlists = toPlaylists(parsedManifestInfo.representationInfo);\n return toM3u8({\n dashPlaylists: playlists,\n locations: parsedManifestInfo.locations,\n contentSteering: parsedManifestInfo.contentSteeringInfo,\n sidxMapping: options.sidxMapping,\n previousManifest: options.previousManifest,\n eventStream: parsedManifestInfo.eventStream\n });\n};\n/**\n * Parses the manifest for a UTCTiming node, returning the nodes attributes if found\n *\n * @param {string} manifestString\n * XML string of the MPD manifest\n * @return {Object|null}\n * Attributes of UTCTiming node specified in the manifest. Null if none found\n */\n\n\nconst parseUTCTiming = manifestString => parseUTCTimingScheme(stringToMpdXml(manifestString));\n\nexport { VERSION, addSidxSegmentsToPlaylist$1 as addSidxSegmentsToPlaylist, generateSidxKey, inheritAttributes, parse, parseUTCTiming, stringToMpdXml, toM3u8, toPlaylists };\n","/**\n * Loops through all supported media groups in master and calls the provided\n * callback for each group\n *\n * @param {Object} master\n * The parsed master manifest object\n * @param {string[]} groups\n * The media groups to call the callback for\n * @param {Function} callback\n * Callback to call for each media group\n */\nexport var forEachMediaGroup = function forEachMediaGroup(master, groups, callback) {\n groups.forEach(function (mediaType) {\n for (var groupKey in master.mediaGroups[mediaType]) {\n for (var labelKey in master.mediaGroups[mediaType][groupKey]) {\n var mediaProperties = master.mediaGroups[mediaType][groupKey][labelKey];\n callback(mediaProperties, mediaType, groupKey, labelKey);\n }\n }\n });\n};","import window from 'global/window';\n\nvar atob = function atob(s) {\n return window.atob ? 
window.atob(s) : Buffer.from(s, 'base64').toString('binary');\n};\n\nexport default function decodeB64ToUint8Array(b64Text) {\n var decodedString = atob(b64Text);\n var array = new Uint8Array(decodedString.length);\n\n for (var i = 0; i < decodedString.length; i++) {\n array[i] = decodedString.charCodeAt(i);\n }\n\n return array;\n}","import { toUint8, bytesMatch } from './byte-helpers.js';\nvar ID3 = toUint8([0x49, 0x44, 0x33]);\nexport var getId3Size = function getId3Size(bytes, offset) {\n if (offset === void 0) {\n offset = 0;\n }\n\n bytes = toUint8(bytes);\n var flags = bytes[offset + 5];\n var returnSize = bytes[offset + 6] << 21 | bytes[offset + 7] << 14 | bytes[offset + 8] << 7 | bytes[offset + 9];\n var footerPresent = (flags & 16) >> 4;\n\n if (footerPresent) {\n return returnSize + 20;\n }\n\n return returnSize + 10;\n};\nexport var getId3Offset = function getId3Offset(bytes, offset) {\n if (offset === void 0) {\n offset = 0;\n }\n\n bytes = toUint8(bytes);\n\n if (bytes.length - offset < 10 || !bytesMatch(bytes, ID3, {\n offset: offset\n })) {\n return offset;\n }\n\n offset += getId3Size(bytes, offset); // recursive check for id3 tags as some files\n // have multiple ID3 tag sections even though\n // they should not.\n\n return getId3Offset(bytes, offset);\n};","export var OPUS_HEAD = new Uint8Array([// O, p, u, s\n0x4f, 0x70, 0x75, 0x73, // H, e, a, d\n0x48, 0x65, 0x61, 0x64]); // https://wiki.xiph.org/OggOpus\n// https://vfrmaniac.fushizen.eu/contents/opus_in_isobmff.html\n// https://opus-codec.org/docs/opusfile_api-0.7/structOpusHead.html\n\nexport var parseOpusHead = function parseOpusHead(bytes) {\n var view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength);\n var version = view.getUint8(0); // version 0, from mp4, does not use littleEndian.\n\n var littleEndian = version !== 0;\n var config = {\n version: version,\n channels: view.getUint8(1),\n preSkip: view.getUint16(2, littleEndian),\n sampleRate: view.getUint32(4, littleEndian),\n outputGain: view.getUint16(8, littleEndian),\n channelMappingFamily: view.getUint8(10)\n };\n\n if (config.channelMappingFamily > 0 && bytes.length > 10) {\n config.streamCount = view.getUint8(11);\n config.twoChannelStreamCount = view.getUint8(12);\n config.channelMapping = [];\n\n for (var c = 0; c < config.channels; c++) {\n config.channelMapping.push(view.getUint8(13 + c));\n }\n }\n\n return config;\n};\nexport var setOpusHead = function setOpusHead(config) {\n var size = config.channelMappingFamily <= 0 ? 
11 : 12 + config.channels;\n var view = new DataView(new ArrayBuffer(size));\n var littleEndian = config.version !== 0;\n view.setUint8(0, config.version);\n view.setUint8(1, config.channels);\n view.setUint16(2, config.preSkip, littleEndian);\n view.setUint32(4, config.sampleRate, littleEndian);\n view.setUint16(8, config.outputGain, littleEndian);\n view.setUint8(10, config.channelMappingFamily);\n\n if (config.channelMappingFamily > 0) {\n view.setUint8(11, config.streamCount);\n config.channelMapping.foreach(function (cm, i) {\n view.setUint8(12 + i, cm);\n });\n }\n\n return new Uint8Array(view.buffer);\n};","import { stringToBytes, toUint8, bytesMatch, bytesToString, toHexString, padStart, bytesToNumber } from './byte-helpers.js';\nimport { getAvcCodec, getHvcCodec, getAv1Codec } from './codec-helpers.js';\nimport { parseOpusHead } from './opus-helpers.js';\n\nvar normalizePath = function normalizePath(path) {\n if (typeof path === 'string') {\n return stringToBytes(path);\n }\n\n if (typeof path === 'number') {\n return path;\n }\n\n return path;\n};\n\nvar normalizePaths = function normalizePaths(paths) {\n if (!Array.isArray(paths)) {\n return [normalizePath(paths)];\n }\n\n return paths.map(function (p) {\n return normalizePath(p);\n });\n};\n\nvar DESCRIPTORS;\nexport var parseDescriptors = function parseDescriptors(bytes) {\n bytes = toUint8(bytes);\n var results = [];\n var i = 0;\n\n while (bytes.length > i) {\n var tag = bytes[i];\n var size = 0;\n var headerSize = 0; // tag\n\n headerSize++;\n var byte = bytes[headerSize]; // first byte\n\n headerSize++;\n\n while (byte & 0x80) {\n size = (byte & 0x7F) << 7;\n byte = bytes[headerSize];\n headerSize++;\n }\n\n size += byte & 0x7F;\n\n for (var z = 0; z < DESCRIPTORS.length; z++) {\n var _DESCRIPTORS$z = DESCRIPTORS[z],\n id = _DESCRIPTORS$z.id,\n parser = _DESCRIPTORS$z.parser;\n\n if (tag === id) {\n results.push(parser(bytes.subarray(headerSize, headerSize + size)));\n break;\n }\n }\n\n i += size + headerSize;\n }\n\n return results;\n};\nDESCRIPTORS = [{\n id: 0x03,\n parser: function parser(bytes) {\n var desc = {\n tag: 0x03,\n id: bytes[0] << 8 | bytes[1],\n flags: bytes[2],\n size: 3,\n dependsOnEsId: 0,\n ocrEsId: 0,\n descriptors: [],\n url: ''\n }; // depends on es id\n\n if (desc.flags & 0x80) {\n desc.dependsOnEsId = bytes[desc.size] << 8 | bytes[desc.size + 1];\n desc.size += 2;\n } // url\n\n\n if (desc.flags & 0x40) {\n var len = bytes[desc.size];\n desc.url = bytesToString(bytes.subarray(desc.size + 1, desc.size + 1 + len));\n desc.size += len;\n } // ocr es id\n\n\n if (desc.flags & 0x20) {\n desc.ocrEsId = bytes[desc.size] << 8 | bytes[desc.size + 1];\n desc.size += 2;\n }\n\n desc.descriptors = parseDescriptors(bytes.subarray(desc.size)) || [];\n return desc;\n }\n}, {\n id: 0x04,\n parser: function parser(bytes) {\n // DecoderConfigDescriptor\n var desc = {\n tag: 0x04,\n oti: bytes[0],\n streamType: bytes[1],\n bufferSize: bytes[2] << 16 | bytes[3] << 8 | bytes[4],\n maxBitrate: bytes[5] << 24 | bytes[6] << 16 | bytes[7] << 8 | bytes[8],\n avgBitrate: bytes[9] << 24 | bytes[10] << 16 | bytes[11] << 8 | bytes[12],\n descriptors: parseDescriptors(bytes.subarray(13))\n };\n return desc;\n }\n}, {\n id: 0x05,\n parser: function parser(bytes) {\n // DecoderSpecificInfo\n return {\n tag: 0x05,\n bytes: bytes\n };\n }\n}, {\n id: 0x06,\n parser: function parser(bytes) {\n // SLConfigDescriptor\n return {\n tag: 0x06,\n bytes: bytes\n };\n }\n}];\n/**\n * find any number of boxes by name given a path to it in 
an iso bmff\n * such as mp4.\n *\n * @param {TypedArray} bytes\n * bytes for the iso bmff to search for boxes in\n *\n * @param {Uint8Array[]|string[]|string|Uint8Array} name\n * An array of paths or a single path representing the name\n * of boxes to search through in bytes. Paths may be\n * uint8 (character codes) or strings.\n *\n * @param {boolean} [complete=false]\n * Should we search only for complete boxes on the final path.\n * This is very useful when you do not want to get back partial boxes\n * in the case of streaming files.\n *\n * @return {Uint8Array[]}\n * An array of the end paths that we found.\n */\n\nexport var findBox = function findBox(bytes, paths, complete) {\n if (complete === void 0) {\n complete = false;\n }\n\n paths = normalizePaths(paths);\n bytes = toUint8(bytes);\n var results = [];\n\n if (!paths.length) {\n // short-circuit the search for empty paths\n return results;\n }\n\n var i = 0;\n\n while (i < bytes.length) {\n var size = (bytes[i] << 24 | bytes[i + 1] << 16 | bytes[i + 2] << 8 | bytes[i + 3]) >>> 0;\n var type = bytes.subarray(i + 4, i + 8); // invalid box format.\n\n if (size === 0) {\n break;\n }\n\n var end = i + size;\n\n if (end > bytes.length) {\n // this box is bigger than the number of bytes we have\n // and complete is set, we cannot find any more boxes.\n if (complete) {\n break;\n }\n\n end = bytes.length;\n }\n\n var data = bytes.subarray(i + 8, end);\n\n if (bytesMatch(type, paths[0])) {\n if (paths.length === 1) {\n // this is the end of the path and we've found the box we were\n // looking for\n results.push(data);\n } else {\n // recursively search for the next box along the path\n results.push.apply(results, findBox(data, paths.slice(1), complete));\n }\n }\n\n i = end;\n } // we've finished searching all of bytes\n\n\n return results;\n};\n/**\n * Search for a single matching box by name in an iso bmff format like\n * mp4. This function is useful for finding codec boxes which\n * can be placed arbitrarily in sample descriptions depending\n * on the version of the file or file type.\n *\n * @param {TypedArray} bytes\n * bytes for the iso bmff to search for boxes in\n *\n * @param {string|Uint8Array} name\n * The name of the box to find.\n *\n * @return {Uint8Array[]}\n * a subarray of bytes representing the name boxed we found.\n */\n\nexport var findNamedBox = function findNamedBox(bytes, name) {\n name = normalizePath(name);\n\n if (!name.length) {\n // short-circuit the search for empty paths\n return bytes.subarray(bytes.length);\n }\n\n var i = 0;\n\n while (i < bytes.length) {\n if (bytesMatch(bytes.subarray(i, i + name.length), name)) {\n var size = (bytes[i - 4] << 24 | bytes[i - 3] << 16 | bytes[i - 2] << 8 | bytes[i - 1]) >>> 0;\n var end = size > 1 ? 
i + size : bytes.byteLength;\n return bytes.subarray(i + 4, end);\n }\n\n i++;\n } // we've finished searching all of bytes\n\n\n return bytes.subarray(bytes.length);\n};\n\nvar parseSamples = function parseSamples(data, entrySize, parseEntry) {\n if (entrySize === void 0) {\n entrySize = 4;\n }\n\n if (parseEntry === void 0) {\n parseEntry = function parseEntry(d) {\n return bytesToNumber(d);\n };\n }\n\n var entries = [];\n\n if (!data || !data.length) {\n return entries;\n }\n\n var entryCount = bytesToNumber(data.subarray(4, 8));\n\n for (var i = 8; entryCount; i += entrySize, entryCount--) {\n entries.push(parseEntry(data.subarray(i, i + entrySize)));\n }\n\n return entries;\n};\n\nexport var buildFrameTable = function buildFrameTable(stbl, timescale) {\n var keySamples = parseSamples(findBox(stbl, ['stss'])[0]);\n var chunkOffsets = parseSamples(findBox(stbl, ['stco'])[0]);\n var timeToSamples = parseSamples(findBox(stbl, ['stts'])[0], 8, function (entry) {\n return {\n sampleCount: bytesToNumber(entry.subarray(0, 4)),\n sampleDelta: bytesToNumber(entry.subarray(4, 8))\n };\n });\n var samplesToChunks = parseSamples(findBox(stbl, ['stsc'])[0], 12, function (entry) {\n return {\n firstChunk: bytesToNumber(entry.subarray(0, 4)),\n samplesPerChunk: bytesToNumber(entry.subarray(4, 8)),\n sampleDescriptionIndex: bytesToNumber(entry.subarray(8, 12))\n };\n });\n var stsz = findBox(stbl, ['stsz'])[0]; // stsz starts with a 4 byte sampleSize which we don't need\n\n var sampleSizes = parseSamples(stsz && stsz.length && stsz.subarray(4) || null);\n var frames = [];\n\n for (var chunkIndex = 0; chunkIndex < chunkOffsets.length; chunkIndex++) {\n var samplesInChunk = void 0;\n\n for (var i = 0; i < samplesToChunks.length; i++) {\n var sampleToChunk = samplesToChunks[i];\n var isThisOne = chunkIndex + 1 >= sampleToChunk.firstChunk && (i + 1 >= samplesToChunks.length || chunkIndex + 1 < samplesToChunks[i + 1].firstChunk);\n\n if (isThisOne) {\n samplesInChunk = sampleToChunk.samplesPerChunk;\n break;\n }\n }\n\n var chunkOffset = chunkOffsets[chunkIndex];\n\n for (var _i = 0; _i < samplesInChunk; _i++) {\n var frameEnd = sampleSizes[frames.length]; // if we don't have key samples every frame is a keyframe\n\n var keyframe = !keySamples.length;\n\n if (keySamples.length && keySamples.indexOf(frames.length + 1) !== -1) {\n keyframe = true;\n }\n\n var frame = {\n keyframe: keyframe,\n start: chunkOffset,\n end: chunkOffset + frameEnd\n };\n\n for (var k = 0; k < timeToSamples.length; k++) {\n var _timeToSamples$k = timeToSamples[k],\n sampleCount = _timeToSamples$k.sampleCount,\n sampleDelta = _timeToSamples$k.sampleDelta;\n\n if (frames.length <= sampleCount) {\n // ms to ns\n var lastTimestamp = frames.length ? 
frames[frames.length - 1].timestamp : 0;\n frame.timestamp = lastTimestamp + sampleDelta / timescale * 1000;\n frame.duration = sampleDelta;\n break;\n }\n }\n\n frames.push(frame);\n chunkOffset += frameEnd;\n }\n }\n\n return frames;\n};\nexport var addSampleDescription = function addSampleDescription(track, bytes) {\n var codec = bytesToString(bytes.subarray(0, 4));\n\n if (track.type === 'video') {\n track.info = track.info || {};\n track.info.width = bytes[28] << 8 | bytes[29];\n track.info.height = bytes[30] << 8 | bytes[31];\n } else if (track.type === 'audio') {\n track.info = track.info || {};\n track.info.channels = bytes[20] << 8 | bytes[21];\n track.info.bitDepth = bytes[22] << 8 | bytes[23];\n track.info.sampleRate = bytes[28] << 8 | bytes[29];\n }\n\n if (codec === 'avc1') {\n var avcC = findNamedBox(bytes, 'avcC'); // AVCDecoderConfigurationRecord\n\n codec += \".\" + getAvcCodec(avcC);\n track.info.avcC = avcC; // TODO: do we need to parse all this?\n\n /* {\n configurationVersion: avcC[0],\n profile: avcC[1],\n profileCompatibility: avcC[2],\n level: avcC[3],\n lengthSizeMinusOne: avcC[4] & 0x3\n };\n let spsNalUnitCount = avcC[5] & 0x1F;\n const spsNalUnits = track.info.avc.spsNalUnits = [];\n // past spsNalUnitCount\n let offset = 6;\n while (spsNalUnitCount--) {\n const nalLen = avcC[offset] << 8 | avcC[offset + 1];\n spsNalUnits.push(avcC.subarray(offset + 2, offset + 2 + nalLen));\n offset += nalLen + 2;\n }\n let ppsNalUnitCount = avcC[offset];\n const ppsNalUnits = track.info.avc.ppsNalUnits = [];\n // past ppsNalUnitCount\n offset += 1;\n while (ppsNalUnitCount--) {\n const nalLen = avcC[offset] << 8 | avcC[offset + 1];\n ppsNalUnits.push(avcC.subarray(offset + 2, offset + 2 + nalLen));\n offset += nalLen + 2;\n }*/\n // HEVCDecoderConfigurationRecord\n } else if (codec === 'hvc1' || codec === 'hev1') {\n codec += \".\" + getHvcCodec(findNamedBox(bytes, 'hvcC'));\n } else if (codec === 'mp4a' || codec === 'mp4v') {\n var esds = findNamedBox(bytes, 'esds');\n var esDescriptor = parseDescriptors(esds.subarray(4))[0];\n var decoderConfig = esDescriptor && esDescriptor.descriptors.filter(function (_ref) {\n var tag = _ref.tag;\n return tag === 0x04;\n })[0];\n\n if (decoderConfig) {\n // most codecs do not have a further '.'\n // such as 0xa5 for ac-3 and 0xa6 for e-ac-3\n codec += '.' + toHexString(decoderConfig.oti);\n\n if (decoderConfig.oti === 0x40) {\n codec += '.' + (decoderConfig.descriptors[0].bytes[0] >> 3).toString();\n } else if (decoderConfig.oti === 0x20) {\n codec += '.' 
+ decoderConfig.descriptors[0].bytes[4].toString();\n } else if (decoderConfig.oti === 0xdd) {\n codec = 'vorbis';\n }\n } else if (track.type === 'audio') {\n codec += '.40.2';\n } else {\n codec += '.20.9';\n }\n } else if (codec === 'av01') {\n // AV1DecoderConfigurationRecord\n codec += \".\" + getAv1Codec(findNamedBox(bytes, 'av1C'));\n } else if (codec === 'vp09') {\n // VPCodecConfigurationRecord\n var vpcC = findNamedBox(bytes, 'vpcC'); // https://www.webmproject.org/vp9/mp4/\n\n var profile = vpcC[0];\n var level = vpcC[1];\n var bitDepth = vpcC[2] >> 4;\n var chromaSubsampling = (vpcC[2] & 0x0F) >> 1;\n var videoFullRangeFlag = (vpcC[2] & 0x0F) >> 3;\n var colourPrimaries = vpcC[3];\n var transferCharacteristics = vpcC[4];\n var matrixCoefficients = vpcC[5];\n codec += \".\" + padStart(profile, 2, '0');\n codec += \".\" + padStart(level, 2, '0');\n codec += \".\" + padStart(bitDepth, 2, '0');\n codec += \".\" + padStart(chromaSubsampling, 2, '0');\n codec += \".\" + padStart(colourPrimaries, 2, '0');\n codec += \".\" + padStart(transferCharacteristics, 2, '0');\n codec += \".\" + padStart(matrixCoefficients, 2, '0');\n codec += \".\" + padStart(videoFullRangeFlag, 2, '0');\n } else if (codec === 'theo') {\n codec = 'theora';\n } else if (codec === 'spex') {\n codec = 'speex';\n } else if (codec === '.mp3') {\n codec = 'mp4a.40.34';\n } else if (codec === 'msVo') {\n codec = 'vorbis';\n } else if (codec === 'Opus') {\n codec = 'opus';\n var dOps = findNamedBox(bytes, 'dOps');\n track.info.opus = parseOpusHead(dOps); // TODO: should this go into the webm code??\n // Firefox requires a codecDelay for opus playback\n // see https://bugzilla.mozilla.org/show_bug.cgi?id=1276238\n\n track.info.codecDelay = 6500000;\n } else {\n codec = codec.toLowerCase();\n }\n /* eslint-enable */\n // flac, ac-3, ec-3, opus\n\n\n track.codec = codec;\n};\nexport var parseTracks = function parseTracks(bytes, frameTable) {\n if (frameTable === void 0) {\n frameTable = true;\n }\n\n bytes = toUint8(bytes);\n var traks = findBox(bytes, ['moov', 'trak'], true);\n var tracks = [];\n traks.forEach(function (trak) {\n var track = {\n bytes: trak\n };\n var mdia = findBox(trak, ['mdia'])[0];\n var hdlr = findBox(mdia, ['hdlr'])[0];\n var trakType = bytesToString(hdlr.subarray(8, 12));\n\n if (trakType === 'soun') {\n track.type = 'audio';\n } else if (trakType === 'vide') {\n track.type = 'video';\n } else {\n track.type = trakType;\n }\n\n var tkhd = findBox(trak, ['tkhd'])[0];\n\n if (tkhd) {\n var view = new DataView(tkhd.buffer, tkhd.byteOffset, tkhd.byteLength);\n var tkhdVersion = view.getUint8(0);\n track.number = tkhdVersion === 0 ? view.getUint32(12) : view.getUint32(20);\n }\n\n var mdhd = findBox(mdia, ['mdhd'])[0];\n\n if (mdhd) {\n // mdhd is a FullBox, meaning it will have its own version as the first byte\n var version = mdhd[0];\n var index = version === 0 ? 
12 : 20;\n track.timescale = (mdhd[index] << 24 | mdhd[index + 1] << 16 | mdhd[index + 2] << 8 | mdhd[index + 3]) >>> 0;\n }\n\n var stbl = findBox(mdia, ['minf', 'stbl'])[0];\n var stsd = findBox(stbl, ['stsd'])[0];\n var descriptionCount = bytesToNumber(stsd.subarray(4, 8));\n var offset = 8; // add codec and codec info\n\n while (descriptionCount--) {\n var len = bytesToNumber(stsd.subarray(offset, offset + 4));\n var sampleDescriptor = stsd.subarray(offset + 4, offset + 4 + len);\n addSampleDescription(track, sampleDescriptor);\n offset += 4 + len;\n }\n\n if (frameTable) {\n track.frameTable = buildFrameTable(stbl, track.timescale);\n } // codec has no sub parameters\n\n\n tracks.push(track);\n });\n return tracks;\n};\nexport var parseMediaInfo = function parseMediaInfo(bytes) {\n var mvhd = findBox(bytes, ['moov', 'mvhd'], true)[0];\n\n if (!mvhd || !mvhd.length) {\n return;\n }\n\n var info = {}; // ms to ns\n // mvhd v1 has 8 byte duration and other fields too\n\n if (mvhd[0] === 1) {\n info.timestampScale = bytesToNumber(mvhd.subarray(20, 24));\n info.duration = bytesToNumber(mvhd.subarray(24, 32));\n } else {\n info.timestampScale = bytesToNumber(mvhd.subarray(12, 16));\n info.duration = bytesToNumber(mvhd.subarray(16, 20));\n }\n\n info.bytes = mvhd;\n return info;\n};","import { toUint8, bytesToNumber, bytesMatch, bytesToString, numberToBytes, padStart } from './byte-helpers';\nimport { getAvcCodec, getHvcCodec, getAv1Codec } from './codec-helpers.js'; // relevant specs for this parser:\n// https://matroska-org.github.io/libebml/specs.html\n// https://www.matroska.org/technical/elements.html\n// https://www.webmproject.org/docs/container/\n\nexport var EBML_TAGS = {\n EBML: toUint8([0x1A, 0x45, 0xDF, 0xA3]),\n DocType: toUint8([0x42, 0x82]),\n Segment: toUint8([0x18, 0x53, 0x80, 0x67]),\n SegmentInfo: toUint8([0x15, 0x49, 0xA9, 0x66]),\n Tracks: toUint8([0x16, 0x54, 0xAE, 0x6B]),\n Track: toUint8([0xAE]),\n TrackNumber: toUint8([0xd7]),\n DefaultDuration: toUint8([0x23, 0xe3, 0x83]),\n TrackEntry: toUint8([0xAE]),\n TrackType: toUint8([0x83]),\n FlagDefault: toUint8([0x88]),\n CodecID: toUint8([0x86]),\n CodecPrivate: toUint8([0x63, 0xA2]),\n VideoTrack: toUint8([0xe0]),\n AudioTrack: toUint8([0xe1]),\n // Not used yet, but will be used for live webm/mkv\n // see https://www.matroska.org/technical/basics.html#block-structure\n // see https://www.matroska.org/technical/basics.html#simpleblock-structure\n Cluster: toUint8([0x1F, 0x43, 0xB6, 0x75]),\n Timestamp: toUint8([0xE7]),\n TimestampScale: toUint8([0x2A, 0xD7, 0xB1]),\n BlockGroup: toUint8([0xA0]),\n BlockDuration: toUint8([0x9B]),\n Block: toUint8([0xA1]),\n SimpleBlock: toUint8([0xA3])\n};\n/**\n * This is a simple table to determine the length\n * of things in ebml. The length is one based (starts at 1,\n * rather than zero) and for every zero bit before a one bit\n * we add one to length. We also need this table because in some\n * case we have to xor all the length bits from another value.\n */\n\nvar LENGTH_TABLE = [128, 64, 32, 16, 8, 4, 2, 1];\n\nvar getLength = function getLength(byte) {\n var len = 1;\n\n for (var i = 0; i < LENGTH_TABLE.length; i++) {\n if (byte & LENGTH_TABLE[i]) {\n break;\n }\n\n len++;\n }\n\n return len;\n}; // length in ebml is stored in the first 4 to 8 bits\n// of the first byte. 4 for the id length and 8 for the\n// data size length. 
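// --- Illustrative aside (standalone, mirrors getLength above) ----------------
// The vint length rule: count the leading zero bits of the first byte and add
// one. This is only a worked example; `vintLength` is not part of this module.
const vintLength = (firstByte: number): number => {
  let len = 1;
  for (let mask = 0x80; mask; mask >>= 1) {
    if (firstByte & mask) {
      break;
    }
    len++;
  }
  return len;
};

// 0x1A = 0b00011010 -> three leading zeros -> a 4 byte id (EBML header 0x1A45DFA3)
// 0x42 = 0b01000010 -> one leading zero   -> a 2 byte id (DocType 0x4282)
// 0x81 = 0b10000001 -> no leading zeros   -> a 1 byte data size (value 1 once
//                      the length bit is xor'd away, as getvint does below)
// vintLength(0x1a) === 4; vintLength(0x42) === 2; vintLength(0x81) === 1;
// -----------------------------------------------------------------------------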
Length is measured by converting the number to binary\n// then 1 + the number of zeros before a 1 is encountered starting\n// from the left.\n\n\nvar getvint = function getvint(bytes, offset, removeLength, signed) {\n if (removeLength === void 0) {\n removeLength = true;\n }\n\n if (signed === void 0) {\n signed = false;\n }\n\n var length = getLength(bytes[offset]);\n var valueBytes = bytes.subarray(offset, offset + length); // NOTE that we do **not** subarray here because we need to copy these bytes\n // as they will be modified below to remove the dataSizeLen bits and we do not\n // want to modify the original data. normally we could just call slice on\n // uint8array but ie 11 does not support that...\n\n if (removeLength) {\n valueBytes = Array.prototype.slice.call(bytes, offset, offset + length);\n valueBytes[0] ^= LENGTH_TABLE[length - 1];\n }\n\n return {\n length: length,\n value: bytesToNumber(valueBytes, {\n signed: signed\n }),\n bytes: valueBytes\n };\n};\n\nvar normalizePath = function normalizePath(path) {\n if (typeof path === 'string') {\n return path.match(/.{1,2}/g).map(function (p) {\n return normalizePath(p);\n });\n }\n\n if (typeof path === 'number') {\n return numberToBytes(path);\n }\n\n return path;\n};\n\nvar normalizePaths = function normalizePaths(paths) {\n if (!Array.isArray(paths)) {\n return [normalizePath(paths)];\n }\n\n return paths.map(function (p) {\n return normalizePath(p);\n });\n};\n\nvar getInfinityDataSize = function getInfinityDataSize(id, bytes, offset) {\n if (offset >= bytes.length) {\n return bytes.length;\n }\n\n var innerid = getvint(bytes, offset, false);\n\n if (bytesMatch(id.bytes, innerid.bytes)) {\n return offset;\n }\n\n var dataHeader = getvint(bytes, offset + innerid.length);\n return getInfinityDataSize(id, bytes, offset + dataHeader.length + dataHeader.value + innerid.length);\n};\n/**\n * Notes on the EBLM format.\n *\n * EBLM uses \"vints\" tags. Every vint tag contains\n * two parts\n *\n * 1. The length from the first byte. You get this by\n * converting the byte to binary and counting the zeros\n * before a 1. Then you add 1 to that. Examples\n * 00011111 = length 4 because there are 3 zeros before a 1.\n * 00100000 = length 3 because there are 2 zeros before a 1.\n * 00000011 = length 7 because there are 6 zeros before a 1.\n *\n * 2. The bits used for length are removed from the first byte\n * Then all the bytes are merged into a value. NOTE: this\n * is not the case for id ebml tags as there id includes\n * length bits.\n *\n */\n\n\nexport var findEbml = function findEbml(bytes, paths) {\n paths = normalizePaths(paths);\n bytes = toUint8(bytes);\n var results = [];\n\n if (!paths.length) {\n return results;\n }\n\n var i = 0;\n\n while (i < bytes.length) {\n var id = getvint(bytes, i, false);\n var dataHeader = getvint(bytes, i + id.length);\n var dataStart = i + id.length + dataHeader.length; // dataSize is unknown or this is a live stream\n\n if (dataHeader.value === 0x7f) {\n dataHeader.value = getInfinityDataSize(id, bytes, dataStart);\n\n if (dataHeader.value !== bytes.length) {\n dataHeader.value -= dataStart;\n }\n }\n\n var dataEnd = dataStart + dataHeader.value > bytes.length ? 
bytes.length : dataStart + dataHeader.value;\n var data = bytes.subarray(dataStart, dataEnd);\n\n if (bytesMatch(paths[0], id.bytes)) {\n if (paths.length === 1) {\n // this is the end of the paths and we've found the tag we were\n // looking for\n results.push(data);\n } else {\n // recursively search for the next tag inside of the data\n // of this one\n results = results.concat(findEbml(data, paths.slice(1)));\n }\n }\n\n var totalLength = id.length + dataHeader.length + data.length; // move past this tag entirely, we are not looking for it\n\n i += totalLength;\n }\n\n return results;\n}; // see https://www.matroska.org/technical/basics.html#block-structure\n\nexport var decodeBlock = function decodeBlock(block, type, timestampScale, clusterTimestamp) {\n var duration;\n\n if (type === 'group') {\n duration = findEbml(block, [EBML_TAGS.BlockDuration])[0];\n\n if (duration) {\n duration = bytesToNumber(duration);\n duration = 1 / timestampScale * duration * timestampScale / 1000;\n }\n\n block = findEbml(block, [EBML_TAGS.Block])[0];\n type = 'block'; // treat data as a block after this point\n }\n\n var dv = new DataView(block.buffer, block.byteOffset, block.byteLength);\n var trackNumber = getvint(block, 0);\n var timestamp = dv.getInt16(trackNumber.length, false);\n var flags = block[trackNumber.length + 2];\n var data = block.subarray(trackNumber.length + 3); // pts/dts in seconds\n\n var ptsdts = 1 / timestampScale * (clusterTimestamp + timestamp) * timestampScale / 1000; // return the frame\n\n var parsed = {\n duration: duration,\n trackNumber: trackNumber.value,\n keyframe: type === 'simple' && flags >> 7 === 1,\n invisible: (flags & 0x08) >> 3 === 1,\n lacing: (flags & 0x06) >> 1,\n discardable: type === 'simple' && (flags & 0x01) === 1,\n frames: [],\n pts: ptsdts,\n dts: ptsdts,\n timestamp: timestamp\n };\n\n if (!parsed.lacing) {\n parsed.frames.push(data);\n return parsed;\n }\n\n var numberOfFrames = data[0] + 1;\n var frameSizes = [];\n var offset = 1; // Fixed\n\n if (parsed.lacing === 2) {\n var sizeOfFrame = (data.length - offset) / numberOfFrames;\n\n for (var i = 0; i < numberOfFrames; i++) {\n frameSizes.push(sizeOfFrame);\n }\n } // xiph\n\n\n if (parsed.lacing === 1) {\n for (var _i = 0; _i < numberOfFrames - 1; _i++) {\n var size = 0;\n\n do {\n size += data[offset];\n offset++;\n } while (data[offset - 1] === 0xFF);\n\n frameSizes.push(size);\n }\n } // ebml\n\n\n if (parsed.lacing === 3) {\n // first vint is unsinged\n // after that vints are singed and\n // based on a compounding size\n var _size = 0;\n\n for (var _i2 = 0; _i2 < numberOfFrames - 1; _i2++) {\n var vint = _i2 === 0 ? 
getvint(data, offset) : getvint(data, offset, true, true);\n _size += vint.value;\n frameSizes.push(_size);\n offset += vint.length;\n }\n }\n\n frameSizes.forEach(function (size) {\n parsed.frames.push(data.subarray(offset, offset + size));\n offset += size;\n });\n return parsed;\n}; // VP9 Codec Feature Metadata (CodecPrivate)\n// https://www.webmproject.org/docs/container/\n\nvar parseVp9Private = function parseVp9Private(bytes) {\n var i = 0;\n var params = {};\n\n while (i < bytes.length) {\n var id = bytes[i] & 0x7f;\n var len = bytes[i + 1];\n var val = void 0;\n\n if (len === 1) {\n val = bytes[i + 2];\n } else {\n val = bytes.subarray(i + 2, i + 2 + len);\n }\n\n if (id === 1) {\n params.profile = val;\n } else if (id === 2) {\n params.level = val;\n } else if (id === 3) {\n params.bitDepth = val;\n } else if (id === 4) {\n params.chromaSubsampling = val;\n } else {\n params[id] = val;\n }\n\n i += 2 + len;\n }\n\n return params;\n};\n\nexport var parseTracks = function parseTracks(bytes) {\n bytes = toUint8(bytes);\n var decodedTracks = [];\n var tracks = findEbml(bytes, [EBML_TAGS.Segment, EBML_TAGS.Tracks, EBML_TAGS.Track]);\n\n if (!tracks.length) {\n tracks = findEbml(bytes, [EBML_TAGS.Tracks, EBML_TAGS.Track]);\n }\n\n if (!tracks.length) {\n tracks = findEbml(bytes, [EBML_TAGS.Track]);\n }\n\n if (!tracks.length) {\n return decodedTracks;\n }\n\n tracks.forEach(function (track) {\n var trackType = findEbml(track, EBML_TAGS.TrackType)[0];\n\n if (!trackType || !trackType.length) {\n return;\n } // 1 is video, 2 is audio, 17 is subtitle\n // other values are unimportant in this context\n\n\n if (trackType[0] === 1) {\n trackType = 'video';\n } else if (trackType[0] === 2) {\n trackType = 'audio';\n } else if (trackType[0] === 17) {\n trackType = 'subtitle';\n } else {\n return;\n } // todo parse language\n\n\n var decodedTrack = {\n rawCodec: bytesToString(findEbml(track, [EBML_TAGS.CodecID])[0]),\n type: trackType,\n codecPrivate: findEbml(track, [EBML_TAGS.CodecPrivate])[0],\n number: bytesToNumber(findEbml(track, [EBML_TAGS.TrackNumber])[0]),\n defaultDuration: bytesToNumber(findEbml(track, [EBML_TAGS.DefaultDuration])[0]),\n default: findEbml(track, [EBML_TAGS.FlagDefault])[0],\n rawData: track\n };\n var codec = '';\n\n if (/V_MPEG4\\/ISO\\/AVC/.test(decodedTrack.rawCodec)) {\n codec = \"avc1.\" + getAvcCodec(decodedTrack.codecPrivate);\n } else if (/V_MPEGH\\/ISO\\/HEVC/.test(decodedTrack.rawCodec)) {\n codec = \"hev1.\" + getHvcCodec(decodedTrack.codecPrivate);\n } else if (/V_MPEG4\\/ISO\\/ASP/.test(decodedTrack.rawCodec)) {\n if (decodedTrack.codecPrivate) {\n codec = 'mp4v.20.' 
+ decodedTrack.codecPrivate[4].toString();\n } else {\n codec = 'mp4v.20.9';\n }\n } else if (/^V_THEORA/.test(decodedTrack.rawCodec)) {\n codec = 'theora';\n } else if (/^V_VP8/.test(decodedTrack.rawCodec)) {\n codec = 'vp8';\n } else if (/^V_VP9/.test(decodedTrack.rawCodec)) {\n if (decodedTrack.codecPrivate) {\n var _parseVp9Private = parseVp9Private(decodedTrack.codecPrivate),\n profile = _parseVp9Private.profile,\n level = _parseVp9Private.level,\n bitDepth = _parseVp9Private.bitDepth,\n chromaSubsampling = _parseVp9Private.chromaSubsampling;\n\n codec = 'vp09.';\n codec += padStart(profile, 2, '0') + \".\";\n codec += padStart(level, 2, '0') + \".\";\n codec += padStart(bitDepth, 2, '0') + \".\";\n codec += \"\" + padStart(chromaSubsampling, 2, '0'); // Video -> Colour -> Ebml name\n\n var matrixCoefficients = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xB1]])[0] || [];\n var videoFullRangeFlag = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xB9]])[0] || [];\n var transferCharacteristics = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xBA]])[0] || [];\n var colourPrimaries = findEbml(track, [0xE0, [0x55, 0xB0], [0x55, 0xBB]])[0] || []; // if we find any optional codec parameter specify them all.\n\n if (matrixCoefficients.length || videoFullRangeFlag.length || transferCharacteristics.length || colourPrimaries.length) {\n codec += \".\" + padStart(colourPrimaries[0], 2, '0');\n codec += \".\" + padStart(transferCharacteristics[0], 2, '0');\n codec += \".\" + padStart(matrixCoefficients[0], 2, '0');\n codec += \".\" + padStart(videoFullRangeFlag[0], 2, '0');\n }\n } else {\n codec = 'vp9';\n }\n } else if (/^V_AV1/.test(decodedTrack.rawCodec)) {\n codec = \"av01.\" + getAv1Codec(decodedTrack.codecPrivate);\n } else if (/A_ALAC/.test(decodedTrack.rawCodec)) {\n codec = 'alac';\n } else if (/A_MPEG\\/L2/.test(decodedTrack.rawCodec)) {\n codec = 'mp2';\n } else if (/A_MPEG\\/L3/.test(decodedTrack.rawCodec)) {\n codec = 'mp3';\n } else if (/^A_AAC/.test(decodedTrack.rawCodec)) {\n if (decodedTrack.codecPrivate) {\n codec = 'mp4a.40.' 
+ (decodedTrack.codecPrivate[0] >>> 3).toString();\n } else {\n codec = 'mp4a.40.2';\n }\n } else if (/^A_AC3/.test(decodedTrack.rawCodec)) {\n codec = 'ac-3';\n } else if (/^A_PCM/.test(decodedTrack.rawCodec)) {\n codec = 'pcm';\n } else if (/^A_MS\\/ACM/.test(decodedTrack.rawCodec)) {\n codec = 'speex';\n } else if (/^A_EAC3/.test(decodedTrack.rawCodec)) {\n codec = 'ec-3';\n } else if (/^A_VORBIS/.test(decodedTrack.rawCodec)) {\n codec = 'vorbis';\n } else if (/^A_FLAC/.test(decodedTrack.rawCodec)) {\n codec = 'flac';\n } else if (/^A_OPUS/.test(decodedTrack.rawCodec)) {\n codec = 'opus';\n }\n\n decodedTrack.codec = codec;\n decodedTracks.push(decodedTrack);\n });\n return decodedTracks.sort(function (a, b) {\n return a.number - b.number;\n });\n};\nexport var parseData = function parseData(data, tracks) {\n var allBlocks = [];\n var segment = findEbml(data, [EBML_TAGS.Segment])[0];\n var timestampScale = findEbml(segment, [EBML_TAGS.SegmentInfo, EBML_TAGS.TimestampScale])[0]; // in nanoseconds, defaults to 1ms\n\n if (timestampScale && timestampScale.length) {\n timestampScale = bytesToNumber(timestampScale);\n } else {\n timestampScale = 1000000;\n }\n\n var clusters = findEbml(segment, [EBML_TAGS.Cluster]);\n\n if (!tracks) {\n tracks = parseTracks(segment);\n }\n\n clusters.forEach(function (cluster, ci) {\n var simpleBlocks = findEbml(cluster, [EBML_TAGS.SimpleBlock]).map(function (b) {\n return {\n type: 'simple',\n data: b\n };\n });\n var blockGroups = findEbml(cluster, [EBML_TAGS.BlockGroup]).map(function (b) {\n return {\n type: 'group',\n data: b\n };\n });\n var timestamp = findEbml(cluster, [EBML_TAGS.Timestamp])[0] || 0;\n\n if (timestamp && timestamp.length) {\n timestamp = bytesToNumber(timestamp);\n } // get all blocks then sort them into the correct order\n\n\n var blocks = simpleBlocks.concat(blockGroups).sort(function (a, b) {\n return a.data.byteOffset - b.data.byteOffset;\n });\n blocks.forEach(function (block, bi) {\n var decoded = decodeBlock(block.data, block.type, timestampScale, timestamp);\n allBlocks.push(decoded);\n });\n });\n return {\n tracks: tracks,\n blocks: allBlocks\n };\n};","import { bytesMatch, toUint8 } from './byte-helpers.js';\nexport var NAL_TYPE_ONE = toUint8([0x00, 0x00, 0x00, 0x01]);\nexport var NAL_TYPE_TWO = toUint8([0x00, 0x00, 0x01]);\nexport var EMULATION_PREVENTION = toUint8([0x00, 0x00, 0x03]);\n/**\n * Expunge any \"Emulation Prevention\" bytes from a \"Raw Byte\n * Sequence Payload\"\n *\n * @param data {Uint8Array} the bytes of a RBSP from a NAL\n * unit\n * @return {Uint8Array} the RBSP without any Emulation\n * Prevention Bytes\n */\n\nexport var discardEmulationPreventionBytes = function discardEmulationPreventionBytes(bytes) {\n var positions = [];\n var i = 1; // Find all `Emulation Prevention Bytes`\n\n while (i < bytes.length - 2) {\n if (bytesMatch(bytes.subarray(i, i + 3), EMULATION_PREVENTION)) {\n positions.push(i + 2);\n i++;\n }\n\n i++;\n } // If no Emulation Prevention Bytes were found just return the original\n // array\n\n\n if (positions.length === 0) {\n return bytes;\n } // Create a new array to hold the NAL unit data\n\n\n var newLength = bytes.length - positions.length;\n var newData = new Uint8Array(newLength);\n var sourceIndex = 0;\n\n for (i = 0; i < newLength; sourceIndex++, i++) {\n if (sourceIndex === positions[0]) {\n // Skip this byte\n sourceIndex++; // Remove this position index\n\n positions.shift();\n }\n\n newData[i] = bytes[sourceIndex];\n }\n\n return newData;\n};\nexport var findNal = 
function findNal(bytes, dataType, types, nalLimit) {\n if (nalLimit === void 0) {\n nalLimit = Infinity;\n }\n\n bytes = toUint8(bytes);\n types = [].concat(types);\n var i = 0;\n var nalStart;\n var nalsFound = 0; // keep searching until:\n // we reach the end of bytes\n // we reach the maximum number of nals they want to seach\n // NOTE: that we disregard nalLimit when we have found the start\n // of the nal we want so that we can find the end of the nal we want.\n\n while (i < bytes.length && (nalsFound < nalLimit || nalStart)) {\n var nalOffset = void 0;\n\n if (bytesMatch(bytes.subarray(i), NAL_TYPE_ONE)) {\n nalOffset = 4;\n } else if (bytesMatch(bytes.subarray(i), NAL_TYPE_TWO)) {\n nalOffset = 3;\n } // we are unsynced,\n // find the next nal unit\n\n\n if (!nalOffset) {\n i++;\n continue;\n }\n\n nalsFound++;\n\n if (nalStart) {\n return discardEmulationPreventionBytes(bytes.subarray(nalStart, i));\n }\n\n var nalType = void 0;\n\n if (dataType === 'h264') {\n nalType = bytes[i + nalOffset] & 0x1f;\n } else if (dataType === 'h265') {\n nalType = bytes[i + nalOffset] >> 1 & 0x3f;\n }\n\n if (types.indexOf(nalType) !== -1) {\n nalStart = i + nalOffset;\n } // nal header is 1 length for h264, and 2 for h265\n\n\n i += nalOffset + (dataType === 'h264' ? 1 : 2);\n }\n\n return bytes.subarray(0, 0);\n};\nexport var findH264Nal = function findH264Nal(bytes, type, nalLimit) {\n return findNal(bytes, 'h264', type, nalLimit);\n};\nexport var findH265Nal = function findH265Nal(bytes, type, nalLimit) {\n return findNal(bytes, 'h265', type, nalLimit);\n};","import { toUint8, bytesMatch } from './byte-helpers.js';\nimport { findBox } from './mp4-helpers.js';\nimport { findEbml, EBML_TAGS } from './ebml-helpers.js';\nimport { getId3Offset } from './id3-helpers.js';\nimport { findH264Nal, findH265Nal } from './nal-helpers.js';\nvar CONSTANTS = {\n // \"webm\" string literal in hex\n 'webm': toUint8([0x77, 0x65, 0x62, 0x6d]),\n // \"matroska\" string literal in hex\n 'matroska': toUint8([0x6d, 0x61, 0x74, 0x72, 0x6f, 0x73, 0x6b, 0x61]),\n // \"fLaC\" string literal in hex\n 'flac': toUint8([0x66, 0x4c, 0x61, 0x43]),\n // \"OggS\" string literal in hex\n 'ogg': toUint8([0x4f, 0x67, 0x67, 0x53]),\n // ac-3 sync byte, also works for ec-3 as that is simply a codec\n // of ac-3\n 'ac3': toUint8([0x0b, 0x77]),\n // \"RIFF\" string literal in hex used for wav and avi\n 'riff': toUint8([0x52, 0x49, 0x46, 0x46]),\n // \"AVI\" string literal in hex\n 'avi': toUint8([0x41, 0x56, 0x49]),\n // \"WAVE\" string literal in hex\n 'wav': toUint8([0x57, 0x41, 0x56, 0x45]),\n // \"ftyp3g\" string literal in hex\n '3gp': toUint8([0x66, 0x74, 0x79, 0x70, 0x33, 0x67]),\n // \"ftyp\" string literal in hex\n 'mp4': toUint8([0x66, 0x74, 0x79, 0x70]),\n // \"styp\" string literal in hex\n 'fmp4': toUint8([0x73, 0x74, 0x79, 0x70]),\n // \"ftypqt\" string literal in hex\n 'mov': toUint8([0x66, 0x74, 0x79, 0x70, 0x71, 0x74]),\n // moov string literal in hex\n 'moov': toUint8([0x6D, 0x6F, 0x6F, 0x76]),\n // moof string literal in hex\n 'moof': toUint8([0x6D, 0x6F, 0x6F, 0x66])\n};\nvar _isLikely = {\n aac: function aac(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, [0xFF, 0x10], {\n offset: offset,\n mask: [0xFF, 0x16]\n });\n },\n mp3: function mp3(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, [0xFF, 0x02], {\n offset: offset,\n mask: [0xFF, 0x06]\n });\n },\n webm: function webm(bytes) {\n var docType = findEbml(bytes, [EBML_TAGS.EBML, EBML_TAGS.DocType])[0]; // check if 
DocType EBML tag is webm\n\n return bytesMatch(docType, CONSTANTS.webm);\n },\n mkv: function mkv(bytes) {\n var docType = findEbml(bytes, [EBML_TAGS.EBML, EBML_TAGS.DocType])[0]; // check if DocType EBML tag is matroska\n\n return bytesMatch(docType, CONSTANTS.matroska);\n },\n mp4: function mp4(bytes) {\n // if this file is another base media file format, it is not mp4\n if (_isLikely['3gp'](bytes) || _isLikely.mov(bytes)) {\n return false;\n } // if this file starts with a ftyp or styp box its mp4\n\n\n if (bytesMatch(bytes, CONSTANTS.mp4, {\n offset: 4\n }) || bytesMatch(bytes, CONSTANTS.fmp4, {\n offset: 4\n })) {\n return true;\n } // if this file starts with a moof/moov box its mp4\n\n\n if (bytesMatch(bytes, CONSTANTS.moof, {\n offset: 4\n }) || bytesMatch(bytes, CONSTANTS.moov, {\n offset: 4\n })) {\n return true;\n }\n },\n mov: function mov(bytes) {\n return bytesMatch(bytes, CONSTANTS.mov, {\n offset: 4\n });\n },\n '3gp': function gp(bytes) {\n return bytesMatch(bytes, CONSTANTS['3gp'], {\n offset: 4\n });\n },\n ac3: function ac3(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, CONSTANTS.ac3, {\n offset: offset\n });\n },\n ts: function ts(bytes) {\n if (bytes.length < 189 && bytes.length >= 1) {\n return bytes[0] === 0x47;\n }\n\n var i = 0; // check the first 376 bytes for two matching sync bytes\n\n while (i + 188 < bytes.length && i < 188) {\n if (bytes[i] === 0x47 && bytes[i + 188] === 0x47) {\n return true;\n }\n\n i += 1;\n }\n\n return false;\n },\n flac: function flac(bytes) {\n var offset = getId3Offset(bytes);\n return bytesMatch(bytes, CONSTANTS.flac, {\n offset: offset\n });\n },\n ogg: function ogg(bytes) {\n return bytesMatch(bytes, CONSTANTS.ogg);\n },\n avi: function avi(bytes) {\n return bytesMatch(bytes, CONSTANTS.riff) && bytesMatch(bytes, CONSTANTS.avi, {\n offset: 8\n });\n },\n wav: function wav(bytes) {\n return bytesMatch(bytes, CONSTANTS.riff) && bytesMatch(bytes, CONSTANTS.wav, {\n offset: 8\n });\n },\n 'h264': function h264(bytes) {\n // find seq_parameter_set_rbsp\n return findH264Nal(bytes, 7, 3).length;\n },\n 'h265': function h265(bytes) {\n // find video_parameter_set_rbsp or seq_parameter_set_rbsp\n return findH265Nal(bytes, [32, 33], 3).length;\n }\n}; // get all the isLikely functions\n// but make sure 'ts' is above h264 and h265\n// but below everything else as it is the least specific\n\nvar isLikelyTypes = Object.keys(_isLikely) // remove ts, h264, h265\n.filter(function (t) {\n return t !== 'ts' && t !== 'h264' && t !== 'h265';\n}) // add it back to the bottom\n.concat(['ts', 'h264', 'h265']); // make sure we are dealing with uint8 data.\n\nisLikelyTypes.forEach(function (type) {\n var isLikelyFn = _isLikely[type];\n\n _isLikely[type] = function (bytes) {\n return isLikelyFn(toUint8(bytes));\n };\n}); // export after wrapping\n\nexport var isLikely = _isLikely; // A useful list of file signatures can be found here\n// https://en.wikipedia.org/wiki/List_of_file_signatures\n\nexport var detectContainerForBytes = function detectContainerForBytes(bytes) {\n bytes = toUint8(bytes);\n\n for (var i = 0; i < isLikelyTypes.length; i++) {\n var type = isLikelyTypes[i];\n\n if (isLikely[type](bytes)) {\n return type;\n }\n }\n\n return '';\n}; // fmp4 is not a container\n\nexport var isLikelyFmp4MediaSegment = function isLikelyFmp4MediaSegment(bytes) {\n return findBox(bytes, ['moof']).length > 0;\n};","import {Component, ElementRef, Input, OnDestroy, OnInit, ViewChild, ViewEncapsulation} from '@angular/core';\r\nimport 
videojs from 'video.js';\r\nimport Player from \"video.js/dist/types/player\";\r\n// import Player from \"video.js/dist/types/player\";\r\n\r\n@Component({\r\n selector: 'app-vjs-player',\r\n standalone: true,\r\n imports: [],\r\n encapsulation: ViewEncapsulation.None,\r\n templateUrl: './vjs-player.component.html',\r\n styleUrl: './vjs-player.component.css'\r\n})\r\n\r\nexport class VjsPlayerComponent implements OnInit, OnDestroy {\r\n @ViewChild('target', { static: true })\r\n target!: ElementRef;\r\n\r\n // See options: https://videojs.com/guides/options\r\n // @Input() options: {\r\n @Input()\r\n options!: {\r\n fluid: boolean;\r\n controls: boolean;\r\n aspectRatio: string;\r\n autoplay: boolean;\r\n sources: {\r\n src: string;\r\n type: string;\r\n }[];\r\n };\r\n\r\n // player: videojs.Player;\r\n player!: Player;\r\n // player: Player;\r\n\r\n constructor(\r\n private elementRef: ElementRef,\r\n ) {}\r\n\r\n // Instantiate a Video.js player OnInit\r\n ngOnInit() {\r\n this.player = videojs(this.target.nativeElement, this.options, () => {\r\n console.log('onPlayerReady', this);\r\n });\r\n }\r\n\r\n // Dispose the player OnDestroy\r\n ngOnDestroy() {\r\n if (this.player) {\r\n this.player.dispose();\r\n }\r\n }\r\n}\r\n","\r\n\r\n ","import { coerceNumberProperty, coerceElement } from '@angular/cdk/coercion';\nimport * as i0 from '@angular/core';\nimport { InjectionToken, forwardRef, Directive, Input, Injectable, Optional, Inject, inject, booleanAttribute, Component, ViewEncapsulation, ChangeDetectionStrategy, Output, ViewChild, SkipSelf, ElementRef, NgModule } from '@angular/core';\nimport { Subject, of, Observable, fromEvent, animationFrameScheduler, asapScheduler, Subscription, isObservable } from 'rxjs';\nimport { distinctUntilChanged, auditTime, filter, takeUntil, startWith, pairwise, switchMap, shareReplay } from 'rxjs/operators';\nimport * as i1 from '@angular/cdk/platform';\nimport { getRtlScrollAxisType, RtlScrollAxisType, supportsScrollBehavior, Platform } from '@angular/cdk/platform';\nimport { DOCUMENT } from '@angular/common';\nimport * as i2 from '@angular/cdk/bidi';\nimport { BidiModule } from '@angular/cdk/bidi';\nimport * as i2$1 from '@angular/cdk/collections';\nimport { isDataSource, ArrayDataSource, _VIEW_REPEATER_STRATEGY, _RecycleViewRepeaterStrategy } from '@angular/cdk/collections';\n\n/** The injection token used to specify the virtual scrolling strategy. */\nconst _c0 = [\"contentWrapper\"];\nconst _c1 = [\"*\"];\nconst VIRTUAL_SCROLL_STRATEGY = new InjectionToken('VIRTUAL_SCROLL_STRATEGY');\n\n/** Virtual scrolling strategy for lists with items of known fixed size. */\nclass FixedSizeVirtualScrollStrategy {\n /**\n * @param itemSize The size of the items in the virtually scrolling list.\n * @param minBufferPx The minimum amount of buffer (in pixels) before needing to render more\n * @param maxBufferPx The amount of buffer (in pixels) to render when rendering more.\n */\n constructor(itemSize, minBufferPx, maxBufferPx) {\n this._scrolledIndexChange = new Subject();\n /** @docs-private Implemented as part of VirtualScrollStrategy. */\n this.scrolledIndexChange = this._scrolledIndexChange.pipe(distinctUntilChanged());\n /** The attached viewport. 
*/\n this._viewport = null;\n this._itemSize = itemSize;\n this._minBufferPx = minBufferPx;\n this._maxBufferPx = maxBufferPx;\n }\n /**\n * Attaches this scroll strategy to a viewport.\n * @param viewport The viewport to attach this strategy to.\n */\n attach(viewport) {\n this._viewport = viewport;\n this._updateTotalContentSize();\n this._updateRenderedRange();\n }\n /** Detaches this scroll strategy from the currently attached viewport. */\n detach() {\n this._scrolledIndexChange.complete();\n this._viewport = null;\n }\n /**\n * Update the item size and buffer size.\n * @param itemSize The size of the items in the virtually scrolling list.\n * @param minBufferPx The minimum amount of buffer (in pixels) before needing to render more\n * @param maxBufferPx The amount of buffer (in pixels) to render when rendering more.\n */\n updateItemAndBufferSize(itemSize, minBufferPx, maxBufferPx) {\n if (maxBufferPx < minBufferPx && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw Error('CDK virtual scroll: maxBufferPx must be greater than or equal to minBufferPx');\n }\n this._itemSize = itemSize;\n this._minBufferPx = minBufferPx;\n this._maxBufferPx = maxBufferPx;\n this._updateTotalContentSize();\n this._updateRenderedRange();\n }\n /** @docs-private Implemented as part of VirtualScrollStrategy. */\n onContentScrolled() {\n this._updateRenderedRange();\n }\n /** @docs-private Implemented as part of VirtualScrollStrategy. */\n onDataLengthChanged() {\n this._updateTotalContentSize();\n this._updateRenderedRange();\n }\n /** @docs-private Implemented as part of VirtualScrollStrategy. */\n onContentRendered() {\n /* no-op */\n }\n /** @docs-private Implemented as part of VirtualScrollStrategy. */\n onRenderedOffsetChanged() {\n /* no-op */\n }\n /**\n * Scroll to the offset for the given index.\n * @param index The index of the element to scroll to.\n * @param behavior The ScrollBehavior to use when scrolling.\n */\n scrollToIndex(index, behavior) {\n if (this._viewport) {\n this._viewport.scrollToOffset(index * this._itemSize, behavior);\n }\n }\n /** Update the viewport's total content size. */\n _updateTotalContentSize() {\n if (!this._viewport) {\n return;\n }\n this._viewport.setTotalContentSize(this._viewport.getDataLength() * this._itemSize);\n }\n /** Update the viewport's rendered range. */\n _updateRenderedRange() {\n if (!this._viewport) {\n return;\n }\n const renderedRange = this._viewport.getRenderedRange();\n const newRange = {\n start: renderedRange.start,\n end: renderedRange.end\n };\n const viewportSize = this._viewport.getViewportSize();\n const dataLength = this._viewport.getDataLength();\n let scrollOffset = this._viewport.measureScrollOffset();\n // Prevent NaN as result when dividing by zero.\n let firstVisibleIndex = this._itemSize > 0 ? 
scrollOffset / this._itemSize : 0;\n // If user scrolls to the bottom of the list and data changes to a smaller list\n if (newRange.end > dataLength) {\n // We have to recalculate the first visible index based on new data length and viewport size.\n const maxVisibleItems = Math.ceil(viewportSize / this._itemSize);\n const newVisibleIndex = Math.max(0, Math.min(firstVisibleIndex, dataLength - maxVisibleItems));\n // If first visible index changed we must update scroll offset to handle start/end buffers\n // Current range must also be adjusted to cover the new position (bottom of new list).\n if (firstVisibleIndex != newVisibleIndex) {\n firstVisibleIndex = newVisibleIndex;\n scrollOffset = newVisibleIndex * this._itemSize;\n newRange.start = Math.floor(firstVisibleIndex);\n }\n newRange.end = Math.max(0, Math.min(dataLength, newRange.start + maxVisibleItems));\n }\n const startBuffer = scrollOffset - newRange.start * this._itemSize;\n if (startBuffer < this._minBufferPx && newRange.start != 0) {\n const expandStart = Math.ceil((this._maxBufferPx - startBuffer) / this._itemSize);\n newRange.start = Math.max(0, newRange.start - expandStart);\n newRange.end = Math.min(dataLength, Math.ceil(firstVisibleIndex + (viewportSize + this._minBufferPx) / this._itemSize));\n } else {\n const endBuffer = newRange.end * this._itemSize - (scrollOffset + viewportSize);\n if (endBuffer < this._minBufferPx && newRange.end != dataLength) {\n const expandEnd = Math.ceil((this._maxBufferPx - endBuffer) / this._itemSize);\n if (expandEnd > 0) {\n newRange.end = Math.min(dataLength, newRange.end + expandEnd);\n newRange.start = Math.max(0, Math.floor(firstVisibleIndex - this._minBufferPx / this._itemSize));\n }\n }\n }\n this._viewport.setRenderedRange(newRange);\n this._viewport.setRenderedContentOffset(this._itemSize * newRange.start);\n this._scrolledIndexChange.next(Math.floor(firstVisibleIndex));\n }\n}\n/**\n * Provider factory for `FixedSizeVirtualScrollStrategy` that simply extracts the already created\n * `FixedSizeVirtualScrollStrategy` from the given directive.\n * @param fixedSizeDir The instance of `CdkFixedSizeVirtualScroll` to extract the\n * `FixedSizeVirtualScrollStrategy` from.\n */\nfunction _fixedSizeVirtualScrollStrategyFactory(fixedSizeDir) {\n return fixedSizeDir._scrollStrategy;\n}\n/** A virtual scroll strategy that supports fixed-size items. */\nclass CdkFixedSizeVirtualScroll {\n constructor() {\n this._itemSize = 20;\n this._minBufferPx = 100;\n this._maxBufferPx = 200;\n /** The scroll strategy used by this directive. */\n this._scrollStrategy = new FixedSizeVirtualScrollStrategy(this.itemSize, this.minBufferPx, this.maxBufferPx);\n }\n /** The size of the items in the list (in pixels). */\n get itemSize() {\n return this._itemSize;\n }\n set itemSize(value) {\n this._itemSize = coerceNumberProperty(value);\n }\n /**\n * The minimum amount of buffer rendered beyond the viewport (in pixels).\n * If the amount of buffer dips below this number, more items will be rendered. Defaults to 100px.\n */\n get minBufferPx() {\n return this._minBufferPx;\n }\n set minBufferPx(value) {\n this._minBufferPx = coerceNumberProperty(value);\n }\n /**\n * The number of pixels worth of buffer to render for when rendering new items. 
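// --- Usage sketch (assumed consumer component, not part of the CDK) ----------
// itemSize, minBufferPx and maxBufferPx above are the three knobs of the
// fixed-size strategy: itemSize should match the rendered row height, and
// maxBufferPx must be >= minBufferPx or updateItemAndBufferSize throws in dev
// mode. A minimal consumer might look like this; the names are illustrative.
import { Component } from '@angular/core';
import { ScrollingModule } from '@angular/cdk/scrolling';

@Component({
  selector: 'app-fixed-size-list',
  standalone: true,
  imports: [ScrollingModule],
  template: `
    <cdk-virtual-scroll-viewport itemSize="48" minBufferPx="200" maxBufferPx="400"
                                 style="height: 320px">
      <div *cdkVirtualFor="let item of items" style="height: 48px">{{ item }}</div>
    </cdk-virtual-scroll-viewport>
  `
})
export class FixedSizeListComponent {
  items = Array.from({ length: 10000 }, (_, i) => `Item #${i}`);
}
// -----------------------------------------------------------------------------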
Defaults to 200px.\n */\n get maxBufferPx() {\n return this._maxBufferPx;\n }\n set maxBufferPx(value) {\n this._maxBufferPx = coerceNumberProperty(value);\n }\n ngOnChanges() {\n this._scrollStrategy.updateItemAndBufferSize(this.itemSize, this.minBufferPx, this.maxBufferPx);\n }\n static {\n this.ɵfac = function CdkFixedSizeVirtualScroll_Factory(t) {\n return new (t || CdkFixedSizeVirtualScroll)();\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkFixedSizeVirtualScroll,\n selectors: [[\"cdk-virtual-scroll-viewport\", \"itemSize\", \"\"]],\n inputs: {\n itemSize: \"itemSize\",\n minBufferPx: \"minBufferPx\",\n maxBufferPx: \"maxBufferPx\"\n },\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: VIRTUAL_SCROLL_STRATEGY,\n useFactory: _fixedSizeVirtualScrollStrategyFactory,\n deps: [forwardRef(() => CdkFixedSizeVirtualScroll)]\n }]), i0.ɵɵNgOnChangesFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkFixedSizeVirtualScroll, [{\n type: Directive,\n args: [{\n selector: 'cdk-virtual-scroll-viewport[itemSize]',\n standalone: true,\n providers: [{\n provide: VIRTUAL_SCROLL_STRATEGY,\n useFactory: _fixedSizeVirtualScrollStrategyFactory,\n deps: [forwardRef(() => CdkFixedSizeVirtualScroll)]\n }]\n }]\n }], null, {\n itemSize: [{\n type: Input\n }],\n minBufferPx: [{\n type: Input\n }],\n maxBufferPx: [{\n type: Input\n }]\n });\n})();\n\n/** Time in ms to throttle the scrolling events by default. */\nconst DEFAULT_SCROLL_TIME = 20;\n/**\n * Service contained all registered Scrollable references and emits an event when any one of the\n * Scrollable references emit a scrolled event.\n */\nclass ScrollDispatcher {\n constructor(_ngZone, _platform, document) {\n this._ngZone = _ngZone;\n this._platform = _platform;\n /** Subject for notifying that a registered scrollable reference element has been scrolled. */\n this._scrolled = new Subject();\n /** Keeps track of the global `scroll` and `resize` subscriptions. */\n this._globalSubscription = null;\n /** Keeps track of the amount of subscriptions to `scrolled`. Used for cleaning up afterwards. */\n this._scrolledCount = 0;\n /**\n * Map of all the scrollable references that are registered with the service and their\n * scroll event subscriptions.\n */\n this.scrollContainers = new Map();\n this._document = document;\n }\n /**\n * Registers a scrollable instance with the service and listens for its scrolled events. When the\n * scrollable is scrolled, the service emits the event to its scrolled observable.\n * @param scrollable Scrollable instance to be registered.\n */\n register(scrollable) {\n if (!this.scrollContainers.has(scrollable)) {\n this.scrollContainers.set(scrollable, scrollable.elementScrolled().subscribe(() => this._scrolled.next(scrollable)));\n }\n }\n /**\n * De-registers a Scrollable reference and unsubscribes from its scroll event observable.\n * @param scrollable Scrollable instance to be deregistered.\n */\n deregister(scrollable) {\n const scrollableReference = this.scrollContainers.get(scrollable);\n if (scrollableReference) {\n scrollableReference.unsubscribe();\n this.scrollContainers.delete(scrollable);\n }\n }\n /**\n * Returns an observable that emits an event whenever any of the registered Scrollable\n * references (or window, document, or body) fire a scrolled event. 
Can provide a time in ms\n * to override the default \"throttle\" time.\n *\n * **Note:** in order to avoid hitting change detection for every scroll event,\n * all of the events emitted from this stream will be run outside the Angular zone.\n * If you need to update any data bindings as a result of a scroll event, you have\n * to run the callback using `NgZone.run`.\n */\n scrolled(auditTimeInMs = DEFAULT_SCROLL_TIME) {\n if (!this._platform.isBrowser) {\n return of();\n }\n return new Observable(observer => {\n if (!this._globalSubscription) {\n this._addGlobalListener();\n }\n // In the case of a 0ms delay, use an observable without auditTime\n // since it does add a perceptible delay in processing overhead.\n const subscription = auditTimeInMs > 0 ? this._scrolled.pipe(auditTime(auditTimeInMs)).subscribe(observer) : this._scrolled.subscribe(observer);\n this._scrolledCount++;\n return () => {\n subscription.unsubscribe();\n this._scrolledCount--;\n if (!this._scrolledCount) {\n this._removeGlobalListener();\n }\n };\n });\n }\n ngOnDestroy() {\n this._removeGlobalListener();\n this.scrollContainers.forEach((_, container) => this.deregister(container));\n this._scrolled.complete();\n }\n /**\n * Returns an observable that emits whenever any of the\n * scrollable ancestors of an element are scrolled.\n * @param elementOrElementRef Element whose ancestors to listen for.\n * @param auditTimeInMs Time to throttle the scroll events.\n */\n ancestorScrolled(elementOrElementRef, auditTimeInMs) {\n const ancestors = this.getAncestorScrollContainers(elementOrElementRef);\n return this.scrolled(auditTimeInMs).pipe(filter(target => {\n return !target || ancestors.indexOf(target) > -1;\n }));\n }\n /** Returns all registered Scrollables that contain the provided element. */\n getAncestorScrollContainers(elementOrElementRef) {\n const scrollingContainers = [];\n this.scrollContainers.forEach((_subscription, scrollable) => {\n if (this._scrollableContainsElement(scrollable, elementOrElementRef)) {\n scrollingContainers.push(scrollable);\n }\n });\n return scrollingContainers;\n }\n /** Use defaultView of injected document if available or fallback to global window reference */\n _getWindow() {\n return this._document.defaultView || window;\n }\n /** Returns true if the element is contained within the provided Scrollable. */\n _scrollableContainsElement(scrollable, elementOrElementRef) {\n let element = coerceElement(elementOrElementRef);\n let scrollableElement = scrollable.getElementRef().nativeElement;\n // Traverse through the element parents until we reach null, checking if any of the elements\n // are the scrollable's element.\n do {\n if (element == scrollableElement) {\n return true;\n }\n } while (element = element.parentElement);\n return false;\n }\n /** Sets up the global scroll listeners. */\n _addGlobalListener() {\n this._globalSubscription = this._ngZone.runOutsideAngular(() => {\n const window = this._getWindow();\n return fromEvent(window.document, 'scroll').subscribe(() => this._scrolled.next());\n });\n }\n /** Cleans up the global scroll listener. 
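Called from `ngOnDestroy` and whenever the last `scrolled()` subscriber unsubscribes.\n *\n * A minimal usage sketch (the `scrollDispatcher` instance is assumed to be injected elsewhere; it is not part of this file):\n * ```ts\n * // Subscribing lazily adds the global listener...\n * const sub = scrollDispatcher.scrolled(0).subscribe(() => {});\n * // ...and unsubscribing the last remaining subscriber removes it again.\n * sub.unsubscribe();\n * ```\n 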
*/\n _removeGlobalListener() {\n if (this._globalSubscription) {\n this._globalSubscription.unsubscribe();\n this._globalSubscription = null;\n }\n }\n static {\n this.ɵfac = function ScrollDispatcher_Factory(t) {\n return new (t || ScrollDispatcher)(i0.ɵɵinject(i0.NgZone), i0.ɵɵinject(i1.Platform), i0.ɵɵinject(DOCUMENT, 8));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: ScrollDispatcher,\n factory: ScrollDispatcher.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(ScrollDispatcher, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: i0.NgZone\n }, {\n type: i1.Platform\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [DOCUMENT]\n }]\n }], null);\n})();\n\n/**\n * Sends an event when the directive's element is scrolled. Registers itself with the\n * ScrollDispatcher service to include itself as part of its collection of scrolling events that it\n * can be listened to through the service.\n */\nclass CdkScrollable {\n constructor(elementRef, scrollDispatcher, ngZone, dir) {\n this.elementRef = elementRef;\n this.scrollDispatcher = scrollDispatcher;\n this.ngZone = ngZone;\n this.dir = dir;\n this._destroyed = new Subject();\n this._elementScrolled = new Observable(observer => this.ngZone.runOutsideAngular(() => fromEvent(this.elementRef.nativeElement, 'scroll').pipe(takeUntil(this._destroyed)).subscribe(observer)));\n }\n ngOnInit() {\n this.scrollDispatcher.register(this);\n }\n ngOnDestroy() {\n this.scrollDispatcher.deregister(this);\n this._destroyed.next();\n this._destroyed.complete();\n }\n /** Returns observable that emits when a scroll event is fired on the host element. */\n elementScrolled() {\n return this._elementScrolled;\n }\n /** Gets the ElementRef for the viewport. */\n getElementRef() {\n return this.elementRef;\n }\n /**\n * Scrolls to the specified offsets. This is a normalized version of the browser's native scrollTo\n * method, since browsers are not consistent about what scrollLeft means in RTL. For this method\n * left and right always refer to the left and right side of the scrolling container irrespective\n * of the layout direction. start and end refer to left and right in an LTR context and vice-versa\n * in an RTL context.\n * @param options specified the offsets to scroll to.\n */\n scrollTo(options) {\n const el = this.elementRef.nativeElement;\n const isRtl = this.dir && this.dir.value == 'rtl';\n // Rewrite start & end offsets as right or left offsets.\n if (options.left == null) {\n options.left = isRtl ? options.end : options.start;\n }\n if (options.right == null) {\n options.right = isRtl ? options.start : options.end;\n }\n // Rewrite the bottom offset as a top offset.\n if (options.bottom != null) {\n options.top = el.scrollHeight - el.clientHeight - options.bottom;\n }\n // Rewrite the right offset as a left offset.\n if (isRtl && getRtlScrollAxisType() != RtlScrollAxisType.NORMAL) {\n if (options.left != null) {\n options.right = el.scrollWidth - el.clientWidth - options.left;\n }\n if (getRtlScrollAxisType() == RtlScrollAxisType.INVERTED) {\n options.left = options.right;\n } else if (getRtlScrollAxisType() == RtlScrollAxisType.NEGATED) {\n options.left = options.right ? 
-options.right : options.right;\n }\n } else {\n if (options.right != null) {\n options.left = el.scrollWidth - el.clientWidth - options.right;\n }\n }\n this._applyScrollToOptions(options);\n }\n _applyScrollToOptions(options) {\n const el = this.elementRef.nativeElement;\n if (supportsScrollBehavior()) {\n el.scrollTo(options);\n } else {\n if (options.top != null) {\n el.scrollTop = options.top;\n }\n if (options.left != null) {\n el.scrollLeft = options.left;\n }\n }\n }\n /**\n * Measures the scroll offset relative to the specified edge of the viewport. This method can be\n * used instead of directly checking scrollLeft or scrollTop, since browsers are not consistent\n * about what scrollLeft means in RTL. The values returned by this method are normalized such that\n * left and right always refer to the left and right side of the scrolling container irrespective\n * of the layout direction. start and end refer to left and right in an LTR context and vice-versa\n * in an RTL context.\n * @param from The edge to measure from.\n */\n measureScrollOffset(from) {\n const LEFT = 'left';\n const RIGHT = 'right';\n const el = this.elementRef.nativeElement;\n if (from == 'top') {\n return el.scrollTop;\n }\n if (from == 'bottom') {\n return el.scrollHeight - el.clientHeight - el.scrollTop;\n }\n // Rewrite start & end as left or right offsets.\n const isRtl = this.dir && this.dir.value == 'rtl';\n if (from == 'start') {\n from = isRtl ? RIGHT : LEFT;\n } else if (from == 'end') {\n from = isRtl ? LEFT : RIGHT;\n }\n if (isRtl && getRtlScrollAxisType() == RtlScrollAxisType.INVERTED) {\n // For INVERTED, scrollLeft is (scrollWidth - clientWidth) when scrolled all the way left and\n // 0 when scrolled all the way right.\n if (from == LEFT) {\n return el.scrollWidth - el.clientWidth - el.scrollLeft;\n } else {\n return el.scrollLeft;\n }\n } else if (isRtl && getRtlScrollAxisType() == RtlScrollAxisType.NEGATED) {\n // For NEGATED, scrollLeft is -(scrollWidth - clientWidth) when scrolled all the way left and\n // 0 when scrolled all the way right.\n if (from == LEFT) {\n return el.scrollLeft + el.scrollWidth - el.clientWidth;\n } else {\n return -el.scrollLeft;\n }\n } else {\n // For NORMAL, as well as non-RTL contexts, scrollLeft is 0 when scrolled all the way left and\n // (scrollWidth - clientWidth) when scrolled all the way right.\n if (from == LEFT) {\n return el.scrollLeft;\n } else {\n return el.scrollWidth - el.clientWidth - el.scrollLeft;\n }\n }\n }\n static {\n this.ɵfac = function CdkScrollable_Factory(t) {\n return new (t || CdkScrollable)(i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(ScrollDispatcher), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(i2.Directionality, 8));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkScrollable,\n selectors: [[\"\", \"cdk-scrollable\", \"\"], [\"\", \"cdkScrollable\", \"\"]],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkScrollable, [{\n type: Directive,\n args: [{\n selector: '[cdk-scrollable], [cdkScrollable]',\n standalone: true\n }]\n }], () => [{\n type: i0.ElementRef\n }, {\n type: ScrollDispatcher\n }, {\n type: i0.NgZone\n }, {\n type: i2.Directionality,\n decorators: [{\n type: Optional\n }]\n }], null);\n})();\n\n/** Time in ms to throttle the resize events by default. 
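Used as the default throttle for `ViewportRuler.change()`.\n *\n * A hypothetical consumer sketch (the `ViewportSizeLogger` class and its logging callback are illustrative, not part of this file):\n * ```ts\n * import {ViewportRuler} from '@angular/cdk/scrolling';\n *\n * class ViewportSizeLogger {\n *   constructor(private _ruler: ViewportRuler) {\n *     // Emits outside the Angular zone, audited to at most one event per 20ms by default.\n *     this._ruler.change().subscribe(() => console.log(this._ruler.getViewportSize()));\n *   }\n * }\n * ```\n 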
*/\nconst DEFAULT_RESIZE_TIME = 20;\n/**\n * Simple utility for getting the bounds of the browser viewport.\n * @docs-private\n */\nclass ViewportRuler {\n constructor(_platform, ngZone, document) {\n this._platform = _platform;\n /** Stream of viewport change events. */\n this._change = new Subject();\n /** Event listener that will be used to handle the viewport change events. */\n this._changeListener = event => {\n this._change.next(event);\n };\n this._document = document;\n ngZone.runOutsideAngular(() => {\n if (_platform.isBrowser) {\n const window = this._getWindow();\n // Note that bind the events ourselves, rather than going through something like RxJS's\n // `fromEvent` so that we can ensure that they're bound outside of the NgZone.\n window.addEventListener('resize', this._changeListener);\n window.addEventListener('orientationchange', this._changeListener);\n }\n // Clear the cached position so that the viewport is re-measured next time it is required.\n // We don't need to keep track of the subscription, because it is completed on destroy.\n this.change().subscribe(() => this._viewportSize = null);\n });\n }\n ngOnDestroy() {\n if (this._platform.isBrowser) {\n const window = this._getWindow();\n window.removeEventListener('resize', this._changeListener);\n window.removeEventListener('orientationchange', this._changeListener);\n }\n this._change.complete();\n }\n /** Returns the viewport's width and height. */\n getViewportSize() {\n if (!this._viewportSize) {\n this._updateViewportSize();\n }\n const output = {\n width: this._viewportSize.width,\n height: this._viewportSize.height\n };\n // If we're not on a browser, don't cache the size since it'll be mocked out anyway.\n if (!this._platform.isBrowser) {\n this._viewportSize = null;\n }\n return output;\n }\n /** Gets a DOMRect for the viewport's bounds. */\n getViewportRect() {\n // Use the document element's bounding rect rather than the window scroll properties\n // (e.g. pageYOffset, scrollY) due to in issue in Chrome and IE where window scroll\n // properties and client coordinates (boundingClientRect, clientX/Y, etc.) are in different\n // conceptual viewports. Under most circumstances these viewports are equivalent, but they\n // can disagree when the page is pinch-zoomed (on devices that support touch).\n // See https://bugs.chromium.org/p/chromium/issues/detail?id=489206#c4\n // We use the documentElement instead of the body because, by default (without a css reset)\n // browsers typically give the document body an 8px margin, which is not included in\n // getBoundingClientRect().\n const scrollPosition = this.getViewportScrollPosition();\n const {\n width,\n height\n } = this.getViewportSize();\n return {\n top: scrollPosition.top,\n left: scrollPosition.left,\n bottom: scrollPosition.top + height,\n right: scrollPosition.left + width,\n height,\n width\n };\n }\n /** Gets the (top, left) scroll position of the viewport. */\n getViewportScrollPosition() {\n // While we can get a reference to the fake document\n // during SSR, it doesn't have getBoundingClientRect.\n if (!this._platform.isBrowser) {\n return {\n top: 0,\n left: 0\n };\n }\n // The top-left-corner of the viewport is determined by the scroll position of the document\n // body, normally just (scrollLeft, scrollTop). However, Chrome and Firefox disagree about\n // whether `document.body` or `document.documentElement` is the scrolled element, so reading\n // `scrollTop` and `scrollLeft` is inconsistent. 
However, using the bounding rect of\n // `document.documentElement` works consistently, where the `top` and `left` values will\n // equal negative the scroll position.\n const document = this._document;\n const window = this._getWindow();\n const documentElement = document.documentElement;\n const documentRect = documentElement.getBoundingClientRect();\n const top = -documentRect.top || document.body.scrollTop || window.scrollY || documentElement.scrollTop || 0;\n const left = -documentRect.left || document.body.scrollLeft || window.scrollX || documentElement.scrollLeft || 0;\n return {\n top,\n left\n };\n }\n /**\n * Returns a stream that emits whenever the size of the viewport changes.\n * This stream emits outside of the Angular zone.\n * @param throttleTime Time in milliseconds to throttle the stream.\n */\n change(throttleTime = DEFAULT_RESIZE_TIME) {\n return throttleTime > 0 ? this._change.pipe(auditTime(throttleTime)) : this._change;\n }\n /** Use defaultView of injected document if available or fallback to global window reference */\n _getWindow() {\n return this._document.defaultView || window;\n }\n /** Updates the cached viewport size. */\n _updateViewportSize() {\n const window = this._getWindow();\n this._viewportSize = this._platform.isBrowser ? {\n width: window.innerWidth,\n height: window.innerHeight\n } : {\n width: 0,\n height: 0\n };\n }\n static {\n this.ɵfac = function ViewportRuler_Factory(t) {\n return new (t || ViewportRuler)(i0.ɵɵinject(i1.Platform), i0.ɵɵinject(i0.NgZone), i0.ɵɵinject(DOCUMENT, 8));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: ViewportRuler,\n factory: ViewportRuler.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(ViewportRuler, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: i1.Platform\n }, {\n type: i0.NgZone\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [DOCUMENT]\n }]\n }], null);\n})();\nconst VIRTUAL_SCROLLABLE = new InjectionToken('VIRTUAL_SCROLLABLE');\n/**\n * Extending the {@link CdkScrollable} to be used as scrolling container for virtual scrolling.\n */\nclass CdkVirtualScrollable extends CdkScrollable {\n constructor(elementRef, scrollDispatcher, ngZone, dir) {\n super(elementRef, scrollDispatcher, ngZone, dir);\n }\n /**\n * Measure the viewport size for the provided orientation.\n *\n * @param orientation The orientation to measure the size from.\n */\n measureViewportSize(orientation) {\n const viewportEl = this.elementRef.nativeElement;\n return orientation === 'horizontal' ? viewportEl.clientWidth : viewportEl.clientHeight;\n }\n static {\n this.ɵfac = function CdkVirtualScrollable_Factory(t) {\n return new (t || CdkVirtualScrollable)(i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(ScrollDispatcher), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(i2.Directionality, 8));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkVirtualScrollable,\n features: [i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkVirtualScrollable, [{\n type: Directive\n }], () => [{\n type: i0.ElementRef\n }, {\n type: ScrollDispatcher\n }, {\n type: i0.NgZone\n }, {\n type: i2.Directionality,\n decorators: [{\n type: Optional\n }]\n }], null);\n})();\n\n/** Checks if the given ranges are equal. 
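A quick illustration (the values are arbitrary):\n * ```ts\n * rangesEqual({start: 2, end: 7}, {start: 2, end: 7}); // true\n * rangesEqual({start: 2, end: 7}, {start: 2, end: 8}); // false\n * ```\n 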
*/\nfunction rangesEqual(r1, r2) {\n return r1.start == r2.start && r1.end == r2.end;\n}\n/**\n * Scheduler to be used for scroll events. Needs to fall back to\n * something that doesn't rely on requestAnimationFrame on environments\n * that don't support it (e.g. server-side rendering).\n */\nconst SCROLL_SCHEDULER = typeof requestAnimationFrame !== 'undefined' ? animationFrameScheduler : asapScheduler;\n/** A viewport that virtualizes its scrolling with the help of `CdkVirtualForOf`. */\nclass CdkVirtualScrollViewport extends CdkVirtualScrollable {\n /** The direction the viewport scrolls. */\n get orientation() {\n return this._orientation;\n }\n set orientation(orientation) {\n if (this._orientation !== orientation) {\n this._orientation = orientation;\n this._calculateSpacerSize();\n }\n }\n constructor(elementRef, _changeDetectorRef, ngZone, _scrollStrategy, dir, scrollDispatcher, viewportRuler, scrollable) {\n super(elementRef, scrollDispatcher, ngZone, dir);\n this.elementRef = elementRef;\n this._changeDetectorRef = _changeDetectorRef;\n this._scrollStrategy = _scrollStrategy;\n this.scrollable = scrollable;\n this._platform = inject(Platform);\n /** Emits when the viewport is detached from a CdkVirtualForOf. */\n this._detachedSubject = new Subject();\n /** Emits when the rendered range changes. */\n this._renderedRangeSubject = new Subject();\n this._orientation = 'vertical';\n /**\n * Whether rendered items should persist in the DOM after scrolling out of view. By default, items\n * will be removed.\n */\n this.appendOnly = false;\n // Note: we don't use the typical EventEmitter here because we need to subscribe to the scroll\n // strategy lazily (i.e. only if the user is actually listening to the events). We do this because\n // depending on how the strategy calculates the scrolled index, it may come at a cost to\n // performance.\n /** Emits when the index of the first element visible in the viewport changes. */\n this.scrolledIndexChange = new Observable(observer => this._scrollStrategy.scrolledIndexChange.subscribe(index => Promise.resolve().then(() => this.ngZone.run(() => observer.next(index)))));\n /** A stream that emits whenever the rendered range changes. */\n this.renderedRangeStream = this._renderedRangeSubject;\n /**\n * The total size of all content (in pixels), including content that is not currently rendered.\n */\n this._totalContentSize = 0;\n /** A string representing the `style.width` property value to be used for the spacer element. */\n this._totalContentWidth = '';\n /** A string representing the `style.height` property value to be used for the spacer element. */\n this._totalContentHeight = '';\n /** The currently rendered range of indices. */\n this._renderedRange = {\n start: 0,\n end: 0\n };\n /** The length of the data bound to this viewport (in number of items). */\n this._dataLength = 0;\n /** The size of the viewport (in pixels). */\n this._viewportSize = 0;\n /** The last rendered content offset that was set. */\n this._renderedContentOffset = 0;\n /**\n * Whether the last rendered content offset was to the end of the content (and therefore needs to\n * be rewritten as an offset to the start of the content).\n */\n this._renderedContentOffsetNeedsRewrite = false;\n /** Whether there is a pending change detection cycle. */\n this._isChangeDetectionPending = false;\n /** A list of functions to run after the next change detection cycle. */\n this._runAfterChangeDetection = [];\n /** Subscription to changes in the viewport size. 
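Starts out as `Subscription.EMPTY` and is replaced in the constructor with a subscription to `ViewportRuler.change()`, which calls `checkViewportSize()` whenever the viewport is resized. 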
*/\n this._viewportChanges = Subscription.EMPTY;\n if (!_scrollStrategy && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw Error('Error: cdk-virtual-scroll-viewport requires the \"itemSize\" property to be set.');\n }\n this._viewportChanges = viewportRuler.change().subscribe(() => {\n this.checkViewportSize();\n });\n if (!this.scrollable) {\n // No scrollable is provided, so the virtual-scroll-viewport needs to become a scrollable\n this.elementRef.nativeElement.classList.add('cdk-virtual-scrollable');\n this.scrollable = this;\n }\n }\n ngOnInit() {\n // Scrolling depends on the element dimensions which we can't get during SSR.\n if (!this._platform.isBrowser) {\n return;\n }\n if (this.scrollable === this) {\n super.ngOnInit();\n }\n // It's still too early to measure the viewport at this point. Deferring with a promise allows\n // the Viewport to be rendered with the correct size before we measure. We run this outside the\n // zone to avoid causing more change detection cycles. We handle the change detection loop\n // ourselves instead.\n this.ngZone.runOutsideAngular(() => Promise.resolve().then(() => {\n this._measureViewportSize();\n this._scrollStrategy.attach(this);\n this.scrollable.elementScrolled().pipe(\n // Start off with a fake scroll event so we properly detect our initial position.\n startWith(null),\n // Collect multiple events into one until the next animation frame. This way if\n // there are multiple scroll events in the same frame we only need to recheck\n // our layout once.\n auditTime(0, SCROLL_SCHEDULER),\n // Usually `elementScrolled` is completed when the scrollable is destroyed, but\n // that may not be the case if a `CdkVirtualScrollableElement` is used so we have\n // to unsubscribe here just in case.\n takeUntil(this._destroyed)).subscribe(() => this._scrollStrategy.onContentScrolled());\n this._markChangeDetectionNeeded();\n }));\n }\n ngOnDestroy() {\n this.detach();\n this._scrollStrategy.detach();\n // Complete all subjects\n this._renderedRangeSubject.complete();\n this._detachedSubject.complete();\n this._viewportChanges.unsubscribe();\n super.ngOnDestroy();\n }\n /** Attaches a `CdkVirtualScrollRepeater` to this viewport. */\n attach(forOf) {\n if (this._forOf && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw Error('CdkVirtualScrollViewport is already attached.');\n }\n // Subscribe to the data stream of the CdkVirtualForOf to keep track of when the data length\n // changes. Run outside the zone to avoid triggering change detection, since we're managing the\n // change detection loop ourselves.\n this.ngZone.runOutsideAngular(() => {\n this._forOf = forOf;\n this._forOf.dataStream.pipe(takeUntil(this._detachedSubject)).subscribe(data => {\n const newLength = data.length;\n if (newLength !== this._dataLength) {\n this._dataLength = newLength;\n this._scrollStrategy.onDataLengthChanged();\n }\n this._doChangeDetection();\n });\n });\n }\n /** Detaches the current `CdkVirtualForOf`. */\n detach() {\n this._forOf = null;\n this._detachedSubject.next();\n }\n /** Gets the length of the data bound to this viewport (in number of items). */\n getDataLength() {\n return this._dataLength;\n }\n /** Gets the size of the viewport (in pixels). */\n getViewportSize() {\n return this._viewportSize;\n }\n // TODO(mmalerba): This is technically out of sync with what's really rendered until a render\n // cycle happens. 
I'm being careful to only call it after the render cycle is complete and before\n // setting it to something else, but its error prone and should probably be split into\n // `pendingRange` and `renderedRange`, the latter reflecting whats actually in the DOM.\n /** Get the current rendered range of items. */\n getRenderedRange() {\n return this._renderedRange;\n }\n measureBoundingClientRectWithScrollOffset(from) {\n return this.getElementRef().nativeElement.getBoundingClientRect()[from];\n }\n /**\n * Sets the total size of all content (in pixels), including content that is not currently\n * rendered.\n */\n setTotalContentSize(size) {\n if (this._totalContentSize !== size) {\n this._totalContentSize = size;\n this._calculateSpacerSize();\n this._markChangeDetectionNeeded();\n }\n }\n /** Sets the currently rendered range of indices. */\n setRenderedRange(range) {\n if (!rangesEqual(this._renderedRange, range)) {\n if (this.appendOnly) {\n range = {\n start: 0,\n end: Math.max(this._renderedRange.end, range.end)\n };\n }\n this._renderedRangeSubject.next(this._renderedRange = range);\n this._markChangeDetectionNeeded(() => this._scrollStrategy.onContentRendered());\n }\n }\n /**\n * Gets the offset from the start of the viewport to the start of the rendered data (in pixels).\n */\n getOffsetToRenderedContentStart() {\n return this._renderedContentOffsetNeedsRewrite ? null : this._renderedContentOffset;\n }\n /**\n * Sets the offset from the start of the viewport to either the start or end of the rendered data\n * (in pixels).\n */\n setRenderedContentOffset(offset, to = 'to-start') {\n // In appendOnly, we always start from the top\n offset = this.appendOnly && to === 'to-start' ? 0 : offset;\n // For a horizontal viewport in a right-to-left language we need to translate along the x-axis\n // in the negative direction.\n const isRtl = this.dir && this.dir.value == 'rtl';\n const isHorizontal = this.orientation == 'horizontal';\n const axis = isHorizontal ? 'X' : 'Y';\n const axisDirection = isHorizontal && isRtl ? -1 : 1;\n let transform = `translate${axis}(${Number(axisDirection * offset)}px)`;\n this._renderedContentOffset = offset;\n if (to === 'to-end') {\n transform += ` translate${axis}(-100%)`;\n // The viewport should rewrite this as a `to-start` offset on the next render cycle. Otherwise\n // elements will appear to expand in the wrong direction (e.g. `mat-expansion-panel` would\n // expand upward).\n this._renderedContentOffsetNeedsRewrite = true;\n }\n if (this._renderedContentTransform != transform) {\n // We know this value is safe because we parse `offset` with `Number()` before passing it\n // into the string.\n this._renderedContentTransform = transform;\n this._markChangeDetectionNeeded(() => {\n if (this._renderedContentOffsetNeedsRewrite) {\n this._renderedContentOffset -= this.measureRenderedContentSize();\n this._renderedContentOffsetNeedsRewrite = false;\n this.setRenderedContentOffset(this._renderedContentOffset);\n } else {\n this._scrollStrategy.onRenderedOffsetChanged();\n }\n });\n }\n }\n /**\n * Scrolls to the given offset from the start of the viewport. Please note that this is not always\n * the same as setting `scrollTop` or `scrollLeft`. In a horizontal viewport with right-to-left\n * direction, this would be the equivalent of setting a fictional `scrollRight` property.\n * @param offset The offset to scroll to.\n * @param behavior The ScrollBehavior to use when scrolling. 
Default is behavior is `auto`.\n */\n scrollToOffset(offset, behavior = 'auto') {\n const options = {\n behavior\n };\n if (this.orientation === 'horizontal') {\n options.start = offset;\n } else {\n options.top = offset;\n }\n this.scrollable.scrollTo(options);\n }\n /**\n * Scrolls to the offset for the given index.\n * @param index The index of the element to scroll to.\n * @param behavior The ScrollBehavior to use when scrolling. Default is behavior is `auto`.\n */\n scrollToIndex(index, behavior = 'auto') {\n this._scrollStrategy.scrollToIndex(index, behavior);\n }\n /**\n * Gets the current scroll offset from the start of the scrollable (in pixels).\n * @param from The edge to measure the offset from. Defaults to 'top' in vertical mode and 'start'\n * in horizontal mode.\n */\n measureScrollOffset(from) {\n // This is to break the call cycle\n let measureScrollOffset;\n if (this.scrollable == this) {\n measureScrollOffset = _from => super.measureScrollOffset(_from);\n } else {\n measureScrollOffset = _from => this.scrollable.measureScrollOffset(_from);\n }\n return Math.max(0, measureScrollOffset(from ?? (this.orientation === 'horizontal' ? 'start' : 'top')) - this.measureViewportOffset());\n }\n /**\n * Measures the offset of the viewport from the scrolling container\n * @param from The edge to measure from.\n */\n measureViewportOffset(from) {\n let fromRect;\n const LEFT = 'left';\n const RIGHT = 'right';\n const isRtl = this.dir?.value == 'rtl';\n if (from == 'start') {\n fromRect = isRtl ? RIGHT : LEFT;\n } else if (from == 'end') {\n fromRect = isRtl ? LEFT : RIGHT;\n } else if (from) {\n fromRect = from;\n } else {\n fromRect = this.orientation === 'horizontal' ? 'left' : 'top';\n }\n const scrollerClientRect = this.scrollable.measureBoundingClientRectWithScrollOffset(fromRect);\n const viewportClientRect = this.elementRef.nativeElement.getBoundingClientRect()[fromRect];\n return viewportClientRect - scrollerClientRect;\n }\n /** Measure the combined size of all of the rendered items. */\n measureRenderedContentSize() {\n const contentEl = this._contentWrapper.nativeElement;\n return this.orientation === 'horizontal' ? contentEl.offsetWidth : contentEl.offsetHeight;\n }\n /**\n * Measure the total combined size of the given range. Throws if the range includes items that are\n * not rendered.\n */\n measureRangeSize(range) {\n if (!this._forOf) {\n return 0;\n }\n return this._forOf.measureRangeSize(range, this.orientation);\n }\n /** Update the viewport dimensions and re-render. */\n checkViewportSize() {\n // TODO: Cleanup later when add logic for handling content resize\n this._measureViewportSize();\n this._scrollStrategy.onDataLengthChanged();\n }\n /** Measure the viewport size. */\n _measureViewportSize() {\n this._viewportSize = this.scrollable.measureViewportSize(this.orientation);\n }\n /** Queue up change detection to run. */\n _markChangeDetectionNeeded(runAfter) {\n if (runAfter) {\n this._runAfterChangeDetection.push(runAfter);\n }\n // Use a Promise to batch together calls to `_doChangeDetection`. This way if we set a bunch of\n // properties sequentially we only have to run `_doChangeDetection` once at the end.\n if (!this._isChangeDetectionPending) {\n this._isChangeDetectionPending = true;\n this.ngZone.runOutsideAngular(() => Promise.resolve().then(() => {\n this._doChangeDetection();\n }));\n }\n }\n /** Run change detection. */\n _doChangeDetection() {\n this._isChangeDetectionPending = false;\n // Apply the content transform. 
The transform can't be set via an Angular binding because\n // bypassSecurityTrustStyle is banned in Google. However the value is safe, it's composed of\n // string literals, a variable that can only be 'X' or 'Y', and user input that is run through\n // the `Number` function first to coerce it to a numeric value.\n this._contentWrapper.nativeElement.style.transform = this._renderedContentTransform;\n // Apply changes to Angular bindings. Note: We must call `markForCheck` to run change detection\n // from the root, since the repeated items are content projected in. Calling `detectChanges`\n // instead does not properly check the projected content.\n this.ngZone.run(() => this._changeDetectorRef.markForCheck());\n const runAfterChangeDetection = this._runAfterChangeDetection;\n this._runAfterChangeDetection = [];\n for (const fn of runAfterChangeDetection) {\n fn();\n }\n }\n /** Calculates the `style.width` and `style.height` for the spacer element. */\n _calculateSpacerSize() {\n this._totalContentHeight = this.orientation === 'horizontal' ? '' : `${this._totalContentSize}px`;\n this._totalContentWidth = this.orientation === 'horizontal' ? `${this._totalContentSize}px` : '';\n }\n static {\n this.ɵfac = function CdkVirtualScrollViewport_Factory(t) {\n return new (t || CdkVirtualScrollViewport)(i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(i0.ChangeDetectorRef), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(VIRTUAL_SCROLL_STRATEGY, 8), i0.ɵɵdirectiveInject(i2.Directionality, 8), i0.ɵɵdirectiveInject(ScrollDispatcher), i0.ɵɵdirectiveInject(ViewportRuler), i0.ɵɵdirectiveInject(VIRTUAL_SCROLLABLE, 8));\n };\n }\n static {\n this.ɵcmp = /* @__PURE__ */i0.ɵɵdefineComponent({\n type: CdkVirtualScrollViewport,\n selectors: [[\"cdk-virtual-scroll-viewport\"]],\n viewQuery: function CdkVirtualScrollViewport_Query(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵviewQuery(_c0, 7);\n }\n if (rf & 2) {\n let _t;\n i0.ɵɵqueryRefresh(_t = i0.ɵɵloadQuery()) && (ctx._contentWrapper = _t.first);\n }\n },\n hostAttrs: [1, \"cdk-virtual-scroll-viewport\"],\n hostVars: 4,\n hostBindings: function CdkVirtualScrollViewport_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵclassProp(\"cdk-virtual-scroll-orientation-horizontal\", ctx.orientation === \"horizontal\")(\"cdk-virtual-scroll-orientation-vertical\", ctx.orientation !== \"horizontal\");\n }\n },\n inputs: {\n orientation: \"orientation\",\n appendOnly: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"appendOnly\", \"appendOnly\", booleanAttribute]\n },\n outputs: {\n scrolledIndexChange: \"scrolledIndexChange\"\n },\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: CdkScrollable,\n useFactory: (virtualScrollable, viewport) => virtualScrollable || viewport,\n deps: [[new Optional(), new Inject(VIRTUAL_SCROLLABLE)], CdkVirtualScrollViewport]\n }]), i0.ɵɵInputTransformsFeature, i0.ɵɵInheritDefinitionFeature, i0.ɵɵStandaloneFeature],\n ngContentSelectors: _c1,\n decls: 4,\n vars: 4,\n consts: [[\"contentWrapper\", \"\"], [1, \"cdk-virtual-scroll-content-wrapper\"], [1, \"cdk-virtual-scroll-spacer\"]],\n template: function CdkVirtualScrollViewport_Template(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵprojectionDef();\n i0.ɵɵelementStart(0, \"div\", 1, 0);\n i0.ɵɵprojection(2);\n i0.ɵɵelementEnd();\n i0.ɵɵelement(3, \"div\", 2);\n }\n if (rf & 2) {\n i0.ɵɵadvance(3);\n i0.ɵɵstyleProp(\"width\", ctx._totalContentWidth)(\"height\", ctx._totalContentHeight);\n }\n },\n styles: 
[\"cdk-virtual-scroll-viewport{display:block;position:relative;transform:translateZ(0)}.cdk-virtual-scrollable{overflow:auto;will-change:scroll-position;contain:strict;-webkit-overflow-scrolling:touch}.cdk-virtual-scroll-content-wrapper{position:absolute;top:0;left:0;contain:content}[dir=rtl] .cdk-virtual-scroll-content-wrapper{right:0;left:auto}.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper{min-height:100%}.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>dl:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>ol:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>table:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>ul:not([cdkVirtualFor]){padding-left:0;padding-right:0;margin-left:0;margin-right:0;border-left-width:0;border-right-width:0;outline:none}.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper{min-width:100%}.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>dl:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>ol:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>table:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>ul:not([cdkVirtualFor]){padding-top:0;padding-bottom:0;margin-top:0;margin-bottom:0;border-top-width:0;border-bottom-width:0;outline:none}.cdk-virtual-scroll-spacer{height:1px;transform-origin:0 0;flex:0 0 auto}[dir=rtl] .cdk-virtual-scroll-spacer{transform-origin:100% 0}\"],\n encapsulation: 2,\n changeDetection: 0\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkVirtualScrollViewport, [{\n type: Component,\n args: [{\n selector: 'cdk-virtual-scroll-viewport',\n host: {\n 'class': 'cdk-virtual-scroll-viewport',\n '[class.cdk-virtual-scroll-orientation-horizontal]': 'orientation === \"horizontal\"',\n '[class.cdk-virtual-scroll-orientation-vertical]': 'orientation !== \"horizontal\"'\n },\n encapsulation: ViewEncapsulation.None,\n changeDetection: ChangeDetectionStrategy.OnPush,\n standalone: true,\n providers: [{\n provide: CdkScrollable,\n useFactory: (virtualScrollable, viewport) => virtualScrollable || viewport,\n deps: [[new Optional(), new Inject(VIRTUAL_SCROLLABLE)], CdkVirtualScrollViewport]\n }],\n template: \"\\n\\n \\n
\\n\\n
\\n\",\n styles: [\"cdk-virtual-scroll-viewport{display:block;position:relative;transform:translateZ(0)}.cdk-virtual-scrollable{overflow:auto;will-change:scroll-position;contain:strict;-webkit-overflow-scrolling:touch}.cdk-virtual-scroll-content-wrapper{position:absolute;top:0;left:0;contain:content}[dir=rtl] .cdk-virtual-scroll-content-wrapper{right:0;left:auto}.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper{min-height:100%}.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>dl:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>ol:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>table:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-horizontal .cdk-virtual-scroll-content-wrapper>ul:not([cdkVirtualFor]){padding-left:0;padding-right:0;margin-left:0;margin-right:0;border-left-width:0;border-right-width:0;outline:none}.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper{min-width:100%}.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>dl:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>ol:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>table:not([cdkVirtualFor]),.cdk-virtual-scroll-orientation-vertical .cdk-virtual-scroll-content-wrapper>ul:not([cdkVirtualFor]){padding-top:0;padding-bottom:0;margin-top:0;margin-bottom:0;border-top-width:0;border-bottom-width:0;outline:none}.cdk-virtual-scroll-spacer{height:1px;transform-origin:0 0;flex:0 0 auto}[dir=rtl] .cdk-virtual-scroll-spacer{transform-origin:100% 0}\"]\n }]\n }], () => [{\n type: i0.ElementRef\n }, {\n type: i0.ChangeDetectorRef\n }, {\n type: i0.NgZone\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [VIRTUAL_SCROLL_STRATEGY]\n }]\n }, {\n type: i2.Directionality,\n decorators: [{\n type: Optional\n }]\n }, {\n type: ScrollDispatcher\n }, {\n type: ViewportRuler\n }, {\n type: CdkVirtualScrollable,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [VIRTUAL_SCROLLABLE]\n }]\n }], {\n orientation: [{\n type: Input\n }],\n appendOnly: [{\n type: Input,\n args: [{\n transform: booleanAttribute\n }]\n }],\n scrolledIndexChange: [{\n type: Output\n }],\n _contentWrapper: [{\n type: ViewChild,\n args: ['contentWrapper', {\n static: true\n }]\n }]\n });\n})();\n\n/** Helper to extract the offset of a DOM Node in a certain direction. */\nfunction getOffset(orientation, direction, node) {\n const el = node;\n if (!el.getBoundingClientRect) {\n return 0;\n }\n const rect = el.getBoundingClientRect();\n if (orientation === 'horizontal') {\n return direction === 'start' ? rect.left : rect.right;\n }\n return direction === 'start' ? rect.top : rect.bottom;\n}\n/**\n * A directive similar to `ngForOf` to be used for rendering data inside a virtual scrolling\n * container.\n */\nclass CdkVirtualForOf {\n /** The DataSource to display. */\n get cdkVirtualForOf() {\n return this._cdkVirtualForOf;\n }\n set cdkVirtualForOf(value) {\n this._cdkVirtualForOf = value;\n if (isDataSource(value)) {\n this._dataSourceChanges.next(value);\n } else {\n // If value is an an NgIterable, convert it to an array.\n this._dataSourceChanges.next(new ArrayDataSource(isObservable(value) ? value : Array.from(value || [])));\n }\n }\n /**\n * The `TrackByFunction` to use for tracking changes. 
The `TrackByFunction` takes the index and\n * the item and produces a value to be used as the item's identity when tracking changes.\n */\n get cdkVirtualForTrackBy() {\n return this._cdkVirtualForTrackBy;\n }\n set cdkVirtualForTrackBy(fn) {\n this._needsUpdate = true;\n this._cdkVirtualForTrackBy = fn ? (index, item) => fn(index + (this._renderedRange ? this._renderedRange.start : 0), item) : undefined;\n }\n /** The template used to stamp out new elements. */\n set cdkVirtualForTemplate(value) {\n if (value) {\n this._needsUpdate = true;\n this._template = value;\n }\n }\n /**\n * The size of the cache used to store templates that are not being used for re-use later.\n * Setting the cache size to `0` will disable caching. Defaults to 20 templates.\n */\n get cdkVirtualForTemplateCacheSize() {\n return this._viewRepeater.viewCacheSize;\n }\n set cdkVirtualForTemplateCacheSize(size) {\n this._viewRepeater.viewCacheSize = coerceNumberProperty(size);\n }\n constructor( /** The view container to add items to. */\n _viewContainerRef, /** The template to use when stamping out new items. */\n _template, /** The set of available differs. */\n _differs, /** The strategy used to render items in the virtual scroll viewport. */\n _viewRepeater, /** The virtual scrolling viewport that these items are being rendered in. */\n _viewport, ngZone) {\n this._viewContainerRef = _viewContainerRef;\n this._template = _template;\n this._differs = _differs;\n this._viewRepeater = _viewRepeater;\n this._viewport = _viewport;\n /** Emits when the rendered view of the data changes. */\n this.viewChange = new Subject();\n /** Subject that emits when a new DataSource instance is given. */\n this._dataSourceChanges = new Subject();\n /** Emits whenever the data in the current DataSource changes. */\n this.dataStream = this._dataSourceChanges.pipe(\n // Start off with null `DataSource`.\n startWith(null),\n // Bundle up the previous and current data sources so we can work with both.\n pairwise(),\n // Use `_changeDataSource` to disconnect from the previous data source and connect to the\n // new one, passing back a stream of data changes which we run through `switchMap` to give\n // us a data stream that emits the latest data from whatever the current `DataSource` is.\n switchMap(([prev, cur]) => this._changeDataSource(prev, cur)),\n // Replay the last emitted data when someone subscribes.\n shareReplay(1));\n /** The differ used to calculate changes to the data. */\n this._differ = null;\n /** Whether the rendered data should be updated during the next ngDoCheck cycle. */\n this._needsUpdate = false;\n this._destroyed = new Subject();\n this.dataStream.subscribe(data => {\n this._data = data;\n this._onRenderedDataChange();\n });\n this._viewport.renderedRangeStream.pipe(takeUntil(this._destroyed)).subscribe(range => {\n this._renderedRange = range;\n if (this.viewChange.observers.length) {\n ngZone.run(() => this.viewChange.next(this._renderedRange));\n }\n this._onRenderedDataChange();\n });\n this._viewport.attach(this);\n }\n /**\n * Measures the combined size (width for horizontal orientation, height for vertical) of all items\n * in the specified range. 
Throws an error if the range includes items that are not currently\n * rendered.\n */\n measureRangeSize(range, orientation) {\n if (range.start >= range.end) {\n return 0;\n }\n if ((range.start < this._renderedRange.start || range.end > this._renderedRange.end) && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw Error(`Error: attempted to measure an item that isn't rendered.`);\n }\n // The index into the list of rendered views for the first item in the range.\n const renderedStartIndex = range.start - this._renderedRange.start;\n // The length of the range we're measuring.\n const rangeLen = range.end - range.start;\n // Loop over all the views, find the first and land node and compute the size by subtracting\n // the top of the first node from the bottom of the last one.\n let firstNode;\n let lastNode;\n // Find the first node by starting from the beginning and going forwards.\n for (let i = 0; i < rangeLen; i++) {\n const view = this._viewContainerRef.get(i + renderedStartIndex);\n if (view && view.rootNodes.length) {\n firstNode = lastNode = view.rootNodes[0];\n break;\n }\n }\n // Find the last node by starting from the end and going backwards.\n for (let i = rangeLen - 1; i > -1; i--) {\n const view = this._viewContainerRef.get(i + renderedStartIndex);\n if (view && view.rootNodes.length) {\n lastNode = view.rootNodes[view.rootNodes.length - 1];\n break;\n }\n }\n return firstNode && lastNode ? getOffset(orientation, 'end', lastNode) - getOffset(orientation, 'start', firstNode) : 0;\n }\n ngDoCheck() {\n if (this._differ && this._needsUpdate) {\n // TODO(mmalerba): We should differentiate needs update due to scrolling and a new portion of\n // this list being rendered (can use simpler algorithm) vs needs update due to data actually\n // changing (need to do this diff).\n const changes = this._differ.diff(this._renderedItems);\n if (!changes) {\n this._updateContext();\n } else {\n this._applyChanges(changes);\n }\n this._needsUpdate = false;\n }\n }\n ngOnDestroy() {\n this._viewport.detach();\n this._dataSourceChanges.next(undefined);\n this._dataSourceChanges.complete();\n this.viewChange.complete();\n this._destroyed.next();\n this._destroyed.complete();\n this._viewRepeater.detach();\n }\n /** React to scroll state changes in the viewport. */\n _onRenderedDataChange() {\n if (!this._renderedRange) {\n return;\n }\n this._renderedItems = this._data.slice(this._renderedRange.start, this._renderedRange.end);\n if (!this._differ) {\n // Use a wrapper function for the `trackBy` so any new values are\n // picked up automatically without having to recreate the differ.\n this._differ = this._differs.find(this._renderedItems).create((index, item) => {\n return this.cdkVirtualForTrackBy ? this.cdkVirtualForTrackBy(index, item) : item;\n });\n }\n this._needsUpdate = true;\n }\n /** Swap out one `DataSource` for another. */\n _changeDataSource(oldDs, newDs) {\n if (oldDs) {\n oldDs.disconnect(this);\n }\n this._needsUpdate = true;\n return newDs ? newDs.connect(this) : of();\n }\n /** Update the `CdkVirtualForOfContext` for all views. */\n _updateContext() {\n const count = this._data.length;\n let i = this._viewContainerRef.length;\n while (i--) {\n const view = this._viewContainerRef.get(i);\n view.context.index = this._renderedRange.start + i;\n view.context.count = count;\n this._updateComputedContextProperties(view.context);\n view.detectChanges();\n }\n }\n /** Apply changes to the DOM. 
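Delegates view creation, moving and removal to the injected view repeater (a `_RecycleViewRepeaterStrategy` per this directive's providers), then refreshes `$implicit` for identity changes and recomputes the context fields on every rendered view. 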
*/\n _applyChanges(changes) {\n this._viewRepeater.applyChanges(changes, this._viewContainerRef, (record, _adjustedPreviousIndex, currentIndex) => this._getEmbeddedViewArgs(record, currentIndex), record => record.item);\n // Update $implicit for any items that had an identity change.\n changes.forEachIdentityChange(record => {\n const view = this._viewContainerRef.get(record.currentIndex);\n view.context.$implicit = record.item;\n });\n // Update the context variables on all items.\n const count = this._data.length;\n let i = this._viewContainerRef.length;\n while (i--) {\n const view = this._viewContainerRef.get(i);\n view.context.index = this._renderedRange.start + i;\n view.context.count = count;\n this._updateComputedContextProperties(view.context);\n }\n }\n /** Update the computed properties on the `CdkVirtualForOfContext`. */\n _updateComputedContextProperties(context) {\n context.first = context.index === 0;\n context.last = context.index === context.count - 1;\n context.even = context.index % 2 === 0;\n context.odd = !context.even;\n }\n _getEmbeddedViewArgs(record, index) {\n // Note that it's important that we insert the item directly at the proper index,\n // rather than inserting it and the moving it in place, because if there's a directive\n // on the same node that injects the `ViewContainerRef`, Angular will insert another\n // comment node which can throw off the move when it's being repeated for all items.\n return {\n templateRef: this._template,\n context: {\n $implicit: record.item,\n // It's guaranteed that the iterable is not \"undefined\" or \"null\" because we only\n // generate views for elements if the \"cdkVirtualForOf\" iterable has elements.\n cdkVirtualForOf: this._cdkVirtualForOf,\n index: -1,\n count: -1,\n first: false,\n last: false,\n odd: false,\n even: false\n },\n index\n };\n }\n static {\n this.ɵfac = function CdkVirtualForOf_Factory(t) {\n return new (t || CdkVirtualForOf)(i0.ɵɵdirectiveInject(i0.ViewContainerRef), i0.ɵɵdirectiveInject(i0.TemplateRef), i0.ɵɵdirectiveInject(i0.IterableDiffers), i0.ɵɵdirectiveInject(_VIEW_REPEATER_STRATEGY), i0.ɵɵdirectiveInject(CdkVirtualScrollViewport, 4), i0.ɵɵdirectiveInject(i0.NgZone));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkVirtualForOf,\n selectors: [[\"\", \"cdkVirtualFor\", \"\", \"cdkVirtualForOf\", \"\"]],\n inputs: {\n cdkVirtualForOf: \"cdkVirtualForOf\",\n cdkVirtualForTrackBy: \"cdkVirtualForTrackBy\",\n cdkVirtualForTemplate: \"cdkVirtualForTemplate\",\n cdkVirtualForTemplateCacheSize: \"cdkVirtualForTemplateCacheSize\"\n },\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: _VIEW_REPEATER_STRATEGY,\n useClass: _RecycleViewRepeaterStrategy\n }])]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkVirtualForOf, [{\n type: Directive,\n args: [{\n selector: '[cdkVirtualFor][cdkVirtualForOf]',\n providers: [{\n provide: _VIEW_REPEATER_STRATEGY,\n useClass: _RecycleViewRepeaterStrategy\n }],\n standalone: true\n }]\n }], () => [{\n type: i0.ViewContainerRef\n }, {\n type: i0.TemplateRef\n }, {\n type: i0.IterableDiffers\n }, {\n type: i2$1._RecycleViewRepeaterStrategy,\n decorators: [{\n type: Inject,\n args: [_VIEW_REPEATER_STRATEGY]\n }]\n }, {\n type: CdkVirtualScrollViewport,\n decorators: [{\n type: SkipSelf\n }]\n }, {\n type: i0.NgZone\n }], {\n cdkVirtualForOf: [{\n type: Input\n }],\n cdkVirtualForTrackBy: [{\n type: Input\n }],\n cdkVirtualForTemplate: [{\n type: Input\n }],\n 
cdkVirtualForTemplateCacheSize: [{\n type: Input\n }]\n });\n})();\n\n/**\n * Provides a virtual scrollable for the element it is attached to.\n */\nclass CdkVirtualScrollableElement extends CdkVirtualScrollable {\n constructor(elementRef, scrollDispatcher, ngZone, dir) {\n super(elementRef, scrollDispatcher, ngZone, dir);\n }\n measureBoundingClientRectWithScrollOffset(from) {\n return this.getElementRef().nativeElement.getBoundingClientRect()[from] - this.measureScrollOffset(from);\n }\n static {\n this.ɵfac = function CdkVirtualScrollableElement_Factory(t) {\n return new (t || CdkVirtualScrollableElement)(i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(ScrollDispatcher), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(i2.Directionality, 8));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkVirtualScrollableElement,\n selectors: [[\"\", \"cdkVirtualScrollingElement\", \"\"]],\n hostAttrs: [1, \"cdk-virtual-scrollable\"],\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: VIRTUAL_SCROLLABLE,\n useExisting: CdkVirtualScrollableElement\n }]), i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkVirtualScrollableElement, [{\n type: Directive,\n args: [{\n selector: '[cdkVirtualScrollingElement]',\n providers: [{\n provide: VIRTUAL_SCROLLABLE,\n useExisting: CdkVirtualScrollableElement\n }],\n standalone: true,\n host: {\n 'class': 'cdk-virtual-scrollable'\n }\n }]\n }], () => [{\n type: i0.ElementRef\n }, {\n type: ScrollDispatcher\n }, {\n type: i0.NgZone\n }, {\n type: i2.Directionality,\n decorators: [{\n type: Optional\n }]\n }], null);\n})();\n\n/**\n * Provides as virtual scrollable for the global / window scrollbar.\n */\nclass CdkVirtualScrollableWindow extends CdkVirtualScrollable {\n constructor(scrollDispatcher, ngZone, dir) {\n super(new ElementRef(document.documentElement), scrollDispatcher, ngZone, dir);\n this._elementScrolled = new Observable(observer => this.ngZone.runOutsideAngular(() => fromEvent(document, 'scroll').pipe(takeUntil(this._destroyed)).subscribe(observer)));\n }\n measureBoundingClientRectWithScrollOffset(from) {\n return this.getElementRef().nativeElement.getBoundingClientRect()[from];\n }\n static {\n this.ɵfac = function CdkVirtualScrollableWindow_Factory(t) {\n return new (t || CdkVirtualScrollableWindow)(i0.ɵɵdirectiveInject(ScrollDispatcher), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(i2.Directionality, 8));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkVirtualScrollableWindow,\n selectors: [[\"cdk-virtual-scroll-viewport\", \"scrollWindow\", \"\"]],\n standalone: true,\n features: [i0.ɵɵProvidersFeature([{\n provide: VIRTUAL_SCROLLABLE,\n useExisting: CdkVirtualScrollableWindow\n }]), i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkVirtualScrollableWindow, [{\n type: Directive,\n args: [{\n selector: 'cdk-virtual-scroll-viewport[scrollWindow]',\n providers: [{\n provide: VIRTUAL_SCROLLABLE,\n useExisting: CdkVirtualScrollableWindow\n }],\n standalone: true\n }]\n }], () => [{\n type: ScrollDispatcher\n }, {\n type: i0.NgZone\n }, {\n type: i2.Directionality,\n decorators: [{\n type: Optional\n }]\n }], null);\n})();\nclass CdkScrollableModule {\n static {\n this.ɵfac = function CdkScrollableModule_Factory(t) {\n return new (t || CdkScrollableModule)();\n };\n }\n static {\n 
this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: CdkScrollableModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({});\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkScrollableModule, [{\n type: NgModule,\n args: [{\n exports: [CdkScrollable],\n imports: [CdkScrollable]\n }]\n }], null, null);\n})();\n/**\n * @docs-primary-export\n */\nclass ScrollingModule {\n static {\n this.ɵfac = function ScrollingModule_Factory(t) {\n return new (t || ScrollingModule)();\n };\n }\n static {\n this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: ScrollingModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({\n imports: [BidiModule, CdkScrollableModule, BidiModule, CdkScrollableModule]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(ScrollingModule, [{\n type: NgModule,\n args: [{\n imports: [BidiModule, CdkScrollableModule, CdkVirtualScrollViewport, CdkFixedSizeVirtualScroll, CdkVirtualForOf, CdkVirtualScrollableWindow, CdkVirtualScrollableElement],\n exports: [BidiModule, CdkScrollableModule, CdkFixedSizeVirtualScroll, CdkVirtualForOf, CdkVirtualScrollViewport, CdkVirtualScrollableWindow, CdkVirtualScrollableElement]\n }]\n }], null, null);\n})();\n\n/**\n * Generated bundle index. Do not edit.\n */\n\nexport { CdkFixedSizeVirtualScroll, CdkScrollable, CdkScrollableModule, CdkVirtualForOf, CdkVirtualScrollViewport, CdkVirtualScrollable, CdkVirtualScrollableElement, CdkVirtualScrollableWindow, DEFAULT_RESIZE_TIME, DEFAULT_SCROLL_TIME, FixedSizeVirtualScrollStrategy, ScrollDispatcher, ScrollingModule, VIRTUAL_SCROLLABLE, VIRTUAL_SCROLL_STRATEGY, ViewportRuler, _fixedSizeVirtualScrollStrategyFactory };\n","import * as i1 from '@angular/cdk/scrolling';\nimport { ScrollingModule } from '@angular/cdk/scrolling';\nexport { CdkScrollable, ScrollDispatcher, ViewportRuler } from '@angular/cdk/scrolling';\nimport * as i6 from '@angular/common';\nimport { DOCUMENT } from '@angular/common';\nimport * as i0 from '@angular/core';\nimport { Injectable, Inject, Optional, ElementRef, ApplicationRef, ANIMATION_MODULE_TYPE, InjectionToken, inject, Directive, NgZone, EventEmitter, booleanAttribute, Input, Output, NgModule } from '@angular/core';\nimport { coerceCssPixelValue, coerceArray } from '@angular/cdk/coercion';\nimport * as i1$1 from '@angular/cdk/platform';\nimport { supportsScrollBehavior, _getEventTarget, _isTestEnvironment } from '@angular/cdk/platform';\nimport { filter, take, takeUntil, takeWhile } from 'rxjs/operators';\nimport * as i5 from '@angular/cdk/bidi';\nimport { BidiModule } from '@angular/cdk/bidi';\nimport { DomPortalOutlet, TemplatePortal, PortalModule } from '@angular/cdk/portal';\nimport { Subject, Subscription, merge } from 'rxjs';\nimport { ESCAPE, hasModifierKey } from '@angular/cdk/keycodes';\nconst scrollBehaviorSupported = supportsScrollBehavior();\n/**\n * Strategy that will prevent the user from scrolling while the overlay is visible.\n */\nclass BlockScrollStrategy {\n constructor(_viewportRuler, document) {\n this._viewportRuler = _viewportRuler;\n this._previousHTMLStyles = {\n top: '',\n left: ''\n };\n this._isEnabled = false;\n this._document = document;\n }\n /** Attaches this scroll strategy to an overlay. */\n attach() {}\n /** Blocks page-level scroll while the attached overlay is open. 
*/\n enable() {\n if (this._canBeEnabled()) {\n const root = this._document.documentElement;\n this._previousScrollPosition = this._viewportRuler.getViewportScrollPosition();\n // Cache the previous inline styles in case the user had set them.\n this._previousHTMLStyles.left = root.style.left || '';\n this._previousHTMLStyles.top = root.style.top || '';\n // Note: we're using the `html` node, instead of the `body`, because the `body` may\n // have the user agent margin, whereas the `html` is guaranteed not to have one.\n root.style.left = coerceCssPixelValue(-this._previousScrollPosition.left);\n root.style.top = coerceCssPixelValue(-this._previousScrollPosition.top);\n root.classList.add('cdk-global-scrollblock');\n this._isEnabled = true;\n }\n }\n /** Unblocks page-level scroll while the attached overlay is open. */\n disable() {\n if (this._isEnabled) {\n const html = this._document.documentElement;\n const body = this._document.body;\n const htmlStyle = html.style;\n const bodyStyle = body.style;\n const previousHtmlScrollBehavior = htmlStyle.scrollBehavior || '';\n const previousBodyScrollBehavior = bodyStyle.scrollBehavior || '';\n this._isEnabled = false;\n htmlStyle.left = this._previousHTMLStyles.left;\n htmlStyle.top = this._previousHTMLStyles.top;\n html.classList.remove('cdk-global-scrollblock');\n // Disable user-defined smooth scrolling temporarily while we restore the scroll position.\n // See https://developer.mozilla.org/en-US/docs/Web/CSS/scroll-behavior\n // Note that we don't mutate the property if the browser doesn't support `scroll-behavior`,\n // because it can throw off feature detections in `supportsScrollBehavior` which\n // checks for `'scrollBehavior' in documentElement.style`.\n if (scrollBehaviorSupported) {\n htmlStyle.scrollBehavior = bodyStyle.scrollBehavior = 'auto';\n }\n window.scroll(this._previousScrollPosition.left, this._previousScrollPosition.top);\n if (scrollBehaviorSupported) {\n htmlStyle.scrollBehavior = previousHtmlScrollBehavior;\n bodyStyle.scrollBehavior = previousBodyScrollBehavior;\n }\n }\n }\n _canBeEnabled() {\n // Since the scroll strategies can't be singletons, we have to use a global CSS class\n // (`cdk-global-scrollblock`) to make sure that we don't try to disable global\n // scrolling multiple times.\n const html = this._document.documentElement;\n if (html.classList.contains('cdk-global-scrollblock') || this._isEnabled) {\n return false;\n }\n const body = this._document.body;\n const viewport = this._viewportRuler.getViewportSize();\n return body.scrollHeight > viewport.height || body.scrollWidth > viewport.width;\n }\n}\n\n/**\n * Returns an error to be thrown when attempting to attach an already-attached scroll strategy.\n */\nfunction getMatScrollStrategyAlreadyAttachedError() {\n return Error(`Scroll strategy has already been attached.`);\n}\n\n/**\n * Strategy that will close the overlay as soon as the user starts scrolling.\n */\nclass CloseScrollStrategy {\n constructor(_scrollDispatcher, _ngZone, _viewportRuler, _config) {\n this._scrollDispatcher = _scrollDispatcher;\n this._ngZone = _ngZone;\n this._viewportRuler = _viewportRuler;\n this._config = _config;\n this._scrollSubscription = null;\n /** Detaches the overlay ref and disables the scroll strategy. */\n this._detach = () => {\n this.disable();\n if (this._overlayRef.hasAttached()) {\n this._ngZone.run(() => this._overlayRef.detach());\n }\n };\n }\n /** Attaches this scroll strategy to an overlay. 
*/\n attach(overlayRef) {\n if (this._overlayRef && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw getMatScrollStrategyAlreadyAttachedError();\n }\n this._overlayRef = overlayRef;\n }\n /** Enables the closing of the attached overlay on scroll. */\n enable() {\n if (this._scrollSubscription) {\n return;\n }\n const stream = this._scrollDispatcher.scrolled(0).pipe(filter(scrollable => {\n return !scrollable || !this._overlayRef.overlayElement.contains(scrollable.getElementRef().nativeElement);\n }));\n if (this._config && this._config.threshold && this._config.threshold > 1) {\n this._initialScrollPosition = this._viewportRuler.getViewportScrollPosition().top;\n this._scrollSubscription = stream.subscribe(() => {\n const scrollPosition = this._viewportRuler.getViewportScrollPosition().top;\n if (Math.abs(scrollPosition - this._initialScrollPosition) > this._config.threshold) {\n this._detach();\n } else {\n this._overlayRef.updatePosition();\n }\n });\n } else {\n this._scrollSubscription = stream.subscribe(this._detach);\n }\n }\n /** Disables the closing the attached overlay on scroll. */\n disable() {\n if (this._scrollSubscription) {\n this._scrollSubscription.unsubscribe();\n this._scrollSubscription = null;\n }\n }\n detach() {\n this.disable();\n this._overlayRef = null;\n }\n}\n\n/** Scroll strategy that doesn't do anything. */\nclass NoopScrollStrategy {\n /** Does nothing, as this scroll strategy is a no-op. */\n enable() {}\n /** Does nothing, as this scroll strategy is a no-op. */\n disable() {}\n /** Does nothing, as this scroll strategy is a no-op. */\n attach() {}\n}\n\n/**\n * Gets whether an element is scrolled outside of view by any of its parent scrolling containers.\n * @param element Dimensions of the element (from getBoundingClientRect)\n * @param scrollContainers Dimensions of element's scrolling containers (from getBoundingClientRect)\n * @returns Whether the element is scrolled out of view\n * @docs-private\n */\nfunction isElementScrolledOutsideView(element, scrollContainers) {\n return scrollContainers.some(containerBounds => {\n const outsideAbove = element.bottom < containerBounds.top;\n const outsideBelow = element.top > containerBounds.bottom;\n const outsideLeft = element.right < containerBounds.left;\n const outsideRight = element.left > containerBounds.right;\n return outsideAbove || outsideBelow || outsideLeft || outsideRight;\n });\n}\n/**\n * Gets whether an element is clipped by any of its scrolling containers.\n * @param element Dimensions of the element (from getBoundingClientRect)\n * @param scrollContainers Dimensions of element's scrolling containers (from getBoundingClientRect)\n * @returns Whether the element is clipped\n * @docs-private\n */\nfunction isElementClippedByScrolling(element, scrollContainers) {\n return scrollContainers.some(scrollContainerRect => {\n const clippedAbove = element.top < scrollContainerRect.top;\n const clippedBelow = element.bottom > scrollContainerRect.bottom;\n const clippedLeft = element.left < scrollContainerRect.left;\n const clippedRight = element.right > scrollContainerRect.right;\n return clippedAbove || clippedBelow || clippedLeft || clippedRight;\n });\n}\n\n/**\n * Strategy that will update the element position as the user is scrolling.\n */\nclass RepositionScrollStrategy {\n constructor(_scrollDispatcher, _viewportRuler, _ngZone, _config) {\n this._scrollDispatcher = _scrollDispatcher;\n this._viewportRuler = _viewportRuler;\n this._ngZone = _ngZone;\n this._config = _config;\n 
this._scrollSubscription = null;\n }\n /** Attaches this scroll strategy to an overlay. */\n attach(overlayRef) {\n if (this._overlayRef && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw getMatScrollStrategyAlreadyAttachedError();\n }\n this._overlayRef = overlayRef;\n }\n /** Enables repositioning of the attached overlay on scroll. */\n enable() {\n if (!this._scrollSubscription) {\n const throttle = this._config ? this._config.scrollThrottle : 0;\n this._scrollSubscription = this._scrollDispatcher.scrolled(throttle).subscribe(() => {\n this._overlayRef.updatePosition();\n // TODO(crisbeto): make `close` on by default once all components can handle it.\n if (this._config && this._config.autoClose) {\n const overlayRect = this._overlayRef.overlayElement.getBoundingClientRect();\n const {\n width,\n height\n } = this._viewportRuler.getViewportSize();\n // TODO(crisbeto): include all ancestor scroll containers here once\n // we have a way of exposing the trigger element to the scroll strategy.\n const parentRects = [{\n width,\n height,\n bottom: height,\n right: width,\n top: 0,\n left: 0\n }];\n if (isElementScrolledOutsideView(overlayRect, parentRects)) {\n this.disable();\n this._ngZone.run(() => this._overlayRef.detach());\n }\n }\n });\n }\n }\n /** Disables repositioning of the attached overlay on scroll. */\n disable() {\n if (this._scrollSubscription) {\n this._scrollSubscription.unsubscribe();\n this._scrollSubscription = null;\n }\n }\n detach() {\n this.disable();\n this._overlayRef = null;\n }\n}\n\n/**\n * Options for how an overlay will handle scrolling.\n *\n * Users can provide a custom value for `ScrollStrategyOptions` to replace the default\n * behaviors. This class primarily acts as a factory for ScrollStrategy instances.\n */\nclass ScrollStrategyOptions {\n constructor(_scrollDispatcher, _viewportRuler, _ngZone, document) {\n this._scrollDispatcher = _scrollDispatcher;\n this._viewportRuler = _viewportRuler;\n this._ngZone = _ngZone;\n /** Do nothing on scroll. */\n this.noop = () => new NoopScrollStrategy();\n /**\n * Close the overlay as soon as the user scrolls.\n * @param config Configuration to be used inside the scroll strategy.\n */\n this.close = config => new CloseScrollStrategy(this._scrollDispatcher, this._ngZone, this._viewportRuler, config);\n /** Block scrolling. 
*/\n this.block = () => new BlockScrollStrategy(this._viewportRuler, this._document);\n /**\n * Update the overlay's position on scroll.\n * @param config Configuration to be used inside the scroll strategy.\n * Allows debouncing the reposition calls.\n */\n this.reposition = config => new RepositionScrollStrategy(this._scrollDispatcher, this._viewportRuler, this._ngZone, config);\n this._document = document;\n }\n static {\n this.ɵfac = function ScrollStrategyOptions_Factory(t) {\n return new (t || ScrollStrategyOptions)(i0.ɵɵinject(i1.ScrollDispatcher), i0.ɵɵinject(i1.ViewportRuler), i0.ɵɵinject(i0.NgZone), i0.ɵɵinject(DOCUMENT));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: ScrollStrategyOptions,\n factory: ScrollStrategyOptions.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(ScrollStrategyOptions, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: i1.ScrollDispatcher\n }, {\n type: i1.ViewportRuler\n }, {\n type: i0.NgZone\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }], null);\n})();\n\n/** Initial configuration used when creating an overlay. */\nclass OverlayConfig {\n constructor(config) {\n /** Strategy to be used when handling scroll events while the overlay is open. */\n this.scrollStrategy = new NoopScrollStrategy();\n /** Custom class to add to the overlay pane. */\n this.panelClass = '';\n /** Whether the overlay has a backdrop. */\n this.hasBackdrop = false;\n /** Custom class to add to the backdrop */\n this.backdropClass = 'cdk-overlay-dark-backdrop';\n /**\n * Whether the overlay should be disposed of when the user goes backwards/forwards in history.\n * Note that this usually doesn't include clicking on links (unless the user is using\n * the `HashLocationStrategy`).\n */\n this.disposeOnNavigation = false;\n if (config) {\n // Use `Iterable` instead of `Array` because TypeScript, as of 3.6.3,\n // loses the array generic type in the `for of`. But we *also* have to use `Array` because\n // typescript won't iterate over an `Iterable` unless you compile with `--downlevelIteration`\n const configKeys = Object.keys(config);\n for (const key of configKeys) {\n if (config[key] !== undefined) {\n // TypeScript, as of version 3.5, sees the left-hand-side of this expression\n // as \"I don't know *which* key this is, so the only valid value is the intersection\n // of all the possible values.\" In this case, that happens to be `undefined`. TypeScript\n // is not smart enough to see that the right-hand-side is actually an access of the same\n // exact type with the same exact key, meaning that the value type must be identical.\n // So we use `any` to work around this.\n this[key] = config[key];\n }\n }\n }\n }\n}\n\n/** The points of the origin element and the overlay element to connect. */\nclass ConnectionPositionPair {\n constructor(origin, overlay, /** Offset along the X axis. */\n offsetX, /** Offset along the Y axis. */\n offsetY, /** Class(es) to be applied to the panel while this position is active. 
*/\n panelClass) {\n this.offsetX = offsetX;\n this.offsetY = offsetY;\n this.panelClass = panelClass;\n this.originX = origin.originX;\n this.originY = origin.originY;\n this.overlayX = overlay.overlayX;\n this.overlayY = overlay.overlayY;\n }\n}\n/**\n * Set of properties regarding the position of the origin and overlay relative to the viewport\n * with respect to the containing Scrollable elements.\n *\n * The overlay and origin are clipped if any part of their bounding client rectangle exceeds the\n * bounds of any one of the strategy's Scrollable's bounding client rectangle.\n *\n * The overlay and origin are outside view if there is no overlap between their bounding client\n * rectangle and any one of the strategy's Scrollable's bounding client rectangle.\n *\n * ----------- -----------\n * | outside | | clipped |\n * | view | --------------------------\n * | | | | | |\n * ---------- | ----------- |\n * -------------------------- | |\n * | | | Scrollable |\n * | | | |\n * | | --------------------------\n * | Scrollable |\n * | |\n * --------------------------\n *\n * @docs-private\n */\nclass ScrollingVisibility {}\n/** The change event emitted by the strategy when a fallback position is used. */\nclass ConnectedOverlayPositionChange {\n constructor( /** The position used as a result of this change. */\n connectionPair, /** @docs-private */\n scrollableViewProperties) {\n this.connectionPair = connectionPair;\n this.scrollableViewProperties = scrollableViewProperties;\n }\n}\n/**\n * Validates whether a vertical position property matches the expected values.\n * @param property Name of the property being validated.\n * @param value Value of the property being validated.\n * @docs-private\n */\nfunction validateVerticalPosition(property, value) {\n if (value !== 'top' && value !== 'bottom' && value !== 'center') {\n throw Error(`ConnectedPosition: Invalid ${property} \"${value}\". ` + `Expected \"top\", \"bottom\" or \"center\".`);\n }\n}\n/**\n * Validates whether a horizontal position property matches the expected values.\n * @param property Name of the property being validated.\n * @param value Value of the property being validated.\n * @docs-private\n */\nfunction validateHorizontalPosition(property, value) {\n if (value !== 'start' && value !== 'end' && value !== 'center') {\n throw Error(`ConnectedPosition: Invalid ${property} \"${value}\". ` + `Expected \"start\", \"end\" or \"center\".`);\n }\n}\n\n/**\n * Service for dispatching events that land on the body to appropriate overlay ref,\n * if any. It maintains a list of attached overlays to determine best suited overlay based\n * on event target and order of overlay opens.\n */\nclass BaseOverlayDispatcher {\n constructor(document) {\n /** Currently attached overlays in the order they were attached. */\n this._attachedOverlays = [];\n this._document = document;\n }\n ngOnDestroy() {\n this.detach();\n }\n /** Add a new overlay to the list of attached overlay refs. */\n add(overlayRef) {\n // Ensure that we don't get the same overlay multiple times.\n this.remove(overlayRef);\n this._attachedOverlays.push(overlayRef);\n }\n /** Remove an overlay from the list of attached overlay refs. 
*/\n remove(overlayRef) {\n const index = this._attachedOverlays.indexOf(overlayRef);\n if (index > -1) {\n this._attachedOverlays.splice(index, 1);\n }\n // Remove the global listener once there are no more overlays.\n if (this._attachedOverlays.length === 0) {\n this.detach();\n }\n }\n static {\n this.ɵfac = function BaseOverlayDispatcher_Factory(t) {\n return new (t || BaseOverlayDispatcher)(i0.ɵɵinject(DOCUMENT));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: BaseOverlayDispatcher,\n factory: BaseOverlayDispatcher.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(BaseOverlayDispatcher, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }], null);\n})();\n\n/**\n * Service for dispatching keyboard events that land on the body to appropriate overlay ref,\n * if any. It maintains a list of attached overlays to determine best suited overlay based\n * on event target and order of overlay opens.\n */\nclass OverlayKeyboardDispatcher extends BaseOverlayDispatcher {\n constructor(document, /** @breaking-change 14.0.0 _ngZone will be required. */\n _ngZone) {\n super(document);\n this._ngZone = _ngZone;\n /** Keyboard event listener that will be attached to the body. */\n this._keydownListener = event => {\n const overlays = this._attachedOverlays;\n for (let i = overlays.length - 1; i > -1; i--) {\n // Dispatch the keydown event to the top overlay which has subscribers to its keydown events.\n // We want to target the most recent overlay, rather than trying to match where the event came\n // from, because some components might open an overlay, but keep focus on a trigger element\n // (e.g. for select and autocomplete). We skip overlays without keydown event subscriptions,\n // because we don't want overlays that don't handle keyboard events to block the ones below\n // them that do.\n if (overlays[i]._keydownEvents.observers.length > 0) {\n const keydownEvents = overlays[i]._keydownEvents;\n /** @breaking-change 14.0.0 _ngZone will be required. */\n if (this._ngZone) {\n this._ngZone.run(() => keydownEvents.next(event));\n } else {\n keydownEvents.next(event);\n }\n break;\n }\n }\n };\n }\n /** Add a new overlay to the list of attached overlay refs. */\n add(overlayRef) {\n super.add(overlayRef);\n // Lazily start dispatcher once first overlay is added\n if (!this._isAttached) {\n /** @breaking-change 14.0.0 _ngZone will be required. */\n if (this._ngZone) {\n this._ngZone.runOutsideAngular(() => this._document.body.addEventListener('keydown', this._keydownListener));\n } else {\n this._document.body.addEventListener('keydown', this._keydownListener);\n }\n this._isAttached = true;\n }\n }\n /** Detaches the global keyboard event listener. 
*/\n detach() {\n if (this._isAttached) {\n this._document.body.removeEventListener('keydown', this._keydownListener);\n this._isAttached = false;\n }\n }\n static {\n this.ɵfac = function OverlayKeyboardDispatcher_Factory(t) {\n return new (t || OverlayKeyboardDispatcher)(i0.ɵɵinject(DOCUMENT), i0.ɵɵinject(i0.NgZone, 8));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: OverlayKeyboardDispatcher,\n factory: OverlayKeyboardDispatcher.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(OverlayKeyboardDispatcher, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: i0.NgZone,\n decorators: [{\n type: Optional\n }]\n }], null);\n})();\n\n/**\n * Service for dispatching mouse click events that land on the body to appropriate overlay ref,\n * if any. It maintains a list of attached overlays to determine best suited overlay based\n * on event target and order of overlay opens.\n */\nclass OverlayOutsideClickDispatcher extends BaseOverlayDispatcher {\n constructor(document, _platform, /** @breaking-change 14.0.0 _ngZone will be required. */\n _ngZone) {\n super(document);\n this._platform = _platform;\n this._ngZone = _ngZone;\n this._cursorStyleIsSet = false;\n /** Store pointerdown event target to track origin of click. */\n this._pointerDownListener = event => {\n this._pointerDownEventTarget = _getEventTarget(event);\n };\n /** Click event listener that will be attached to the body propagate phase. */\n this._clickListener = event => {\n const target = _getEventTarget(event);\n // In case of a click event, we want to check the origin of the click\n // (e.g. in case where a user starts a click inside the overlay and\n // releases the click outside of it).\n // This is done by using the event target of the preceding pointerdown event.\n // Every click event caused by a pointer device has a preceding pointerdown\n // event, unless the click was programmatically triggered (e.g. in a unit test).\n const origin = event.type === 'click' && this._pointerDownEventTarget ? 
this._pointerDownEventTarget : target;\n // Reset the stored pointerdown event target, to avoid having it interfere\n // in subsequent events.\n this._pointerDownEventTarget = null;\n // We copy the array because the original may be modified asynchronously if the\n // outsidePointerEvents listener decides to detach overlays resulting in index errors inside\n // the for loop.\n const overlays = this._attachedOverlays.slice();\n // Dispatch the mouse event to the top overlay which has subscribers to its mouse events.\n // We want to target all overlays for which the click could be considered as outside click.\n // As soon as we reach an overlay for which the click is not outside click we break off\n // the loop.\n for (let i = overlays.length - 1; i > -1; i--) {\n const overlayRef = overlays[i];\n if (overlayRef._outsidePointerEvents.observers.length < 1 || !overlayRef.hasAttached()) {\n continue;\n }\n // If it's a click inside the overlay, just break - we should do nothing\n // If it's an outside click (both origin and target of the click) dispatch the mouse event,\n // and proceed with the next overlay\n if (overlayRef.overlayElement.contains(target) || overlayRef.overlayElement.contains(origin)) {\n break;\n }\n const outsidePointerEvents = overlayRef._outsidePointerEvents;\n /** @breaking-change 14.0.0 _ngZone will be required. */\n if (this._ngZone) {\n this._ngZone.run(() => outsidePointerEvents.next(event));\n } else {\n outsidePointerEvents.next(event);\n }\n }\n };\n }\n /** Add a new overlay to the list of attached overlay refs. */\n add(overlayRef) {\n super.add(overlayRef);\n // Safari on iOS does not generate click events for non-interactive\n // elements. However, we want to receive a click for any element outside\n // the overlay. We can force a \"clickable\" state by setting\n // `cursor: pointer` on the document body. See:\n // https://developer.mozilla.org/en-US/docs/Web/API/Element/click_event#Safari_Mobile\n // https://developer.apple.com/library/archive/documentation/AppleApplications/Reference/SafariWebContent/HandlingEvents/HandlingEvents.html\n if (!this._isAttached) {\n const body = this._document.body;\n /** @breaking-change 14.0.0 _ngZone will be required. */\n if (this._ngZone) {\n this._ngZone.runOutsideAngular(() => this._addEventListeners(body));\n } else {\n this._addEventListeners(body);\n }\n // click event is not fired on iOS. To make element \"clickable\" we are\n // setting the cursor to pointer\n if (this._platform.IOS && !this._cursorStyleIsSet) {\n this._cursorOriginalValue = body.style.cursor;\n body.style.cursor = 'pointer';\n this._cursorStyleIsSet = true;\n }\n this._isAttached = true;\n }\n }\n /** Detaches the global keyboard event listener. 
*/\n detach() {\n if (this._isAttached) {\n const body = this._document.body;\n body.removeEventListener('pointerdown', this._pointerDownListener, true);\n body.removeEventListener('click', this._clickListener, true);\n body.removeEventListener('auxclick', this._clickListener, true);\n body.removeEventListener('contextmenu', this._clickListener, true);\n if (this._platform.IOS && this._cursorStyleIsSet) {\n body.style.cursor = this._cursorOriginalValue;\n this._cursorStyleIsSet = false;\n }\n this._isAttached = false;\n }\n }\n _addEventListeners(body) {\n body.addEventListener('pointerdown', this._pointerDownListener, true);\n body.addEventListener('click', this._clickListener, true);\n body.addEventListener('auxclick', this._clickListener, true);\n body.addEventListener('contextmenu', this._clickListener, true);\n }\n static {\n this.ɵfac = function OverlayOutsideClickDispatcher_Factory(t) {\n return new (t || OverlayOutsideClickDispatcher)(i0.ɵɵinject(DOCUMENT), i0.ɵɵinject(i1$1.Platform), i0.ɵɵinject(i0.NgZone, 8));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: OverlayOutsideClickDispatcher,\n factory: OverlayOutsideClickDispatcher.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(OverlayOutsideClickDispatcher, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: i1$1.Platform\n }, {\n type: i0.NgZone,\n decorators: [{\n type: Optional\n }]\n }], null);\n})();\n\n/** Container inside which all overlays will render. */\nclass OverlayContainer {\n constructor(document, _platform) {\n this._platform = _platform;\n this._document = document;\n }\n ngOnDestroy() {\n this._containerElement?.remove();\n }\n /**\n * This method returns the overlay container element. It will lazily\n * create the element the first time it is called to facilitate using\n * the container in non-browser environments.\n * @returns the container element\n */\n getContainerElement() {\n if (!this._containerElement) {\n this._createContainer();\n }\n return this._containerElement;\n }\n /**\n * Create the overlay container element, which is simply a div\n * with the 'cdk-overlay-container' class on the document body.\n */\n _createContainer() {\n const containerClass = 'cdk-overlay-container';\n // TODO(crisbeto): remove the testing check once we have an overlay testing\n // module or Angular starts tearing down the testing `NgModule`. 
See:\n // https://github.com/angular/angular/issues/18831\n if (this._platform.isBrowser || _isTestEnvironment()) {\n const oppositePlatformContainers = this._document.querySelectorAll(`.${containerClass}[platform=\"server\"], ` + `.${containerClass}[platform=\"test\"]`);\n // Remove any old containers from the opposite platform.\n // This can happen when transitioning from the server to the client.\n for (let i = 0; i < oppositePlatformContainers.length; i++) {\n oppositePlatformContainers[i].remove();\n }\n }\n const container = this._document.createElement('div');\n container.classList.add(containerClass);\n // A long time ago we kept adding new overlay containers whenever a new app was instantiated,\n // but at some point we added logic which clears the duplicate ones in order to avoid leaks.\n // The new logic was a little too aggressive since it was breaking some legitimate use cases.\n // To mitigate the problem we made it so that only containers from a different platform are\n // cleared, but the side-effect was that people started depending on the overly-aggressive\n // logic to clean up their tests for them. Until we can introduce an overlay-specific testing\n // module which does the cleanup, we try to detect that we're in a test environment and we\n // always clear the container. See #17006.\n // TODO(crisbeto): remove the test environment check once we have an overlay testing module.\n if (_isTestEnvironment()) {\n container.setAttribute('platform', 'test');\n } else if (!this._platform.isBrowser) {\n container.setAttribute('platform', 'server');\n }\n this._document.body.appendChild(container);\n this._containerElement = container;\n }\n static {\n this.ɵfac = function OverlayContainer_Factory(t) {\n return new (t || OverlayContainer)(i0.ɵɵinject(DOCUMENT), i0.ɵɵinject(i1$1.Platform));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: OverlayContainer,\n factory: OverlayContainer.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(OverlayContainer, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: i1$1.Platform\n }], null);\n})();\n\n/**\n * Reference to an overlay that has been created with the Overlay service.\n * Used to manipulate or dispose of said overlay.\n */\nclass OverlayRef {\n constructor(_portalOutlet, _host, _pane, _config, _ngZone, _keyboardDispatcher, _document, _location, _outsideClickDispatcher, _animationsDisabled = false) {\n this._portalOutlet = _portalOutlet;\n this._host = _host;\n this._pane = _pane;\n this._config = _config;\n this._ngZone = _ngZone;\n this._keyboardDispatcher = _keyboardDispatcher;\n this._document = _document;\n this._location = _location;\n this._outsideClickDispatcher = _outsideClickDispatcher;\n this._animationsDisabled = _animationsDisabled;\n this._backdropElement = null;\n this._backdropClick = new Subject();\n this._attachments = new Subject();\n this._detachments = new Subject();\n this._locationChanges = Subscription.EMPTY;\n this._backdropClickHandler = event => this._backdropClick.next(event);\n this._backdropTransitionendHandler = event => {\n this._disposeBackdrop(event.target);\n };\n /** Stream of keydown events dispatched to this overlay. */\n this._keydownEvents = new Subject();\n /** Stream of mouse outside events dispatched to this overlay. 
*/\n this._outsidePointerEvents = new Subject();\n if (_config.scrollStrategy) {\n this._scrollStrategy = _config.scrollStrategy;\n this._scrollStrategy.attach(this);\n }\n this._positionStrategy = _config.positionStrategy;\n }\n /** The overlay's HTML element */\n get overlayElement() {\n return this._pane;\n }\n /** The overlay's backdrop HTML element. */\n get backdropElement() {\n return this._backdropElement;\n }\n /**\n * Wrapper around the panel element. Can be used for advanced\n * positioning where a wrapper with specific styling is\n * required around the overlay pane.\n */\n get hostElement() {\n return this._host;\n }\n /**\n * Attaches content, given via a Portal, to the overlay.\n * If the overlay is configured to have a backdrop, it will be created.\n *\n * @param portal Portal instance to which to attach the overlay.\n * @returns The portal attachment result.\n */\n attach(portal) {\n // Insert the host into the DOM before attaching the portal, otherwise\n // the animations module will skip animations on repeat attachments.\n if (!this._host.parentElement && this._previousHostParent) {\n this._previousHostParent.appendChild(this._host);\n }\n const attachResult = this._portalOutlet.attach(portal);\n if (this._positionStrategy) {\n this._positionStrategy.attach(this);\n }\n this._updateStackingOrder();\n this._updateElementSize();\n this._updateElementDirection();\n if (this._scrollStrategy) {\n this._scrollStrategy.enable();\n }\n // Update the position once the zone is stable so that the overlay will be fully rendered\n // before attempting to position it, as the position may depend on the size of the rendered\n // content.\n this._ngZone.onStable.pipe(take(1)).subscribe(() => {\n // The overlay could've been detached before the zone has stabilized.\n if (this.hasAttached()) {\n this.updatePosition();\n }\n });\n // Enable pointer events for the overlay pane element.\n this._togglePointerEvents(true);\n if (this._config.hasBackdrop) {\n this._attachBackdrop();\n }\n if (this._config.panelClass) {\n this._toggleClasses(this._pane, this._config.panelClass, true);\n }\n // Only emit the `attachments` event once all other setup is done.\n this._attachments.next();\n // Track this overlay by the keyboard dispatcher\n this._keyboardDispatcher.add(this);\n if (this._config.disposeOnNavigation) {\n this._locationChanges = this._location.subscribe(() => this.dispose());\n }\n this._outsideClickDispatcher.add(this);\n // TODO(crisbeto): the null check is here, because the portal outlet returns `any`.\n // We should be guaranteed for the result to be `ComponentRef | EmbeddedViewRef`, but\n // `instanceof EmbeddedViewRef` doesn't appear to work at the moment.\n if (typeof attachResult?.onDestroy === 'function') {\n // In most cases we control the portal and we know when it is being detached so that\n // we can finish the disposal process. The exception is if the user passes in a custom\n // `ViewContainerRef` that isn't destroyed through the overlay API. Note that we use\n // `detach` here instead of `dispose`, because we don't know if the user intends to\n // reattach the overlay at a later point. It also has the advantage of waiting for animations.\n attachResult.onDestroy(() => {\n if (this.hasAttached()) {\n // We have to delay the `detach` call, because detaching immediately prevents\n // other destroy hooks from running. 
This is likely a framework bug similar to\n // https://github.com/angular/angular/issues/46119\n this._ngZone.runOutsideAngular(() => Promise.resolve().then(() => this.detach()));\n }\n });\n }\n return attachResult;\n }\n /**\n * Detaches an overlay from a portal.\n * @returns The portal detachment result.\n */\n detach() {\n if (!this.hasAttached()) {\n return;\n }\n this.detachBackdrop();\n // When the overlay is detached, the pane element should disable pointer events.\n // This is necessary because otherwise the pane element will cover the page and disable\n // pointer events therefore. Depends on the position strategy and the applied pane boundaries.\n this._togglePointerEvents(false);\n if (this._positionStrategy && this._positionStrategy.detach) {\n this._positionStrategy.detach();\n }\n if (this._scrollStrategy) {\n this._scrollStrategy.disable();\n }\n const detachmentResult = this._portalOutlet.detach();\n // Only emit after everything is detached.\n this._detachments.next();\n // Remove this overlay from keyboard dispatcher tracking.\n this._keyboardDispatcher.remove(this);\n // Keeping the host element in the DOM can cause scroll jank, because it still gets\n // rendered, even though it's transparent and unclickable which is why we remove it.\n this._detachContentWhenStable();\n this._locationChanges.unsubscribe();\n this._outsideClickDispatcher.remove(this);\n return detachmentResult;\n }\n /** Cleans up the overlay from the DOM. */\n dispose() {\n const isAttached = this.hasAttached();\n if (this._positionStrategy) {\n this._positionStrategy.dispose();\n }\n this._disposeScrollStrategy();\n this._disposeBackdrop(this._backdropElement);\n this._locationChanges.unsubscribe();\n this._keyboardDispatcher.remove(this);\n this._portalOutlet.dispose();\n this._attachments.complete();\n this._backdropClick.complete();\n this._keydownEvents.complete();\n this._outsidePointerEvents.complete();\n this._outsideClickDispatcher.remove(this);\n this._host?.remove();\n this._previousHostParent = this._pane = this._host = null;\n if (isAttached) {\n this._detachments.next();\n }\n this._detachments.complete();\n }\n /** Whether the overlay has attached content. */\n hasAttached() {\n return this._portalOutlet.hasAttached();\n }\n /** Gets an observable that emits when the backdrop has been clicked. */\n backdropClick() {\n return this._backdropClick;\n }\n /** Gets an observable that emits when the overlay has been attached. */\n attachments() {\n return this._attachments;\n }\n /** Gets an observable that emits when the overlay has been detached. */\n detachments() {\n return this._detachments;\n }\n /** Gets an observable of keydown events targeted to this overlay. */\n keydownEvents() {\n return this._keydownEvents;\n }\n /** Gets an observable of pointer events targeted outside this overlay. */\n outsidePointerEvents() {\n return this._outsidePointerEvents;\n }\n /** Gets the current overlay configuration, which is immutable. */\n getConfig() {\n return this._config;\n }\n /** Updates the position of the overlay based on the position strategy. */\n updatePosition() {\n if (this._positionStrategy) {\n this._positionStrategy.apply();\n }\n }\n /** Switches to a new position strategy and updates the overlay position. 
*/\n updatePositionStrategy(strategy) {\n if (strategy === this._positionStrategy) {\n return;\n }\n if (this._positionStrategy) {\n this._positionStrategy.dispose();\n }\n this._positionStrategy = strategy;\n if (this.hasAttached()) {\n strategy.attach(this);\n this.updatePosition();\n }\n }\n /** Update the size properties of the overlay. */\n updateSize(sizeConfig) {\n this._config = {\n ...this._config,\n ...sizeConfig\n };\n this._updateElementSize();\n }\n /** Sets the LTR/RTL direction for the overlay. */\n setDirection(dir) {\n this._config = {\n ...this._config,\n direction: dir\n };\n this._updateElementDirection();\n }\n /** Add a CSS class or an array of classes to the overlay pane. */\n addPanelClass(classes) {\n if (this._pane) {\n this._toggleClasses(this._pane, classes, true);\n }\n }\n /** Remove a CSS class or an array of classes from the overlay pane. */\n removePanelClass(classes) {\n if (this._pane) {\n this._toggleClasses(this._pane, classes, false);\n }\n }\n /**\n * Returns the layout direction of the overlay panel.\n */\n getDirection() {\n const direction = this._config.direction;\n if (!direction) {\n return 'ltr';\n }\n return typeof direction === 'string' ? direction : direction.value;\n }\n /** Switches to a new scroll strategy. */\n updateScrollStrategy(strategy) {\n if (strategy === this._scrollStrategy) {\n return;\n }\n this._disposeScrollStrategy();\n this._scrollStrategy = strategy;\n if (this.hasAttached()) {\n strategy.attach(this);\n strategy.enable();\n }\n }\n /** Updates the text direction of the overlay panel. */\n _updateElementDirection() {\n this._host.setAttribute('dir', this.getDirection());\n }\n /** Updates the size of the overlay element based on the overlay config. */\n _updateElementSize() {\n if (!this._pane) {\n return;\n }\n const style = this._pane.style;\n style.width = coerceCssPixelValue(this._config.width);\n style.height = coerceCssPixelValue(this._config.height);\n style.minWidth = coerceCssPixelValue(this._config.minWidth);\n style.minHeight = coerceCssPixelValue(this._config.minHeight);\n style.maxWidth = coerceCssPixelValue(this._config.maxWidth);\n style.maxHeight = coerceCssPixelValue(this._config.maxHeight);\n }\n /** Toggles the pointer events for the overlay pane element. */\n _togglePointerEvents(enablePointer) {\n this._pane.style.pointerEvents = enablePointer ? '' : 'none';\n }\n /** Attaches a backdrop for this overlay. 
*/\n _attachBackdrop() {\n const showingClass = 'cdk-overlay-backdrop-showing';\n this._backdropElement = this._document.createElement('div');\n this._backdropElement.classList.add('cdk-overlay-backdrop');\n if (this._animationsDisabled) {\n this._backdropElement.classList.add('cdk-overlay-backdrop-noop-animation');\n }\n if (this._config.backdropClass) {\n this._toggleClasses(this._backdropElement, this._config.backdropClass, true);\n }\n // Insert the backdrop before the pane in the DOM order,\n // in order to handle stacked overlays properly.\n this._host.parentElement.insertBefore(this._backdropElement, this._host);\n // Forward backdrop clicks such that the consumer of the overlay can perform whatever\n // action desired when such a click occurs (usually closing the overlay).\n this._backdropElement.addEventListener('click', this._backdropClickHandler);\n // Add class to fade-in the backdrop after one frame.\n if (!this._animationsDisabled && typeof requestAnimationFrame !== 'undefined') {\n this._ngZone.runOutsideAngular(() => {\n requestAnimationFrame(() => {\n if (this._backdropElement) {\n this._backdropElement.classList.add(showingClass);\n }\n });\n });\n } else {\n this._backdropElement.classList.add(showingClass);\n }\n }\n /**\n * Updates the stacking order of the element, moving it to the top if necessary.\n * This is required in cases where one overlay was detached, while another one,\n * that should be behind it, was destroyed. The next time both of them are opened,\n * the stacking will be wrong, because the detached element's pane will still be\n * in its original DOM position.\n */\n _updateStackingOrder() {\n if (this._host.nextSibling) {\n this._host.parentNode.appendChild(this._host);\n }\n }\n /** Detaches the backdrop (if any) associated with the overlay. */\n detachBackdrop() {\n const backdropToDetach = this._backdropElement;\n if (!backdropToDetach) {\n return;\n }\n if (this._animationsDisabled) {\n this._disposeBackdrop(backdropToDetach);\n return;\n }\n backdropToDetach.classList.remove('cdk-overlay-backdrop-showing');\n this._ngZone.runOutsideAngular(() => {\n backdropToDetach.addEventListener('transitionend', this._backdropTransitionendHandler);\n });\n // If the backdrop doesn't have a transition, the `transitionend` event won't fire.\n // In this case we make it unclickable and we try to remove it after a delay.\n backdropToDetach.style.pointerEvents = 'none';\n // Run this outside the Angular zone because there's nothing that Angular cares about.\n // If it were to run inside the Angular zone, every test that used Overlay would have to be\n // either async or fakeAsync.\n this._backdropTimeout = this._ngZone.runOutsideAngular(() => setTimeout(() => {\n this._disposeBackdrop(backdropToDetach);\n }, 500));\n }\n /** Toggles a single CSS class or an array of classes on an element. */\n _toggleClasses(element, cssClasses, isAdd) {\n const classes = coerceArray(cssClasses || []).filter(c => !!c);\n if (classes.length) {\n isAdd ? element.classList.add(...classes) : element.classList.remove(...classes);\n }\n }\n /** Detaches the overlay content next time the zone stabilizes. 
*/\n _detachContentWhenStable() {\n // Normally we wouldn't have to explicitly run this outside the `NgZone`, however\n // if the consumer is using `zone-patch-rxjs`, the `Subscription.unsubscribe` call will\n // be patched to run inside the zone, which will throw us into an infinite loop.\n this._ngZone.runOutsideAngular(() => {\n // We can't remove the host here immediately, because the overlay pane's content\n // might still be animating. This stream helps us avoid interrupting the animation\n // by waiting for the pane to become empty.\n const subscription = this._ngZone.onStable.pipe(takeUntil(merge(this._attachments, this._detachments))).subscribe(() => {\n // Needs a couple of checks for the pane and host, because\n // they may have been removed by the time the zone stabilizes.\n if (!this._pane || !this._host || this._pane.children.length === 0) {\n if (this._pane && this._config.panelClass) {\n this._toggleClasses(this._pane, this._config.panelClass, false);\n }\n if (this._host && this._host.parentElement) {\n this._previousHostParent = this._host.parentElement;\n this._host.remove();\n }\n subscription.unsubscribe();\n }\n });\n });\n }\n /** Disposes of a scroll strategy. */\n _disposeScrollStrategy() {\n const scrollStrategy = this._scrollStrategy;\n if (scrollStrategy) {\n scrollStrategy.disable();\n if (scrollStrategy.detach) {\n scrollStrategy.detach();\n }\n }\n }\n /** Removes a backdrop element from the DOM. */\n _disposeBackdrop(backdrop) {\n if (backdrop) {\n backdrop.removeEventListener('click', this._backdropClickHandler);\n backdrop.removeEventListener('transitionend', this._backdropTransitionendHandler);\n backdrop.remove();\n // It is possible that a new portal has been attached to this overlay since we started\n // removing the backdrop. If that is the case, only clear the backdrop reference if it\n // is still the same instance that we started to remove.\n if (this._backdropElement === backdrop) {\n this._backdropElement = null;\n }\n }\n if (this._backdropTimeout) {\n clearTimeout(this._backdropTimeout);\n this._backdropTimeout = undefined;\n }\n }\n}\n\n// TODO: refactor clipping detection into a separate thing (part of scrolling module)\n// TODO: doesn't handle both flexible width and height when it has to scroll along both axis.\n/** Class to be added to the overlay bounding box. */\nconst boundingBoxClass = 'cdk-overlay-connected-position-bounding-box';\n/** Regex used to split a string on its CSS units. */\nconst cssUnitPattern = /([A-Za-z%]+)$/;\n/**\n * A strategy for positioning overlays. Using this strategy, an overlay is given an\n * implicit position relative some origin element. The relative position is defined in terms of\n * a point on the origin element that is connected to a point on the overlay element. For example,\n * a basic dropdown is connecting the bottom-left corner of the origin to the top-left corner\n * of the overlay.\n */\nclass FlexibleConnectedPositionStrategy {\n /** Ordered list of preferred positions, from most to least desirable. */\n get positions() {\n return this._preferredPositions;\n }\n constructor(connectedTo, _viewportRuler, _document, _platform, _overlayContainer) {\n this._viewportRuler = _viewportRuler;\n this._document = _document;\n this._platform = _platform;\n this._overlayContainer = _overlayContainer;\n /** Last size used for the bounding box. Used to avoid resizing the overlay after open. 
*/\n this._lastBoundingBoxSize = {\n width: 0,\n height: 0\n };\n /** Whether the overlay was pushed in a previous positioning. */\n this._isPushed = false;\n /** Whether the overlay can be pushed on-screen on the initial open. */\n this._canPush = true;\n /** Whether the overlay can grow via flexible width/height after the initial open. */\n this._growAfterOpen = false;\n /** Whether the overlay's width and height can be constrained to fit within the viewport. */\n this._hasFlexibleDimensions = true;\n /** Whether the overlay position is locked. */\n this._positionLocked = false;\n /** Amount of space that must be maintained between the overlay and the edge of the viewport. */\n this._viewportMargin = 0;\n /** The Scrollable containers used to check scrollable view properties on position change. */\n this._scrollables = [];\n /** Ordered list of preferred positions, from most to least desirable. */\n this._preferredPositions = [];\n /** Subject that emits whenever the position changes. */\n this._positionChanges = new Subject();\n /** Subscription to viewport size changes. */\n this._resizeSubscription = Subscription.EMPTY;\n /** Default offset for the overlay along the x axis. */\n this._offsetX = 0;\n /** Default offset for the overlay along the y axis. */\n this._offsetY = 0;\n /** Keeps track of the CSS classes that the position strategy has applied on the overlay panel. */\n this._appliedPanelClasses = [];\n /** Observable sequence of position changes. */\n this.positionChanges = this._positionChanges;\n this.setOrigin(connectedTo);\n }\n /** Attaches this position strategy to an overlay. */\n attach(overlayRef) {\n if (this._overlayRef && overlayRef !== this._overlayRef && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw Error('This position strategy is already attached to an overlay');\n }\n this._validatePositions();\n overlayRef.hostElement.classList.add(boundingBoxClass);\n this._overlayRef = overlayRef;\n this._boundingBox = overlayRef.hostElement;\n this._pane = overlayRef.overlayElement;\n this._isDisposed = false;\n this._isInitialRender = true;\n this._lastPosition = null;\n this._resizeSubscription.unsubscribe();\n this._resizeSubscription = this._viewportRuler.change().subscribe(() => {\n // When the window is resized, we want to trigger the next reposition as if it\n // was an initial render, in order for the strategy to pick a new optimal position,\n // otherwise position locking will cause it to stay at the old one.\n this._isInitialRender = true;\n this.apply();\n });\n }\n /**\n * Updates the position of the overlay element, using whichever preferred position relative\n * to the origin best fits on-screen.\n *\n * The selection of a position goes as follows:\n * - If any positions fit completely within the viewport as-is,\n * choose the first position that does so.\n * - If flexible dimensions are enabled and at least one satisfies the given minimum width/height,\n * choose the position with the greatest available size modified by the positions' weight.\n * - If pushing is enabled, take the position that went off-screen the least and push it\n * on-screen.\n * - If none of the previous criteria were met, use the position that goes off-screen the least.\n * @docs-private\n */\n apply() {\n // We shouldn't do anything if the strategy was disposed or we're on the server.\n if (this._isDisposed || !this._platform.isBrowser) {\n return;\n }\n // If the position has been applied already (e.g. 
when the overlay was opened) and the\n // consumer opted into locking in the position, re-use the old position, in order to\n // prevent the overlay from jumping around.\n if (!this._isInitialRender && this._positionLocked && this._lastPosition) {\n this.reapplyLastPosition();\n return;\n }\n this._clearPanelClasses();\n this._resetOverlayElementStyles();\n this._resetBoundingBoxStyles();\n // We need the bounding rects for the origin, the overlay and the container to determine how to position\n // the overlay relative to the origin.\n // We use the viewport rect to determine whether a position would go off-screen.\n this._viewportRect = this._getNarrowedViewportRect();\n this._originRect = this._getOriginRect();\n this._overlayRect = this._pane.getBoundingClientRect();\n this._containerRect = this._overlayContainer.getContainerElement().getBoundingClientRect();\n const originRect = this._originRect;\n const overlayRect = this._overlayRect;\n const viewportRect = this._viewportRect;\n const containerRect = this._containerRect;\n // Positions where the overlay will fit with flexible dimensions.\n const flexibleFits = [];\n // Fallback if none of the preferred positions fit within the viewport.\n let fallback;\n // Go through each of the preferred positions looking for a good fit.\n // If a good fit is found, it will be applied immediately.\n for (let pos of this._preferredPositions) {\n // Get the exact (x, y) coordinate for the point-of-origin on the origin element.\n let originPoint = this._getOriginPoint(originRect, containerRect, pos);\n // From that point-of-origin, get the exact (x, y) coordinate for the top-left corner of the\n // overlay in this position. We use the top-left corner for calculations and later translate\n // this into an appropriate (top, left, bottom, right) style.\n let overlayPoint = this._getOverlayPoint(originPoint, overlayRect, pos);\n // Calculate how well the overlay would fit into the viewport with this point.\n let overlayFit = this._getOverlayFit(overlayPoint, overlayRect, viewportRect, pos);\n // If the overlay, without any further work, fits into the viewport, use this position.\n if (overlayFit.isCompletelyWithinViewport) {\n this._isPushed = false;\n this._applyPosition(pos, originPoint);\n return;\n }\n // If the overlay has flexible dimensions, we can use this position\n // so long as there's enough space for the minimum dimensions.\n if (this._canFitWithFlexibleDimensions(overlayFit, overlayPoint, viewportRect)) {\n // Save positions where the overlay will fit with flexible dimensions. 
We will use these\n // if none of the positions fit *without* flexible dimensions.\n flexibleFits.push({\n position: pos,\n origin: originPoint,\n overlayRect,\n boundingBoxRect: this._calculateBoundingBoxRect(originPoint, pos)\n });\n continue;\n }\n // If the current preferred position does not fit on the screen, remember the position\n // if it has more visible area on-screen than we've seen and move onto the next preferred\n // position.\n if (!fallback || fallback.overlayFit.visibleArea < overlayFit.visibleArea) {\n fallback = {\n overlayFit,\n overlayPoint,\n originPoint,\n position: pos,\n overlayRect\n };\n }\n }\n // If there are any positions where the overlay would fit with flexible dimensions, choose the\n // one that has the greatest area available modified by the position's weight\n if (flexibleFits.length) {\n let bestFit = null;\n let bestScore = -1;\n for (const fit of flexibleFits) {\n const score = fit.boundingBoxRect.width * fit.boundingBoxRect.height * (fit.position.weight || 1);\n if (score > bestScore) {\n bestScore = score;\n bestFit = fit;\n }\n }\n this._isPushed = false;\n this._applyPosition(bestFit.position, bestFit.origin);\n return;\n }\n // When none of the preferred positions fit within the viewport, take the position\n // that went off-screen the least and attempt to push it on-screen.\n if (this._canPush) {\n // TODO(jelbourn): after pushing, the opening \"direction\" of the overlay might not make sense.\n this._isPushed = true;\n this._applyPosition(fallback.position, fallback.originPoint);\n return;\n }\n // All options for getting the overlay within the viewport have been exhausted, so go with the\n // position that went off-screen the least.\n this._applyPosition(fallback.position, fallback.originPoint);\n }\n detach() {\n this._clearPanelClasses();\n this._lastPosition = null;\n this._previousPushAmount = null;\n this._resizeSubscription.unsubscribe();\n }\n /** Cleanup after the element gets destroyed. */\n dispose() {\n if (this._isDisposed) {\n return;\n }\n // We can't use `_resetBoundingBoxStyles` here, because it resets\n // some properties to zero, rather than removing them.\n if (this._boundingBox) {\n extendStyles(this._boundingBox.style, {\n top: '',\n left: '',\n right: '',\n bottom: '',\n height: '',\n width: '',\n alignItems: '',\n justifyContent: ''\n });\n }\n if (this._pane) {\n this._resetOverlayElementStyles();\n }\n if (this._overlayRef) {\n this._overlayRef.hostElement.classList.remove(boundingBoxClass);\n }\n this.detach();\n this._positionChanges.complete();\n this._overlayRef = this._boundingBox = null;\n this._isDisposed = true;\n }\n /**\n * This re-aligns the overlay element with the trigger in its last calculated position,\n * even if a position higher in the \"preferred positions\" list would now fit. 
This\n * allows one to re-align the panel without changing the orientation of the panel.\n */\n reapplyLastPosition() {\n if (this._isDisposed || !this._platform.isBrowser) {\n return;\n }\n const lastPosition = this._lastPosition;\n if (lastPosition) {\n this._originRect = this._getOriginRect();\n this._overlayRect = this._pane.getBoundingClientRect();\n this._viewportRect = this._getNarrowedViewportRect();\n this._containerRect = this._overlayContainer.getContainerElement().getBoundingClientRect();\n const originPoint = this._getOriginPoint(this._originRect, this._containerRect, lastPosition);\n this._applyPosition(lastPosition, originPoint);\n } else {\n this.apply();\n }\n }\n /**\n * Sets the list of Scrollable containers that host the origin element so that\n * on reposition we can evaluate if it or the overlay has been clipped or outside view. Every\n * Scrollable must be an ancestor element of the strategy's origin element.\n */\n withScrollableContainers(scrollables) {\n this._scrollables = scrollables;\n return this;\n }\n /**\n * Adds new preferred positions.\n * @param positions List of positions options for this overlay.\n */\n withPositions(positions) {\n this._preferredPositions = positions;\n // If the last calculated position object isn't part of the positions anymore, clear\n // it in order to avoid it being picked up if the consumer tries to re-apply.\n if (positions.indexOf(this._lastPosition) === -1) {\n this._lastPosition = null;\n }\n this._validatePositions();\n return this;\n }\n /**\n * Sets a minimum distance the overlay may be positioned to the edge of the viewport.\n * @param margin Required margin between the overlay and the viewport edge in pixels.\n */\n withViewportMargin(margin) {\n this._viewportMargin = margin;\n return this;\n }\n /** Sets whether the overlay's width and height can be constrained to fit within the viewport. */\n withFlexibleDimensions(flexibleDimensions = true) {\n this._hasFlexibleDimensions = flexibleDimensions;\n return this;\n }\n /** Sets whether the overlay can grow after the initial open via flexible width/height. */\n withGrowAfterOpen(growAfterOpen = true) {\n this._growAfterOpen = growAfterOpen;\n return this;\n }\n /** Sets whether the overlay can be pushed on-screen if none of the provided positions fit. */\n withPush(canPush = true) {\n this._canPush = canPush;\n return this;\n }\n /**\n * Sets whether the overlay's position should be locked in after it is positioned\n * initially. When an overlay is locked in, it won't attempt to reposition itself\n * when the position is re-applied (e.g. when the user scrolls away).\n * @param isLocked Whether the overlay should locked in.\n */\n withLockedPosition(isLocked = true) {\n this._positionLocked = isLocked;\n return this;\n }\n /**\n * Sets the origin, relative to which to position the overlay.\n * Using an element origin is useful for building components that need to be positioned\n * relatively to a trigger (e.g. 
dropdown menus or tooltips), whereas using a point can be\n * used for cases like contextual menus which open relative to the user's pointer.\n * @param origin Reference to the new origin.\n */\n setOrigin(origin) {\n this._origin = origin;\n return this;\n }\n /**\n * Sets the default offset for the overlay's connection point on the x-axis.\n * @param offset New offset in the X axis.\n */\n withDefaultOffsetX(offset) {\n this._offsetX = offset;\n return this;\n }\n /**\n * Sets the default offset for the overlay's connection point on the y-axis.\n * @param offset New offset in the Y axis.\n */\n withDefaultOffsetY(offset) {\n this._offsetY = offset;\n return this;\n }\n /**\n * Configures that the position strategy should set a `transform-origin` on some elements\n * inside the overlay, depending on the current position that is being applied. This is\n * useful for the cases where the origin of an animation can change depending on the\n * alignment of the overlay.\n * @param selector CSS selector that will be used to find the target\n * elements onto which to set the transform origin.\n */\n withTransformOriginOn(selector) {\n this._transformOriginSelector = selector;\n return this;\n }\n /**\n * Gets the (x, y) coordinate of a connection point on the origin based on a relative position.\n */\n _getOriginPoint(originRect, containerRect, pos) {\n let x;\n if (pos.originX == 'center') {\n // Note: when centering we should always use the `left`\n // offset, otherwise the position will be wrong in RTL.\n x = originRect.left + originRect.width / 2;\n } else {\n const startX = this._isRtl() ? originRect.right : originRect.left;\n const endX = this._isRtl() ? originRect.left : originRect.right;\n x = pos.originX == 'start' ? startX : endX;\n }\n // When zooming in Safari the container rectangle contains negative values for the position\n // and we need to re-add them to the calculated coordinates.\n if (containerRect.left < 0) {\n x -= containerRect.left;\n }\n let y;\n if (pos.originY == 'center') {\n y = originRect.top + originRect.height / 2;\n } else {\n y = pos.originY == 'top' ? originRect.top : originRect.bottom;\n }\n // Normally the containerRect's top value would be zero, however when the overlay is attached to an input\n // (e.g. in an autocomplete), mobile browsers will shift everything in order to put the input in the middle\n // of the screen and to make space for the virtual keyboard. We need to account for this offset,\n // otherwise our positioning will be thrown off.\n // Additionally, when zooming in Safari this fixes the vertical position.\n if (containerRect.top < 0) {\n y -= containerRect.top;\n }\n return {\n x,\n y\n };\n }\n /**\n * Gets the (x, y) coordinate of the top-left corner of the overlay given a given position and\n * origin point to which the overlay should be connected.\n */\n _getOverlayPoint(originPoint, overlayRect, pos) {\n // Calculate the (overlayStartX, overlayStartY), the start of the\n // potential overlay position relative to the origin point.\n let overlayStartX;\n if (pos.overlayX == 'center') {\n overlayStartX = -overlayRect.width / 2;\n } else if (pos.overlayX === 'start') {\n overlayStartX = this._isRtl() ? -overlayRect.width : 0;\n } else {\n overlayStartX = this._isRtl() ? 0 : -overlayRect.width;\n }\n let overlayStartY;\n if (pos.overlayY == 'center') {\n overlayStartY = -overlayRect.height / 2;\n } else {\n overlayStartY = pos.overlayY == 'top' ? 
0 : -overlayRect.height;\n }\n // The (x, y) coordinates of the overlay.\n return {\n x: originPoint.x + overlayStartX,\n y: originPoint.y + overlayStartY\n };\n }\n /** Gets how well an overlay at the given point will fit within the viewport. */\n _getOverlayFit(point, rawOverlayRect, viewport, position) {\n // Round the overlay rect when comparing against the\n // viewport, because the viewport is always rounded.\n const overlay = getRoundedBoundingClientRect(rawOverlayRect);\n let {\n x,\n y\n } = point;\n let offsetX = this._getOffset(position, 'x');\n let offsetY = this._getOffset(position, 'y');\n // Account for the offsets since they could push the overlay out of the viewport.\n if (offsetX) {\n x += offsetX;\n }\n if (offsetY) {\n y += offsetY;\n }\n // How much the overlay would overflow at this position, on each side.\n let leftOverflow = 0 - x;\n let rightOverflow = x + overlay.width - viewport.width;\n let topOverflow = 0 - y;\n let bottomOverflow = y + overlay.height - viewport.height;\n // Visible parts of the element on each axis.\n let visibleWidth = this._subtractOverflows(overlay.width, leftOverflow, rightOverflow);\n let visibleHeight = this._subtractOverflows(overlay.height, topOverflow, bottomOverflow);\n let visibleArea = visibleWidth * visibleHeight;\n return {\n visibleArea,\n isCompletelyWithinViewport: overlay.width * overlay.height === visibleArea,\n fitsInViewportVertically: visibleHeight === overlay.height,\n fitsInViewportHorizontally: visibleWidth == overlay.width\n };\n }\n /**\n * Whether the overlay can fit within the viewport when it may resize either its width or height.\n * @param fit How well the overlay fits in the viewport at some position.\n * @param point The (x, y) coordinates of the overlay at some position.\n * @param viewport The geometry of the viewport.\n */\n _canFitWithFlexibleDimensions(fit, point, viewport) {\n if (this._hasFlexibleDimensions) {\n const availableHeight = viewport.bottom - point.y;\n const availableWidth = viewport.right - point.x;\n const minHeight = getPixelValue(this._overlayRef.getConfig().minHeight);\n const minWidth = getPixelValue(this._overlayRef.getConfig().minWidth);\n const verticalFit = fit.fitsInViewportVertically || minHeight != null && minHeight <= availableHeight;\n const horizontalFit = fit.fitsInViewportHorizontally || minWidth != null && minWidth <= availableWidth;\n return verticalFit && horizontalFit;\n }\n return false;\n }\n /**\n * Gets the point at which the overlay can be \"pushed\" on-screen. If the overlay is larger than\n * the viewport, the top-left corner will be pushed on-screen (with overflow occurring on the\n * right and bottom).\n *\n * @param start Starting point from which the overlay is pushed.\n * @param rawOverlayRect Dimensions of the overlay.\n * @param scrollPosition Current viewport scroll position.\n * @returns The point at which to position the overlay after pushing. This is effectively a new\n * originPoint.\n */\n _pushOverlayOnScreen(start, rawOverlayRect, scrollPosition) {\n // If the position is locked and we've pushed the overlay already, reuse the previous push\n // amount, rather than pushing it again. 
If we were to continue pushing, the element would\n // remain in the viewport, which goes against the expectations when position locking is enabled.\n if (this._previousPushAmount && this._positionLocked) {\n return {\n x: start.x + this._previousPushAmount.x,\n y: start.y + this._previousPushAmount.y\n };\n }\n // Round the overlay rect when comparing against the\n // viewport, because the viewport is always rounded.\n const overlay = getRoundedBoundingClientRect(rawOverlayRect);\n const viewport = this._viewportRect;\n // Determine how much the overlay goes outside the viewport on each\n // side, which we'll use to decide which direction to push it.\n const overflowRight = Math.max(start.x + overlay.width - viewport.width, 0);\n const overflowBottom = Math.max(start.y + overlay.height - viewport.height, 0);\n const overflowTop = Math.max(viewport.top - scrollPosition.top - start.y, 0);\n const overflowLeft = Math.max(viewport.left - scrollPosition.left - start.x, 0);\n // Amount by which to push the overlay in each axis such that it remains on-screen.\n let pushX = 0;\n let pushY = 0;\n // If the overlay fits completely within the bounds of the viewport, push it from whichever\n // direction it goes off-screen. Otherwise, push the top-left corner such that it's in the\n // viewport and allow for the trailing end of the overlay to go out of bounds.\n if (overlay.width <= viewport.width) {\n pushX = overflowLeft || -overflowRight;\n } else {\n pushX = start.x < this._viewportMargin ? viewport.left - scrollPosition.left - start.x : 0;\n }\n if (overlay.height <= viewport.height) {\n pushY = overflowTop || -overflowBottom;\n } else {\n pushY = start.y < this._viewportMargin ? viewport.top - scrollPosition.top - start.y : 0;\n }\n this._previousPushAmount = {\n x: pushX,\n y: pushY\n };\n return {\n x: start.x + pushX,\n y: start.y + pushY\n };\n }\n /**\n * Applies a computed position to the overlay and emits a position change.\n * @param position The position preference\n * @param originPoint The point on the origin element where the overlay is connected.\n */\n _applyPosition(position, originPoint) {\n this._setTransformOrigin(position);\n this._setOverlayElementStyles(originPoint, position);\n this._setBoundingBoxStyles(originPoint, position);\n if (position.panelClass) {\n this._addPanelClasses(position.panelClass);\n }\n // Notify that the position has been changed along with its change properties.\n // We only emit if we've got any subscriptions, because the scroll visibility\n // calculations can be somewhat expensive.\n if (this._positionChanges.observers.length) {\n const scrollVisibility = this._getScrollVisibility();\n // We're recalculating on scroll, but we only want to emit if anything\n // changed since downstream code might be hitting the `NgZone`.\n if (position !== this._lastPosition || !this._lastScrollVisibility || !compareScrollVisibility(this._lastScrollVisibility, scrollVisibility)) {\n const changeEvent = new ConnectedOverlayPositionChange(position, scrollVisibility);\n this._positionChanges.next(changeEvent);\n }\n this._lastScrollVisibility = scrollVisibility;\n }\n // Save the last connected position in case the position needs to be re-calculated.\n this._lastPosition = position;\n this._isInitialRender = false;\n }\n /** Sets the transform origin based on the configured selector and the passed-in position. 
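For example (illustrative; the\n * `.menu-panel` selector is an assumed value): after `withTransformOriginOn('.menu-panel')`, a\n * position of `{overlayX: 'start', overlayY: 'top'}` results in matching elements getting\n * `transform-origin: left top` in LTR (and `right top` in RTL), per the mapping below.\n 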
*/\n _setTransformOrigin(position) {\n if (!this._transformOriginSelector) {\n return;\n }\n const elements = this._boundingBox.querySelectorAll(this._transformOriginSelector);\n let xOrigin;\n let yOrigin = position.overlayY;\n if (position.overlayX === 'center') {\n xOrigin = 'center';\n } else if (this._isRtl()) {\n xOrigin = position.overlayX === 'start' ? 'right' : 'left';\n } else {\n xOrigin = position.overlayX === 'start' ? 'left' : 'right';\n }\n for (let i = 0; i < elements.length; i++) {\n elements[i].style.transformOrigin = `${xOrigin} ${yOrigin}`;\n }\n }\n /**\n * Gets the position and size of the overlay's sizing container.\n *\n * This method does no measuring and applies no styles so that we can cheaply compute the\n * bounds for all positions and choose the best fit based on these results.\n */\n _calculateBoundingBoxRect(origin, position) {\n const viewport = this._viewportRect;\n const isRtl = this._isRtl();\n let height, top, bottom;\n if (position.overlayY === 'top') {\n // Overlay is opening \"downward\" and thus is bound by the bottom viewport edge.\n top = origin.y;\n height = viewport.height - top + this._viewportMargin;\n } else if (position.overlayY === 'bottom') {\n // Overlay is opening \"upward\" and thus is bound by the top viewport edge. We need to add\n // the viewport margin back in, because the viewport rect is narrowed down to remove the\n // margin, whereas the `origin` position is calculated based on its `DOMRect`.\n bottom = viewport.height - origin.y + this._viewportMargin * 2;\n height = viewport.height - bottom + this._viewportMargin;\n } else {\n // If neither top nor bottom, it means that the overlay is vertically centered on the\n // origin point. Note that we want the position relative to the viewport, rather than\n // the page, which is why we don't use something like `viewport.bottom - origin.y` and\n // `origin.y - viewport.top`.\n const smallestDistanceToViewportEdge = Math.min(viewport.bottom - origin.y + viewport.top, origin.y);\n const previousHeight = this._lastBoundingBoxSize.height;\n height = smallestDistanceToViewportEdge * 2;\n top = origin.y - smallestDistanceToViewportEdge;\n if (height > previousHeight && !this._isInitialRender && !this._growAfterOpen) {\n top = origin.y - previousHeight / 2;\n }\n }\n // The overlay is opening 'right-ward' (the content flows to the right).\n const isBoundedByRightViewportEdge = position.overlayX === 'start' && !isRtl || position.overlayX === 'end' && isRtl;\n // The overlay is opening 'left-ward' (the content flows to the left).\n const isBoundedByLeftViewportEdge = position.overlayX === 'end' && !isRtl || position.overlayX === 'start' && isRtl;\n let width, left, right;\n if (isBoundedByLeftViewportEdge) {\n right = viewport.width - origin.x + this._viewportMargin * 2;\n width = origin.x - this._viewportMargin;\n } else if (isBoundedByRightViewportEdge) {\n left = origin.x;\n width = viewport.right - origin.x;\n } else {\n // If neither start nor end, it means that the overlay is horizontally centered on the\n // origin point. 
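As an\n // illustrative check of the math below (assumed numbers): with `viewport.left = 0`,\n // `viewport.right = 1000` and `origin.x = 300`, the smallest distance to a horizontal edge is\n // `min(1000 - 300 + 0, 300) = 300`, giving a 600px-wide box starting at `left = 0`.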
Note that we want the position relative to the viewport, rather than\n // the page, which is why we don't use something like `viewport.right - origin.x` and\n // `origin.x - viewport.left`.\n const smallestDistanceToViewportEdge = Math.min(viewport.right - origin.x + viewport.left, origin.x);\n const previousWidth = this._lastBoundingBoxSize.width;\n width = smallestDistanceToViewportEdge * 2;\n left = origin.x - smallestDistanceToViewportEdge;\n if (width > previousWidth && !this._isInitialRender && !this._growAfterOpen) {\n left = origin.x - previousWidth / 2;\n }\n }\n return {\n top: top,\n left: left,\n bottom: bottom,\n right: right,\n width,\n height\n };\n }\n /**\n * Sets the position and size of the overlay's sizing wrapper. The wrapper is positioned on the\n * origin's connection point and stretches to the bounds of the viewport.\n *\n * @param origin The point on the origin element where the overlay is connected.\n * @param position The position preference\n */\n _setBoundingBoxStyles(origin, position) {\n const boundingBoxRect = this._calculateBoundingBoxRect(origin, position);\n // It's weird if the overlay *grows* while scrolling, so we take the last size into account\n // when applying a new size.\n if (!this._isInitialRender && !this._growAfterOpen) {\n boundingBoxRect.height = Math.min(boundingBoxRect.height, this._lastBoundingBoxSize.height);\n boundingBoxRect.width = Math.min(boundingBoxRect.width, this._lastBoundingBoxSize.width);\n }\n const styles = {};\n if (this._hasExactPosition()) {\n styles.top = styles.left = '0';\n styles.bottom = styles.right = styles.maxHeight = styles.maxWidth = '';\n styles.width = styles.height = '100%';\n } else {\n const maxHeight = this._overlayRef.getConfig().maxHeight;\n const maxWidth = this._overlayRef.getConfig().maxWidth;\n styles.height = coerceCssPixelValue(boundingBoxRect.height);\n styles.top = coerceCssPixelValue(boundingBoxRect.top);\n styles.bottom = coerceCssPixelValue(boundingBoxRect.bottom);\n styles.width = coerceCssPixelValue(boundingBoxRect.width);\n styles.left = coerceCssPixelValue(boundingBoxRect.left);\n styles.right = coerceCssPixelValue(boundingBoxRect.right);\n // Push the pane content towards the proper direction.\n if (position.overlayX === 'center') {\n styles.alignItems = 'center';\n } else {\n styles.alignItems = position.overlayX === 'end' ? 'flex-end' : 'flex-start';\n }\n if (position.overlayY === 'center') {\n styles.justifyContent = 'center';\n } else {\n styles.justifyContent = position.overlayY === 'bottom' ? 'flex-end' : 'flex-start';\n }\n if (maxHeight) {\n styles.maxHeight = coerceCssPixelValue(maxHeight);\n }\n if (maxWidth) {\n styles.maxWidth = coerceCssPixelValue(maxWidth);\n }\n }\n this._lastBoundingBoxSize = boundingBoxRect;\n extendStyles(this._boundingBox.style, styles);\n }\n /** Resets the styles for the bounding box so that a new positioning can be computed. */\n _resetBoundingBoxStyles() {\n extendStyles(this._boundingBox.style, {\n top: '0',\n left: '0',\n right: '0',\n bottom: '0',\n height: '',\n width: '',\n alignItems: '',\n justifyContent: ''\n });\n }\n /** Resets the styles for the overlay pane so that a new positioning can be computed. */\n _resetOverlayElementStyles() {\n extendStyles(this._pane.style, {\n top: '',\n left: '',\n bottom: '',\n right: '',\n position: '',\n transform: ''\n });\n }\n /** Sets positioning styles to the overlay element. 
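For example\n * (illustrative): a strategy configured with `withDefaultOffsetY(8)` ends up, in the flexible\n * case, with `transform: translateY(8px)` on the pane rather than a `top` value, as described\n * in the comments below.\n 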
*/\n _setOverlayElementStyles(originPoint, position) {\n const styles = {};\n const hasExactPosition = this._hasExactPosition();\n const hasFlexibleDimensions = this._hasFlexibleDimensions;\n const config = this._overlayRef.getConfig();\n if (hasExactPosition) {\n const scrollPosition = this._viewportRuler.getViewportScrollPosition();\n extendStyles(styles, this._getExactOverlayY(position, originPoint, scrollPosition));\n extendStyles(styles, this._getExactOverlayX(position, originPoint, scrollPosition));\n } else {\n styles.position = 'static';\n }\n // Use a transform to apply the offsets. We do this because the `center` positions rely on\n // being in the normal flex flow and setting a `top` / `left` at all will completely throw\n // off the position. We also can't use margins, because they won't have an effect in some\n // cases where the element doesn't have anything to \"push off of\". Finally, this works\n // better both with flexible and non-flexible positioning.\n let transformString = '';\n let offsetX = this._getOffset(position, 'x');\n let offsetY = this._getOffset(position, 'y');\n if (offsetX) {\n transformString += `translateX(${offsetX}px) `;\n }\n if (offsetY) {\n transformString += `translateY(${offsetY}px)`;\n }\n styles.transform = transformString.trim();\n // If a maxWidth or maxHeight is specified on the overlay, we remove them. We do this because\n // we need these values to both be set to \"100%\" for the automatic flexible sizing to work.\n // The maxHeight and maxWidth are set on the boundingBox in order to enforce the constraint.\n // Note that this doesn't apply when we have an exact position, in which case we do want to\n // apply them because they'll be cleared from the bounding box.\n if (config.maxHeight) {\n if (hasExactPosition) {\n styles.maxHeight = coerceCssPixelValue(config.maxHeight);\n } else if (hasFlexibleDimensions) {\n styles.maxHeight = '';\n }\n }\n if (config.maxWidth) {\n if (hasExactPosition) {\n styles.maxWidth = coerceCssPixelValue(config.maxWidth);\n } else if (hasFlexibleDimensions) {\n styles.maxWidth = '';\n }\n }\n extendStyles(this._pane.style, styles);\n }\n /** Gets the exact top/bottom for the overlay when not using flexible sizing or when pushing. */\n _getExactOverlayY(position, originPoint, scrollPosition) {\n // Reset any existing styles. This is necessary in case the\n // preferred position has changed since the last `apply`.\n let styles = {\n top: '',\n bottom: ''\n };\n let overlayPoint = this._getOverlayPoint(originPoint, this._overlayRect, position);\n if (this._isPushed) {\n overlayPoint = this._pushOverlayOnScreen(overlayPoint, this._overlayRect, scrollPosition);\n }\n // We want to set either `top` or `bottom` based on whether the overlay wants to appear\n // above or below the origin and the direction in which the element will expand.\n if (position.overlayY === 'bottom') {\n // When using `bottom`, we adjust the y position such that it is the distance\n // from the bottom of the viewport rather than the top.\n const documentHeight = this._document.documentElement.clientHeight;\n styles.bottom = `${documentHeight - (overlayPoint.y + this._overlayRect.height)}px`;\n } else {\n styles.top = coerceCssPixelValue(overlayPoint.y);\n }\n return styles;\n }\n /** Gets the exact left/right for the overlay when not using flexible sizing or when pushing. */\n _getExactOverlayX(position, originPoint, scrollPosition) {\n // Reset any existing styles. 
This is necessary in case the preferred position has\n // changed since the last `apply`.\n let styles = {\n left: '',\n right: ''\n };\n let overlayPoint = this._getOverlayPoint(originPoint, this._overlayRect, position);\n if (this._isPushed) {\n overlayPoint = this._pushOverlayOnScreen(overlayPoint, this._overlayRect, scrollPosition);\n }\n // We want to set either `left` or `right` based on whether the overlay wants to appear \"before\"\n // or \"after\" the origin, which determines the direction in which the element will expand.\n // For the horizontal axis, the meaning of \"before\" and \"after\" change based on whether the\n // page is in RTL or LTR.\n let horizontalStyleProperty;\n if (this._isRtl()) {\n horizontalStyleProperty = position.overlayX === 'end' ? 'left' : 'right';\n } else {\n horizontalStyleProperty = position.overlayX === 'end' ? 'right' : 'left';\n }\n // When we're setting `right`, we adjust the x position such that it is the distance\n // from the right edge of the viewport rather than the left edge.\n if (horizontalStyleProperty === 'right') {\n const documentWidth = this._document.documentElement.clientWidth;\n styles.right = `${documentWidth - (overlayPoint.x + this._overlayRect.width)}px`;\n } else {\n styles.left = coerceCssPixelValue(overlayPoint.x);\n }\n return styles;\n }\n /**\n * Gets the view properties of the trigger and overlay, including whether they are clipped\n * or completely outside the view of any of the strategy's scrollables.\n */\n _getScrollVisibility() {\n // Note: needs fresh rects since the position could've changed.\n const originBounds = this._getOriginRect();\n const overlayBounds = this._pane.getBoundingClientRect();\n // TODO(jelbourn): instead of needing all of the client rects for these scrolling containers\n // every time, we should be able to use the scrollTop of the containers if the size of those\n // containers hasn't changed.\n const scrollContainerBounds = this._scrollables.map(scrollable => {\n return scrollable.getElementRef().nativeElement.getBoundingClientRect();\n });\n return {\n isOriginClipped: isElementClippedByScrolling(originBounds, scrollContainerBounds),\n isOriginOutsideView: isElementScrolledOutsideView(originBounds, scrollContainerBounds),\n isOverlayClipped: isElementClippedByScrolling(overlayBounds, scrollContainerBounds),\n isOverlayOutsideView: isElementScrolledOutsideView(overlayBounds, scrollContainerBounds)\n };\n }\n /** Subtracts the amount that an element is overflowing on an axis from its length. */\n _subtractOverflows(length, ...overflows) {\n return overflows.reduce((currentValue, currentOverflow) => {\n return currentValue - Math.max(currentOverflow, 0);\n }, length);\n }\n /** Narrows the given viewport rect by the current _viewportMargin. */\n _getNarrowedViewportRect() {\n // We recalculate the viewport rect here ourselves, rather than using the ViewportRuler,\n // because we want to use the `clientWidth` and `clientHeight` as the base. The difference\n // being that the client properties don't include the scrollbar, as opposed to `innerWidth`\n // and `innerHeight` that do. 
This is necessary, because the overlay container uses\n // 100% `width` and `height` which don't include the scrollbar either.\n const width = this._document.documentElement.clientWidth;\n const height = this._document.documentElement.clientHeight;\n const scrollPosition = this._viewportRuler.getViewportScrollPosition();\n return {\n top: scrollPosition.top + this._viewportMargin,\n left: scrollPosition.left + this._viewportMargin,\n right: scrollPosition.left + width - this._viewportMargin,\n bottom: scrollPosition.top + height - this._viewportMargin,\n width: width - 2 * this._viewportMargin,\n height: height - 2 * this._viewportMargin\n };\n }\n /** Whether we're dealing with an RTL context */\n _isRtl() {\n return this._overlayRef.getDirection() === 'rtl';\n }\n /** Determines whether the overlay uses exact or flexible positioning. */\n _hasExactPosition() {\n return !this._hasFlexibleDimensions || this._isPushed;\n }\n /** Retrieves the offset of a position along the x or y axis. */\n _getOffset(position, axis) {\n if (axis === 'x') {\n // We don't do something like `position['offset' + axis]` in\n // order to avoid breaking minifiers that rename properties.\n return position.offsetX == null ? this._offsetX : position.offsetX;\n }\n return position.offsetY == null ? this._offsetY : position.offsetY;\n }\n /** Validates that the current positions match the expected values. */\n _validatePositions() {\n if (typeof ngDevMode === 'undefined' || ngDevMode) {\n if (!this._preferredPositions.length) {\n throw Error('FlexibleConnectedPositionStrategy: At least one position is required.');\n }\n // TODO(crisbeto): remove these once Angular's template type\n // checking is advanced enough to catch these cases.\n this._preferredPositions.forEach(pair => {\n validateHorizontalPosition('originX', pair.originX);\n validateVerticalPosition('originY', pair.originY);\n validateHorizontalPosition('overlayX', pair.overlayX);\n validateVerticalPosition('overlayY', pair.overlayY);\n });\n }\n }\n /** Adds a single CSS class or an array of classes on the overlay panel. */\n _addPanelClasses(cssClasses) {\n if (this._pane) {\n coerceArray(cssClasses).forEach(cssClass => {\n if (cssClass !== '' && this._appliedPanelClasses.indexOf(cssClass) === -1) {\n this._appliedPanelClasses.push(cssClass);\n this._pane.classList.add(cssClass);\n }\n });\n }\n }\n /** Clears the classes that the position strategy has applied from the overlay panel. */\n _clearPanelClasses() {\n if (this._pane) {\n this._appliedPanelClasses.forEach(cssClass => {\n this._pane.classList.remove(cssClass);\n });\n this._appliedPanelClasses = [];\n }\n }\n /** Returns the DOMRect of the current origin. */\n _getOriginRect() {\n const origin = this._origin;\n if (origin instanceof ElementRef) {\n return origin.nativeElement.getBoundingClientRect();\n }\n // Check for Element so SVG elements are also supported.\n if (origin instanceof Element) {\n return origin.getBoundingClientRect();\n }\n const width = origin.width || 0;\n const height = origin.height || 0;\n // If the origin is a point, return a client rect as if it was a 0x0 element at the point.\n return {\n top: origin.y,\n bottom: origin.y + height,\n left: origin.x,\n right: origin.x + width,\n height,\n width\n };\n }\n}\n/** Shallow-extends a stylesheet object with another stylesheet object. 
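For example (illustrative):\n * `extendStyles(pane.style, {top: '8px', left: ''})` copies each own property of the source onto\n * the destination (an empty string clears that style) and returns the destination object.\n 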
*/\nfunction extendStyles(destination, source) {\n for (let key in source) {\n if (source.hasOwnProperty(key)) {\n destination[key] = source[key];\n }\n }\n return destination;\n}\n/**\n * Extracts the pixel value as a number from a value, if it's a number\n * or a CSS pixel string (e.g. `1337px`). Otherwise returns null.\n */\nfunction getPixelValue(input) {\n if (typeof input !== 'number' && input != null) {\n const [value, units] = input.split(cssUnitPattern);\n return !units || units === 'px' ? parseFloat(value) : null;\n }\n return input || null;\n}\n/**\n * Gets a version of an element's bounding `DOMRect` where all the values are rounded down to\n * the nearest pixel. This allows us to account for the cases where there may be sub-pixel\n * deviations in the `DOMRect` returned by the browser (e.g. when zoomed in with a percentage\n * size, see #21350).\n */\nfunction getRoundedBoundingClientRect(clientRect) {\n return {\n top: Math.floor(clientRect.top),\n right: Math.floor(clientRect.right),\n bottom: Math.floor(clientRect.bottom),\n left: Math.floor(clientRect.left),\n width: Math.floor(clientRect.width),\n height: Math.floor(clientRect.height)\n };\n}\n/** Returns whether two `ScrollingVisibility` objects are identical. */\nfunction compareScrollVisibility(a, b) {\n if (a === b) {\n return true;\n }\n return a.isOriginClipped === b.isOriginClipped && a.isOriginOutsideView === b.isOriginOutsideView && a.isOverlayClipped === b.isOverlayClipped && a.isOverlayOutsideView === b.isOverlayOutsideView;\n}\nconst STANDARD_DROPDOWN_BELOW_POSITIONS = [{\n originX: 'start',\n originY: 'bottom',\n overlayX: 'start',\n overlayY: 'top'\n}, {\n originX: 'start',\n originY: 'top',\n overlayX: 'start',\n overlayY: 'bottom'\n}, {\n originX: 'end',\n originY: 'bottom',\n overlayX: 'end',\n overlayY: 'top'\n}, {\n originX: 'end',\n originY: 'top',\n overlayX: 'end',\n overlayY: 'bottom'\n}];\nconst STANDARD_DROPDOWN_ADJACENT_POSITIONS = [{\n originX: 'end',\n originY: 'top',\n overlayX: 'start',\n overlayY: 'top'\n}, {\n originX: 'end',\n originY: 'bottom',\n overlayX: 'start',\n overlayY: 'bottom'\n}, {\n originX: 'start',\n originY: 'top',\n overlayX: 'end',\n overlayY: 'top'\n}, {\n originX: 'start',\n originY: 'bottom',\n overlayX: 'end',\n overlayY: 'bottom'\n}];\n\n/** Class to be added to the overlay pane wrapper. */\nconst wrapperClass = 'cdk-global-overlay-wrapper';\n/**\n * A strategy for positioning overlays. Using this strategy, an overlay is given an\n * explicit position relative to the browser's viewport. We use flexbox, instead of\n * transforms, in order to avoid issues with subpixel rendering which can cause the\n * element to become blurry.\n */\nclass GlobalPositionStrategy {\n constructor() {\n this._cssPosition = 'static';\n this._topOffset = '';\n this._bottomOffset = '';\n this._alignItems = '';\n this._xPosition = '';\n this._xOffset = '';\n this._width = '';\n this._height = '';\n this._isDisposed = false;\n }\n attach(overlayRef) {\n const config = overlayRef.getConfig();\n this._overlayRef = overlayRef;\n if (this._width && !config.width) {\n overlayRef.updateSize({\n width: this._width\n });\n }\n if (this._height && !config.height) {\n overlayRef.updateSize({\n height: this._height\n });\n }\n overlayRef.hostElement.classList.add(wrapperClass);\n this._isDisposed = false;\n }\n /**\n * Sets the top position of the overlay. 
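For example (an illustrative\n * chained usage; `overlay` stands for an injected `Overlay` service):\n * `overlay.position().global().top('16px').centerHorizontally()` places the panel 16px from the\n * top of the viewport and centers it horizontally.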
Clears any previously set vertical position.\n * @param value New top offset.\n */\n top(value = '') {\n this._bottomOffset = '';\n this._topOffset = value;\n this._alignItems = 'flex-start';\n return this;\n }\n /**\n * Sets the left position of the overlay. Clears any previously set horizontal position.\n * @param value New left offset.\n */\n left(value = '') {\n this._xOffset = value;\n this._xPosition = 'left';\n return this;\n }\n /**\n * Sets the bottom position of the overlay. Clears any previously set vertical position.\n * @param value New bottom offset.\n */\n bottom(value = '') {\n this._topOffset = '';\n this._bottomOffset = value;\n this._alignItems = 'flex-end';\n return this;\n }\n /**\n * Sets the right position of the overlay. Clears any previously set horizontal position.\n * @param value New right offset.\n */\n right(value = '') {\n this._xOffset = value;\n this._xPosition = 'right';\n return this;\n }\n /**\n * Sets the overlay to the start of the viewport, depending on the overlay direction.\n * This will be to the left in LTR layouts and to the right in RTL.\n * @param offset Offset from the edge of the screen.\n */\n start(value = '') {\n this._xOffset = value;\n this._xPosition = 'start';\n return this;\n }\n /**\n * Sets the overlay to the end of the viewport, depending on the overlay direction.\n * This will be to the right in LTR layouts and to the left in RTL.\n * @param offset Offset from the edge of the screen.\n */\n end(value = '') {\n this._xOffset = value;\n this._xPosition = 'end';\n return this;\n }\n /**\n * Sets the overlay width and clears any previously set width.\n * @param value New width for the overlay\n * @deprecated Pass the `width` through the `OverlayConfig`.\n * @breaking-change 8.0.0\n */\n width(value = '') {\n if (this._overlayRef) {\n this._overlayRef.updateSize({\n width: value\n });\n } else {\n this._width = value;\n }\n return this;\n }\n /**\n * Sets the overlay height and clears any previously set height.\n * @param value New height for the overlay\n * @deprecated Pass the `height` through the `OverlayConfig`.\n * @breaking-change 8.0.0\n */\n height(value = '') {\n if (this._overlayRef) {\n this._overlayRef.updateSize({\n height: value\n });\n } else {\n this._height = value;\n }\n return this;\n }\n /**\n * Centers the overlay horizontally with an optional offset.\n * Clears any previously set horizontal position.\n *\n * @param offset Overlay offset from the horizontal center.\n */\n centerHorizontally(offset = '') {\n this.left(offset);\n this._xPosition = 'center';\n return this;\n }\n /**\n * Centers the overlay vertically with an optional offset.\n * Clears any previously set vertical position.\n *\n * @param offset Overlay offset from the vertical center.\n */\n centerVertically(offset = '') {\n this.top(offset);\n this._alignItems = 'center';\n return this;\n }\n /**\n * Apply the position to the element.\n * @docs-private\n */\n apply() {\n // Since the overlay ref applies the strategy asynchronously, it could\n // have been disposed before it ends up being applied. 
If that is the\n // case, we shouldn't do anything.\n if (!this._overlayRef || !this._overlayRef.hasAttached()) {\n return;\n }\n const styles = this._overlayRef.overlayElement.style;\n const parentStyles = this._overlayRef.hostElement.style;\n const config = this._overlayRef.getConfig();\n const {\n width,\n height,\n maxWidth,\n maxHeight\n } = config;\n const shouldBeFlushHorizontally = (width === '100%' || width === '100vw') && (!maxWidth || maxWidth === '100%' || maxWidth === '100vw');\n const shouldBeFlushVertically = (height === '100%' || height === '100vh') && (!maxHeight || maxHeight === '100%' || maxHeight === '100vh');\n const xPosition = this._xPosition;\n const xOffset = this._xOffset;\n const isRtl = this._overlayRef.getConfig().direction === 'rtl';\n let marginLeft = '';\n let marginRight = '';\n let justifyContent = '';\n if (shouldBeFlushHorizontally) {\n justifyContent = 'flex-start';\n } else if (xPosition === 'center') {\n justifyContent = 'center';\n if (isRtl) {\n marginRight = xOffset;\n } else {\n marginLeft = xOffset;\n }\n } else if (isRtl) {\n if (xPosition === 'left' || xPosition === 'end') {\n justifyContent = 'flex-end';\n marginLeft = xOffset;\n } else if (xPosition === 'right' || xPosition === 'start') {\n justifyContent = 'flex-start';\n marginRight = xOffset;\n }\n } else if (xPosition === 'left' || xPosition === 'start') {\n justifyContent = 'flex-start';\n marginLeft = xOffset;\n } else if (xPosition === 'right' || xPosition === 'end') {\n justifyContent = 'flex-end';\n marginRight = xOffset;\n }\n styles.position = this._cssPosition;\n styles.marginLeft = shouldBeFlushHorizontally ? '0' : marginLeft;\n styles.marginTop = shouldBeFlushVertically ? '0' : this._topOffset;\n styles.marginBottom = this._bottomOffset;\n styles.marginRight = shouldBeFlushHorizontally ? '0' : marginRight;\n parentStyles.justifyContent = justifyContent;\n parentStyles.alignItems = shouldBeFlushVertically ? 'flex-start' : this._alignItems;\n }\n /**\n * Cleans up the DOM changes from the position strategy.\n * @docs-private\n */\n dispose() {\n if (this._isDisposed || !this._overlayRef) {\n return;\n }\n const styles = this._overlayRef.overlayElement.style;\n const parent = this._overlayRef.hostElement;\n const parentStyles = parent.style;\n parent.classList.remove(wrapperClass);\n parentStyles.justifyContent = parentStyles.alignItems = styles.marginTop = styles.marginBottom = styles.marginLeft = styles.marginRight = styles.position = '';\n this._overlayRef = null;\n this._isDisposed = true;\n }\n}\n\n/** Builder for overlay position strategy. 
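A minimal usage sketch (assumed names:\n * `overlay` is the injected `Overlay` service and `triggerRef` is an `ElementRef` for the\n * trigger element; neither is defined in this file):\n *\n * const strategy = overlay.position()\n * .flexibleConnectedTo(triggerRef)\n * .withPositions([{originX: 'start', originY: 'bottom', overlayX: 'start', overlayY: 'top'}])\n * .withPush(true);\n * const overlayRef = overlay.create({positionStrategy: strategy});\n 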
*/\nclass OverlayPositionBuilder {\n constructor(_viewportRuler, _document, _platform, _overlayContainer) {\n this._viewportRuler = _viewportRuler;\n this._document = _document;\n this._platform = _platform;\n this._overlayContainer = _overlayContainer;\n }\n /**\n * Creates a global position strategy.\n */\n global() {\n return new GlobalPositionStrategy();\n }\n /**\n * Creates a flexible position strategy.\n * @param origin Origin relative to which to position the overlay.\n */\n flexibleConnectedTo(origin) {\n return new FlexibleConnectedPositionStrategy(origin, this._viewportRuler, this._document, this._platform, this._overlayContainer);\n }\n static {\n this.ɵfac = function OverlayPositionBuilder_Factory(t) {\n return new (t || OverlayPositionBuilder)(i0.ɵɵinject(i1.ViewportRuler), i0.ɵɵinject(DOCUMENT), i0.ɵɵinject(i1$1.Platform), i0.ɵɵinject(OverlayContainer));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: OverlayPositionBuilder,\n factory: OverlayPositionBuilder.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(OverlayPositionBuilder, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: i1.ViewportRuler\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: i1$1.Platform\n }, {\n type: OverlayContainer\n }], null);\n})();\n\n/** Next overlay unique ID. */\nlet nextUniqueId = 0;\n// Note that Overlay is *not* scoped to the app root because of the ComponentFactoryResolver\n// which needs to be different depending on where OverlayModule is imported.\n/**\n * Service to create Overlays. Overlays are dynamically added pieces of floating UI, meant to be\n * used as a low-level building block for other components. Dialogs, tooltips, menus,\n * selects, etc. can all be built using overlays. The service should primarily be used by authors\n * of re-usable components rather than developers building end-user applications.\n *\n * An overlay *is* a PortalOutlet, so any kind of Portal can be loaded into one.\n */\nclass Overlay {\n constructor( /** Scrolling strategies that can be used when creating an overlay. 
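For example\n * (illustrative), `overlay.scrollStrategies.reposition()` can be passed as the `scrollStrategy`\n * of an `OverlayConfig` to keep the overlay positioned while the page scrolls.\n 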
*/\n scrollStrategies, _overlayContainer, _componentFactoryResolver, _positionBuilder, _keyboardDispatcher, _injector, _ngZone, _document, _directionality, _location, _outsideClickDispatcher, _animationsModuleType) {\n this.scrollStrategies = scrollStrategies;\n this._overlayContainer = _overlayContainer;\n this._componentFactoryResolver = _componentFactoryResolver;\n this._positionBuilder = _positionBuilder;\n this._keyboardDispatcher = _keyboardDispatcher;\n this._injector = _injector;\n this._ngZone = _ngZone;\n this._document = _document;\n this._directionality = _directionality;\n this._location = _location;\n this._outsideClickDispatcher = _outsideClickDispatcher;\n this._animationsModuleType = _animationsModuleType;\n }\n /**\n * Creates an overlay.\n * @param config Configuration applied to the overlay.\n * @returns Reference to the created overlay.\n */\n create(config) {\n const host = this._createHostElement();\n const pane = this._createPaneElement(host);\n const portalOutlet = this._createPortalOutlet(pane);\n const overlayConfig = new OverlayConfig(config);\n overlayConfig.direction = overlayConfig.direction || this._directionality.value;\n return new OverlayRef(portalOutlet, host, pane, overlayConfig, this._ngZone, this._keyboardDispatcher, this._document, this._location, this._outsideClickDispatcher, this._animationsModuleType === 'NoopAnimations');\n }\n /**\n * Gets a position builder that can be used, via fluent API,\n * to construct and configure a position strategy.\n * @returns An overlay position builder.\n */\n position() {\n return this._positionBuilder;\n }\n /**\n * Creates the DOM element for an overlay and appends it to the overlay container.\n * @returns Newly-created pane element\n */\n _createPaneElement(host) {\n const pane = this._document.createElement('div');\n pane.id = `cdk-overlay-${nextUniqueId++}`;\n pane.classList.add('cdk-overlay-pane');\n host.appendChild(pane);\n return pane;\n }\n /**\n * Creates the host element that wraps around an overlay\n * and can be used for advanced positioning.\n * @returns Newly-create host element.\n */\n _createHostElement() {\n const host = this._document.createElement('div');\n this._overlayContainer.getContainerElement().appendChild(host);\n return host;\n }\n /**\n * Create a DomPortalOutlet into which the overlay content can be loaded.\n * @param pane The DOM element to turn into a portal outlet.\n * @returns A portal outlet for the given DOM element.\n */\n _createPortalOutlet(pane) {\n // We have to resolve the ApplicationRef later in order to allow people\n // to use overlay-based providers during app initialization.\n if (!this._appRef) {\n this._appRef = this._injector.get(ApplicationRef);\n }\n return new DomPortalOutlet(pane, this._componentFactoryResolver, this._appRef, this._injector, this._document);\n }\n static {\n this.ɵfac = function Overlay_Factory(t) {\n return new (t || Overlay)(i0.ɵɵinject(ScrollStrategyOptions), i0.ɵɵinject(OverlayContainer), i0.ɵɵinject(i0.ComponentFactoryResolver), i0.ɵɵinject(OverlayPositionBuilder), i0.ɵɵinject(OverlayKeyboardDispatcher), i0.ɵɵinject(i0.Injector), i0.ɵɵinject(i0.NgZone), i0.ɵɵinject(DOCUMENT), i0.ɵɵinject(i5.Directionality), i0.ɵɵinject(i6.Location), i0.ɵɵinject(OverlayOutsideClickDispatcher), i0.ɵɵinject(ANIMATION_MODULE_TYPE, 8));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: Overlay,\n factory: Overlay.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && 
i0.ɵsetClassMetadata(Overlay, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: ScrollStrategyOptions\n }, {\n type: OverlayContainer\n }, {\n type: i0.ComponentFactoryResolver\n }, {\n type: OverlayPositionBuilder\n }, {\n type: OverlayKeyboardDispatcher\n }, {\n type: i0.Injector\n }, {\n type: i0.NgZone\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: i5.Directionality\n }, {\n type: i6.Location\n }, {\n type: OverlayOutsideClickDispatcher\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [ANIMATION_MODULE_TYPE]\n }, {\n type: Optional\n }]\n }], null);\n})();\n\n/** Default set of positions for the overlay. Follows the behavior of a dropdown. */\nconst defaultPositionList = [{\n originX: 'start',\n originY: 'bottom',\n overlayX: 'start',\n overlayY: 'top'\n}, {\n originX: 'start',\n originY: 'top',\n overlayX: 'start',\n overlayY: 'bottom'\n}, {\n originX: 'end',\n originY: 'top',\n overlayX: 'end',\n overlayY: 'bottom'\n}, {\n originX: 'end',\n originY: 'bottom',\n overlayX: 'end',\n overlayY: 'top'\n}];\n/** Injection token that determines the scroll handling while the connected overlay is open. */\nconst CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY = new InjectionToken('cdk-connected-overlay-scroll-strategy', {\n providedIn: 'root',\n factory: () => {\n const overlay = inject(Overlay);\n return () => overlay.scrollStrategies.reposition();\n }\n});\n/**\n * Directive applied to an element to make it usable as an origin for an Overlay using a\n * ConnectedPositionStrategy.\n */\nclass CdkOverlayOrigin {\n constructor( /** Reference to the element on which the directive is applied. */\n elementRef) {\n this.elementRef = elementRef;\n }\n static {\n this.ɵfac = function CdkOverlayOrigin_Factory(t) {\n return new (t || CdkOverlayOrigin)(i0.ɵɵdirectiveInject(i0.ElementRef));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkOverlayOrigin,\n selectors: [[\"\", \"cdk-overlay-origin\", \"\"], [\"\", \"overlay-origin\", \"\"], [\"\", \"cdkOverlayOrigin\", \"\"]],\n exportAs: [\"cdkOverlayOrigin\"],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkOverlayOrigin, [{\n type: Directive,\n args: [{\n selector: '[cdk-overlay-origin], [overlay-origin], [cdkOverlayOrigin]',\n exportAs: 'cdkOverlayOrigin',\n standalone: true\n }]\n }], () => [{\n type: i0.ElementRef\n }], null);\n})();\n/**\n * Directive to facilitate declarative creation of an\n * Overlay using a FlexibleConnectedPositionStrategy.\n */\nclass CdkConnectedOverlay {\n /** The offset in pixels for the overlay connection point on the x-axis */\n get offsetX() {\n return this._offsetX;\n }\n set offsetX(offsetX) {\n this._offsetX = offsetX;\n if (this._position) {\n this._updatePositionStrategy(this._position);\n }\n }\n /** The offset in pixels for the overlay connection point on the y-axis */\n get offsetY() {\n return this._offsetY;\n }\n set offsetY(offsetY) {\n this._offsetY = offsetY;\n if (this._position) {\n this._updatePositionStrategy(this._position);\n }\n }\n /** Whether the overlay should be disposed of when the user goes backwards/forwards in history. 
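In a template this\n * corresponds to the aliased input, e.g. (illustrative)\n * `<ng-template cdkConnectedOverlay [cdkConnectedOverlayDisposeOnNavigation]="true">`.\n 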
*/\n get disposeOnNavigation() {\n return this._disposeOnNavigation;\n }\n set disposeOnNavigation(value) {\n this._disposeOnNavigation = value;\n }\n // TODO(jelbourn): inputs for size, scroll behavior, animation, etc.\n constructor(_overlay, templateRef, viewContainerRef, scrollStrategyFactory, _dir) {\n this._overlay = _overlay;\n this._dir = _dir;\n this._backdropSubscription = Subscription.EMPTY;\n this._attachSubscription = Subscription.EMPTY;\n this._detachSubscription = Subscription.EMPTY;\n this._positionSubscription = Subscription.EMPTY;\n this._disposeOnNavigation = false;\n this._ngZone = inject(NgZone);\n /** Margin between the overlay and the viewport edges. */\n this.viewportMargin = 0;\n /** Whether the overlay is open. */\n this.open = false;\n /** Whether the overlay can be closed by user interaction. */\n this.disableClose = false;\n /** Whether or not the overlay should attach a backdrop. */\n this.hasBackdrop = false;\n /** Whether or not the overlay should be locked when scrolling. */\n this.lockPosition = false;\n /** Whether the overlay's width and height can be constrained to fit within the viewport. */\n this.flexibleDimensions = false;\n /** Whether the overlay can grow after the initial open when flexible positioning is turned on. */\n this.growAfterOpen = false;\n /** Whether the overlay can be pushed on-screen if none of the provided positions fit. */\n this.push = false;\n /** Event emitted when the backdrop is clicked. */\n this.backdropClick = new EventEmitter();\n /** Event emitted when the position has changed. */\n this.positionChange = new EventEmitter();\n /** Event emitted when the overlay has been attached. */\n this.attach = new EventEmitter();\n /** Event emitted when the overlay has been detached. */\n this.detach = new EventEmitter();\n /** Emits when there are keyboard events that are targeted at the overlay. */\n this.overlayKeydown = new EventEmitter();\n /** Emits when there are mouse outside click events that are targeted at the overlay. */\n this.overlayOutsideClick = new EventEmitter();\n this._templatePortal = new TemplatePortal(templateRef, viewContainerRef);\n this._scrollStrategyFactory = scrollStrategyFactory;\n this.scrollStrategy = this._scrollStrategyFactory();\n }\n /** The associated overlay reference. */\n get overlayRef() {\n return this._overlayRef;\n }\n /** The element's layout direction. */\n get dir() {\n return this._dir ? this._dir.value : 'ltr';\n }\n ngOnDestroy() {\n this._attachSubscription.unsubscribe();\n this._detachSubscription.unsubscribe();\n this._backdropSubscription.unsubscribe();\n this._positionSubscription.unsubscribe();\n if (this._overlayRef) {\n this._overlayRef.dispose();\n }\n }\n ngOnChanges(changes) {\n if (this._position) {\n this._updatePositionStrategy(this._position);\n this._overlayRef.updateSize({\n width: this.width,\n minWidth: this.minWidth,\n height: this.height,\n minHeight: this.minHeight\n });\n if (changes['origin'] && this.open) {\n this._position.apply();\n }\n }\n if (changes['open']) {\n this.open ? 
this._attachOverlay() : this._detachOverlay();\n }\n }\n /** Creates an overlay */\n _createOverlay() {\n if (!this.positions || !this.positions.length) {\n this.positions = defaultPositionList;\n }\n const overlayRef = this._overlayRef = this._overlay.create(this._buildConfig());\n this._attachSubscription = overlayRef.attachments().subscribe(() => this.attach.emit());\n this._detachSubscription = overlayRef.detachments().subscribe(() => this.detach.emit());\n overlayRef.keydownEvents().subscribe(event => {\n this.overlayKeydown.next(event);\n if (event.keyCode === ESCAPE && !this.disableClose && !hasModifierKey(event)) {\n event.preventDefault();\n this._detachOverlay();\n }\n });\n this._overlayRef.outsidePointerEvents().subscribe(event => {\n const origin = this._getOriginElement();\n const target = _getEventTarget(event);\n if (!origin || origin !== target && !origin.contains(target)) {\n this.overlayOutsideClick.next(event);\n }\n });\n }\n /** Builds the overlay config based on the directive's inputs */\n _buildConfig() {\n const positionStrategy = this._position = this.positionStrategy || this._createPositionStrategy();\n const overlayConfig = new OverlayConfig({\n direction: this._dir,\n positionStrategy,\n scrollStrategy: this.scrollStrategy,\n hasBackdrop: this.hasBackdrop,\n disposeOnNavigation: this.disposeOnNavigation\n });\n if (this.width || this.width === 0) {\n overlayConfig.width = this.width;\n }\n if (this.height || this.height === 0) {\n overlayConfig.height = this.height;\n }\n if (this.minWidth || this.minWidth === 0) {\n overlayConfig.minWidth = this.minWidth;\n }\n if (this.minHeight || this.minHeight === 0) {\n overlayConfig.minHeight = this.minHeight;\n }\n if (this.backdropClass) {\n overlayConfig.backdropClass = this.backdropClass;\n }\n if (this.panelClass) {\n overlayConfig.panelClass = this.panelClass;\n }\n return overlayConfig;\n }\n /** Updates the state of a position strategy, based on the values of the directive inputs. 
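For example (illustrative),\n * a `cdkConnectedOverlayPositions` input of\n * `[{originX: 'start', originY: 'bottom', overlayX: 'start', overlayY: 'top', offsetY: 4}]` is\n * mapped below onto the strategy via `withPositions`, with `offsetY` falling back to the\n * directive-level `cdkConnectedOverlayOffsetY` input when a position does not define its own.\n 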
*/\n _updatePositionStrategy(positionStrategy) {\n const positions = this.positions.map(currentPosition => ({\n originX: currentPosition.originX,\n originY: currentPosition.originY,\n overlayX: currentPosition.overlayX,\n overlayY: currentPosition.overlayY,\n offsetX: currentPosition.offsetX || this.offsetX,\n offsetY: currentPosition.offsetY || this.offsetY,\n panelClass: currentPosition.panelClass || undefined\n }));\n return positionStrategy.setOrigin(this._getOrigin()).withPositions(positions).withFlexibleDimensions(this.flexibleDimensions).withPush(this.push).withGrowAfterOpen(this.growAfterOpen).withViewportMargin(this.viewportMargin).withLockedPosition(this.lockPosition).withTransformOriginOn(this.transformOriginSelector);\n }\n /** Returns the position strategy of the overlay to be set on the overlay config */\n _createPositionStrategy() {\n const strategy = this._overlay.position().flexibleConnectedTo(this._getOrigin());\n this._updatePositionStrategy(strategy);\n return strategy;\n }\n _getOrigin() {\n if (this.origin instanceof CdkOverlayOrigin) {\n return this.origin.elementRef;\n } else {\n return this.origin;\n }\n }\n _getOriginElement() {\n if (this.origin instanceof CdkOverlayOrigin) {\n return this.origin.elementRef.nativeElement;\n }\n if (this.origin instanceof ElementRef) {\n return this.origin.nativeElement;\n }\n if (typeof Element !== 'undefined' && this.origin instanceof Element) {\n return this.origin;\n }\n return null;\n }\n /** Attaches the overlay and subscribes to backdrop clicks if backdrop exists */\n _attachOverlay() {\n if (!this._overlayRef) {\n this._createOverlay();\n } else {\n // Update the overlay size, in case the directive's inputs have changed\n this._overlayRef.getConfig().hasBackdrop = this.hasBackdrop;\n }\n if (!this._overlayRef.hasAttached()) {\n this._overlayRef.attach(this._templatePortal);\n }\n if (this.hasBackdrop) {\n this._backdropSubscription = this._overlayRef.backdropClick().subscribe(event => {\n this.backdropClick.emit(event);\n });\n } else {\n this._backdropSubscription.unsubscribe();\n }\n this._positionSubscription.unsubscribe();\n // Only subscribe to `positionChanges` if requested, because putting\n // together all the information for it can be expensive.\n if (this.positionChange.observers.length > 0) {\n this._positionSubscription = this._position.positionChanges.pipe(takeWhile(() => this.positionChange.observers.length > 0)).subscribe(position => {\n this._ngZone.run(() => this.positionChange.emit(position));\n if (this.positionChange.observers.length === 0) {\n this._positionSubscription.unsubscribe();\n }\n });\n }\n }\n /** Detaches the overlay and unsubscribes to backdrop clicks if backdrop exists */\n _detachOverlay() {\n if (this._overlayRef) {\n this._overlayRef.detach();\n }\n this._backdropSubscription.unsubscribe();\n this._positionSubscription.unsubscribe();\n }\n static {\n this.ɵfac = function CdkConnectedOverlay_Factory(t) {\n return new (t || CdkConnectedOverlay)(i0.ɵɵdirectiveInject(Overlay), i0.ɵɵdirectiveInject(i0.TemplateRef), i0.ɵɵdirectiveInject(i0.ViewContainerRef), i0.ɵɵdirectiveInject(CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY), i0.ɵɵdirectiveInject(i5.Directionality, 8));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: CdkConnectedOverlay,\n selectors: [[\"\", \"cdk-connected-overlay\", \"\"], [\"\", \"connected-overlay\", \"\"], [\"\", \"cdkConnectedOverlay\", \"\"]],\n inputs: {\n origin: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayOrigin\", \"origin\"],\n positions: 
[i0.ɵɵInputFlags.None, \"cdkConnectedOverlayPositions\", \"positions\"],\n positionStrategy: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayPositionStrategy\", \"positionStrategy\"],\n offsetX: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayOffsetX\", \"offsetX\"],\n offsetY: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayOffsetY\", \"offsetY\"],\n width: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayWidth\", \"width\"],\n height: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayHeight\", \"height\"],\n minWidth: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayMinWidth\", \"minWidth\"],\n minHeight: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayMinHeight\", \"minHeight\"],\n backdropClass: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayBackdropClass\", \"backdropClass\"],\n panelClass: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayPanelClass\", \"panelClass\"],\n viewportMargin: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayViewportMargin\", \"viewportMargin\"],\n scrollStrategy: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayScrollStrategy\", \"scrollStrategy\"],\n open: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayOpen\", \"open\"],\n disableClose: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayDisableClose\", \"disableClose\"],\n transformOriginSelector: [i0.ɵɵInputFlags.None, \"cdkConnectedOverlayTransformOriginOn\", \"transformOriginSelector\"],\n hasBackdrop: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"cdkConnectedOverlayHasBackdrop\", \"hasBackdrop\", booleanAttribute],\n lockPosition: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"cdkConnectedOverlayLockPosition\", \"lockPosition\", booleanAttribute],\n flexibleDimensions: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"cdkConnectedOverlayFlexibleDimensions\", \"flexibleDimensions\", booleanAttribute],\n growAfterOpen: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"cdkConnectedOverlayGrowAfterOpen\", \"growAfterOpen\", booleanAttribute],\n push: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"cdkConnectedOverlayPush\", \"push\", booleanAttribute],\n disposeOnNavigation: [i0.ɵɵInputFlags.HasDecoratorInputTransform, \"cdkConnectedOverlayDisposeOnNavigation\", \"disposeOnNavigation\", booleanAttribute]\n },\n outputs: {\n backdropClick: \"backdropClick\",\n positionChange: \"positionChange\",\n attach: \"attach\",\n detach: \"detach\",\n overlayKeydown: \"overlayKeydown\",\n overlayOutsideClick: \"overlayOutsideClick\"\n },\n exportAs: [\"cdkConnectedOverlay\"],\n standalone: true,\n features: [i0.ɵɵInputTransformsFeature, i0.ɵɵNgOnChangesFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkConnectedOverlay, [{\n type: Directive,\n args: [{\n selector: '[cdk-connected-overlay], [connected-overlay], [cdkConnectedOverlay]',\n exportAs: 'cdkConnectedOverlay',\n standalone: true\n }]\n }], () => [{\n type: Overlay\n }, {\n type: i0.TemplateRef\n }, {\n type: i0.ViewContainerRef\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY]\n }]\n }, {\n type: i5.Directionality,\n decorators: [{\n type: Optional\n }]\n }], {\n origin: [{\n type: Input,\n args: ['cdkConnectedOverlayOrigin']\n }],\n positions: [{\n type: Input,\n args: ['cdkConnectedOverlayPositions']\n }],\n positionStrategy: [{\n type: Input,\n args: ['cdkConnectedOverlayPositionStrategy']\n }],\n offsetX: [{\n type: Input,\n args: ['cdkConnectedOverlayOffsetX']\n }],\n offsetY: [{\n type: Input,\n args: ['cdkConnectedOverlayOffsetY']\n }],\n width: [{\n type: Input,\n args: ['cdkConnectedOverlayWidth']\n 
}],\n height: [{\n type: Input,\n args: ['cdkConnectedOverlayHeight']\n }],\n minWidth: [{\n type: Input,\n args: ['cdkConnectedOverlayMinWidth']\n }],\n minHeight: [{\n type: Input,\n args: ['cdkConnectedOverlayMinHeight']\n }],\n backdropClass: [{\n type: Input,\n args: ['cdkConnectedOverlayBackdropClass']\n }],\n panelClass: [{\n type: Input,\n args: ['cdkConnectedOverlayPanelClass']\n }],\n viewportMargin: [{\n type: Input,\n args: ['cdkConnectedOverlayViewportMargin']\n }],\n scrollStrategy: [{\n type: Input,\n args: ['cdkConnectedOverlayScrollStrategy']\n }],\n open: [{\n type: Input,\n args: ['cdkConnectedOverlayOpen']\n }],\n disableClose: [{\n type: Input,\n args: ['cdkConnectedOverlayDisableClose']\n }],\n transformOriginSelector: [{\n type: Input,\n args: ['cdkConnectedOverlayTransformOriginOn']\n }],\n hasBackdrop: [{\n type: Input,\n args: [{\n alias: 'cdkConnectedOverlayHasBackdrop',\n transform: booleanAttribute\n }]\n }],\n lockPosition: [{\n type: Input,\n args: [{\n alias: 'cdkConnectedOverlayLockPosition',\n transform: booleanAttribute\n }]\n }],\n flexibleDimensions: [{\n type: Input,\n args: [{\n alias: 'cdkConnectedOverlayFlexibleDimensions',\n transform: booleanAttribute\n }]\n }],\n growAfterOpen: [{\n type: Input,\n args: [{\n alias: 'cdkConnectedOverlayGrowAfterOpen',\n transform: booleanAttribute\n }]\n }],\n push: [{\n type: Input,\n args: [{\n alias: 'cdkConnectedOverlayPush',\n transform: booleanAttribute\n }]\n }],\n disposeOnNavigation: [{\n type: Input,\n args: [{\n alias: 'cdkConnectedOverlayDisposeOnNavigation',\n transform: booleanAttribute\n }]\n }],\n backdropClick: [{\n type: Output\n }],\n positionChange: [{\n type: Output\n }],\n attach: [{\n type: Output\n }],\n detach: [{\n type: Output\n }],\n overlayKeydown: [{\n type: Output\n }],\n overlayOutsideClick: [{\n type: Output\n }]\n });\n})();\n/** @docs-private */\nfunction CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY_PROVIDER_FACTORY(overlay) {\n return () => overlay.scrollStrategies.reposition();\n}\n/** @docs-private */\nconst CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY_PROVIDER = {\n provide: CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY,\n deps: [Overlay],\n useFactory: CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY_PROVIDER_FACTORY\n};\nclass OverlayModule {\n static {\n this.ɵfac = function OverlayModule_Factory(t) {\n return new (t || OverlayModule)();\n };\n }\n static {\n this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: OverlayModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({\n providers: [Overlay, CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY_PROVIDER],\n imports: [BidiModule, PortalModule, ScrollingModule, ScrollingModule]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(OverlayModule, [{\n type: NgModule,\n args: [{\n imports: [BidiModule, PortalModule, ScrollingModule, CdkConnectedOverlay, CdkOverlayOrigin],\n exports: [CdkConnectedOverlay, CdkOverlayOrigin, ScrollingModule],\n providers: [Overlay, CDK_CONNECTED_OVERLAY_SCROLL_STRATEGY_PROVIDER]\n }]\n }], null, null);\n})();\n\n/**\n * Alternative to OverlayContainer that supports correct displaying of overlay elements in\n * Fullscreen mode\n * https://developer.mozilla.org/en-US/docs/Web/API/Element/requestFullScreen\n *\n * Should be provided in the root component.\n */\nclass FullscreenOverlayContainer extends OverlayContainer {\n constructor(_document, platform) {\n super(_document, platform);\n }\n ngOnDestroy() {\n super.ngOnDestroy();\n if (this._fullScreenEventName && 
this._fullScreenListener) {\n this._document.removeEventListener(this._fullScreenEventName, this._fullScreenListener);\n }\n }\n _createContainer() {\n super._createContainer();\n this._adjustParentForFullscreenChange();\n this._addFullscreenChangeListener(() => this._adjustParentForFullscreenChange());\n }\n _adjustParentForFullscreenChange() {\n if (!this._containerElement) {\n return;\n }\n const fullscreenElement = this.getFullscreenElement();\n const parent = fullscreenElement || this._document.body;\n parent.appendChild(this._containerElement);\n }\n _addFullscreenChangeListener(fn) {\n const eventName = this._getEventName();\n if (eventName) {\n if (this._fullScreenListener) {\n this._document.removeEventListener(eventName, this._fullScreenListener);\n }\n this._document.addEventListener(eventName, fn);\n this._fullScreenListener = fn;\n }\n }\n _getEventName() {\n if (!this._fullScreenEventName) {\n const _document = this._document;\n if (_document.fullscreenEnabled) {\n this._fullScreenEventName = 'fullscreenchange';\n } else if (_document.webkitFullscreenEnabled) {\n this._fullScreenEventName = 'webkitfullscreenchange';\n } else if (_document.mozFullScreenEnabled) {\n this._fullScreenEventName = 'mozfullscreenchange';\n } else if (_document.msFullscreenEnabled) {\n this._fullScreenEventName = 'MSFullscreenChange';\n }\n }\n return this._fullScreenEventName;\n }\n /**\n * When the page is put into fullscreen mode, a specific element is specified.\n * Only that element and its children are visible when in fullscreen mode.\n */\n getFullscreenElement() {\n const _document = this._document;\n return _document.fullscreenElement || _document.webkitFullscreenElement || _document.mozFullScreenElement || _document.msFullscreenElement || null;\n }\n static {\n this.ɵfac = function FullscreenOverlayContainer_Factory(t) {\n return new (t || FullscreenOverlayContainer)(i0.ɵɵinject(DOCUMENT), i0.ɵɵinject(i1$1.Platform));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: FullscreenOverlayContainer,\n factory: FullscreenOverlayContainer.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(FullscreenOverlayContainer, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: i1$1.Platform\n }], null);\n})();\n\n/**\n * Generated bundle index. 
Do not edit.\n */\n\nexport { BlockScrollStrategy, CdkConnectedOverlay, CdkOverlayOrigin, CloseScrollStrategy, ConnectedOverlayPositionChange, ConnectionPositionPair, FlexibleConnectedPositionStrategy, FullscreenOverlayContainer, GlobalPositionStrategy, NoopScrollStrategy, Overlay, OverlayConfig, OverlayContainer, OverlayKeyboardDispatcher, OverlayModule, OverlayOutsideClickDispatcher, OverlayPositionBuilder, OverlayRef, RepositionScrollStrategy, STANDARD_DROPDOWN_ADJACENT_POSITIONS, STANDARD_DROPDOWN_BELOW_POSITIONS, ScrollStrategyOptions, ScrollingVisibility, validateHorizontalPosition, validateVerticalPosition };\n","import * as i1 from '@angular/cdk/a11y';\nimport { A11yModule } from '@angular/cdk/a11y';\nimport * as i1$1 from '@angular/cdk/overlay';\nimport { Overlay, OverlayConfig, OverlayRef, OverlayModule } from '@angular/cdk/overlay';\nimport { Platform, _getFocusedElementPierceShadowDom } from '@angular/cdk/platform';\nimport { BasePortalOutlet, CdkPortalOutlet, ComponentPortal, TemplatePortal, PortalModule } from '@angular/cdk/portal';\nimport { DOCUMENT } from '@angular/common';\nimport * as i0 from '@angular/core';\nimport { inject, ChangeDetectorRef, Component, ViewEncapsulation, ChangeDetectionStrategy, Optional, Inject, ViewChild, InjectionToken, Injector, TemplateRef, Injectable, SkipSelf, NgModule } from '@angular/core';\nimport { ESCAPE, hasModifierKey } from '@angular/cdk/keycodes';\nimport { Subject, defer, of } from 'rxjs';\nimport { Directionality } from '@angular/cdk/bidi';\nimport { startWith } from 'rxjs/operators';\n\n/** Configuration for opening a modal dialog. */\nfunction CdkDialogContainer_ng_template_0_Template(rf, ctx) {}\nclass DialogConfig {\n constructor() {\n /** The ARIA role of the dialog element. */\n this.role = 'dialog';\n /** Optional CSS class or classes applied to the overlay panel. */\n this.panelClass = '';\n /** Whether the dialog has a backdrop. */\n this.hasBackdrop = true;\n /** Optional CSS class or classes applied to the overlay backdrop. */\n this.backdropClass = '';\n /** Whether the dialog closes with the escape key or pointer events outside the panel element. */\n this.disableClose = false;\n /** Width of the dialog. */\n this.width = '';\n /** Height of the dialog. */\n this.height = '';\n /** Data being injected into the child component. */\n this.data = null;\n /** ID of the element that describes the dialog. */\n this.ariaDescribedBy = null;\n /** ID of the element that labels the dialog. */\n this.ariaLabelledBy = null;\n /** Dialog label applied via `aria-label` */\n this.ariaLabel = null;\n /** Whether this is a modal dialog. Used to set the `aria-modal` attribute. */\n this.ariaModal = true;\n /**\n * Where the dialog should focus on open.\n * @breaking-change 14.0.0 Remove boolean option from autoFocus. Use string or\n * AutoFocusTarget instead.\n */\n this.autoFocus = 'first-tabbable';\n /**\n * Whether the dialog should restore focus to the previously-focused element upon closing.\n * Has the following behavior based on the type that is passed in:\n * - `boolean` - when true, will return focus to the element that was focused before the dialog\n * was opened, otherwise won't restore focus at all.\n * - `string` - focus will be restored to the first element that matches the CSS selector.\n * - `HTMLElement` - focus will be restored to the specific element.\n */\n this.restoreFocus = true;\n /**\n * Whether the dialog should close when the user navigates backwards or forwards through browser\n * history. 
This does not apply to navigation via anchor element unless using URL-hash based\n * routing (`HashLocationStrategy` in the Angular router).\n */\n this.closeOnNavigation = true;\n /**\n * Whether the dialog should close when the dialog service is destroyed. This is useful if\n * another service is wrapping the dialog and is managing the destruction instead.\n */\n this.closeOnDestroy = true;\n /**\n * Whether the dialog should close when the underlying overlay is detached. This is useful if\n * another service is wrapping the dialog and is managing the destruction instead. E.g. an\n * external detachment can happen as a result of a scroll strategy triggering it or when the\n * browser location changes.\n */\n this.closeOnOverlayDetachments = true;\n }\n}\nfunction throwDialogContentAlreadyAttachedError() {\n throw Error('Attempting to attach dialog content after content is already attached');\n}\n/**\n * Internal component that wraps user-provided dialog content.\n * @docs-private\n */\nclass CdkDialogContainer extends BasePortalOutlet {\n constructor(_elementRef, _focusTrapFactory, _document, _config, _interactivityChecker, _ngZone, _overlayRef, _focusMonitor) {\n super();\n this._elementRef = _elementRef;\n this._focusTrapFactory = _focusTrapFactory;\n this._config = _config;\n this._interactivityChecker = _interactivityChecker;\n this._ngZone = _ngZone;\n this._overlayRef = _overlayRef;\n this._focusMonitor = _focusMonitor;\n this._platform = inject(Platform);\n /** The class that traps and manages focus within the dialog. */\n this._focusTrap = null;\n /** Element that was focused before the dialog was opened. Save this to restore upon close. */\n this._elementFocusedBeforeDialogWasOpened = null;\n /**\n * Type of interaction that led to the dialog being closed. This is used to determine\n * whether the focus style will be applied when returning focus to its original location\n * after the dialog is closed.\n */\n this._closeInteractionType = null;\n /**\n * Queue of the IDs of the dialog's label element, based on their definition order. The first\n * ID will be used as the `aria-labelledby` value. We use a queue here to handle the case\n * where there are two or more titles in the DOM at a time and the first one is destroyed while\n * the rest are present.\n */\n this._ariaLabelledByQueue = [];\n this._changeDetectorRef = inject(ChangeDetectorRef);\n /**\n * Attaches a DOM portal to the dialog container.\n * @param portal Portal to be attached.\n * @deprecated To be turned into a method.\n * @breaking-change 10.0.0\n */\n this.attachDomPortal = portal => {\n if (this._portalOutlet.hasAttached() && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throwDialogContentAlreadyAttachedError();\n }\n const result = this._portalOutlet.attachDomPortal(portal);\n this._contentAttached();\n return result;\n };\n this._document = _document;\n if (this._config.ariaLabelledBy) {\n this._ariaLabelledByQueue.push(this._config.ariaLabelledBy);\n }\n }\n _addAriaLabelledBy(id) {\n this._ariaLabelledByQueue.push(id);\n this._changeDetectorRef.markForCheck();\n }\n _removeAriaLabelledBy(id) {\n const index = this._ariaLabelledByQueue.indexOf(id);\n if (index > -1) {\n this._ariaLabelledByQueue.splice(index, 1);\n this._changeDetectorRef.markForCheck();\n }\n }\n _contentAttached() {\n this._initializeFocusTrap();\n this._handleBackdropClicks();\n this._captureInitialFocus();\n }\n /**\n * Can be used by child classes to customize the initial focus\n * capturing behavior (e.g. 
if it's tied to an animation).\n */\n _captureInitialFocus() {\n this._trapFocus();\n }\n ngOnDestroy() {\n this._restoreFocus();\n }\n /**\n * Attach a ComponentPortal as content to this dialog container.\n * @param portal Portal to be attached as the dialog content.\n */\n attachComponentPortal(portal) {\n if (this._portalOutlet.hasAttached() && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throwDialogContentAlreadyAttachedError();\n }\n const result = this._portalOutlet.attachComponentPortal(portal);\n this._contentAttached();\n return result;\n }\n /**\n * Attach a TemplatePortal as content to this dialog container.\n * @param portal Portal to be attached as the dialog content.\n */\n attachTemplatePortal(portal) {\n if (this._portalOutlet.hasAttached() && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throwDialogContentAlreadyAttachedError();\n }\n const result = this._portalOutlet.attachTemplatePortal(portal);\n this._contentAttached();\n return result;\n }\n // TODO(crisbeto): this shouldn't be exposed, but there are internal references to it.\n /** Captures focus if it isn't already inside the dialog. */\n _recaptureFocus() {\n if (!this._containsFocus()) {\n this._trapFocus();\n }\n }\n /**\n * Focuses the provided element. If the element is not focusable, it will add a tabIndex\n * attribute to forcefully focus it. The attribute is removed after focus is moved.\n * @param element The element to focus.\n */\n _forceFocus(element, options) {\n if (!this._interactivityChecker.isFocusable(element)) {\n element.tabIndex = -1;\n // The tabindex attribute should be removed to avoid navigating to that element again\n this._ngZone.runOutsideAngular(() => {\n const callback = () => {\n element.removeEventListener('blur', callback);\n element.removeEventListener('mousedown', callback);\n element.removeAttribute('tabindex');\n };\n element.addEventListener('blur', callback);\n element.addEventListener('mousedown', callback);\n });\n }\n element.focus(options);\n }\n /**\n * Focuses the first element that matches the given selector within the focus trap.\n * @param selector The CSS selector for the element to set focus to.\n */\n _focusByCssSelector(selector, options) {\n let elementToFocus = this._elementRef.nativeElement.querySelector(selector);\n if (elementToFocus) {\n this._forceFocus(elementToFocus, options);\n }\n }\n /**\n * Moves the focus inside the focus trap. When autoFocus is not set to 'dialog', if focus\n * cannot be moved then focus will go to the dialog container.\n */\n _trapFocus() {\n const element = this._elementRef.nativeElement;\n // If were to attempt to focus immediately, then the content of the dialog would not yet be\n // ready in instances where change detection has to run first. To deal with this, we simply\n // wait for the microtask queue to be empty when setting focus when autoFocus isn't set to\n // dialog. If the element inside the dialog can't be focused, then the container is focused\n // so the user can't tab into other elements behind it.\n switch (this._config.autoFocus) {\n case false:\n case 'dialog':\n // Ensure that focus is on the dialog container. It's possible that a different\n // component tried to move focus while the open animation was running. See:\n // https://github.com/angular/components/issues/16215. 
Note that we only want to do this\n // if the focus isn't inside the dialog already, because it's possible that the consumer\n // turned off `autoFocus` in order to move focus themselves.\n if (!this._containsFocus()) {\n element.focus();\n }\n break;\n case true:\n case 'first-tabbable':\n this._focusTrap?.focusInitialElementWhenReady().then(focusedSuccessfully => {\n // If we weren't able to find a focusable element in the dialog, then focus the dialog\n // container instead.\n if (!focusedSuccessfully) {\n this._focusDialogContainer();\n }\n });\n break;\n case 'first-heading':\n this._focusByCssSelector('h1, h2, h3, h4, h5, h6, [role=\"heading\"]');\n break;\n default:\n this._focusByCssSelector(this._config.autoFocus);\n break;\n }\n }\n /** Restores focus to the element that was focused before the dialog opened. */\n _restoreFocus() {\n const focusConfig = this._config.restoreFocus;\n let focusTargetElement = null;\n if (typeof focusConfig === 'string') {\n focusTargetElement = this._document.querySelector(focusConfig);\n } else if (typeof focusConfig === 'boolean') {\n focusTargetElement = focusConfig ? this._elementFocusedBeforeDialogWasOpened : null;\n } else if (focusConfig) {\n focusTargetElement = focusConfig;\n }\n // We need the extra check, because IE can set the `activeElement` to null in some cases.\n if (this._config.restoreFocus && focusTargetElement && typeof focusTargetElement.focus === 'function') {\n const activeElement = _getFocusedElementPierceShadowDom();\n const element = this._elementRef.nativeElement;\n // Make sure that focus is still inside the dialog or is on the body (usually because a\n // non-focusable element like the backdrop was clicked) before moving it. It's possible that\n // the consumer moved it themselves before the animation was done, in which case we shouldn't\n // do anything.\n if (!activeElement || activeElement === this._document.body || activeElement === element || element.contains(activeElement)) {\n if (this._focusMonitor) {\n this._focusMonitor.focusVia(focusTargetElement, this._closeInteractionType);\n this._closeInteractionType = null;\n } else {\n focusTargetElement.focus();\n }\n }\n }\n if (this._focusTrap) {\n this._focusTrap.destroy();\n }\n }\n /** Focuses the dialog container. */\n _focusDialogContainer() {\n // Note that there is no focus method when rendering on the server.\n if (this._elementRef.nativeElement.focus) {\n this._elementRef.nativeElement.focus();\n }\n }\n /** Returns whether focus is inside the dialog. */\n _containsFocus() {\n const element = this._elementRef.nativeElement;\n const activeElement = _getFocusedElementPierceShadowDom();\n return element === activeElement || element.contains(activeElement);\n }\n /** Sets up the focus trap. */\n _initializeFocusTrap() {\n if (this._platform.isBrowser) {\n this._focusTrap = this._focusTrapFactory.create(this._elementRef.nativeElement);\n // Save the previously focused element. This element will be re-focused\n // when the dialog closes.\n if (this._document) {\n this._elementFocusedBeforeDialogWasOpened = _getFocusedElementPierceShadowDom();\n }\n }\n }\n /** Sets up the listener that handles clicks on the dialog backdrop. 
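Backdrop clicks close the dialog through `DialogRef` only while `disableClose` is false; when it is true, this handler just recaptures focus inside the dialog. A minimal consumer-side sketch (assumed names: `dialog` is an injected `Dialog`, `MyDialogComponent` is any component):\n *\n *   const ref = dialog.open(MyDialogComponent, {disableClose: true});\n *   ref.backdropClick.subscribe(() => ref.close()); // opt back in to closing on backdrop click\n 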
*/\n _handleBackdropClicks() {\n // Clicking on the backdrop will move focus out of dialog.\n // Recapture it if closing via the backdrop is disabled.\n this._overlayRef.backdropClick().subscribe(() => {\n if (this._config.disableClose) {\n this._recaptureFocus();\n }\n });\n }\n static {\n this.ɵfac = function CdkDialogContainer_Factory(t) {\n return new (t || CdkDialogContainer)(i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(i1.FocusTrapFactory), i0.ɵɵdirectiveInject(DOCUMENT, 8), i0.ɵɵdirectiveInject(DialogConfig), i0.ɵɵdirectiveInject(i1.InteractivityChecker), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(i1$1.OverlayRef), i0.ɵɵdirectiveInject(i1.FocusMonitor));\n };\n }\n static {\n this.ɵcmp = /* @__PURE__ */i0.ɵɵdefineComponent({\n type: CdkDialogContainer,\n selectors: [[\"cdk-dialog-container\"]],\n viewQuery: function CdkDialogContainer_Query(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵviewQuery(CdkPortalOutlet, 7);\n }\n if (rf & 2) {\n let _t;\n i0.ɵɵqueryRefresh(_t = i0.ɵɵloadQuery()) && (ctx._portalOutlet = _t.first);\n }\n },\n hostAttrs: [\"tabindex\", \"-1\", 1, \"cdk-dialog-container\"],\n hostVars: 6,\n hostBindings: function CdkDialogContainer_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵattribute(\"id\", ctx._config.id || null)(\"role\", ctx._config.role)(\"aria-modal\", ctx._config.ariaModal)(\"aria-labelledby\", ctx._config.ariaLabel ? null : ctx._ariaLabelledByQueue[0])(\"aria-label\", ctx._config.ariaLabel)(\"aria-describedby\", ctx._config.ariaDescribedBy || null);\n }\n },\n standalone: true,\n features: [i0.ɵɵInheritDefinitionFeature, i0.ɵɵStandaloneFeature],\n decls: 1,\n vars: 0,\n consts: [[\"cdkPortalOutlet\", \"\"]],\n template: function CdkDialogContainer_Template(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵtemplate(0, CdkDialogContainer_ng_template_0_Template, 0, 0, \"ng-template\", 0);\n }\n },\n dependencies: [CdkPortalOutlet],\n styles: [\".cdk-dialog-container{display:block;width:100%;height:100%;min-height:inherit;max-height:inherit}\"],\n encapsulation: 2\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(CdkDialogContainer, [{\n type: Component,\n args: [{\n selector: 'cdk-dialog-container',\n encapsulation: ViewEncapsulation.None,\n changeDetection: ChangeDetectionStrategy.Default,\n standalone: true,\n imports: [CdkPortalOutlet],\n host: {\n 'class': 'cdk-dialog-container',\n 'tabindex': '-1',\n '[attr.id]': '_config.id || null',\n '[attr.role]': '_config.role',\n '[attr.aria-modal]': '_config.ariaModal',\n '[attr.aria-labelledby]': '_config.ariaLabel ? 
null : _ariaLabelledByQueue[0]',\n '[attr.aria-label]': '_config.ariaLabel',\n '[attr.aria-describedby]': '_config.ariaDescribedBy || null'\n },\n template: \" \\n\",\n styles: [\".cdk-dialog-container{display:block;width:100%;height:100%;min-height:inherit;max-height:inherit}\"]\n }]\n }], () => [{\n type: i0.ElementRef\n }, {\n type: i1.FocusTrapFactory\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DialogConfig]\n }]\n }, {\n type: i1.InteractivityChecker\n }, {\n type: i0.NgZone\n }, {\n type: i1$1.OverlayRef\n }, {\n type: i1.FocusMonitor\n }], {\n _portalOutlet: [{\n type: ViewChild,\n args: [CdkPortalOutlet, {\n static: true\n }]\n }]\n });\n})();\n\n/**\n * Reference to a dialog opened via the Dialog service.\n */\nclass DialogRef {\n constructor(overlayRef, config) {\n this.overlayRef = overlayRef;\n this.config = config;\n /** Emits when the dialog has been closed. */\n this.closed = new Subject();\n this.disableClose = config.disableClose;\n this.backdropClick = overlayRef.backdropClick();\n this.keydownEvents = overlayRef.keydownEvents();\n this.outsidePointerEvents = overlayRef.outsidePointerEvents();\n this.id = config.id; // By the time the dialog is created we are guaranteed to have an ID.\n this.keydownEvents.subscribe(event => {\n if (event.keyCode === ESCAPE && !this.disableClose && !hasModifierKey(event)) {\n event.preventDefault();\n this.close(undefined, {\n focusOrigin: 'keyboard'\n });\n }\n });\n this.backdropClick.subscribe(() => {\n if (!this.disableClose) {\n this.close(undefined, {\n focusOrigin: 'mouse'\n });\n }\n });\n this._detachSubscription = overlayRef.detachments().subscribe(() => {\n // Check specifically for `false`, because we want `undefined` to be treated like `true`.\n if (config.closeOnOverlayDetachments !== false) {\n this.close();\n }\n });\n }\n /**\n * Close the dialog.\n * @param result Optional result to return to the dialog opener.\n * @param options Additional options to customize the closing behavior.\n */\n close(result, options) {\n if (this.containerInstance) {\n const closedSubject = this.closed;\n this.containerInstance._closeInteractionType = options?.focusOrigin || 'program';\n // Drop the detach subscription first since it can be triggered by the\n // `dispose` call and override the result of this closing sequence.\n this._detachSubscription.unsubscribe();\n this.overlayRef.dispose();\n closedSubject.next(result);\n closedSubject.complete();\n this.componentInstance = this.containerInstance = null;\n }\n }\n /** Updates the position of the dialog based on the current position strategy. */\n updatePosition() {\n this.overlayRef.updatePosition();\n return this;\n }\n /**\n * Updates the dialog's width and height.\n * @param width New width of the dialog.\n * @param height New height of the dialog.\n */\n updateSize(width = '', height = '') {\n this.overlayRef.updateSize({\n width,\n height\n });\n return this;\n }\n /** Add a CSS class or an array of classes to the overlay pane. */\n addPanelClass(classes) {\n this.overlayRef.addPanelClass(classes);\n return this;\n }\n /** Remove a CSS class or an array of classes from the overlay pane. */\n removePanelClass(classes) {\n this.overlayRef.removePanelClass(classes);\n return this;\n }\n}\n\n/** Injection token for the Dialog's ScrollStrategy. 
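The default factory returns `overlay.scrollStrategies.block()` (see below). A sketch of overriding it application-wide, assuming the consumer adds a root-level provider:\n *\n *   {provide: DIALOG_SCROLL_STRATEGY, useFactory: () => {\n *     const overlay = inject(Overlay);\n *     return () => overlay.scrollStrategies.reposition();\n *   }}\n 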
*/\nconst DIALOG_SCROLL_STRATEGY = new InjectionToken('DialogScrollStrategy', {\n providedIn: 'root',\n factory: () => {\n const overlay = inject(Overlay);\n return () => overlay.scrollStrategies.block();\n }\n});\n/** Injection token for the Dialog's Data. */\nconst DIALOG_DATA = new InjectionToken('DialogData');\n/** Injection token that can be used to provide default options for the dialog module. */\nconst DEFAULT_DIALOG_CONFIG = new InjectionToken('DefaultDialogConfig');\n/**\n * @docs-private\n * @deprecated No longer used. To be removed.\n * @breaking-change 19.0.0\n */\nfunction DIALOG_SCROLL_STRATEGY_PROVIDER_FACTORY(overlay) {\n return () => overlay.scrollStrategies.block();\n}\n/**\n * @docs-private\n * @deprecated No longer used. To be removed.\n * @breaking-change 19.0.0\n */\nconst DIALOG_SCROLL_STRATEGY_PROVIDER = {\n provide: DIALOG_SCROLL_STRATEGY,\n deps: [Overlay],\n useFactory: DIALOG_SCROLL_STRATEGY_PROVIDER_FACTORY\n};\n\n/** Unique id for the created dialog. */\nlet uniqueId = 0;\nclass Dialog {\n /** Keeps track of the currently-open dialogs. */\n get openDialogs() {\n return this._parentDialog ? this._parentDialog.openDialogs : this._openDialogsAtThisLevel;\n }\n /** Stream that emits when a dialog has been opened. */\n get afterOpened() {\n return this._parentDialog ? this._parentDialog.afterOpened : this._afterOpenedAtThisLevel;\n }\n constructor(_overlay, _injector, _defaultOptions, _parentDialog, _overlayContainer, scrollStrategy) {\n this._overlay = _overlay;\n this._injector = _injector;\n this._defaultOptions = _defaultOptions;\n this._parentDialog = _parentDialog;\n this._overlayContainer = _overlayContainer;\n this._openDialogsAtThisLevel = [];\n this._afterAllClosedAtThisLevel = new Subject();\n this._afterOpenedAtThisLevel = new Subject();\n this._ariaHiddenElements = new Map();\n /**\n * Stream that emits when all open dialog have finished closing.\n * Will emit on subscribe if there are no open dialogs to begin with.\n */\n this.afterAllClosed = defer(() => this.openDialogs.length ? this._getAfterAllClosed() : this._getAfterAllClosed().pipe(startWith(undefined)));\n this._scrollStrategy = scrollStrategy;\n }\n open(componentOrTemplateRef, config) {\n const defaults = this._defaultOptions || new DialogConfig();\n config = {\n ...defaults,\n ...config\n };\n config.id = config.id || `cdk-dialog-${uniqueId++}`;\n if (config.id && this.getDialogById(config.id) && (typeof ngDevMode === 'undefined' || ngDevMode)) {\n throw Error(`Dialog with id \"${config.id}\" exists already. 
The dialog id must be unique.`);\n }\n const overlayConfig = this._getOverlayConfig(config);\n const overlayRef = this._overlay.create(overlayConfig);\n const dialogRef = new DialogRef(overlayRef, config);\n const dialogContainer = this._attachContainer(overlayRef, dialogRef, config);\n dialogRef.containerInstance = dialogContainer;\n this._attachDialogContent(componentOrTemplateRef, dialogRef, dialogContainer, config);\n // If this is the first dialog that we're opening, hide all the non-overlay content.\n if (!this.openDialogs.length) {\n this._hideNonDialogContentFromAssistiveTechnology();\n }\n this.openDialogs.push(dialogRef);\n dialogRef.closed.subscribe(() => this._removeOpenDialog(dialogRef, true));\n this.afterOpened.next(dialogRef);\n return dialogRef;\n }\n /**\n * Closes all of the currently-open dialogs.\n */\n closeAll() {\n reverseForEach(this.openDialogs, dialog => dialog.close());\n }\n /**\n * Finds an open dialog by its id.\n * @param id ID to use when looking up the dialog.\n */\n getDialogById(id) {\n return this.openDialogs.find(dialog => dialog.id === id);\n }\n ngOnDestroy() {\n // Make one pass over all the dialogs that need to be untracked, but should not be closed. We\n // want to stop tracking the open dialog even if it hasn't been closed, because the tracking\n // determines when `aria-hidden` is removed from elements outside the dialog.\n reverseForEach(this._openDialogsAtThisLevel, dialog => {\n // Check for `false` specifically since we want `undefined` to be interpreted as `true`.\n if (dialog.config.closeOnDestroy === false) {\n this._removeOpenDialog(dialog, false);\n }\n });\n // Make a second pass and close the remaining dialogs. We do this second pass in order to\n // correctly dispatch the `afterAllClosed` event in case we have a mixed array of dialogs\n // that should be closed and dialogs that should not.\n reverseForEach(this._openDialogsAtThisLevel, dialog => dialog.close());\n this._afterAllClosedAtThisLevel.complete();\n this._afterOpenedAtThisLevel.complete();\n this._openDialogsAtThisLevel = [];\n }\n /**\n * Creates an overlay config from a dialog config.\n * @param config The dialog configuration.\n * @returns The overlay configuration.\n */\n _getOverlayConfig(config) {\n const state = new OverlayConfig({\n positionStrategy: config.positionStrategy || this._overlay.position().global().centerHorizontally().centerVertically(),\n scrollStrategy: config.scrollStrategy || this._scrollStrategy(),\n panelClass: config.panelClass,\n hasBackdrop: config.hasBackdrop,\n direction: config.direction,\n minWidth: config.minWidth,\n minHeight: config.minHeight,\n maxWidth: config.maxWidth,\n maxHeight: config.maxHeight,\n width: config.width,\n height: config.height,\n disposeOnNavigation: config.closeOnNavigation\n });\n if (config.backdropClass) {\n state.backdropClass = config.backdropClass;\n }\n return state;\n }\n /**\n * Attaches a dialog container to a dialog's already-created overlay.\n * @param overlay Reference to the dialog's underlying overlay.\n * @param config The dialog configuration.\n * @returns A promise resolving to a ComponentRef for the attached container.\n */\n _attachContainer(overlay, dialogRef, config) {\n const userInjector = config.injector || config.viewContainerRef?.injector;\n const providers = [{\n provide: DialogConfig,\n useValue: config\n }, {\n provide: DialogRef,\n useValue: dialogRef\n }, {\n provide: OverlayRef,\n useValue: overlay\n }];\n let containerType;\n if (config.container) {\n if (typeof config.container 
=== 'function') {\n containerType = config.container;\n } else {\n containerType = config.container.type;\n providers.push(...config.container.providers(config));\n }\n } else {\n containerType = CdkDialogContainer;\n }\n const containerPortal = new ComponentPortal(containerType, config.viewContainerRef, Injector.create({\n parent: userInjector || this._injector,\n providers\n }), config.componentFactoryResolver);\n const containerRef = overlay.attach(containerPortal);\n return containerRef.instance;\n }\n /**\n * Attaches the user-provided component to the already-created dialog container.\n * @param componentOrTemplateRef The type of component being loaded into the dialog,\n * or a TemplateRef to instantiate as the content.\n * @param dialogRef Reference to the dialog being opened.\n * @param dialogContainer Component that is going to wrap the dialog content.\n * @param config Configuration used to open the dialog.\n */\n _attachDialogContent(componentOrTemplateRef, dialogRef, dialogContainer, config) {\n if (componentOrTemplateRef instanceof TemplateRef) {\n const injector = this._createInjector(config, dialogRef, dialogContainer, undefined);\n let context = {\n $implicit: config.data,\n dialogRef\n };\n if (config.templateContext) {\n context = {\n ...context,\n ...(typeof config.templateContext === 'function' ? config.templateContext() : config.templateContext)\n };\n }\n dialogContainer.attachTemplatePortal(new TemplatePortal(componentOrTemplateRef, null, context, injector));\n } else {\n const injector = this._createInjector(config, dialogRef, dialogContainer, this._injector);\n const contentRef = dialogContainer.attachComponentPortal(new ComponentPortal(componentOrTemplateRef, config.viewContainerRef, injector, config.componentFactoryResolver));\n dialogRef.componentRef = contentRef;\n dialogRef.componentInstance = contentRef.instance;\n }\n }\n /**\n * Creates a custom injector to be used inside the dialog. 
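The injector always provides `DIALOG_DATA` and `DialogRef` (see the providers array below), so a loaded component can, for example, read its data and close itself with a result (a consumer-side sketch; `result` is whatever value should be returned):\n *\n *   readonly data = inject(DIALOG_DATA);\n *   readonly dialogRef = inject(DialogRef);\n *   finish(result) { this.dialogRef.close(result); }\n *\n * 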
This allows a component loaded inside\n * of a dialog to close itself and, optionally, to return a value.\n * @param config Config object that is used to construct the dialog.\n * @param dialogRef Reference to the dialog being opened.\n * @param dialogContainer Component that is going to wrap the dialog content.\n * @param fallbackInjector Injector to use as a fallback when a lookup fails in the custom\n * dialog injector, if the user didn't provide a custom one.\n * @returns The custom injector that can be used inside the dialog.\n */\n _createInjector(config, dialogRef, dialogContainer, fallbackInjector) {\n const userInjector = config.injector || config.viewContainerRef?.injector;\n const providers = [{\n provide: DIALOG_DATA,\n useValue: config.data\n }, {\n provide: DialogRef,\n useValue: dialogRef\n }];\n if (config.providers) {\n if (typeof config.providers === 'function') {\n providers.push(...config.providers(dialogRef, config, dialogContainer));\n } else {\n providers.push(...config.providers);\n }\n }\n if (config.direction && (!userInjector || !userInjector.get(Directionality, null, {\n optional: true\n }))) {\n providers.push({\n provide: Directionality,\n useValue: {\n value: config.direction,\n change: of()\n }\n });\n }\n return Injector.create({\n parent: userInjector || fallbackInjector,\n providers\n });\n }\n /**\n * Removes a dialog from the array of open dialogs.\n * @param dialogRef Dialog to be removed.\n * @param emitEvent Whether to emit an event if this is the last dialog.\n */\n _removeOpenDialog(dialogRef, emitEvent) {\n const index = this.openDialogs.indexOf(dialogRef);\n if (index > -1) {\n this.openDialogs.splice(index, 1);\n // If all the dialogs were closed, remove/restore the `aria-hidden`\n // to a the siblings and emit to the `afterAllClosed` stream.\n if (!this.openDialogs.length) {\n this._ariaHiddenElements.forEach((previousValue, element) => {\n if (previousValue) {\n element.setAttribute('aria-hidden', previousValue);\n } else {\n element.removeAttribute('aria-hidden');\n }\n });\n this._ariaHiddenElements.clear();\n if (emitEvent) {\n this._getAfterAllClosed().next();\n }\n }\n }\n }\n /** Hides all of the content that isn't an overlay from assistive technology. */\n _hideNonDialogContentFromAssistiveTechnology() {\n const overlayContainer = this._overlayContainer.getContainerElement();\n // Ensure that the overlay container is attached to the DOM.\n if (overlayContainer.parentElement) {\n const siblings = overlayContainer.parentElement.children;\n for (let i = siblings.length - 1; i > -1; i--) {\n const sibling = siblings[i];\n if (sibling !== overlayContainer && sibling.nodeName !== 'SCRIPT' && sibling.nodeName !== 'STYLE' && !sibling.hasAttribute('aria-live')) {\n this._ariaHiddenElements.set(sibling, sibling.getAttribute('aria-hidden'));\n sibling.setAttribute('aria-hidden', 'true');\n }\n }\n }\n }\n _getAfterAllClosed() {\n const parent = this._parentDialog;\n return parent ? 
parent._getAfterAllClosed() : this._afterAllClosedAtThisLevel;\n }\n static {\n this.ɵfac = function Dialog_Factory(t) {\n return new (t || Dialog)(i0.ɵɵinject(i1$1.Overlay), i0.ɵɵinject(i0.Injector), i0.ɵɵinject(DEFAULT_DIALOG_CONFIG, 8), i0.ɵɵinject(Dialog, 12), i0.ɵɵinject(i1$1.OverlayContainer), i0.ɵɵinject(DIALOG_SCROLL_STRATEGY));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: Dialog,\n factory: Dialog.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(Dialog, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: i1$1.Overlay\n }, {\n type: i0.Injector\n }, {\n type: DialogConfig,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [DEFAULT_DIALOG_CONFIG]\n }]\n }, {\n type: Dialog,\n decorators: [{\n type: Optional\n }, {\n type: SkipSelf\n }]\n }, {\n type: i1$1.OverlayContainer\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [DIALOG_SCROLL_STRATEGY]\n }]\n }], null);\n})();\n/**\n * Executes a callback against all elements in an array while iterating in reverse.\n * Useful if the array is being modified as it is being iterated.\n */\nfunction reverseForEach(items, callback) {\n let i = items.length;\n while (i--) {\n callback(items[i]);\n }\n}\nclass DialogModule {\n static {\n this.ɵfac = function DialogModule_Factory(t) {\n return new (t || DialogModule)();\n };\n }\n static {\n this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: DialogModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({\n providers: [Dialog],\n imports: [OverlayModule, PortalModule, A11yModule,\n // Re-export the PortalModule so that people extending the `CdkDialogContainer`\n // don't have to remember to import it or be faced with an unhelpful error.\n PortalModule]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(DialogModule, [{\n type: NgModule,\n args: [{\n imports: [OverlayModule, PortalModule, A11yModule, CdkDialogContainer],\n exports: [\n // Re-export the PortalModule so that people extending the `CdkDialogContainer`\n // don't have to remember to import it or be faced with an unhelpful error.\n PortalModule, CdkDialogContainer],\n providers: [Dialog]\n }]\n }], null, null);\n})();\n\n/**\n * Generated bundle index. 
Do not edit.\n */\n\nexport { CdkDialogContainer, DEFAULT_DIALOG_CONFIG, DIALOG_DATA, DIALOG_SCROLL_STRATEGY, DIALOG_SCROLL_STRATEGY_PROVIDER, DIALOG_SCROLL_STRATEGY_PROVIDER_FACTORY, Dialog, DialogConfig, DialogModule, DialogRef, throwDialogContentAlreadyAttachedError };\n","import * as i1$1 from '@angular/cdk/overlay';\nimport { Overlay, OverlayModule } from '@angular/cdk/overlay';\nimport * as i2 from '@angular/common';\nimport { DOCUMENT } from '@angular/common';\nimport * as i0 from '@angular/core';\nimport { EventEmitter, ANIMATION_MODULE_TYPE, Component, ViewEncapsulation, ChangeDetectionStrategy, Optional, Inject, InjectionToken, inject, Injectable, SkipSelf, Directive, Input, NgModule } from '@angular/core';\nimport * as i1 from '@angular/cdk/a11y';\nimport { CdkDialogContainer, Dialog, DialogConfig, DialogModule } from '@angular/cdk/dialog';\nimport { coerceNumberProperty } from '@angular/cdk/coercion';\nimport { CdkPortalOutlet, PortalModule } from '@angular/cdk/portal';\nimport { Subject, merge, defer } from 'rxjs';\nimport { filter, take, startWith } from 'rxjs/operators';\nimport { ESCAPE, hasModifierKey } from '@angular/cdk/keycodes';\nimport { MatCommonModule } from '@angular/material/core';\nimport { trigger, state, style, transition, group, animate, query, animateChild } from '@angular/animations';\n\n/**\n * Configuration for opening a modal dialog with the MatDialog service.\n */\nfunction MatDialogContainer_ng_template_2_Template(rf, ctx) {}\nclass MatDialogConfig {\n constructor() {\n /** The ARIA role of the dialog element. */\n this.role = 'dialog';\n /** Custom class for the overlay pane. */\n this.panelClass = '';\n /** Whether the dialog has a backdrop. */\n this.hasBackdrop = true;\n /** Custom class for the backdrop. */\n this.backdropClass = '';\n /** Whether the user can use escape or clicking on the backdrop to close the modal. */\n this.disableClose = false;\n /** Width of the dialog. */\n this.width = '';\n /** Height of the dialog. */\n this.height = '';\n /** Data being injected into the child component. */\n this.data = null;\n /** ID of the element that describes the dialog. */\n this.ariaDescribedBy = null;\n /** ID of the element that labels the dialog. */\n this.ariaLabelledBy = null;\n /** Aria label to assign to the dialog element. */\n this.ariaLabel = null;\n /** Whether this is a modal dialog. Used to set the `aria-modal` attribute. */\n this.ariaModal = true;\n /**\n * Where the dialog should focus on open.\n * @breaking-change 14.0.0 Remove boolean option from autoFocus. Use string or\n * AutoFocusTarget instead.\n */\n this.autoFocus = 'first-tabbable';\n /**\n * Whether the dialog should restore focus to the\n * previously-focused element, after it's closed.\n */\n this.restoreFocus = true;\n /** Whether to wait for the opening animation to finish before trapping focus. */\n this.delayFocusTrap = true;\n /**\n * Whether the dialog should close when the user goes backwards/forwards in history.\n * Note that this usually doesn't include clicking on links (unless the user is using\n * the `HashLocationStrategy`).\n */\n this.closeOnNavigation = true;\n // TODO(jelbourn): add configuration for lifecycle hooks, ARIA labelling.\n }\n}\n\n/** Class added when the dialog is open. */\nconst OPEN_CLASS = 'mdc-dialog--open';\n/** Class added while the dialog is opening. */\nconst OPENING_CLASS = 'mdc-dialog--opening';\n/** Class added while the dialog is closing. 
*/\nconst CLOSING_CLASS = 'mdc-dialog--closing';\n/** Duration of the opening animation in milliseconds. */\nconst OPEN_ANIMATION_DURATION = 150;\n/** Duration of the closing animation in milliseconds. */\nconst CLOSE_ANIMATION_DURATION = 75;\nclass MatDialogContainer extends CdkDialogContainer {\n constructor(elementRef, focusTrapFactory, _document, dialogConfig, interactivityChecker, ngZone, overlayRef, _animationMode, focusMonitor) {\n super(elementRef, focusTrapFactory, _document, dialogConfig, interactivityChecker, ngZone, overlayRef, focusMonitor);\n this._animationMode = _animationMode;\n /** Emits when an animation state changes. */\n this._animationStateChanged = new EventEmitter();\n /** Whether animations are enabled. */\n this._animationsEnabled = this._animationMode !== 'NoopAnimations';\n /** Number of actions projected in the dialog. */\n this._actionSectionCount = 0;\n /** Host element of the dialog container component. */\n this._hostElement = this._elementRef.nativeElement;\n /** Duration of the dialog open animation. */\n this._enterAnimationDuration = this._animationsEnabled ? parseCssTime(this._config.enterAnimationDuration) ?? OPEN_ANIMATION_DURATION : 0;\n /** Duration of the dialog close animation. */\n this._exitAnimationDuration = this._animationsEnabled ? parseCssTime(this._config.exitAnimationDuration) ?? CLOSE_ANIMATION_DURATION : 0;\n /** Current timer for dialog animations. */\n this._animationTimer = null;\n /**\n * Completes the dialog open by clearing potential animation classes, trapping\n * focus and emitting an opened event.\n */\n this._finishDialogOpen = () => {\n this._clearAnimationClasses();\n this._openAnimationDone(this._enterAnimationDuration);\n };\n /**\n * Completes the dialog close by clearing potential animation classes, restoring\n * focus and emitting a closed event.\n */\n this._finishDialogClose = () => {\n this._clearAnimationClasses();\n this._animationStateChanged.emit({\n state: 'closed',\n totalTime: this._exitAnimationDuration\n });\n };\n }\n _contentAttached() {\n // Delegate to the original dialog-container initialization (i.e. saving the\n // previous element, setting up the focus trap and moving focus to the container).\n super._contentAttached();\n // Note: Usually we would be able to use the MDC dialog foundation here to handle\n // the dialog animation for us, but there are a few reasons why we just leverage\n // their styles and not use the runtime foundation code:\n // 1. Foundation does not allow us to disable animations.\n // 2. Foundation contains unnecessary features we don't need and aren't\n // tree-shakeable. e.g. background scrim, keyboard event handlers for ESC button.\n // 3. Foundation uses unnecessary timers for animations to work around limitations\n // in React's `setState` mechanism.\n // https://github.com/material-components/material-components-web/pull/3682.\n this._startOpenAnimation();\n }\n /** Starts the dialog open animation if enabled. 
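Animations are skipped entirely under `NoopAnimations`; otherwise the durations come from the config's `enterAnimationDuration`/`exitAnimationDuration`, falling back to 150ms and 75ms. A consumer-side sketch (assumed names: `matDialog` is an injected `MatDialog`, `MyDialogComponent` is any component):\n *\n *   matDialog.open(MyDialogComponent, {enterAnimationDuration: '300ms', exitAnimationDuration: 0});\n 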
*/\n _startOpenAnimation() {\n this._animationStateChanged.emit({\n state: 'opening',\n totalTime: this._enterAnimationDuration\n });\n if (this._animationsEnabled) {\n this._hostElement.style.setProperty(TRANSITION_DURATION_PROPERTY, `${this._enterAnimationDuration}ms`);\n // We need to give the `setProperty` call from above some time to be applied.\n // One would expect that the open class is added once the animation finished, but MDC\n // uses the open class in combination with the opening class to start the animation.\n this._requestAnimationFrame(() => this._hostElement.classList.add(OPENING_CLASS, OPEN_CLASS));\n this._waitForAnimationToComplete(this._enterAnimationDuration, this._finishDialogOpen);\n } else {\n this._hostElement.classList.add(OPEN_CLASS);\n // Note: We could immediately finish the dialog opening here with noop animations,\n // but we defer until next tick so that consumers can subscribe to `afterOpened`.\n // Executing this immediately would mean that `afterOpened` emits synchronously\n // on `dialog.open` before the consumer had a change to subscribe to `afterOpened`.\n Promise.resolve().then(() => this._finishDialogOpen());\n }\n }\n /**\n * Starts the exit animation of the dialog if enabled. This method is\n * called by the dialog ref.\n */\n _startExitAnimation() {\n this._animationStateChanged.emit({\n state: 'closing',\n totalTime: this._exitAnimationDuration\n });\n this._hostElement.classList.remove(OPEN_CLASS);\n if (this._animationsEnabled) {\n this._hostElement.style.setProperty(TRANSITION_DURATION_PROPERTY, `${this._exitAnimationDuration}ms`);\n // We need to give the `setProperty` call from above some time to be applied.\n this._requestAnimationFrame(() => this._hostElement.classList.add(CLOSING_CLASS));\n this._waitForAnimationToComplete(this._exitAnimationDuration, this._finishDialogClose);\n } else {\n // This subscription to the `OverlayRef#backdropClick` observable in the `DialogRef` is\n // set up before any user can subscribe to the backdrop click. The subscription triggers\n // the dialog close and this method synchronously. If we'd synchronously emit the `CLOSED`\n // animation state event if animations are disabled, the overlay would be disposed\n // immediately and all other subscriptions to `DialogRef#backdropClick` would be silently\n // skipped. We work around this by waiting with the dialog close until the next tick when\n // all subscriptions have been fired as expected. This is not an ideal solution, but\n // there doesn't seem to be any other good way. Alternatives that have been considered:\n // 1. Deferring `DialogRef.close`. This could be a breaking change due to a new microtask.\n // Also this issue is specific to the MDC implementation where the dialog could\n // technically be closed synchronously. In the non-MDC one, Angular animations are used\n // and closing always takes at least a tick.\n // 2. Ensuring that user subscriptions to `backdropClick`, `keydownEvents` in the dialog\n // ref are first. 
This would solve the issue, but has the risk of memory leaks and also\n // doesn't solve the case where consumers call `DialogRef.close` in their subscriptions.\n // Based on the fact that this is specific to the MDC-based implementation of the dialog\n // animations, the defer is applied here.\n Promise.resolve().then(() => this._finishDialogClose());\n }\n }\n /**\n * Updates the number action sections.\n * @param delta Increase/decrease in the number of sections.\n */\n _updateActionSectionCount(delta) {\n this._actionSectionCount += delta;\n this._changeDetectorRef.markForCheck();\n }\n /** Clears all dialog animation classes. */\n _clearAnimationClasses() {\n this._hostElement.classList.remove(OPENING_CLASS, CLOSING_CLASS);\n }\n _waitForAnimationToComplete(duration, callback) {\n if (this._animationTimer !== null) {\n clearTimeout(this._animationTimer);\n }\n // Note that we want this timer to run inside the NgZone, because we want\n // the related events like `afterClosed` to be inside the zone as well.\n this._animationTimer = setTimeout(callback, duration);\n }\n /** Runs a callback in `requestAnimationFrame`, if available. */\n _requestAnimationFrame(callback) {\n this._ngZone.runOutsideAngular(() => {\n if (typeof requestAnimationFrame === 'function') {\n requestAnimationFrame(callback);\n } else {\n callback();\n }\n });\n }\n _captureInitialFocus() {\n if (!this._config.delayFocusTrap) {\n this._trapFocus();\n }\n }\n /**\n * Callback for when the open dialog animation has finished. Intended to\n * be called by sub-classes that use different animation implementations.\n */\n _openAnimationDone(totalTime) {\n if (this._config.delayFocusTrap) {\n this._trapFocus();\n }\n this._animationStateChanged.next({\n state: 'opened',\n totalTime\n });\n }\n ngOnDestroy() {\n super.ngOnDestroy();\n if (this._animationTimer !== null) {\n clearTimeout(this._animationTimer);\n }\n }\n attachComponentPortal(portal) {\n // When a component is passed into the dialog, the host element interrupts\n // the `display:flex` from affecting the dialog title, content, and\n // actions. To fix this, we make the component host `display: contents` by\n // marking its host with the `mat-mdc-dialog-component-host` class.\n //\n // Note that this problem does not exist when a template ref is used since\n // the title, contents, and actions are then nested directly under the\n // dialog surface.\n const ref = super.attachComponentPortal(portal);\n ref.location.nativeElement.classList.add('mat-mdc-dialog-component-host');\n return ref;\n }\n static {\n this.ɵfac = function MatDialogContainer_Factory(t) {\n return new (t || MatDialogContainer)(i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(i1.FocusTrapFactory), i0.ɵɵdirectiveInject(DOCUMENT, 8), i0.ɵɵdirectiveInject(MatDialogConfig), i0.ɵɵdirectiveInject(i1.InteractivityChecker), i0.ɵɵdirectiveInject(i0.NgZone), i0.ɵɵdirectiveInject(i1$1.OverlayRef), i0.ɵɵdirectiveInject(ANIMATION_MODULE_TYPE, 8), i0.ɵɵdirectiveInject(i1.FocusMonitor));\n };\n }\n static {\n this.ɵcmp = /* @__PURE__ */i0.ɵɵdefineComponent({\n type: MatDialogContainer,\n selectors: [[\"mat-dialog-container\"]],\n hostAttrs: [\"tabindex\", \"-1\", 1, \"mat-mdc-dialog-container\", \"mdc-dialog\"],\n hostVars: 10,\n hostBindings: function MatDialogContainer_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵhostProperty(\"id\", ctx._config.id);\n i0.ɵɵattribute(\"aria-modal\", ctx._config.ariaModal)(\"role\", ctx._config.role)(\"aria-labelledby\", ctx._config.ariaLabel ? 
null : ctx._ariaLabelledByQueue[0])(\"aria-label\", ctx._config.ariaLabel)(\"aria-describedby\", ctx._config.ariaDescribedBy || null);\n i0.ɵɵclassProp(\"_mat-animation-noopable\", !ctx._animationsEnabled)(\"mat-mdc-dialog-container-with-actions\", ctx._actionSectionCount > 0);\n }\n },\n standalone: true,\n features: [i0.ɵɵInheritDefinitionFeature, i0.ɵɵStandaloneFeature],\n decls: 3,\n vars: 0,\n consts: [[1, \"mdc-dialog__container\"], [1, \"mat-mdc-dialog-surface\", \"mdc-dialog__surface\"], [\"cdkPortalOutlet\", \"\"]],\n template: function MatDialogContainer_Template(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵelementStart(0, \"div\", 0)(1, \"div\", 1);\n i0.ɵɵtemplate(2, MatDialogContainer_ng_template_2_Template, 0, 0, \"ng-template\", 2);\n i0.ɵɵelementEnd()();\n }\n },\n dependencies: [CdkPortalOutlet],\n styles: [\".mdc-elevation-overlay{position:absolute;border-radius:inherit;pointer-events:none;opacity:var(--mdc-elevation-overlay-opacity, 0);transition:opacity 280ms cubic-bezier(0.4, 0, 0.2, 1)}.mdc-dialog,.mdc-dialog__scrim{position:fixed;top:0;left:0;align-items:center;justify-content:center;box-sizing:border-box;width:100%;height:100%}.mdc-dialog{display:none;z-index:var(--mdc-dialog-z-index, 7)}.mdc-dialog .mdc-dialog__content{padding:20px 24px 20px 24px}.mdc-dialog .mdc-dialog__surface{min-width:280px}@media(max-width: 592px){.mdc-dialog .mdc-dialog__surface{max-width:calc(100vw - 32px)}}@media(min-width: 592px){.mdc-dialog .mdc-dialog__surface{max-width:560px}}.mdc-dialog .mdc-dialog__surface{max-height:calc(100% - 32px)}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-width:none}@media(max-width: 960px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-height:560px;width:560px}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{right:-12px}}@media(max-width: 720px)and (max-width: 672px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{width:calc(100vw - 112px)}}@media(max-width: 720px)and (min-width: 672px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{width:560px}}@media(max-width: 720px)and (max-height: 720px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-height:calc(100vh - 160px)}}@media(max-width: 720px)and (min-height: 720px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-height:560px}}@media(max-width: 720px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{right:-12px}}@media(max-width: 720px)and (max-height: 400px),(max-width: 600px),(min-width: 720px)and (max-height: 400px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{height:100%;max-height:100vh;max-width:100vw;width:100vw;border-radius:0}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{order:-1;left:-12px}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__header{padding:0 16px 9px;justify-content:flex-start}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__title{margin-left:calc(16px - 2 * 12px)}}@media(min-width: 960px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{width:calc(100vw - 400px)}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{right:-12px}}.mdc-dialog.mdc-dialog__scrim--hidden 
.mdc-dialog__scrim{opacity:0}.mdc-dialog__scrim{opacity:0;z-index:-1}.mdc-dialog__container{display:flex;flex-direction:row;align-items:center;justify-content:space-around;box-sizing:border-box;height:100%;opacity:0;pointer-events:none}.mdc-dialog__surface{position:relative;display:flex;flex-direction:column;flex-grow:0;flex-shrink:0;box-sizing:border-box;max-width:100%;max-height:100%;pointer-events:auto;overflow-y:auto;outline:0;transform:scale(0.8)}.mdc-dialog__surface .mdc-elevation-overlay{width:100%;height:100%;top:0;left:0}[dir=rtl] .mdc-dialog__surface,.mdc-dialog__surface[dir=rtl]{text-align:right}@media screen and (forced-colors: active),(-ms-high-contrast: active){.mdc-dialog__surface{outline:2px solid windowText}}.mdc-dialog__surface::before{position:absolute;box-sizing:border-box;width:100%;height:100%;top:0;left:0;border:2px solid rgba(0,0,0,0);border-radius:inherit;content:\\\"\\\";pointer-events:none}@media screen and (forced-colors: active){.mdc-dialog__surface::before{border-color:CanvasText}}@media screen and (-ms-high-contrast: active),screen and (-ms-high-contrast: none){.mdc-dialog__surface::before{content:none}}.mdc-dialog__title{display:block;margin-top:0;position:relative;flex-shrink:0;box-sizing:border-box;margin:0 0 1px;padding:0 24px 9px}.mdc-dialog__title::before{display:inline-block;width:0;height:40px;content:\\\"\\\";vertical-align:0}[dir=rtl] .mdc-dialog__title,.mdc-dialog__title[dir=rtl]{text-align:right}.mdc-dialog--scrollable .mdc-dialog__title{margin-bottom:1px;padding-bottom:15px}.mdc-dialog--fullscreen .mdc-dialog__header{align-items:baseline;border-bottom:1px solid rgba(0,0,0,0);display:inline-flex;justify-content:space-between;padding:0 24px 9px;z-index:1}@media screen and (forced-colors: active){.mdc-dialog--fullscreen .mdc-dialog__header{border-bottom-color:CanvasText}}.mdc-dialog--fullscreen .mdc-dialog__header .mdc-dialog__close{right:-12px}.mdc-dialog--fullscreen .mdc-dialog__title{margin-bottom:0;padding:0;border-bottom:0}.mdc-dialog--fullscreen.mdc-dialog--scrollable .mdc-dialog__title{border-bottom:0;margin-bottom:0}.mdc-dialog--fullscreen .mdc-dialog__close{top:5px}.mdc-dialog--fullscreen.mdc-dialog--scrollable .mdc-dialog__actions{border-top:1px solid rgba(0,0,0,0)}@media screen and (forced-colors: active){.mdc-dialog--fullscreen.mdc-dialog--scrollable .mdc-dialog__actions{border-top-color:CanvasText}}.mdc-dialog--fullscreen--titleless .mdc-dialog__close{margin-top:4px}.mdc-dialog--fullscreen--titleless.mdc-dialog--scrollable .mdc-dialog__close{margin-top:0}.mdc-dialog__content{flex-grow:1;box-sizing:border-box;margin:0;overflow:auto}.mdc-dialog__content>:first-child{margin-top:0}.mdc-dialog__content>:last-child{margin-bottom:0}.mdc-dialog__title+.mdc-dialog__content,.mdc-dialog__header+.mdc-dialog__content{padding-top:0}.mdc-dialog--scrollable .mdc-dialog__title+.mdc-dialog__content{padding-top:8px;padding-bottom:8px}.mdc-dialog__content .mdc-deprecated-list:first-child:last-child{padding:6px 0 0}.mdc-dialog--scrollable .mdc-dialog__content .mdc-deprecated-list:first-child:last-child{padding:0}.mdc-dialog__actions{display:flex;position:relative;flex-shrink:0;flex-wrap:wrap;align-items:center;justify-content:flex-end;box-sizing:border-box;min-height:52px;margin:0;padding:8px;border-top:1px solid rgba(0,0,0,0)}@media screen and (forced-colors: active){.mdc-dialog__actions{border-top-color:CanvasText}}.mdc-dialog--stacked 
.mdc-dialog__actions{flex-direction:column;align-items:flex-end}.mdc-dialog__button{margin-left:8px;margin-right:0;max-width:100%;text-align:right}[dir=rtl] .mdc-dialog__button,.mdc-dialog__button[dir=rtl]{margin-left:0;margin-right:8px}.mdc-dialog__button:first-child{margin-left:0;margin-right:0}[dir=rtl] .mdc-dialog__button:first-child,.mdc-dialog__button:first-child[dir=rtl]{margin-left:0;margin-right:0}[dir=rtl] .mdc-dialog__button,.mdc-dialog__button[dir=rtl]{text-align:left}.mdc-dialog--stacked .mdc-dialog__button:not(:first-child){margin-top:12px}.mdc-dialog--open,.mdc-dialog--opening,.mdc-dialog--closing{display:flex}.mdc-dialog--opening .mdc-dialog__scrim{transition:opacity 150ms linear}.mdc-dialog--opening .mdc-dialog__container{transition:opacity 75ms linear,transform 150ms 0ms cubic-bezier(0, 0, 0.2, 1)}.mdc-dialog--closing .mdc-dialog__scrim,.mdc-dialog--closing .mdc-dialog__container{transition:opacity 75ms linear}.mdc-dialog--closing .mdc-dialog__container{transform:none}.mdc-dialog--closing .mdc-dialog__surface{transform:none}.mdc-dialog--open .mdc-dialog__scrim{opacity:1}.mdc-dialog--open .mdc-dialog__container{opacity:1}.mdc-dialog--open .mdc-dialog__surface{transform:none}.mdc-dialog--open.mdc-dialog__surface-scrim--shown .mdc-dialog__surface-scrim{opacity:1}.mdc-dialog--open.mdc-dialog__surface-scrim--hiding .mdc-dialog__surface-scrim{transition:opacity 75ms linear}.mdc-dialog--open.mdc-dialog__surface-scrim--showing .mdc-dialog__surface-scrim{transition:opacity 150ms linear}.mdc-dialog__surface-scrim{display:none;opacity:0;position:absolute;width:100%;height:100%;z-index:1}.mdc-dialog__surface-scrim--shown .mdc-dialog__surface-scrim,.mdc-dialog__surface-scrim--showing .mdc-dialog__surface-scrim,.mdc-dialog__surface-scrim--hiding .mdc-dialog__surface-scrim{display:block}.mdc-dialog-scroll-lock{overflow:hidden}.mdc-dialog--no-content-padding .mdc-dialog__content{padding:0}.mdc-dialog--sheet .mdc-dialog__container .mdc-dialog__close{right:12px;top:9px;position:absolute;z-index:1}.mdc-dialog__scrim--removed{pointer-events:none}.mdc-dialog__scrim--removed .mdc-dialog__scrim,.mdc-dialog__scrim--removed .mdc-dialog__surface-scrim{display:none}.mat-mdc-dialog-content{max-height:65vh}.mat-mdc-dialog-container{position:static;display:block}.mat-mdc-dialog-container,.mat-mdc-dialog-container .mdc-dialog__container,.mat-mdc-dialog-container .mdc-dialog__surface{max-height:inherit;min-height:inherit;min-width:inherit;max-width:inherit}.mat-mdc-dialog-container .mdc-dialog__surface{width:100%;height:100%}.mat-mdc-dialog-component-host{display:contents}.mat-mdc-dialog-container{--mdc-dialog-container-elevation: var(--mdc-dialog-container-elevation-shadow);outline:0}.mat-mdc-dialog-container .mdc-dialog__surface{background-color:var(--mdc-dialog-container-color, white)}.mat-mdc-dialog-container .mdc-dialog__surface{box-shadow:var(--mdc-dialog-container-elevation, 0px 11px 15px -7px rgba(0, 0, 0, 0.2), 0px 24px 38px 3px rgba(0, 0, 0, 0.14), 0px 9px 46px 8px rgba(0, 0, 0, 0.12))}.mat-mdc-dialog-container .mdc-dialog__surface{border-radius:var(--mdc-dialog-container-shape, 4px)}.mat-mdc-dialog-container .mdc-dialog__title{font-family:var(--mdc-dialog-subhead-font, Roboto, sans-serif);line-height:var(--mdc-dialog-subhead-line-height, 1.5rem);font-size:var(--mdc-dialog-subhead-size, 1rem);font-weight:var(--mdc-dialog-subhead-weight, 400);letter-spacing:var(--mdc-dialog-subhead-tracking, 0.03125em)}.mat-mdc-dialog-container .mdc-dialog__title{color:var(--mdc-dialog-subhead-color, rgba(0, 0, 
0, 0.87))}.mat-mdc-dialog-container .mdc-dialog__content{font-family:var(--mdc-dialog-supporting-text-font, Roboto, sans-serif);line-height:var(--mdc-dialog-supporting-text-line-height, 1.5rem);font-size:var(--mdc-dialog-supporting-text-size, 1rem);font-weight:var(--mdc-dialog-supporting-text-weight, 400);letter-spacing:var(--mdc-dialog-supporting-text-tracking, 0.03125em)}.mat-mdc-dialog-container .mdc-dialog__content{color:var(--mdc-dialog-supporting-text-color, rgba(0, 0, 0, 0.6))}.mat-mdc-dialog-container .mdc-dialog__container{transition:opacity linear var(--mat-dialog-transition-duration, 0ms)}.mat-mdc-dialog-container .mdc-dialog__surface{transition:transform var(--mat-dialog-transition-duration, 0ms) 0ms cubic-bezier(0, 0, 0.2, 1)}.mat-mdc-dialog-container._mat-animation-noopable .mdc-dialog__container,.mat-mdc-dialog-container._mat-animation-noopable .mdc-dialog__surface{transition:none}.cdk-overlay-pane.mat-mdc-dialog-panel{max-width:var(--mat-dialog-container-max-width, 80vw);min-width:var(--mat-dialog-container-min-width, 0)}@media(max-width: 599px){.cdk-overlay-pane.mat-mdc-dialog-panel{max-width:var(--mat-dialog-container-small-max-width, 80vw)}}.mat-mdc-dialog-title{padding:var(--mat-dialog-headline-padding, 0 24px 9px)}.mat-mdc-dialog-content{display:block}.mat-mdc-dialog-container .mat-mdc-dialog-content{padding:var(--mat-dialog-content-padding, 20px 24px)}.mat-mdc-dialog-container-with-actions .mat-mdc-dialog-content{padding:var(--mat-dialog-with-actions-content-padding, 20px 24px)}.mat-mdc-dialog-container .mat-mdc-dialog-title+.mat-mdc-dialog-content{padding-top:0}.mat-mdc-dialog-actions{padding:var(--mat-dialog-actions-padding, 8px);justify-content:var(--mat-dialog-actions-alignment, start)}.mat-mdc-dialog-actions.mat-mdc-dialog-actions-align-start,.mat-mdc-dialog-actions[align=start]{justify-content:start}.mat-mdc-dialog-actions.mat-mdc-dialog-actions-align-center,.mat-mdc-dialog-actions[align=center]{justify-content:center}.mat-mdc-dialog-actions.mat-mdc-dialog-actions-align-end,.mat-mdc-dialog-actions[align=end]{justify-content:flex-end}.mat-mdc-dialog-actions .mat-button-base+.mat-button-base,.mat-mdc-dialog-actions .mat-mdc-button-base+.mat-mdc-button-base{margin-left:8px}[dir=rtl] .mat-mdc-dialog-actions .mat-button-base+.mat-button-base,[dir=rtl] .mat-mdc-dialog-actions .mat-mdc-button-base+.mat-mdc-button-base{margin-left:0;margin-right:8px}\"],\n encapsulation: 2\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogContainer, [{\n type: Component,\n args: [{\n selector: 'mat-dialog-container',\n encapsulation: ViewEncapsulation.None,\n changeDetection: ChangeDetectionStrategy.Default,\n standalone: true,\n imports: [CdkPortalOutlet],\n host: {\n 'class': 'mat-mdc-dialog-container mdc-dialog',\n 'tabindex': '-1',\n '[attr.aria-modal]': '_config.ariaModal',\n '[id]': '_config.id',\n '[attr.role]': '_config.role',\n '[attr.aria-labelledby]': '_config.ariaLabel ? 
null : _ariaLabelledByQueue[0]',\n '[attr.aria-label]': '_config.ariaLabel',\n '[attr.aria-describedby]': '_config.ariaDescribedBy || null',\n '[class._mat-animation-noopable]': '!_animationsEnabled',\n '[class.mat-mdc-dialog-container-with-actions]': '_actionSectionCount > 0'\n },\n template: \"\\n\",\n styles: [\".mdc-elevation-overlay{position:absolute;border-radius:inherit;pointer-events:none;opacity:var(--mdc-elevation-overlay-opacity, 0);transition:opacity 280ms cubic-bezier(0.4, 0, 0.2, 1)}.mdc-dialog,.mdc-dialog__scrim{position:fixed;top:0;left:0;align-items:center;justify-content:center;box-sizing:border-box;width:100%;height:100%}.mdc-dialog{display:none;z-index:var(--mdc-dialog-z-index, 7)}.mdc-dialog .mdc-dialog__content{padding:20px 24px 20px 24px}.mdc-dialog .mdc-dialog__surface{min-width:280px}@media(max-width: 592px){.mdc-dialog .mdc-dialog__surface{max-width:calc(100vw - 32px)}}@media(min-width: 592px){.mdc-dialog .mdc-dialog__surface{max-width:560px}}.mdc-dialog .mdc-dialog__surface{max-height:calc(100% - 32px)}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-width:none}@media(max-width: 960px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-height:560px;width:560px}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{right:-12px}}@media(max-width: 720px)and (max-width: 672px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{width:calc(100vw - 112px)}}@media(max-width: 720px)and (min-width: 672px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{width:560px}}@media(max-width: 720px)and (max-height: 720px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-height:calc(100vh - 160px)}}@media(max-width: 720px)and (min-height: 720px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{max-height:560px}}@media(max-width: 720px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{right:-12px}}@media(max-width: 720px)and (max-height: 400px),(max-width: 600px),(min-width: 720px)and (max-height: 400px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{height:100%;max-height:100vh;max-width:100vw;width:100vw;border-radius:0}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{order:-1;left:-12px}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__header{padding:0 16px 9px;justify-content:flex-start}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__title{margin-left:calc(16px - 2 * 12px)}}@media(min-width: 960px){.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface{width:calc(100vw - 400px)}.mdc-dialog.mdc-dialog--fullscreen .mdc-dialog__surface .mdc-dialog__close{right:-12px}}.mdc-dialog.mdc-dialog__scrim--hidden .mdc-dialog__scrim{opacity:0}.mdc-dialog__scrim{opacity:0;z-index:-1}.mdc-dialog__container{display:flex;flex-direction:row;align-items:center;justify-content:space-around;box-sizing:border-box;height:100%;opacity:0;pointer-events:none}.mdc-dialog__surface{position:relative;display:flex;flex-direction:column;flex-grow:0;flex-shrink:0;box-sizing:border-box;max-width:100%;max-height:100%;pointer-events:auto;overflow-y:auto;outline:0;transform:scale(0.8)}.mdc-dialog__surface .mdc-elevation-overlay{width:100%;height:100%;top:0;left:0}[dir=rtl] .mdc-dialog__surface,.mdc-dialog__surface[dir=rtl]{text-align:right}@media screen and (forced-colors: active),(-ms-high-contrast: active){.mdc-dialog__surface{outline:2px solid 
windowText}}.mdc-dialog__surface::before{position:absolute;box-sizing:border-box;width:100%;height:100%;top:0;left:0;border:2px solid rgba(0,0,0,0);border-radius:inherit;content:\\\"\\\";pointer-events:none}@media screen and (forced-colors: active){.mdc-dialog__surface::before{border-color:CanvasText}}@media screen and (-ms-high-contrast: active),screen and (-ms-high-contrast: none){.mdc-dialog__surface::before{content:none}}.mdc-dialog__title{display:block;margin-top:0;position:relative;flex-shrink:0;box-sizing:border-box;margin:0 0 1px;padding:0 24px 9px}.mdc-dialog__title::before{display:inline-block;width:0;height:40px;content:\\\"\\\";vertical-align:0}[dir=rtl] .mdc-dialog__title,.mdc-dialog__title[dir=rtl]{text-align:right}.mdc-dialog--scrollable .mdc-dialog__title{margin-bottom:1px;padding-bottom:15px}.mdc-dialog--fullscreen .mdc-dialog__header{align-items:baseline;border-bottom:1px solid rgba(0,0,0,0);display:inline-flex;justify-content:space-between;padding:0 24px 9px;z-index:1}@media screen and (forced-colors: active){.mdc-dialog--fullscreen .mdc-dialog__header{border-bottom-color:CanvasText}}.mdc-dialog--fullscreen .mdc-dialog__header .mdc-dialog__close{right:-12px}.mdc-dialog--fullscreen .mdc-dialog__title{margin-bottom:0;padding:0;border-bottom:0}.mdc-dialog--fullscreen.mdc-dialog--scrollable .mdc-dialog__title{border-bottom:0;margin-bottom:0}.mdc-dialog--fullscreen .mdc-dialog__close{top:5px}.mdc-dialog--fullscreen.mdc-dialog--scrollable .mdc-dialog__actions{border-top:1px solid rgba(0,0,0,0)}@media screen and (forced-colors: active){.mdc-dialog--fullscreen.mdc-dialog--scrollable .mdc-dialog__actions{border-top-color:CanvasText}}.mdc-dialog--fullscreen--titleless .mdc-dialog__close{margin-top:4px}.mdc-dialog--fullscreen--titleless.mdc-dialog--scrollable .mdc-dialog__close{margin-top:0}.mdc-dialog__content{flex-grow:1;box-sizing:border-box;margin:0;overflow:auto}.mdc-dialog__content>:first-child{margin-top:0}.mdc-dialog__content>:last-child{margin-bottom:0}.mdc-dialog__title+.mdc-dialog__content,.mdc-dialog__header+.mdc-dialog__content{padding-top:0}.mdc-dialog--scrollable .mdc-dialog__title+.mdc-dialog__content{padding-top:8px;padding-bottom:8px}.mdc-dialog__content .mdc-deprecated-list:first-child:last-child{padding:6px 0 0}.mdc-dialog--scrollable .mdc-dialog__content .mdc-deprecated-list:first-child:last-child{padding:0}.mdc-dialog__actions{display:flex;position:relative;flex-shrink:0;flex-wrap:wrap;align-items:center;justify-content:flex-end;box-sizing:border-box;min-height:52px;margin:0;padding:8px;border-top:1px solid rgba(0,0,0,0)}@media screen and (forced-colors: active){.mdc-dialog__actions{border-top-color:CanvasText}}.mdc-dialog--stacked .mdc-dialog__actions{flex-direction:column;align-items:flex-end}.mdc-dialog__button{margin-left:8px;margin-right:0;max-width:100%;text-align:right}[dir=rtl] .mdc-dialog__button,.mdc-dialog__button[dir=rtl]{margin-left:0;margin-right:8px}.mdc-dialog__button:first-child{margin-left:0;margin-right:0}[dir=rtl] .mdc-dialog__button:first-child,.mdc-dialog__button:first-child[dir=rtl]{margin-left:0;margin-right:0}[dir=rtl] .mdc-dialog__button,.mdc-dialog__button[dir=rtl]{text-align:left}.mdc-dialog--stacked .mdc-dialog__button:not(:first-child){margin-top:12px}.mdc-dialog--open,.mdc-dialog--opening,.mdc-dialog--closing{display:flex}.mdc-dialog--opening .mdc-dialog__scrim{transition:opacity 150ms linear}.mdc-dialog--opening .mdc-dialog__container{transition:opacity 75ms linear,transform 150ms 0ms cubic-bezier(0, 0, 0.2, 
1)}.mdc-dialog--closing .mdc-dialog__scrim,.mdc-dialog--closing .mdc-dialog__container{transition:opacity 75ms linear}.mdc-dialog--closing .mdc-dialog__container{transform:none}.mdc-dialog--closing .mdc-dialog__surface{transform:none}.mdc-dialog--open .mdc-dialog__scrim{opacity:1}.mdc-dialog--open .mdc-dialog__container{opacity:1}.mdc-dialog--open .mdc-dialog__surface{transform:none}.mdc-dialog--open.mdc-dialog__surface-scrim--shown .mdc-dialog__surface-scrim{opacity:1}.mdc-dialog--open.mdc-dialog__surface-scrim--hiding .mdc-dialog__surface-scrim{transition:opacity 75ms linear}.mdc-dialog--open.mdc-dialog__surface-scrim--showing .mdc-dialog__surface-scrim{transition:opacity 150ms linear}.mdc-dialog__surface-scrim{display:none;opacity:0;position:absolute;width:100%;height:100%;z-index:1}.mdc-dialog__surface-scrim--shown .mdc-dialog__surface-scrim,.mdc-dialog__surface-scrim--showing .mdc-dialog__surface-scrim,.mdc-dialog__surface-scrim--hiding .mdc-dialog__surface-scrim{display:block}.mdc-dialog-scroll-lock{overflow:hidden}.mdc-dialog--no-content-padding .mdc-dialog__content{padding:0}.mdc-dialog--sheet .mdc-dialog__container .mdc-dialog__close{right:12px;top:9px;position:absolute;z-index:1}.mdc-dialog__scrim--removed{pointer-events:none}.mdc-dialog__scrim--removed .mdc-dialog__scrim,.mdc-dialog__scrim--removed .mdc-dialog__surface-scrim{display:none}.mat-mdc-dialog-content{max-height:65vh}.mat-mdc-dialog-container{position:static;display:block}.mat-mdc-dialog-container,.mat-mdc-dialog-container .mdc-dialog__container,.mat-mdc-dialog-container .mdc-dialog__surface{max-height:inherit;min-height:inherit;min-width:inherit;max-width:inherit}.mat-mdc-dialog-container .mdc-dialog__surface{width:100%;height:100%}.mat-mdc-dialog-component-host{display:contents}.mat-mdc-dialog-container{--mdc-dialog-container-elevation: var(--mdc-dialog-container-elevation-shadow);outline:0}.mat-mdc-dialog-container .mdc-dialog__surface{background-color:var(--mdc-dialog-container-color, white)}.mat-mdc-dialog-container .mdc-dialog__surface{box-shadow:var(--mdc-dialog-container-elevation, 0px 11px 15px -7px rgba(0, 0, 0, 0.2), 0px 24px 38px 3px rgba(0, 0, 0, 0.14), 0px 9px 46px 8px rgba(0, 0, 0, 0.12))}.mat-mdc-dialog-container .mdc-dialog__surface{border-radius:var(--mdc-dialog-container-shape, 4px)}.mat-mdc-dialog-container .mdc-dialog__title{font-family:var(--mdc-dialog-subhead-font, Roboto, sans-serif);line-height:var(--mdc-dialog-subhead-line-height, 1.5rem);font-size:var(--mdc-dialog-subhead-size, 1rem);font-weight:var(--mdc-dialog-subhead-weight, 400);letter-spacing:var(--mdc-dialog-subhead-tracking, 0.03125em)}.mat-mdc-dialog-container .mdc-dialog__title{color:var(--mdc-dialog-subhead-color, rgba(0, 0, 0, 0.87))}.mat-mdc-dialog-container .mdc-dialog__content{font-family:var(--mdc-dialog-supporting-text-font, Roboto, sans-serif);line-height:var(--mdc-dialog-supporting-text-line-height, 1.5rem);font-size:var(--mdc-dialog-supporting-text-size, 1rem);font-weight:var(--mdc-dialog-supporting-text-weight, 400);letter-spacing:var(--mdc-dialog-supporting-text-tracking, 0.03125em)}.mat-mdc-dialog-container .mdc-dialog__content{color:var(--mdc-dialog-supporting-text-color, rgba(0, 0, 0, 0.6))}.mat-mdc-dialog-container .mdc-dialog__container{transition:opacity linear var(--mat-dialog-transition-duration, 0ms)}.mat-mdc-dialog-container .mdc-dialog__surface{transition:transform var(--mat-dialog-transition-duration, 0ms) 0ms cubic-bezier(0, 0, 0.2, 1)}.mat-mdc-dialog-container._mat-animation-noopable 
.mdc-dialog__container,.mat-mdc-dialog-container._mat-animation-noopable .mdc-dialog__surface{transition:none}.cdk-overlay-pane.mat-mdc-dialog-panel{max-width:var(--mat-dialog-container-max-width, 80vw);min-width:var(--mat-dialog-container-min-width, 0)}@media(max-width: 599px){.cdk-overlay-pane.mat-mdc-dialog-panel{max-width:var(--mat-dialog-container-small-max-width, 80vw)}}.mat-mdc-dialog-title{padding:var(--mat-dialog-headline-padding, 0 24px 9px)}.mat-mdc-dialog-content{display:block}.mat-mdc-dialog-container .mat-mdc-dialog-content{padding:var(--mat-dialog-content-padding, 20px 24px)}.mat-mdc-dialog-container-with-actions .mat-mdc-dialog-content{padding:var(--mat-dialog-with-actions-content-padding, 20px 24px)}.mat-mdc-dialog-container .mat-mdc-dialog-title+.mat-mdc-dialog-content{padding-top:0}.mat-mdc-dialog-actions{padding:var(--mat-dialog-actions-padding, 8px);justify-content:var(--mat-dialog-actions-alignment, start)}.mat-mdc-dialog-actions.mat-mdc-dialog-actions-align-start,.mat-mdc-dialog-actions[align=start]{justify-content:start}.mat-mdc-dialog-actions.mat-mdc-dialog-actions-align-center,.mat-mdc-dialog-actions[align=center]{justify-content:center}.mat-mdc-dialog-actions.mat-mdc-dialog-actions-align-end,.mat-mdc-dialog-actions[align=end]{justify-content:flex-end}.mat-mdc-dialog-actions .mat-button-base+.mat-button-base,.mat-mdc-dialog-actions .mat-mdc-button-base+.mat-mdc-button-base{margin-left:8px}[dir=rtl] .mat-mdc-dialog-actions .mat-button-base+.mat-button-base,[dir=rtl] .mat-mdc-dialog-actions .mat-mdc-button-base+.mat-mdc-button-base{margin-left:0;margin-right:8px}\"]\n }]\n }], () => [{\n type: i0.ElementRef\n }, {\n type: i1.FocusTrapFactory\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [DOCUMENT]\n }]\n }, {\n type: MatDialogConfig\n }, {\n type: i1.InteractivityChecker\n }, {\n type: i0.NgZone\n }, {\n type: i1$1.OverlayRef\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [ANIMATION_MODULE_TYPE]\n }]\n }, {\n type: i1.FocusMonitor\n }], null);\n})();\nconst TRANSITION_DURATION_PROPERTY = '--mat-dialog-transition-duration';\n// TODO(mmalerba): Remove this function after animation durations are required\n// to be numbers.\n/**\n * Converts a CSS time string to a number in ms. If the given time is already a\n * number, it is assumed to be in ms.\n */\nfunction parseCssTime(time) {\n if (time == null) {\n return null;\n }\n if (typeof time === 'number') {\n return time;\n }\n if (time.endsWith('ms')) {\n return coerceNumberProperty(time.substring(0, time.length - 2));\n }\n if (time.endsWith('s')) {\n return coerceNumberProperty(time.substring(0, time.length - 1)) * 1000;\n }\n if (time === '0') {\n return 0;\n }\n return null; // anything else is invalid.\n}\nvar MatDialogState;\n(function (MatDialogState) {\n MatDialogState[MatDialogState[\"OPEN\"] = 0] = \"OPEN\";\n MatDialogState[MatDialogState[\"CLOSING\"] = 1] = \"CLOSING\";\n MatDialogState[MatDialogState[\"CLOSED\"] = 2] = \"CLOSED\";\n})(MatDialogState || (MatDialogState = {}));\n/**\n * Reference to a dialog opened via the MatDialog service.\n */\nclass MatDialogRef {\n constructor(_ref, config, _containerInstance) {\n this._ref = _ref;\n this._containerInstance = _containerInstance;\n /** Subject for notifying the user that the dialog has finished opening. */\n this._afterOpened = new Subject();\n /** Subject for notifying the user that the dialog has started closing. 
*/\n this._beforeClosed = new Subject();\n /** Current state of the dialog. */\n this._state = MatDialogState.OPEN;\n this.disableClose = config.disableClose;\n this.id = _ref.id;\n // Used to target panels specifically tied to dialogs.\n _ref.addPanelClass('mat-mdc-dialog-panel');\n // Emit when opening animation completes\n _containerInstance._animationStateChanged.pipe(filter(event => event.state === 'opened'), take(1)).subscribe(() => {\n this._afterOpened.next();\n this._afterOpened.complete();\n });\n // Dispose overlay when closing animation is complete\n _containerInstance._animationStateChanged.pipe(filter(event => event.state === 'closed'), take(1)).subscribe(() => {\n clearTimeout(this._closeFallbackTimeout);\n this._finishDialogClose();\n });\n _ref.overlayRef.detachments().subscribe(() => {\n this._beforeClosed.next(this._result);\n this._beforeClosed.complete();\n this._finishDialogClose();\n });\n merge(this.backdropClick(), this.keydownEvents().pipe(filter(event => event.keyCode === ESCAPE && !this.disableClose && !hasModifierKey(event)))).subscribe(event => {\n if (!this.disableClose) {\n event.preventDefault();\n _closeDialogVia(this, event.type === 'keydown' ? 'keyboard' : 'mouse');\n }\n });\n }\n /**\n * Close the dialog.\n * @param dialogResult Optional result to return to the dialog opener.\n */\n close(dialogResult) {\n this._result = dialogResult;\n // Transition the backdrop in parallel to the dialog.\n this._containerInstance._animationStateChanged.pipe(filter(event => event.state === 'closing'), take(1)).subscribe(event => {\n this._beforeClosed.next(dialogResult);\n this._beforeClosed.complete();\n this._ref.overlayRef.detachBackdrop();\n // The logic that disposes of the overlay depends on the exit animation completing, however\n // it isn't guaranteed if the parent view is destroyed while it's running. Add a fallback\n // timeout which will clean everything up if the animation hasn't fired within the specified\n // amount of time plus 100ms. We don't need to run this outside the NgZone, because for the\n // vast majority of cases the timeout will have been cleared before it has the chance to fire.\n this._closeFallbackTimeout = setTimeout(() => this._finishDialogClose(), event.totalTime + 100);\n });\n this._state = MatDialogState.CLOSING;\n this._containerInstance._startExitAnimation();\n }\n /**\n * Gets an observable that is notified when the dialog is finished opening.\n */\n afterOpened() {\n return this._afterOpened;\n }\n /**\n * Gets an observable that is notified when the dialog is finished closing.\n */\n afterClosed() {\n return this._ref.closed;\n }\n /**\n * Gets an observable that is notified when the dialog has started closing.\n */\n beforeClosed() {\n return this._beforeClosed;\n }\n /**\n * Gets an observable that emits when the overlay's backdrop has been clicked.\n */\n backdropClick() {\n return this._ref.backdropClick;\n }\n /**\n * Gets an observable that emits when keydown events are targeted on the overlay.\n */\n keydownEvents() {\n return this._ref.keydownEvents;\n }\n /**\n * Updates the dialog's position.\n * @param position New dialog position.\n */\n updatePosition(position) {\n let strategy = this._ref.config.positionStrategy;\n if (position && (position.left || position.right)) {\n position.left ? strategy.left(position.left) : strategy.right(position.right);\n } else {\n strategy.centerHorizontally();\n }\n if (position && (position.top || position.bottom)) {\n position.top ? 
strategy.top(position.top) : strategy.bottom(position.bottom);\n } else {\n strategy.centerVertically();\n }\n this._ref.updatePosition();\n return this;\n }\n /**\n * Updates the dialog's width and height.\n * @param width New width of the dialog.\n * @param height New height of the dialog.\n */\n updateSize(width = '', height = '') {\n this._ref.updateSize(width, height);\n return this;\n }\n /** Add a CSS class or an array of classes to the overlay pane. */\n addPanelClass(classes) {\n this._ref.addPanelClass(classes);\n return this;\n }\n /** Remove a CSS class or an array of classes from the overlay pane. */\n removePanelClass(classes) {\n this._ref.removePanelClass(classes);\n return this;\n }\n /** Gets the current state of the dialog's lifecycle. */\n getState() {\n return this._state;\n }\n /**\n * Finishes the dialog close by updating the state of the dialog\n * and disposing the overlay.\n */\n _finishDialogClose() {\n this._state = MatDialogState.CLOSED;\n this._ref.close(this._result, {\n focusOrigin: this._closeInteractionType\n });\n this.componentInstance = null;\n }\n}\n/**\n * Closes the dialog with the specified interaction type. This is currently not part of\n * `MatDialogRef` as that would conflict with custom dialog ref mocks provided in tests.\n * More details. See: https://github.com/angular/components/pull/9257#issuecomment-651342226.\n */\n// TODO: Move this back into `MatDialogRef` when we provide an official mock dialog ref.\nfunction _closeDialogVia(ref, interactionType, result) {\n ref._closeInteractionType = interactionType;\n return ref.close(result);\n}\n\n/** Injection token that can be used to access the data that was passed in to a dialog. */\nconst MAT_DIALOG_DATA = new InjectionToken('MatMdcDialogData');\n/** Injection token that can be used to specify default dialog options. */\nconst MAT_DIALOG_DEFAULT_OPTIONS = new InjectionToken('mat-mdc-dialog-default-options');\n/** Injection token that determines the scroll handling while the dialog is open. */\nconst MAT_DIALOG_SCROLL_STRATEGY = new InjectionToken('mat-mdc-dialog-scroll-strategy', {\n providedIn: 'root',\n factory: () => {\n const overlay = inject(Overlay);\n return () => overlay.scrollStrategies.block();\n }\n});\n/**\n * @docs-private\n * @deprecated No longer used. To be removed.\n * @breaking-change 19.0.0\n */\nfunction MAT_DIALOG_SCROLL_STRATEGY_PROVIDER_FACTORY(overlay) {\n return () => overlay.scrollStrategies.block();\n}\n/**\n * @docs-private\n * @deprecated No longer used. To be removed.\n * @breaking-change 19.0.0\n */\nconst MAT_DIALOG_SCROLL_STRATEGY_PROVIDER = {\n provide: MAT_DIALOG_SCROLL_STRATEGY,\n deps: [Overlay],\n useFactory: MAT_DIALOG_SCROLL_STRATEGY_PROVIDER_FACTORY\n};\n// Counter for unique dialog ids.\nlet uniqueId = 0;\n/**\n * Service to open Material Design modal dialogs.\n */\nclass MatDialog {\n /** Keeps track of the currently-open dialogs. */\n get openDialogs() {\n return this._parentDialog ? this._parentDialog.openDialogs : this._openDialogsAtThisLevel;\n }\n /** Stream that emits when a dialog has been opened. */\n get afterOpened() {\n return this._parentDialog ? this._parentDialog.afterOpened : this._afterOpenedAtThisLevel;\n }\n _getAfterAllClosed() {\n const parent = this._parentDialog;\n return parent ? 
parent._getAfterAllClosed() : this._afterAllClosedAtThisLevel;\n }\n constructor(_overlay, injector,\n /**\n * @deprecated `_location` parameter to be removed.\n * @breaking-change 10.0.0\n */\n location, _defaultOptions, _scrollStrategy, _parentDialog,\n /**\n * @deprecated No longer used. To be removed.\n * @breaking-change 15.0.0\n */\n _overlayContainer,\n /**\n * @deprecated No longer used. To be removed.\n * @breaking-change 14.0.0\n */\n _animationMode) {\n this._overlay = _overlay;\n this._defaultOptions = _defaultOptions;\n this._scrollStrategy = _scrollStrategy;\n this._parentDialog = _parentDialog;\n this._openDialogsAtThisLevel = [];\n this._afterAllClosedAtThisLevel = new Subject();\n this._afterOpenedAtThisLevel = new Subject();\n this.dialogConfigClass = MatDialogConfig;\n /**\n * Stream that emits when all open dialog have finished closing.\n * Will emit on subscribe if there are no open dialogs to begin with.\n */\n this.afterAllClosed = defer(() => this.openDialogs.length ? this._getAfterAllClosed() : this._getAfterAllClosed().pipe(startWith(undefined)));\n this._dialog = injector.get(Dialog);\n this._dialogRefConstructor = MatDialogRef;\n this._dialogContainerType = MatDialogContainer;\n this._dialogDataToken = MAT_DIALOG_DATA;\n }\n open(componentOrTemplateRef, config) {\n let dialogRef;\n config = {\n ...(this._defaultOptions || new MatDialogConfig()),\n ...config\n };\n config.id = config.id || `mat-mdc-dialog-${uniqueId++}`;\n config.scrollStrategy = config.scrollStrategy || this._scrollStrategy();\n const cdkRef = this._dialog.open(componentOrTemplateRef, {\n ...config,\n positionStrategy: this._overlay.position().global().centerHorizontally().centerVertically(),\n // Disable closing since we need to sync it up to the animation ourselves.\n disableClose: true,\n // Disable closing on destroy, because this service cleans up its open dialogs as well.\n // We want to do the cleanup here, rather than the CDK service, because the CDK destroys\n // the dialogs immediately whereas we want it to wait for the animations to finish.\n closeOnDestroy: false,\n // Disable closing on detachments so that we can sync up the animation.\n // The Material dialog ref handles this manually.\n closeOnOverlayDetachments: false,\n container: {\n type: this._dialogContainerType,\n providers: () => [\n // Provide our config as the CDK config as well since it has the same interface as the\n // CDK one, but it contains the actual values passed in by the user for things like\n // `disableClose` which we disable for the CDK dialog since we handle it ourselves.\n {\n provide: this.dialogConfigClass,\n useValue: config\n }, {\n provide: DialogConfig,\n useValue: config\n }]\n },\n templateContext: () => ({\n dialogRef\n }),\n providers: (ref, cdkConfig, dialogContainer) => {\n dialogRef = new this._dialogRefConstructor(ref, config, dialogContainer);\n dialogRef.updatePosition(config?.position);\n return [{\n provide: this._dialogContainerType,\n useValue: dialogContainer\n }, {\n provide: this._dialogDataToken,\n useValue: cdkConfig.data\n }, {\n provide: this._dialogRefConstructor,\n useValue: dialogRef\n }];\n }\n });\n // This can't be assigned in the `providers` callback, because\n // the instance hasn't been assigned to the CDK ref yet.\n dialogRef.componentRef = cdkRef.componentRef;\n dialogRef.componentInstance = cdkRef.componentInstance;\n this.openDialogs.push(dialogRef);\n this.afterOpened.next(dialogRef);\n dialogRef.afterClosed().subscribe(() => {\n const index = 
this.openDialogs.indexOf(dialogRef);\n if (index > -1) {\n this.openDialogs.splice(index, 1);\n if (!this.openDialogs.length) {\n this._getAfterAllClosed().next();\n }\n }\n });\n return dialogRef;\n }\n /**\n * Closes all of the currently-open dialogs.\n */\n closeAll() {\n this._closeDialogs(this.openDialogs);\n }\n /**\n * Finds an open dialog by its id.\n * @param id ID to use when looking up the dialog.\n */\n getDialogById(id) {\n return this.openDialogs.find(dialog => dialog.id === id);\n }\n ngOnDestroy() {\n // Only close the dialogs at this level on destroy\n // since the parent service may still be active.\n this._closeDialogs(this._openDialogsAtThisLevel);\n this._afterAllClosedAtThisLevel.complete();\n this._afterOpenedAtThisLevel.complete();\n }\n _closeDialogs(dialogs) {\n let i = dialogs.length;\n while (i--) {\n dialogs[i].close();\n }\n }\n static {\n this.ɵfac = function MatDialog_Factory(t) {\n return new (t || MatDialog)(i0.ɵɵinject(i1$1.Overlay), i0.ɵɵinject(i0.Injector), i0.ɵɵinject(i2.Location, 8), i0.ɵɵinject(MAT_DIALOG_DEFAULT_OPTIONS, 8), i0.ɵɵinject(MAT_DIALOG_SCROLL_STRATEGY), i0.ɵɵinject(MatDialog, 12), i0.ɵɵinject(i1$1.OverlayContainer), i0.ɵɵinject(ANIMATION_MODULE_TYPE, 8));\n };\n }\n static {\n this.ɵprov = /* @__PURE__ */i0.ɵɵdefineInjectable({\n token: MatDialog,\n factory: MatDialog.ɵfac,\n providedIn: 'root'\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialog, [{\n type: Injectable,\n args: [{\n providedIn: 'root'\n }]\n }], () => [{\n type: i1$1.Overlay\n }, {\n type: i0.Injector\n }, {\n type: i2.Location,\n decorators: [{\n type: Optional\n }]\n }, {\n type: MatDialogConfig,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [MAT_DIALOG_DEFAULT_OPTIONS]\n }]\n }, {\n type: undefined,\n decorators: [{\n type: Inject,\n args: [MAT_DIALOG_SCROLL_STRATEGY]\n }]\n }, {\n type: MatDialog,\n decorators: [{\n type: Optional\n }, {\n type: SkipSelf\n }]\n }, {\n type: i1$1.OverlayContainer\n }, {\n type: undefined,\n decorators: [{\n type: Optional\n }, {\n type: Inject,\n args: [ANIMATION_MODULE_TYPE]\n }]\n }], null);\n})();\n\n/** Counter used to generate unique IDs for dialog elements. */\nlet dialogElementUid = 0;\n/**\n * Button that will close the current dialog.\n */\nclass MatDialogClose {\n constructor(\n // The dialog title directive is always used in combination with a `MatDialogRef`.\n // tslint:disable-next-line: lightweight-tokens\n dialogRef, _elementRef, _dialog) {\n this.dialogRef = dialogRef;\n this._elementRef = _elementRef;\n this._dialog = _dialog;\n /** Default to \"button\" to prevents accidental form submits. */\n this.type = 'button';\n }\n ngOnInit() {\n if (!this.dialogRef) {\n // When this directive is included in a dialog via TemplateRef (rather than being\n // in a Component), the DialogRef isn't available via injection because embedded\n // views cannot be given a custom injector. Instead, we look up the DialogRef by\n // ID. 
This must occur in `onInit`, as the ID binding for the dialog container won't\n // be resolved at constructor time.\n this.dialogRef = getClosestDialog(this._elementRef, this._dialog.openDialogs);\n }\n }\n ngOnChanges(changes) {\n const proxiedChange = changes['_matDialogClose'] || changes['_matDialogCloseResult'];\n if (proxiedChange) {\n this.dialogResult = proxiedChange.currentValue;\n }\n }\n _onButtonClick(event) {\n // Determinate the focus origin using the click event, because using the FocusMonitor will\n // result in incorrect origins. Most of the time, close buttons will be auto focused in the\n // dialog, and therefore clicking the button won't result in a focus change. This means that\n // the FocusMonitor won't detect any origin change, and will always output `program`.\n _closeDialogVia(this.dialogRef, event.screenX === 0 && event.screenY === 0 ? 'keyboard' : 'mouse', this.dialogResult);\n }\n static {\n this.ɵfac = function MatDialogClose_Factory(t) {\n return new (t || MatDialogClose)(i0.ɵɵdirectiveInject(MatDialogRef, 8), i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(MatDialog));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatDialogClose,\n selectors: [[\"\", \"mat-dialog-close\", \"\"], [\"\", \"matDialogClose\", \"\"]],\n hostVars: 2,\n hostBindings: function MatDialogClose_HostBindings(rf, ctx) {\n if (rf & 1) {\n i0.ɵɵlistener(\"click\", function MatDialogClose_click_HostBindingHandler($event) {\n return ctx._onButtonClick($event);\n });\n }\n if (rf & 2) {\n i0.ɵɵattribute(\"aria-label\", ctx.ariaLabel || null)(\"type\", ctx.type);\n }\n },\n inputs: {\n ariaLabel: [i0.ɵɵInputFlags.None, \"aria-label\", \"ariaLabel\"],\n type: \"type\",\n dialogResult: [i0.ɵɵInputFlags.None, \"mat-dialog-close\", \"dialogResult\"],\n _matDialogClose: [i0.ɵɵInputFlags.None, \"matDialogClose\", \"_matDialogClose\"]\n },\n exportAs: [\"matDialogClose\"],\n standalone: true,\n features: [i0.ɵɵNgOnChangesFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogClose, [{\n type: Directive,\n args: [{\n selector: '[mat-dialog-close], [matDialogClose]',\n exportAs: 'matDialogClose',\n standalone: true,\n host: {\n '(click)': '_onButtonClick($event)',\n '[attr.aria-label]': 'ariaLabel || null',\n '[attr.type]': 'type'\n }\n }]\n }], () => [{\n type: MatDialogRef,\n decorators: [{\n type: Optional\n }]\n }, {\n type: i0.ElementRef\n }, {\n type: MatDialog\n }], {\n ariaLabel: [{\n type: Input,\n args: ['aria-label']\n }],\n type: [{\n type: Input\n }],\n dialogResult: [{\n type: Input,\n args: ['mat-dialog-close']\n }],\n _matDialogClose: [{\n type: Input,\n args: ['matDialogClose']\n }]\n });\n})();\nclass MatDialogLayoutSection {\n constructor(\n // The dialog title directive is always used in combination with a `MatDialogRef`.\n // tslint:disable-next-line: lightweight-tokens\n _dialogRef, _elementRef, _dialog) {\n this._dialogRef = _dialogRef;\n this._elementRef = _elementRef;\n this._dialog = _dialog;\n }\n ngOnInit() {\n if (!this._dialogRef) {\n this._dialogRef = getClosestDialog(this._elementRef, this._dialog.openDialogs);\n }\n if (this._dialogRef) {\n Promise.resolve().then(() => {\n this._onAdd();\n });\n }\n }\n ngOnDestroy() {\n // Note: we null check because there are some internal\n // tests that are mocking out `MatDialogRef` incorrectly.\n const instance = this._dialogRef?._containerInstance;\n if (instance) {\n Promise.resolve().then(() => {\n this._onRemove();\n });\n 
}\n }\n static {\n this.ɵfac = function MatDialogLayoutSection_Factory(t) {\n return new (t || MatDialogLayoutSection)(i0.ɵɵdirectiveInject(MatDialogRef, 8), i0.ɵɵdirectiveInject(i0.ElementRef), i0.ɵɵdirectiveInject(MatDialog));\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatDialogLayoutSection,\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogLayoutSection, [{\n type: Directive,\n args: [{\n standalone: true\n }]\n }], () => [{\n type: MatDialogRef,\n decorators: [{\n type: Optional\n }]\n }, {\n type: i0.ElementRef\n }, {\n type: MatDialog\n }], null);\n})();\n/**\n * Title of a dialog element. Stays fixed to the top of the dialog when scrolling.\n */\nclass MatDialogTitle extends MatDialogLayoutSection {\n constructor() {\n super(...arguments);\n this.id = `mat-mdc-dialog-title-${dialogElementUid++}`;\n }\n _onAdd() {\n // Note: we null check the queue, because there are some internal\n // tests that are mocking out `MatDialogRef` incorrectly.\n this._dialogRef._containerInstance?._addAriaLabelledBy?.(this.id);\n }\n _onRemove() {\n this._dialogRef?._containerInstance?._removeAriaLabelledBy?.(this.id);\n }\n static {\n this.ɵfac = /* @__PURE__ */(() => {\n let ɵMatDialogTitle_BaseFactory;\n return function MatDialogTitle_Factory(t) {\n return (ɵMatDialogTitle_BaseFactory || (ɵMatDialogTitle_BaseFactory = i0.ɵɵgetInheritedFactory(MatDialogTitle)))(t || MatDialogTitle);\n };\n })();\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatDialogTitle,\n selectors: [[\"\", \"mat-dialog-title\", \"\"], [\"\", \"matDialogTitle\", \"\"]],\n hostAttrs: [1, \"mat-mdc-dialog-title\", \"mdc-dialog__title\"],\n hostVars: 1,\n hostBindings: function MatDialogTitle_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵhostProperty(\"id\", ctx.id);\n }\n },\n inputs: {\n id: \"id\"\n },\n exportAs: [\"matDialogTitle\"],\n standalone: true,\n features: [i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogTitle, [{\n type: Directive,\n args: [{\n selector: '[mat-dialog-title], [matDialogTitle]',\n exportAs: 'matDialogTitle',\n standalone: true,\n host: {\n 'class': 'mat-mdc-dialog-title mdc-dialog__title',\n '[id]': 'id'\n }\n }]\n }], null, {\n id: [{\n type: Input\n }]\n });\n})();\n/**\n * Scrollable content container of a dialog.\n */\nclass MatDialogContent {\n static {\n this.ɵfac = function MatDialogContent_Factory(t) {\n return new (t || MatDialogContent)();\n };\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatDialogContent,\n selectors: [[\"\", \"mat-dialog-content\", \"\"], [\"mat-dialog-content\"], [\"\", \"matDialogContent\", \"\"]],\n hostAttrs: [1, \"mat-mdc-dialog-content\", \"mdc-dialog__content\"],\n standalone: true\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogContent, [{\n type: Directive,\n args: [{\n selector: `[mat-dialog-content], mat-dialog-content, [matDialogContent]`,\n host: {\n 'class': 'mat-mdc-dialog-content mdc-dialog__content'\n },\n standalone: true\n }]\n }], null, null);\n})();\n/**\n * Container for the bottom action buttons in a dialog.\n * Stays fixed to the bottom when scrolling.\n */\nclass MatDialogActions extends MatDialogLayoutSection {\n _onAdd() {\n this._dialogRef._containerInstance?._updateActionSectionCount?.(1);\n }\n _onRemove() {\n 
this._dialogRef._containerInstance?._updateActionSectionCount?.(-1);\n }\n static {\n this.ɵfac = /* @__PURE__ */(() => {\n let ɵMatDialogActions_BaseFactory;\n return function MatDialogActions_Factory(t) {\n return (ɵMatDialogActions_BaseFactory || (ɵMatDialogActions_BaseFactory = i0.ɵɵgetInheritedFactory(MatDialogActions)))(t || MatDialogActions);\n };\n })();\n }\n static {\n this.ɵdir = /* @__PURE__ */i0.ɵɵdefineDirective({\n type: MatDialogActions,\n selectors: [[\"\", \"mat-dialog-actions\", \"\"], [\"mat-dialog-actions\"], [\"\", \"matDialogActions\", \"\"]],\n hostAttrs: [1, \"mat-mdc-dialog-actions\", \"mdc-dialog__actions\"],\n hostVars: 6,\n hostBindings: function MatDialogActions_HostBindings(rf, ctx) {\n if (rf & 2) {\n i0.ɵɵclassProp(\"mat-mdc-dialog-actions-align-start\", ctx.align === \"start\")(\"mat-mdc-dialog-actions-align-center\", ctx.align === \"center\")(\"mat-mdc-dialog-actions-align-end\", ctx.align === \"end\");\n }\n },\n inputs: {\n align: \"align\"\n },\n standalone: true,\n features: [i0.ɵɵInheritDefinitionFeature]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogActions, [{\n type: Directive,\n args: [{\n selector: `[mat-dialog-actions], mat-dialog-actions, [matDialogActions]`,\n standalone: true,\n host: {\n 'class': 'mat-mdc-dialog-actions mdc-dialog__actions',\n '[class.mat-mdc-dialog-actions-align-start]': 'align === \"start\"',\n '[class.mat-mdc-dialog-actions-align-center]': 'align === \"center\"',\n '[class.mat-mdc-dialog-actions-align-end]': 'align === \"end\"'\n }\n }]\n }], null, {\n align: [{\n type: Input\n }]\n });\n})();\n/**\n * Finds the closest MatDialogRef to an element by looking at the DOM.\n * @param element Element relative to which to look for a dialog.\n * @param openDialogs References to the currently-open dialogs.\n */\nfunction getClosestDialog(element, openDialogs) {\n let parent = element.nativeElement.parentElement;\n while (parent && !parent.classList.contains('mat-mdc-dialog-container')) {\n parent = parent.parentElement;\n }\n return parent ? openDialogs.find(dialog => dialog.id === parent.id) : null;\n}\nconst DIRECTIVES = [MatDialogContainer, MatDialogClose, MatDialogTitle, MatDialogActions, MatDialogContent];\nclass MatDialogModule {\n static {\n this.ɵfac = function MatDialogModule_Factory(t) {\n return new (t || MatDialogModule)();\n };\n }\n static {\n this.ɵmod = /* @__PURE__ */i0.ɵɵdefineNgModule({\n type: MatDialogModule\n });\n }\n static {\n this.ɵinj = /* @__PURE__ */i0.ɵɵdefineInjector({\n providers: [MatDialog],\n imports: [DialogModule, OverlayModule, PortalModule, MatCommonModule, MatCommonModule]\n });\n }\n}\n(() => {\n (typeof ngDevMode === \"undefined\" || ngDevMode) && i0.ɵsetClassMetadata(MatDialogModule, [{\n type: NgModule,\n args: [{\n imports: [DialogModule, OverlayModule, PortalModule, MatCommonModule, ...DIRECTIVES],\n exports: [MatCommonModule, ...DIRECTIVES],\n providers: [MatDialog]\n }]\n }], null, null);\n})();\n\n/**\n * Default parameters for the animation for backwards compatibility.\n * @docs-private\n */\nconst _defaultParams = {\n params: {\n enterAnimationDuration: '150ms',\n exitAnimationDuration: '75ms'\n }\n};\n/**\n * Animations used by MatDialog.\n * @docs-private\n */\nconst matDialogAnimations = {\n /** Animation that is applied on the dialog container by default. 
*/\n dialogContainer: trigger('dialogContainer', [\n // Note: The `enter` animation transitions to `transform: none`, because for some reason\n // specifying the transform explicitly, causes IE both to blur the dialog content and\n // decimate the animation performance. Leaving it as `none` solves both issues.\n state('void, exit', style({\n opacity: 0,\n transform: 'scale(0.7)'\n })), state('enter', style({\n transform: 'none'\n })), transition('* => enter', group([animate('{{enterAnimationDuration}} cubic-bezier(0, 0, 0.2, 1)', style({\n transform: 'none',\n opacity: 1\n })), query('@*', animateChild(), {\n optional: true\n })]), _defaultParams), transition('* => void, * => exit', group([animate('{{exitAnimationDuration}} cubic-bezier(0.4, 0.0, 0.2, 1)', style({\n opacity: 0\n })), query('@*', animateChild(), {\n optional: true\n })]), _defaultParams)])\n};\n\n/**\n * Generated bundle index. Do not edit.\n */\n\nexport { MAT_DIALOG_DATA, MAT_DIALOG_DEFAULT_OPTIONS, MAT_DIALOG_SCROLL_STRATEGY, MAT_DIALOG_SCROLL_STRATEGY_PROVIDER, MAT_DIALOG_SCROLL_STRATEGY_PROVIDER_FACTORY, MatDialog, MatDialogActions, MatDialogClose, MatDialogConfig, MatDialogContainer, MatDialogContent, MatDialogModule, MatDialogRef, MatDialogState, MatDialogTitle, _closeDialogVia, _defaultParams, matDialogAnimations };\n","/**\n * @licstart The following is the entire license notice for the\n * JavaScript code in this page\n *\n * Copyright 2023 Mozilla Foundation\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * @licend The above is the entire license notice for the\n * JavaScript code in this page\n */\n\n/******/ // The require scope\n/******/ var __webpack_require__ = {};\n/******/ \n/************************************************************************/\n/******/ /* webpack/runtime/define property getters */\n/******/ (() => {\n/******/ \t// define getter functions for harmony exports\n/******/ \t__webpack_require__.d = (exports, definition) => {\n/******/ \t\tfor(var key in definition) {\n/******/ \t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t}\n/******/ \t\t}\n/******/ \t};\n/******/ })();\n/******/ \n/******/ /* webpack/runtime/hasOwnProperty shorthand */\n/******/ (() => {\n/******/ \t__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))\n/******/ })();\n/******/ \n/************************************************************************/\nvar __webpack_exports__ = globalThis.pdfjsLib = {};\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n AbortException: () => (/* reexport */ AbortException),\n AnnotationEditorLayer: () => (/* reexport */ AnnotationEditorLayer),\n AnnotationEditorParamsType: () => (/* reexport */ AnnotationEditorParamsType),\n AnnotationEditorType: () => (/* reexport */ AnnotationEditorType),\n AnnotationEditorUIManager: () => (/* reexport */ 
AnnotationEditorUIManager),\n AnnotationLayer: () => (/* reexport */ AnnotationLayer),\n AnnotationMode: () => (/* reexport */ AnnotationMode),\n CMapCompressionType: () => (/* reexport */ CMapCompressionType),\n ColorPicker: () => (/* reexport */ ColorPicker),\n DOMSVGFactory: () => (/* reexport */ DOMSVGFactory),\n DrawLayer: () => (/* reexport */ DrawLayer),\n FeatureTest: () => (/* reexport */ util_FeatureTest),\n GlobalWorkerOptions: () => (/* reexport */ GlobalWorkerOptions),\n ImageKind: () => (/* reexport */ util_ImageKind),\n InvalidPDFException: () => (/* reexport */ InvalidPDFException),\n MissingPDFException: () => (/* reexport */ MissingPDFException),\n OPS: () => (/* reexport */ OPS),\n Outliner: () => (/* reexport */ Outliner),\n PDFDataRangeTransport: () => (/* reexport */ PDFDataRangeTransport),\n PDFDateString: () => (/* reexport */ PDFDateString),\n PDFWorker: () => (/* reexport */ PDFWorker),\n PasswordResponses: () => (/* reexport */ PasswordResponses),\n PermissionFlag: () => (/* reexport */ PermissionFlag),\n PixelsPerInch: () => (/* reexport */ PixelsPerInch),\n RenderingCancelledException: () => (/* reexport */ RenderingCancelledException),\n TextLayer: () => (/* reexport */ TextLayer),\n UnexpectedResponseException: () => (/* reexport */ UnexpectedResponseException),\n Util: () => (/* reexport */ Util),\n VerbosityLevel: () => (/* reexport */ VerbosityLevel),\n XfaLayer: () => (/* reexport */ XfaLayer),\n build: () => (/* reexport */ build),\n createValidAbsoluteUrl: () => (/* reexport */ createValidAbsoluteUrl),\n fetchData: () => (/* reexport */ fetchData),\n getDocument: () => (/* reexport */ getDocument),\n getFilenameFromUrl: () => (/* reexport */ getFilenameFromUrl),\n getPdfFilenameFromUrl: () => (/* reexport */ getPdfFilenameFromUrl),\n getXfaPageViewport: () => (/* reexport */ getXfaPageViewport),\n isDataScheme: () => (/* reexport */ isDataScheme),\n isPdfFile: () => (/* reexport */ isPdfFile),\n noContextMenu: () => (/* reexport */ noContextMenu),\n normalizeUnicode: () => (/* reexport */ normalizeUnicode),\n renderTextLayer: () => (/* reexport */ renderTextLayer),\n setLayerDimensions: () => (/* reexport */ setLayerDimensions),\n shadow: () => (/* reexport */ shadow),\n updateTextLayer: () => (/* reexport */ updateTextLayer),\n version: () => (/* reexport */ version)\n});\n\n;// CONCATENATED MODULE: ./src/shared/util.js\nconst isNodeJS = typeof process === \"object\" && process + \"\" === \"[object process]\" && !process.versions.nw && !(process.versions.electron && process.type && process.type !== \"browser\");\nconst IDENTITY_MATRIX = [1, 0, 0, 1, 0, 0];\nconst FONT_IDENTITY_MATRIX = [0.001, 0, 0, 0.001, 0, 0];\nconst MAX_IMAGE_SIZE_TO_CACHE = 10e6;\nconst LINE_FACTOR = 1.35;\nconst LINE_DESCENT_FACTOR = 0.35;\nconst BASELINE_FACTOR = LINE_DESCENT_FACTOR / LINE_FACTOR;\nconst RenderingIntentFlag = {\n ANY: 0x01,\n DISPLAY: 0x02,\n PRINT: 0x04,\n SAVE: 0x08,\n ANNOTATIONS_FORMS: 0x10,\n ANNOTATIONS_STORAGE: 0x20,\n ANNOTATIONS_DISABLE: 0x40,\n OPLIST: 0x100\n};\nconst AnnotationMode = {\n DISABLE: 0,\n ENABLE: 1,\n ENABLE_FORMS: 2,\n ENABLE_STORAGE: 3\n};\nconst AnnotationEditorPrefix = \"pdfjs_internal_editor_\";\nconst AnnotationEditorType = {\n DISABLE: -1,\n NONE: 0,\n FREETEXT: 3,\n HIGHLIGHT: 9,\n STAMP: 13,\n INK: 15\n};\nconst AnnotationEditorParamsType = {\n RESIZE: 1,\n CREATE: 2,\n FREETEXT_SIZE: 11,\n FREETEXT_COLOR: 12,\n FREETEXT_OPACITY: 13,\n INK_COLOR: 21,\n INK_THICKNESS: 22,\n INK_OPACITY: 23,\n HIGHLIGHT_COLOR: 31,\n 
HIGHLIGHT_DEFAULT_COLOR: 32,\n HIGHLIGHT_THICKNESS: 33,\n HIGHLIGHT_FREE: 34,\n HIGHLIGHT_SHOW_ALL: 35\n};\nconst PermissionFlag = {\n PRINT: 0x04,\n MODIFY_CONTENTS: 0x08,\n COPY: 0x10,\n MODIFY_ANNOTATIONS: 0x20,\n FILL_INTERACTIVE_FORMS: 0x100,\n COPY_FOR_ACCESSIBILITY: 0x200,\n ASSEMBLE: 0x400,\n PRINT_HIGH_QUALITY: 0x800\n};\nconst TextRenderingMode = {\n FILL: 0,\n STROKE: 1,\n FILL_STROKE: 2,\n INVISIBLE: 3,\n FILL_ADD_TO_PATH: 4,\n STROKE_ADD_TO_PATH: 5,\n FILL_STROKE_ADD_TO_PATH: 6,\n ADD_TO_PATH: 7,\n FILL_STROKE_MASK: 3,\n ADD_TO_PATH_FLAG: 4\n};\nconst util_ImageKind = {\n GRAYSCALE_1BPP: 1,\n RGB_24BPP: 2,\n RGBA_32BPP: 3\n};\nconst AnnotationType = {\n TEXT: 1,\n LINK: 2,\n FREETEXT: 3,\n LINE: 4,\n SQUARE: 5,\n CIRCLE: 6,\n POLYGON: 7,\n POLYLINE: 8,\n HIGHLIGHT: 9,\n UNDERLINE: 10,\n SQUIGGLY: 11,\n STRIKEOUT: 12,\n STAMP: 13,\n CARET: 14,\n INK: 15,\n POPUP: 16,\n FILEATTACHMENT: 17,\n SOUND: 18,\n MOVIE: 19,\n WIDGET: 20,\n SCREEN: 21,\n PRINTERMARK: 22,\n TRAPNET: 23,\n WATERMARK: 24,\n THREED: 25,\n REDACT: 26\n};\nconst AnnotationReplyType = {\n GROUP: \"Group\",\n REPLY: \"R\"\n};\nconst AnnotationFlag = {\n INVISIBLE: 0x01,\n HIDDEN: 0x02,\n PRINT: 0x04,\n NOZOOM: 0x08,\n NOROTATE: 0x10,\n NOVIEW: 0x20,\n READONLY: 0x40,\n LOCKED: 0x80,\n TOGGLENOVIEW: 0x100,\n LOCKEDCONTENTS: 0x200\n};\nconst AnnotationFieldFlag = {\n READONLY: 0x0000001,\n REQUIRED: 0x0000002,\n NOEXPORT: 0x0000004,\n MULTILINE: 0x0001000,\n PASSWORD: 0x0002000,\n NOTOGGLETOOFF: 0x0004000,\n RADIO: 0x0008000,\n PUSHBUTTON: 0x0010000,\n COMBO: 0x0020000,\n EDIT: 0x0040000,\n SORT: 0x0080000,\n FILESELECT: 0x0100000,\n MULTISELECT: 0x0200000,\n DONOTSPELLCHECK: 0x0400000,\n DONOTSCROLL: 0x0800000,\n COMB: 0x1000000,\n RICHTEXT: 0x2000000,\n RADIOSINUNISON: 0x2000000,\n COMMITONSELCHANGE: 0x4000000\n};\nconst AnnotationBorderStyleType = {\n SOLID: 1,\n DASHED: 2,\n BEVELED: 3,\n INSET: 4,\n UNDERLINE: 5\n};\nconst AnnotationActionEventType = {\n E: \"Mouse Enter\",\n X: \"Mouse Exit\",\n D: \"Mouse Down\",\n U: \"Mouse Up\",\n Fo: \"Focus\",\n Bl: \"Blur\",\n PO: \"PageOpen\",\n PC: \"PageClose\",\n PV: \"PageVisible\",\n PI: \"PageInvisible\",\n K: \"Keystroke\",\n F: \"Format\",\n V: \"Validate\",\n C: \"Calculate\"\n};\nconst DocumentActionEventType = {\n WC: \"WillClose\",\n WS: \"WillSave\",\n DS: \"DidSave\",\n WP: \"WillPrint\",\n DP: \"DidPrint\"\n};\nconst PageActionEventType = {\n O: \"PageOpen\",\n C: \"PageClose\"\n};\nconst VerbosityLevel = {\n ERRORS: 0,\n WARNINGS: 1,\n INFOS: 5\n};\nconst CMapCompressionType = {\n NONE: 0,\n BINARY: 1\n};\nconst OPS = {\n dependency: 1,\n setLineWidth: 2,\n setLineCap: 3,\n setLineJoin: 4,\n setMiterLimit: 5,\n setDash: 6,\n setRenderingIntent: 7,\n setFlatness: 8,\n setGState: 9,\n save: 10,\n restore: 11,\n transform: 12,\n moveTo: 13,\n lineTo: 14,\n curveTo: 15,\n curveTo2: 16,\n curveTo3: 17,\n closePath: 18,\n rectangle: 19,\n stroke: 20,\n closeStroke: 21,\n fill: 22,\n eoFill: 23,\n fillStroke: 24,\n eoFillStroke: 25,\n closeFillStroke: 26,\n closeEOFillStroke: 27,\n endPath: 28,\n clip: 29,\n eoClip: 30,\n beginText: 31,\n endText: 32,\n setCharSpacing: 33,\n setWordSpacing: 34,\n setHScale: 35,\n setLeading: 36,\n setFont: 37,\n setTextRenderingMode: 38,\n setTextRise: 39,\n moveText: 40,\n setLeadingMoveText: 41,\n setTextMatrix: 42,\n nextLine: 43,\n showText: 44,\n showSpacedText: 45,\n nextLineShowText: 46,\n nextLineSetSpacingShowText: 47,\n setCharWidth: 48,\n setCharWidthAndBounds: 49,\n setStrokeColorSpace: 50,\n setFillColorSpace: 
51,\n setStrokeColor: 52,\n setStrokeColorN: 53,\n setFillColor: 54,\n setFillColorN: 55,\n setStrokeGray: 56,\n setFillGray: 57,\n setStrokeRGBColor: 58,\n setFillRGBColor: 59,\n setStrokeCMYKColor: 60,\n setFillCMYKColor: 61,\n shadingFill: 62,\n beginInlineImage: 63,\n beginImageData: 64,\n endInlineImage: 65,\n paintXObject: 66,\n markPoint: 67,\n markPointProps: 68,\n beginMarkedContent: 69,\n beginMarkedContentProps: 70,\n endMarkedContent: 71,\n beginCompat: 72,\n endCompat: 73,\n paintFormXObjectBegin: 74,\n paintFormXObjectEnd: 75,\n beginGroup: 76,\n endGroup: 77,\n beginAnnotation: 80,\n endAnnotation: 81,\n paintImageMaskXObject: 83,\n paintImageMaskXObjectGroup: 84,\n paintImageXObject: 85,\n paintInlineImageXObject: 86,\n paintInlineImageXObjectGroup: 87,\n paintImageXObjectRepeat: 88,\n paintImageMaskXObjectRepeat: 89,\n paintSolidColorImageMask: 90,\n constructPath: 91\n};\nconst PasswordResponses = {\n NEED_PASSWORD: 1,\n INCORRECT_PASSWORD: 2\n};\nlet verbosity = VerbosityLevel.WARNINGS;\nfunction setVerbosityLevel(level) {\n if (Number.isInteger(level)) {\n verbosity = level;\n }\n}\nfunction getVerbosityLevel() {\n return verbosity;\n}\nfunction info(msg) {\n if (verbosity >= VerbosityLevel.INFOS) {\n console.log(`Info: ${msg}`);\n }\n}\nfunction warn(msg) {\n if (verbosity >= VerbosityLevel.WARNINGS) {\n console.log(`Warning: ${msg}`);\n }\n}\nfunction unreachable(msg) {\n throw new Error(msg);\n}\nfunction assert(cond, msg) {\n if (!cond) {\n unreachable(msg);\n }\n}\nfunction _isValidProtocol(url) {\n switch (url?.protocol) {\n case \"http:\":\n case \"https:\":\n case \"ftp:\":\n case \"mailto:\":\n case \"tel:\":\n return true;\n default:\n return false;\n }\n}\nfunction createValidAbsoluteUrl(url, baseUrl = null, options = null) {\n if (!url) {\n return null;\n }\n try {\n if (options && typeof url === \"string\") {\n if (options.addDefaultProtocol && url.startsWith(\"www.\")) {\n const dots = url.match(/\\./g);\n if (dots?.length >= 2) {\n url = `http://${url}`;\n }\n }\n if (options.tryConvertEncoding) {\n try {\n url = stringToUTF8String(url);\n } catch {}\n }\n }\n const absoluteUrl = baseUrl ? 
new URL(url, baseUrl) : new URL(url);\n if (_isValidProtocol(absoluteUrl)) {\n return absoluteUrl;\n }\n } catch {}\n return null;\n}\nfunction shadow(obj, prop, value, nonSerializable = false) {\n Object.defineProperty(obj, prop, {\n value,\n enumerable: !nonSerializable,\n configurable: true,\n writable: false\n });\n return value;\n}\nconst BaseException = function BaseExceptionClosure() {\n function BaseException(message, name) {\n if (this.constructor === BaseException) {\n unreachable(\"Cannot initialize BaseException.\");\n }\n this.message = message;\n this.name = name;\n }\n BaseException.prototype = new Error();\n BaseException.constructor = BaseException;\n return BaseException;\n}();\nclass PasswordException extends BaseException {\n constructor(msg, code) {\n super(msg, \"PasswordException\");\n this.code = code;\n }\n}\nclass UnknownErrorException extends BaseException {\n constructor(msg, details) {\n super(msg, \"UnknownErrorException\");\n this.details = details;\n }\n}\nclass InvalidPDFException extends BaseException {\n constructor(msg) {\n super(msg, \"InvalidPDFException\");\n }\n}\nclass MissingPDFException extends BaseException {\n constructor(msg) {\n super(msg, \"MissingPDFException\");\n }\n}\nclass UnexpectedResponseException extends BaseException {\n constructor(msg, status) {\n super(msg, \"UnexpectedResponseException\");\n this.status = status;\n }\n}\nclass FormatError extends BaseException {\n constructor(msg) {\n super(msg, \"FormatError\");\n }\n}\nclass AbortException extends BaseException {\n constructor(msg) {\n super(msg, \"AbortException\");\n }\n}\nfunction bytesToString(bytes) {\n if (typeof bytes !== \"object\" || bytes?.length === undefined) {\n unreachable(\"Invalid argument for bytesToString\");\n }\n const length = bytes.length;\n const MAX_ARGUMENT_COUNT = 8192;\n if (length < MAX_ARGUMENT_COUNT) {\n return String.fromCharCode.apply(null, bytes);\n }\n const strBuf = [];\n for (let i = 0; i < length; i += MAX_ARGUMENT_COUNT) {\n const chunkEnd = Math.min(i + MAX_ARGUMENT_COUNT, length);\n const chunk = bytes.subarray(i, chunkEnd);\n strBuf.push(String.fromCharCode.apply(null, chunk));\n }\n return strBuf.join(\"\");\n}\nfunction stringToBytes(str) {\n if (typeof str !== \"string\") {\n unreachable(\"Invalid argument for stringToBytes\");\n }\n const length = str.length;\n const bytes = new Uint8Array(length);\n for (let i = 0; i < length; ++i) {\n bytes[i] = str.charCodeAt(i) & 0xff;\n }\n return bytes;\n}\nfunction string32(value) {\n return String.fromCharCode(value >> 24 & 0xff, value >> 16 & 0xff, value >> 8 & 0xff, value & 0xff);\n}\nfunction objectSize(obj) {\n return Object.keys(obj).length;\n}\nfunction objectFromMap(map) {\n const obj = Object.create(null);\n for (const [key, value] of map) {\n obj[key] = value;\n }\n return obj;\n}\nfunction isLittleEndian() {\n const buffer8 = new Uint8Array(4);\n buffer8[0] = 1;\n const view32 = new Uint32Array(buffer8.buffer, 0, 1);\n return view32[0] === 1;\n}\nfunction isEvalSupported() {\n try {\n new Function(\"\");\n return true;\n } catch {\n return false;\n }\n}\nclass util_FeatureTest {\n static get isLittleEndian() {\n return shadow(this, \"isLittleEndian\", isLittleEndian());\n }\n static get isEvalSupported() {\n return shadow(this, \"isEvalSupported\", isEvalSupported());\n }\n static get isOffscreenCanvasSupported() {\n return shadow(this, \"isOffscreenCanvasSupported\", typeof OffscreenCanvas !== \"undefined\");\n }\n static get platform() {\n if (typeof navigator !== \"undefined\" 
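/* Sketch of the `shadow` memoization pattern used throughout this bundle: the first
   access to a getter computes the value, then `shadow` replaces it with a
   non-writable data property so later reads are plain property lookups.
   Illustrative class, not part of the bundle:
     class Example {
       get answer() {
         return shadow(this, "answer", 40 + 2);  // subsequent reads hit the data property
       }
     }
*/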
&& typeof navigator?.platform === \"string\") {\n return shadow(this, \"platform\", {\n isMac: navigator.platform.includes(\"Mac\")\n });\n }\n return shadow(this, \"platform\", {\n isMac: false\n });\n }\n static get isCSSRoundSupported() {\n return shadow(this, \"isCSSRoundSupported\", globalThis.CSS?.supports?.(\"width: round(1.5px, 1px)\"));\n }\n}\nconst hexNumbers = Array.from(Array(256).keys(), n => n.toString(16).padStart(2, \"0\"));\nclass Util {\n static makeHexColor(r, g, b) {\n return `#${hexNumbers[r]}${hexNumbers[g]}${hexNumbers[b]}`;\n }\n static scaleMinMax(transform, minMax) {\n let temp;\n if (transform[0]) {\n if (transform[0] < 0) {\n temp = minMax[0];\n minMax[0] = minMax[2];\n minMax[2] = temp;\n }\n minMax[0] *= transform[0];\n minMax[2] *= transform[0];\n if (transform[3] < 0) {\n temp = minMax[1];\n minMax[1] = minMax[3];\n minMax[3] = temp;\n }\n minMax[1] *= transform[3];\n minMax[3] *= transform[3];\n } else {\n temp = minMax[0];\n minMax[0] = minMax[1];\n minMax[1] = temp;\n temp = minMax[2];\n minMax[2] = minMax[3];\n minMax[3] = temp;\n if (transform[1] < 0) {\n temp = minMax[1];\n minMax[1] = minMax[3];\n minMax[3] = temp;\n }\n minMax[1] *= transform[1];\n minMax[3] *= transform[1];\n if (transform[2] < 0) {\n temp = minMax[0];\n minMax[0] = minMax[2];\n minMax[2] = temp;\n }\n minMax[0] *= transform[2];\n minMax[2] *= transform[2];\n }\n minMax[0] += transform[4];\n minMax[1] += transform[5];\n minMax[2] += transform[4];\n minMax[3] += transform[5];\n }\n static transform(m1, m2) {\n return [m1[0] * m2[0] + m1[2] * m2[1], m1[1] * m2[0] + m1[3] * m2[1], m1[0] * m2[2] + m1[2] * m2[3], m1[1] * m2[2] + m1[3] * m2[3], m1[0] * m2[4] + m1[2] * m2[5] + m1[4], m1[1] * m2[4] + m1[3] * m2[5] + m1[5]];\n }\n static applyTransform(p, m) {\n const xt = p[0] * m[0] + p[1] * m[2] + m[4];\n const yt = p[0] * m[1] + p[1] * m[3] + m[5];\n return [xt, yt];\n }\n static applyInverseTransform(p, m) {\n const d = m[0] * m[3] - m[1] * m[2];\n const xt = (p[0] * m[3] - p[1] * m[2] + m[2] * m[5] - m[4] * m[3]) / d;\n const yt = (-p[0] * m[1] + p[1] * m[0] + m[4] * m[1] - m[5] * m[0]) / d;\n return [xt, yt];\n }\n static getAxialAlignedBoundingBox(r, m) {\n const p1 = this.applyTransform(r, m);\n const p2 = this.applyTransform(r.slice(2, 4), m);\n const p3 = this.applyTransform([r[0], r[3]], m);\n const p4 = this.applyTransform([r[2], r[1]], m);\n return [Math.min(p1[0], p2[0], p3[0], p4[0]), Math.min(p1[1], p2[1], p3[1], p4[1]), Math.max(p1[0], p2[0], p3[0], p4[0]), Math.max(p1[1], p2[1], p3[1], p4[1])];\n }\n static inverseTransform(m) {\n const d = m[0] * m[3] - m[1] * m[2];\n return [m[3] / d, -m[1] / d, -m[2] / d, m[0] / d, (m[2] * m[5] - m[4] * m[3]) / d, (m[4] * m[1] - m[5] * m[0]) / d];\n }\n static singularValueDecompose2dScale(m) {\n const transpose = [m[0], m[2], m[1], m[3]];\n const a = m[0] * transpose[0] + m[1] * transpose[2];\n const b = m[0] * transpose[1] + m[1] * transpose[3];\n const c = m[2] * transpose[0] + m[3] * transpose[2];\n const d = m[2] * transpose[1] + m[3] * transpose[3];\n const first = (a + d) / 2;\n const second = Math.sqrt((a + d) ** 2 - 4 * (a * d - c * b)) / 2;\n const sx = first + second || 1;\n const sy = first - second || 1;\n return [Math.sqrt(sx), Math.sqrt(sy)];\n }\n static normalizeRect(rect) {\n const r = rect.slice(0);\n if (rect[0] > rect[2]) {\n r[0] = rect[2];\n r[2] = rect[0];\n }\n if (rect[1] > rect[3]) {\n r[1] = rect[3];\n r[3] = rect[1];\n }\n return r;\n }\n static intersect(rect1, rect2) {\n const xLow = 
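/* Sketch: the 6-element arrays handled by Util are PDF-style matrices
   [a, b, c, d, e, f]. Illustrative values only:
     const translate = [1, 0, 0, 1, 10, 20];           // shift by (10, 20)
     const scale = [2, 0, 0, 2, 0, 0];                 // scale by 2
     Util.applyTransform([5, 5], translate);           // -> [15, 25]
     Util.transform(translate, scale);                 // apply scale, then translate -> [2, 0, 0, 2, 10, 20]
     Util.applyInverseTransform([15, 25], translate);  // -> [5, 5]
*/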
Math.max(Math.min(rect1[0], rect1[2]), Math.min(rect2[0], rect2[2]));\n const xHigh = Math.min(Math.max(rect1[0], rect1[2]), Math.max(rect2[0], rect2[2]));\n if (xLow > xHigh) {\n return null;\n }\n const yLow = Math.max(Math.min(rect1[1], rect1[3]), Math.min(rect2[1], rect2[3]));\n const yHigh = Math.min(Math.max(rect1[1], rect1[3]), Math.max(rect2[1], rect2[3]));\n if (yLow > yHigh) {\n return null;\n }\n return [xLow, yLow, xHigh, yHigh];\n }\n static #getExtremumOnCurve(x0, x1, x2, x3, y0, y1, y2, y3, t, minMax) {\n if (t <= 0 || t >= 1) {\n return;\n }\n const mt = 1 - t;\n const tt = t * t;\n const ttt = tt * t;\n const x = mt * (mt * (mt * x0 + 3 * t * x1) + 3 * tt * x2) + ttt * x3;\n const y = mt * (mt * (mt * y0 + 3 * t * y1) + 3 * tt * y2) + ttt * y3;\n minMax[0] = Math.min(minMax[0], x);\n minMax[1] = Math.min(minMax[1], y);\n minMax[2] = Math.max(minMax[2], x);\n minMax[3] = Math.max(minMax[3], y);\n }\n static #getExtremum(x0, x1, x2, x3, y0, y1, y2, y3, a, b, c, minMax) {\n if (Math.abs(a) < 1e-12) {\n if (Math.abs(b) >= 1e-12) {\n this.#getExtremumOnCurve(x0, x1, x2, x3, y0, y1, y2, y3, -c / b, minMax);\n }\n return;\n }\n const delta = b ** 2 - 4 * c * a;\n if (delta < 0) {\n return;\n }\n const sqrtDelta = Math.sqrt(delta);\n const a2 = 2 * a;\n this.#getExtremumOnCurve(x0, x1, x2, x3, y0, y1, y2, y3, (-b + sqrtDelta) / a2, minMax);\n this.#getExtremumOnCurve(x0, x1, x2, x3, y0, y1, y2, y3, (-b - sqrtDelta) / a2, minMax);\n }\n static bezierBoundingBox(x0, y0, x1, y1, x2, y2, x3, y3, minMax) {\n if (minMax) {\n minMax[0] = Math.min(minMax[0], x0, x3);\n minMax[1] = Math.min(minMax[1], y0, y3);\n minMax[2] = Math.max(minMax[2], x0, x3);\n minMax[3] = Math.max(minMax[3], y0, y3);\n } else {\n minMax = [Math.min(x0, x3), Math.min(y0, y3), Math.max(x0, x3), Math.max(y0, y3)];\n }\n this.#getExtremum(x0, x1, x2, x3, y0, y1, y2, y3, 3 * (-x0 + 3 * (x1 - x2) + x3), 6 * (x0 - 2 * x1 + x2), 3 * (x1 - x0), minMax);\n this.#getExtremum(x0, x1, x2, x3, y0, y1, y2, y3, 3 * (-y0 + 3 * (y1 - y2) + y3), 6 * (y0 - 2 * y1 + y2), 3 * (y1 - y0), minMax);\n return minMax;\n }\n}\nconst PDFStringTranslateTable = (/* unused pure expression or super */ null && ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2d8, 0x2c7, 0x2c6, 0x2d9, 0x2dd, 0x2db, 0x2da, 0x2dc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2022, 0x2020, 0x2021, 0x2026, 0x2014, 0x2013, 0x192, 0x2044, 0x2039, 0x203a, 0x2212, 0x2030, 0x201e, 0x201c, 0x201d, 0x2018, 0x2019, 0x201a, 0x2122, 0xfb01, 0xfb02, 0x141, 0x152, 0x160, 0x178, 0x17d, 0x131, 0x142, 0x153, 0x161, 0x17e, 0, 0x20ac]));\nfunction stringToPDFString(str) {\n if (str[0] >= \"\\xEF\") {\n let encoding;\n if (str[0] === \"\\xFE\" && str[1] === \"\\xFF\") {\n encoding = \"utf-16be\";\n if (str.length % 2 === 1) {\n str = str.slice(0, -1);\n }\n } else if (str[0] === \"\\xFF\" && str[1] === \"\\xFE\") {\n encoding = \"utf-16le\";\n if (str.length % 2 === 1) {\n str = str.slice(0, -1);\n }\n } else if (str[0] === \"\\xEF\" && str[1] === \"\\xBB\" && str[2] === \"\\xBF\") {\n encoding = \"utf-8\";\n }\n if (encoding) {\n try {\n const decoder = new TextDecoder(encoding, {\n fatal: true\n });\n const buffer = stringToBytes(str);\n const decoded = decoder.decode(buffer);\n if (!decoded.includes(\"\\x1b\")) {\n 
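/* Sketch: stringToPDFString sniffs a BOM on the raw byte string and decodes with
   TextDecoder when one is found; otherwise it falls back to the per-character
   translation table above. Illustrative input:
     stringToPDFString("\xFE\xFF\x00H\x00i");  // UTF-16BE bytes -> "Hi"
*/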
return decoded;\n }\n return decoded.replaceAll(/\\x1b[^\\x1b]*(?:\\x1b|$)/g, \"\");\n } catch (ex) {\n warn(`stringToPDFString: \"${ex}\".`);\n }\n }\n }\n const strBuf = [];\n for (let i = 0, ii = str.length; i < ii; i++) {\n const charCode = str.charCodeAt(i);\n if (charCode === 0x1b) {\n while (++i < ii && str.charCodeAt(i) !== 0x1b) {}\n continue;\n }\n const code = PDFStringTranslateTable[charCode];\n strBuf.push(code ? String.fromCharCode(code) : str.charAt(i));\n }\n return strBuf.join(\"\");\n}\nfunction stringToUTF8String(str) {\n return decodeURIComponent(escape(str));\n}\nfunction utf8StringToString(str) {\n return unescape(encodeURIComponent(str));\n}\nfunction isArrayEqual(arr1, arr2) {\n if (arr1.length !== arr2.length) {\n return false;\n }\n for (let i = 0, ii = arr1.length; i < ii; i++) {\n if (arr1[i] !== arr2[i]) {\n return false;\n }\n }\n return true;\n}\nfunction getModificationDate(date = new Date()) {\n const buffer = [date.getUTCFullYear().toString(), (date.getUTCMonth() + 1).toString().padStart(2, \"0\"), date.getUTCDate().toString().padStart(2, \"0\"), date.getUTCHours().toString().padStart(2, \"0\"), date.getUTCMinutes().toString().padStart(2, \"0\"), date.getUTCSeconds().toString().padStart(2, \"0\")];\n return buffer.join(\"\");\n}\nlet NormalizeRegex = null;\nlet NormalizationMap = null;\nfunction normalizeUnicode(str) {\n if (!NormalizeRegex) {\n NormalizeRegex = /([\\u00a0\\u00b5\\u037e\\u0eb3\\u2000-\\u200a\\u202f\\u2126\\ufb00-\\ufb04\\ufb06\\ufb20-\\ufb36\\ufb38-\\ufb3c\\ufb3e\\ufb40-\\ufb41\\ufb43-\\ufb44\\ufb46-\\ufba1\\ufba4-\\ufba9\\ufbae-\\ufbb1\\ufbd3-\\ufbdc\\ufbde-\\ufbe7\\ufbea-\\ufbf8\\ufbfc-\\ufbfd\\ufc00-\\ufc5d\\ufc64-\\ufcf1\\ufcf5-\\ufd3d\\ufd88\\ufdf4\\ufdfa-\\ufdfb\\ufe71\\ufe77\\ufe79\\ufe7b\\ufe7d]+)|(\\ufb05+)/gu;\n NormalizationMap = new Map([[\"ſt\", \"ſt\"]]);\n }\n return str.replaceAll(NormalizeRegex, (_, p1, p2) => p1 ? 
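/* Sketch: getModificationDate formats a Date as the 14-digit UTC stamp used in PDF
   metadata (YYYYMMDDHHmmSS). Illustrative date:
     getModificationDate(new Date(Date.UTC(2024, 0, 2, 3, 4, 5)));  // -> "20240102030405"
*/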
p1.normalize(\"NFKC\") : NormalizationMap.get(p2));\n}\nfunction getUuid() {\n if (typeof crypto !== \"undefined\" && typeof crypto?.randomUUID === \"function\") {\n return crypto.randomUUID();\n }\n const buf = new Uint8Array(32);\n if (typeof crypto !== \"undefined\" && typeof crypto?.getRandomValues === \"function\") {\n crypto.getRandomValues(buf);\n } else {\n for (let i = 0; i < 32; i++) {\n buf[i] = Math.floor(Math.random() * 255);\n }\n }\n return bytesToString(buf);\n}\nconst AnnotationPrefix = \"pdfjs_internal_id_\";\nconst FontRenderOps = {\n BEZIER_CURVE_TO: 0,\n MOVE_TO: 1,\n LINE_TO: 2,\n QUADRATIC_CURVE_TO: 3,\n RESTORE: 4,\n SAVE: 5,\n SCALE: 6,\n TRANSFORM: 7,\n TRANSLATE: 8\n};\n\n;// CONCATENATED MODULE: ./src/display/base_factory.js\n\nclass BaseFilterFactory {\n constructor() {\n if (this.constructor === BaseFilterFactory) {\n unreachable(\"Cannot initialize BaseFilterFactory.\");\n }\n }\n addFilter(maps) {\n return \"none\";\n }\n addHCMFilter(fgColor, bgColor) {\n return \"none\";\n }\n addAlphaFilter(map) {\n return \"none\";\n }\n addLuminosityFilter(map) {\n return \"none\";\n }\n addHighlightHCMFilter(filterName, fgColor, bgColor, newFgColor, newBgColor) {\n return \"none\";\n }\n destroy(keepHCM = false) {}\n}\nclass BaseCanvasFactory {\n #enableHWA = false;\n constructor({\n enableHWA = false\n } = {}) {\n if (this.constructor === BaseCanvasFactory) {\n unreachable(\"Cannot initialize BaseCanvasFactory.\");\n }\n this.#enableHWA = enableHWA;\n }\n create(width, height) {\n if (width <= 0 || height <= 0) {\n throw new Error(\"Invalid canvas size\");\n }\n const canvas = this._createCanvas(width, height);\n return {\n canvas,\n context: canvas.getContext(\"2d\", {\n willReadFrequently: !this.#enableHWA\n })\n };\n }\n reset(canvasAndContext, width, height) {\n if (!canvasAndContext.canvas) {\n throw new Error(\"Canvas is not specified\");\n }\n if (width <= 0 || height <= 0) {\n throw new Error(\"Invalid canvas size\");\n }\n canvasAndContext.canvas.width = width;\n canvasAndContext.canvas.height = height;\n }\n destroy(canvasAndContext) {\n if (!canvasAndContext.canvas) {\n throw new Error(\"Canvas is not specified\");\n }\n canvasAndContext.canvas.width = 0;\n canvasAndContext.canvas.height = 0;\n canvasAndContext.canvas = null;\n canvasAndContext.context = null;\n }\n _createCanvas(width, height) {\n unreachable(\"Abstract method `_createCanvas` called.\");\n }\n}\nclass BaseCMapReaderFactory {\n constructor({\n baseUrl = null,\n isCompressed = true\n }) {\n if (this.constructor === BaseCMapReaderFactory) {\n unreachable(\"Cannot initialize BaseCMapReaderFactory.\");\n }\n this.baseUrl = baseUrl;\n this.isCompressed = isCompressed;\n }\n async fetch({\n name\n }) {\n if (!this.baseUrl) {\n throw new Error('The CMap \"baseUrl\" parameter must be specified, ensure that ' + 'the \"cMapUrl\" and \"cMapPacked\" API parameters are provided.');\n }\n if (!name) {\n throw new Error(\"CMap name must be specified.\");\n }\n const url = this.baseUrl + name + (this.isCompressed ? \".bcmap\" : \"\");\n const compressionType = this.isCompressed ? CMapCompressionType.BINARY : CMapCompressionType.NONE;\n return this._fetchData(url, compressionType).catch(reason => {\n throw new Error(`Unable to load ${this.isCompressed ? 
\"binary \" : \"\"}CMap at: ${url}`);\n });\n }\n _fetchData(url, compressionType) {\n unreachable(\"Abstract method `_fetchData` called.\");\n }\n}\nclass BaseStandardFontDataFactory {\n constructor({\n baseUrl = null\n }) {\n if (this.constructor === BaseStandardFontDataFactory) {\n unreachable(\"Cannot initialize BaseStandardFontDataFactory.\");\n }\n this.baseUrl = baseUrl;\n }\n async fetch({\n filename\n }) {\n if (!this.baseUrl) {\n throw new Error('The standard font \"baseUrl\" parameter must be specified, ensure that ' + 'the \"standardFontDataUrl\" API parameter is provided.');\n }\n if (!filename) {\n throw new Error(\"Font filename must be specified.\");\n }\n const url = `${this.baseUrl}${filename}`;\n return this._fetchData(url).catch(reason => {\n throw new Error(`Unable to load font data at: ${url}`);\n });\n }\n _fetchData(url) {\n unreachable(\"Abstract method `_fetchData` called.\");\n }\n}\nclass BaseSVGFactory {\n constructor() {\n if (this.constructor === BaseSVGFactory) {\n unreachable(\"Cannot initialize BaseSVGFactory.\");\n }\n }\n create(width, height, skipDimensions = false) {\n if (width <= 0 || height <= 0) {\n throw new Error(\"Invalid SVG dimensions\");\n }\n const svg = this._createSVG(\"svg:svg\");\n svg.setAttribute(\"version\", \"1.1\");\n if (!skipDimensions) {\n svg.setAttribute(\"width\", `${width}px`);\n svg.setAttribute(\"height\", `${height}px`);\n }\n svg.setAttribute(\"preserveAspectRatio\", \"none\");\n svg.setAttribute(\"viewBox\", `0 0 ${width} ${height}`);\n return svg;\n }\n createElement(type) {\n if (typeof type !== \"string\") {\n throw new Error(\"Invalid SVG element type\");\n }\n return this._createSVG(type);\n }\n _createSVG(type) {\n unreachable(\"Abstract method `_createSVG` called.\");\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/display_utils.js\n\n\nconst SVG_NS = \"http://www.w3.org/2000/svg\";\nclass PixelsPerInch {\n static CSS = 96.0;\n static PDF = 72.0;\n static PDF_TO_CSS_UNITS = this.CSS / this.PDF;\n}\nclass DOMFilterFactory extends BaseFilterFactory {\n #_cache;\n #_defs;\n #docId;\n #document;\n #_hcmCache;\n #id = 0;\n constructor({\n docId,\n ownerDocument = globalThis.document\n } = {}) {\n super();\n this.#docId = docId;\n this.#document = ownerDocument;\n }\n get #cache() {\n return this.#_cache ||= new Map();\n }\n get #hcmCache() {\n return this.#_hcmCache ||= new Map();\n }\n get #defs() {\n if (!this.#_defs) {\n const div = this.#document.createElement(\"div\");\n const {\n style\n } = div;\n style.visibility = \"hidden\";\n style.contain = \"strict\";\n style.width = style.height = 0;\n style.position = \"absolute\";\n style.top = style.left = 0;\n style.zIndex = -1;\n const svg = this.#document.createElementNS(SVG_NS, \"svg\");\n svg.setAttribute(\"width\", 0);\n svg.setAttribute(\"height\", 0);\n this.#_defs = this.#document.createElementNS(SVG_NS, \"defs\");\n div.append(svg);\n svg.append(this.#_defs);\n this.#document.body.append(div);\n }\n return this.#_defs;\n }\n #createTables(maps) {\n if (maps.length === 1) {\n const mapR = maps[0];\n const buffer = new Array(256);\n for (let i = 0; i < 256; i++) {\n buffer[i] = mapR[i] / 255;\n }\n const table = buffer.join(\",\");\n return [table, table, table];\n }\n const [mapR, mapG, mapB] = maps;\n const bufferR = new Array(256);\n const bufferG = new Array(256);\n const bufferB = new Array(256);\n for (let i = 0; i < 256; i++) {\n bufferR[i] = mapR[i] / 255;\n bufferG[i] = mapG[i] / 255;\n bufferB[i] = mapB[i] / 255;\n }\n return [bufferR.join(\",\"), 
bufferG.join(\",\"), bufferB.join(\",\")];\n }\n addFilter(maps) {\n if (!maps) {\n return \"none\";\n }\n let value = this.#cache.get(maps);\n if (value) {\n return value;\n }\n const [tableR, tableG, tableB] = this.#createTables(maps);\n const key = maps.length === 1 ? tableR : `${tableR}${tableG}${tableB}`;\n value = this.#cache.get(key);\n if (value) {\n this.#cache.set(maps, value);\n return value;\n }\n const id = `g_${this.#docId}_transfer_map_${this.#id++}`;\n const url = `url(#${id})`;\n this.#cache.set(maps, url);\n this.#cache.set(key, url);\n const filter = this.#createFilter(id);\n this.#addTransferMapConversion(tableR, tableG, tableB, filter);\n return url;\n }\n addHCMFilter(fgColor, bgColor) {\n const key = `${fgColor}-${bgColor}`;\n const filterName = \"base\";\n let info = this.#hcmCache.get(filterName);\n if (info?.key === key) {\n return info.url;\n }\n if (info) {\n info.filter?.remove();\n info.key = key;\n info.url = \"none\";\n info.filter = null;\n } else {\n info = {\n key,\n url: \"none\",\n filter: null\n };\n this.#hcmCache.set(filterName, info);\n }\n if (!fgColor || !bgColor) {\n return info.url;\n }\n const fgRGB = this.#getRGB(fgColor);\n fgColor = Util.makeHexColor(...fgRGB);\n const bgRGB = this.#getRGB(bgColor);\n bgColor = Util.makeHexColor(...bgRGB);\n this.#defs.style.color = \"\";\n if (fgColor === \"#000000\" && bgColor === \"#ffffff\" || fgColor === bgColor) {\n return info.url;\n }\n const map = new Array(256);\n for (let i = 0; i <= 255; i++) {\n const x = i / 255;\n map[i] = x <= 0.03928 ? x / 12.92 : ((x + 0.055) / 1.055) ** 2.4;\n }\n const table = map.join(\",\");\n const id = `g_${this.#docId}_hcm_filter`;\n const filter = info.filter = this.#createFilter(id);\n this.#addTransferMapConversion(table, table, table, filter);\n this.#addGrayConversion(filter);\n const getSteps = (c, n) => {\n const start = fgRGB[c] / 255;\n const end = bgRGB[c] / 255;\n const arr = new Array(n + 1);\n for (let i = 0; i <= n; i++) {\n arr[i] = start + i / n * (end - start);\n }\n return arr.join(\",\");\n };\n this.#addTransferMapConversion(getSteps(0, 5), getSteps(1, 5), getSteps(2, 5), filter);\n info.url = `url(#${id})`;\n return info.url;\n }\n addAlphaFilter(map) {\n let value = this.#cache.get(map);\n if (value) {\n return value;\n }\n const [tableA] = this.#createTables([map]);\n const key = `alpha_${tableA}`;\n value = this.#cache.get(key);\n if (value) {\n this.#cache.set(map, value);\n return value;\n }\n const id = `g_${this.#docId}_alpha_map_${this.#id++}`;\n const url = `url(#${id})`;\n this.#cache.set(map, url);\n this.#cache.set(key, url);\n const filter = this.#createFilter(id);\n this.#addTransferMapAlphaConversion(tableA, filter);\n return url;\n }\n addLuminosityFilter(map) {\n let value = this.#cache.get(map || \"luminosity\");\n if (value) {\n return value;\n }\n let tableA, key;\n if (map) {\n [tableA] = this.#createTables([map]);\n key = `luminosity_${tableA}`;\n } else {\n key = \"luminosity\";\n }\n value = this.#cache.get(key);\n if (value) {\n this.#cache.set(map, value);\n return value;\n }\n const id = `g_${this.#docId}_luminosity_map_${this.#id++}`;\n const url = `url(#${id})`;\n this.#cache.set(map, url);\n this.#cache.set(key, url);\n const filter = this.#createFilter(id);\n this.#addLuminosityConversion(filter);\n if (map) {\n this.#addTransferMapAlphaConversion(tableA, filter);\n }\n return url;\n }\n addHighlightHCMFilter(filterName, fgColor, bgColor, newFgColor, newBgColor) {\n const key = 
`${fgColor}-${bgColor}-${newFgColor}-${newBgColor}`;\n let info = this.#hcmCache.get(filterName);\n if (info?.key === key) {\n return info.url;\n }\n if (info) {\n info.filter?.remove();\n info.key = key;\n info.url = \"none\";\n info.filter = null;\n } else {\n info = {\n key,\n url: \"none\",\n filter: null\n };\n this.#hcmCache.set(filterName, info);\n }\n if (!fgColor || !bgColor) {\n return info.url;\n }\n const [fgRGB, bgRGB] = [fgColor, bgColor].map(this.#getRGB.bind(this));\n let fgGray = Math.round(0.2126 * fgRGB[0] + 0.7152 * fgRGB[1] + 0.0722 * fgRGB[2]);\n let bgGray = Math.round(0.2126 * bgRGB[0] + 0.7152 * bgRGB[1] + 0.0722 * bgRGB[2]);\n let [newFgRGB, newBgRGB] = [newFgColor, newBgColor].map(this.#getRGB.bind(this));\n if (bgGray < fgGray) {\n [fgGray, bgGray, newFgRGB, newBgRGB] = [bgGray, fgGray, newBgRGB, newFgRGB];\n }\n this.#defs.style.color = \"\";\n const getSteps = (fg, bg, n) => {\n const arr = new Array(256);\n const step = (bgGray - fgGray) / n;\n const newStart = fg / 255;\n const newStep = (bg - fg) / (255 * n);\n let prev = 0;\n for (let i = 0; i <= n; i++) {\n const k = Math.round(fgGray + i * step);\n const value = newStart + i * newStep;\n for (let j = prev; j <= k; j++) {\n arr[j] = value;\n }\n prev = k + 1;\n }\n for (let i = prev; i < 256; i++) {\n arr[i] = arr[prev - 1];\n }\n return arr.join(\",\");\n };\n const id = `g_${this.#docId}_hcm_${filterName}_filter`;\n const filter = info.filter = this.#createFilter(id);\n this.#addGrayConversion(filter);\n this.#addTransferMapConversion(getSteps(newFgRGB[0], newBgRGB[0], 5), getSteps(newFgRGB[1], newBgRGB[1], 5), getSteps(newFgRGB[2], newBgRGB[2], 5), filter);\n info.url = `url(#${id})`;\n return info.url;\n }\n destroy(keepHCM = false) {\n if (keepHCM && this.#hcmCache.size !== 0) {\n return;\n }\n if (this.#_defs) {\n this.#_defs.parentNode.parentNode.remove();\n this.#_defs = null;\n }\n if (this.#_cache) {\n this.#_cache.clear();\n this.#_cache = null;\n }\n this.#id = 0;\n }\n #addLuminosityConversion(filter) {\n const feColorMatrix = this.#document.createElementNS(SVG_NS, \"feColorMatrix\");\n feColorMatrix.setAttribute(\"type\", \"matrix\");\n feColorMatrix.setAttribute(\"values\", \"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.3 0.59 0.11 0 0\");\n filter.append(feColorMatrix);\n }\n #addGrayConversion(filter) {\n const feColorMatrix = this.#document.createElementNS(SVG_NS, \"feColorMatrix\");\n feColorMatrix.setAttribute(\"type\", \"matrix\");\n feColorMatrix.setAttribute(\"values\", \"0.2126 0.7152 0.0722 0 0 0.2126 0.7152 0.0722 0 0 0.2126 0.7152 0.0722 0 0 0 0 0 1 0\");\n filter.append(feColorMatrix);\n }\n #createFilter(id) {\n const filter = this.#document.createElementNS(SVG_NS, \"filter\");\n filter.setAttribute(\"color-interpolation-filters\", \"sRGB\");\n filter.setAttribute(\"id\", id);\n this.#defs.append(filter);\n return filter;\n }\n #appendFeFunc(feComponentTransfer, func, table) {\n const feFunc = this.#document.createElementNS(SVG_NS, func);\n feFunc.setAttribute(\"type\", \"discrete\");\n feFunc.setAttribute(\"tableValues\", table);\n feComponentTransfer.append(feFunc);\n }\n #addTransferMapConversion(rTable, gTable, bTable, filter) {\n const feComponentTransfer = this.#document.createElementNS(SVG_NS, \"feComponentTransfer\");\n filter.append(feComponentTransfer);\n this.#appendFeFunc(feComponentTransfer, \"feFuncR\", rTable);\n this.#appendFeFunc(feComponentTransfer, \"feFuncG\", gTable);\n this.#appendFeFunc(feComponentTransfer, \"feFuncB\", bTable);\n }\n 
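/* Note (sketch): the 256-entry map built in addHCMFilter above is the standard
   sRGB-to-linear conversion, x <= 0.03928 ? x / 12.92 : ((x + 0.055) / 1.055) ** 2.4,
   so map[0] === 0 and map[255] === 1; #addGrayConversion then weights the channels
   with the Rec. 709 luma coefficients (0.2126, 0.7152, 0.0722).
*/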
#addTransferMapAlphaConversion(aTable, filter) {\n const feComponentTransfer = this.#document.createElementNS(SVG_NS, \"feComponentTransfer\");\n filter.append(feComponentTransfer);\n this.#appendFeFunc(feComponentTransfer, \"feFuncA\", aTable);\n }\n #getRGB(color) {\n this.#defs.style.color = color;\n return getRGB(getComputedStyle(this.#defs).getPropertyValue(\"color\"));\n }\n}\nclass DOMCanvasFactory extends BaseCanvasFactory {\n constructor({\n ownerDocument = globalThis.document,\n enableHWA = false\n } = {}) {\n super({\n enableHWA\n });\n this._document = ownerDocument;\n }\n _createCanvas(width, height) {\n const canvas = this._document.createElement(\"canvas\");\n canvas.width = width;\n canvas.height = height;\n return canvas;\n }\n}\nasync function fetchData(url, type = \"text\") {\n if (isValidFetchUrl(url, document.baseURI)) {\n const response = await fetch(url);\n if (!response.ok) {\n throw new Error(response.statusText);\n }\n switch (type) {\n case \"arraybuffer\":\n return response.arrayBuffer();\n case \"blob\":\n return response.blob();\n case \"json\":\n return response.json();\n }\n return response.text();\n }\n return new Promise((resolve, reject) => {\n const request = new XMLHttpRequest();\n request.open(\"GET\", url, true);\n request.responseType = type;\n request.onreadystatechange = () => {\n if (request.readyState !== XMLHttpRequest.DONE) {\n return;\n }\n if (request.status === 200 || request.status === 0) {\n switch (type) {\n case \"arraybuffer\":\n case \"blob\":\n case \"json\":\n resolve(request.response);\n return;\n }\n resolve(request.responseText);\n return;\n }\n reject(new Error(request.statusText));\n };\n request.send(null);\n });\n}\nclass DOMCMapReaderFactory extends BaseCMapReaderFactory {\n _fetchData(url, compressionType) {\n return fetchData(url, this.isCompressed ? \"arraybuffer\" : \"text\").then(data => ({\n cMapData: data instanceof ArrayBuffer ? 
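/* Usage sketch for the fetchData helper above (illustrative URL): it uses fetch()
   for http(s) URLs and falls back to XMLHttpRequest otherwise, resolving with text
   by default or a typed result when a type is passed. Inside an async function:
     const manifest = await fetchData("https://example.com/manifest.json", "json");
*/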
new Uint8Array(data) : stringToBytes(data),\n compressionType\n }));\n }\n}\nclass DOMStandardFontDataFactory extends BaseStandardFontDataFactory {\n _fetchData(url) {\n return fetchData(url, \"arraybuffer\").then(data => new Uint8Array(data));\n }\n}\nclass DOMSVGFactory extends BaseSVGFactory {\n _createSVG(type) {\n return document.createElementNS(SVG_NS, type);\n }\n}\nclass PageViewport {\n constructor({\n viewBox,\n scale,\n rotation,\n offsetX = 0,\n offsetY = 0,\n dontFlip = false\n }) {\n this.viewBox = viewBox;\n this.scale = scale;\n this.rotation = rotation;\n this.offsetX = offsetX;\n this.offsetY = offsetY;\n const centerX = (viewBox[2] + viewBox[0]) / 2;\n const centerY = (viewBox[3] + viewBox[1]) / 2;\n let rotateA, rotateB, rotateC, rotateD;\n rotation %= 360;\n if (rotation < 0) {\n rotation += 360;\n }\n switch (rotation) {\n case 180:\n rotateA = -1;\n rotateB = 0;\n rotateC = 0;\n rotateD = 1;\n break;\n case 90:\n rotateA = 0;\n rotateB = 1;\n rotateC = 1;\n rotateD = 0;\n break;\n case 270:\n rotateA = 0;\n rotateB = -1;\n rotateC = -1;\n rotateD = 0;\n break;\n case 0:\n rotateA = 1;\n rotateB = 0;\n rotateC = 0;\n rotateD = -1;\n break;\n default:\n throw new Error(\"PageViewport: Invalid rotation, must be a multiple of 90 degrees.\");\n }\n if (dontFlip) {\n rotateC = -rotateC;\n rotateD = -rotateD;\n }\n let offsetCanvasX, offsetCanvasY;\n let width, height;\n if (rotateA === 0) {\n offsetCanvasX = Math.abs(centerY - viewBox[1]) * scale + offsetX;\n offsetCanvasY = Math.abs(centerX - viewBox[0]) * scale + offsetY;\n width = (viewBox[3] - viewBox[1]) * scale;\n height = (viewBox[2] - viewBox[0]) * scale;\n } else {\n offsetCanvasX = Math.abs(centerX - viewBox[0]) * scale + offsetX;\n offsetCanvasY = Math.abs(centerY - viewBox[1]) * scale + offsetY;\n width = (viewBox[2] - viewBox[0]) * scale;\n height = (viewBox[3] - viewBox[1]) * scale;\n }\n this.transform = [rotateA * scale, rotateB * scale, rotateC * scale, rotateD * scale, offsetCanvasX - rotateA * scale * centerX - rotateC * scale * centerY, offsetCanvasY - rotateB * scale * centerX - rotateD * scale * centerY];\n this.width = width;\n this.height = height;\n }\n get rawDims() {\n const {\n viewBox\n } = this;\n return shadow(this, \"rawDims\", {\n pageWidth: viewBox[2] - viewBox[0],\n pageHeight: viewBox[3] - viewBox[1],\n pageX: viewBox[0],\n pageY: viewBox[1]\n });\n }\n clone({\n scale = this.scale,\n rotation = this.rotation,\n offsetX = this.offsetX,\n offsetY = this.offsetY,\n dontFlip = false\n } = {}) {\n return new PageViewport({\n viewBox: this.viewBox.slice(),\n scale,\n rotation,\n offsetX,\n offsetY,\n dontFlip\n });\n }\n convertToViewportPoint(x, y) {\n return Util.applyTransform([x, y], this.transform);\n }\n convertToViewportRectangle(rect) {\n const topLeft = Util.applyTransform([rect[0], rect[1]], this.transform);\n const bottomRight = Util.applyTransform([rect[2], rect[3]], this.transform);\n return [topLeft[0], topLeft[1], bottomRight[0], bottomRight[1]];\n }\n convertToPdfPoint(x, y) {\n return Util.applyInverseTransform([x, y], this.transform);\n }\n}\nclass RenderingCancelledException extends BaseException {\n constructor(msg, extraDelay = 0) {\n super(msg, \"RenderingCancelledException\");\n this.extraDelay = extraDelay;\n }\n}\nfunction isDataScheme(url) {\n const ii = url.length;\n let i = 0;\n while (i < ii && url[i].trim() === \"\") {\n i++;\n }\n return url.substring(i, i + 5).toLowerCase() === \"data:\";\n}\nfunction isPdfFile(filename) {\n return typeof filename === 
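/* Sketch: building a PageViewport for a US-Letter page (viewBox in PDF units, where
   1 unit = 1/72 inch; PixelsPerInch.PDF_TO_CSS_UNITS = 96 / 72 converts to CSS px).
   Illustrative values:
     const viewport = new PageViewport({ viewBox: [0, 0, 612, 792], scale: 1.5, rotation: 0 });
     viewport.width;                         // 918
     viewport.convertToViewportPoint(0, 0);  // PDF bottom-left -> [0, 1188] in viewport space
*/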
\"string\" && /\\.pdf$/i.test(filename);\n}\nfunction getFilenameFromUrl(url) {\n [url] = url.split(/[#?]/, 1);\n return url.substring(url.lastIndexOf(\"/\") + 1);\n}\nfunction getPdfFilenameFromUrl(url, defaultFilename = \"document.pdf\") {\n if (typeof url !== \"string\") {\n return defaultFilename;\n }\n if (isDataScheme(url)) {\n warn('getPdfFilenameFromUrl: ignore \"data:\"-URL for performance reasons.');\n return defaultFilename;\n }\n const reURI = /^(?:(?:[^:]+:)?\\/\\/[^/]+)?([^?#]*)(\\?[^#]*)?(#.*)?$/;\n const reFilename = /[^/?#=]+\\.pdf\\b(?!.*\\.pdf\\b)/i;\n const splitURI = reURI.exec(url);\n let suggestedFilename = reFilename.exec(splitURI[1]) || reFilename.exec(splitURI[2]) || reFilename.exec(splitURI[3]);\n if (suggestedFilename) {\n suggestedFilename = suggestedFilename[0];\n if (suggestedFilename.includes(\"%\")) {\n try {\n suggestedFilename = reFilename.exec(decodeURIComponent(suggestedFilename))[0];\n } catch {}\n }\n }\n return suggestedFilename || defaultFilename;\n}\nclass StatTimer {\n started = Object.create(null);\n times = [];\n time(name) {\n if (name in this.started) {\n warn(`Timer is already running for ${name}`);\n }\n this.started[name] = Date.now();\n }\n timeEnd(name) {\n if (!(name in this.started)) {\n warn(`Timer has not been started for ${name}`);\n }\n this.times.push({\n name,\n start: this.started[name],\n end: Date.now()\n });\n delete this.started[name];\n }\n toString() {\n const outBuf = [];\n let longest = 0;\n for (const {\n name\n } of this.times) {\n longest = Math.max(name.length, longest);\n }\n for (const {\n name,\n start,\n end\n } of this.times) {\n outBuf.push(`${name.padEnd(longest)} ${end - start}ms\\n`);\n }\n return outBuf.join(\"\");\n }\n}\nfunction isValidFetchUrl(url, baseUrl) {\n try {\n const {\n protocol\n } = baseUrl ? new URL(url, baseUrl) : new URL(url);\n return protocol === \"http:\" || protocol === \"https:\";\n } catch {\n return false;\n }\n}\nfunction noContextMenu(e) {\n e.preventDefault();\n}\nfunction deprecated(details) {\n console.log(\"Deprecated API usage: \" + details);\n}\nlet pdfDateStringRegex;\nclass PDFDateString {\n static toDateObject(input) {\n if (!input || typeof input !== \"string\") {\n return null;\n }\n pdfDateStringRegex ||= new RegExp(\"^D:\" + \"(\\\\d{4})\" + \"(\\\\d{2})?\" + \"(\\\\d{2})?\" + \"(\\\\d{2})?\" + \"(\\\\d{2})?\" + \"(\\\\d{2})?\" + \"([Z|+|-])?\" + \"(\\\\d{2})?\" + \"'?\" + \"(\\\\d{2})?\" + \"'?\");\n const matches = pdfDateStringRegex.exec(input);\n if (!matches) {\n return null;\n }\n const year = parseInt(matches[1], 10);\n let month = parseInt(matches[2], 10);\n month = month >= 1 && month <= 12 ? month - 1 : 0;\n let day = parseInt(matches[3], 10);\n day = day >= 1 && day <= 31 ? day : 1;\n let hour = parseInt(matches[4], 10);\n hour = hour >= 0 && hour <= 23 ? hour : 0;\n let minute = parseInt(matches[5], 10);\n minute = minute >= 0 && minute <= 59 ? minute : 0;\n let second = parseInt(matches[6], 10);\n second = second >= 0 && second <= 59 ? second : 0;\n const universalTimeRelation = matches[7] || \"Z\";\n let offsetHour = parseInt(matches[8], 10);\n offsetHour = offsetHour >= 0 && offsetHour <= 23 ? offsetHour : 0;\n let offsetMinute = parseInt(matches[9], 10) || 0;\n offsetMinute = offsetMinute >= 0 && offsetMinute <= 59 ? 
offsetMinute : 0;\n if (universalTimeRelation === \"-\") {\n hour += offsetHour;\n minute += offsetMinute;\n } else if (universalTimeRelation === \"+\") {\n hour -= offsetHour;\n minute -= offsetMinute;\n }\n return new Date(Date.UTC(year, month, day, hour, minute, second));\n }\n}\nfunction getXfaPageViewport(xfaPage, {\n scale = 1,\n rotation = 0\n}) {\n const {\n width,\n height\n } = xfaPage.attributes.style;\n const viewBox = [0, 0, parseInt(width), parseInt(height)];\n return new PageViewport({\n viewBox,\n scale,\n rotation\n });\n}\nfunction getRGB(color) {\n if (color.startsWith(\"#\")) {\n const colorRGB = parseInt(color.slice(1), 16);\n return [(colorRGB & 0xff0000) >> 16, (colorRGB & 0x00ff00) >> 8, colorRGB & 0x0000ff];\n }\n if (color.startsWith(\"rgb(\")) {\n return color.slice(4, -1).split(\",\").map(x => parseInt(x));\n }\n if (color.startsWith(\"rgba(\")) {\n return color.slice(5, -1).split(\",\").map(x => parseInt(x)).slice(0, 3);\n }\n warn(`Not a valid color format: \"${color}\"`);\n return [0, 0, 0];\n}\nfunction getColorValues(colors) {\n const span = document.createElement(\"span\");\n span.style.visibility = \"hidden\";\n document.body.append(span);\n for (const name of colors.keys()) {\n span.style.color = name;\n const computedColor = window.getComputedStyle(span).color;\n colors.set(name, getRGB(computedColor));\n }\n span.remove();\n}\nfunction getCurrentTransform(ctx) {\n const {\n a,\n b,\n c,\n d,\n e,\n f\n } = ctx.getTransform();\n return [a, b, c, d, e, f];\n}\nfunction getCurrentTransformInverse(ctx) {\n const {\n a,\n b,\n c,\n d,\n e,\n f\n } = ctx.getTransform().invertSelf();\n return [a, b, c, d, e, f];\n}\nfunction setLayerDimensions(div, viewport, mustFlip = false, mustRotate = true) {\n if (viewport instanceof PageViewport) {\n const {\n pageWidth,\n pageHeight\n } = viewport.rawDims;\n const {\n style\n } = div;\n const useRound = util_FeatureTest.isCSSRoundSupported;\n const w = `var(--scale-factor) * ${pageWidth}px`,\n h = `var(--scale-factor) * ${pageHeight}px`;\n const widthStr = useRound ? `round(${w}, 1px)` : `calc(${w})`,\n heightStr = useRound ? `round(${h}, 1px)` : `calc(${h})`;\n if (!mustFlip || viewport.rotation % 180 === 0) {\n style.width = widthStr;\n style.height = heightStr;\n } else {\n style.width = heightStr;\n style.height = widthStr;\n }\n }\n if (mustRotate) {\n div.setAttribute(\"data-main-rotation\", viewport.rotation);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/toolbar.js\n\nclass EditorToolbar {\n #toolbar = null;\n #colorPicker = null;\n #editor;\n #buttons = null;\n constructor(editor) {\n this.#editor = editor;\n }\n render() {\n const editToolbar = this.#toolbar = document.createElement(\"div\");\n editToolbar.className = \"editToolbar\";\n editToolbar.setAttribute(\"role\", \"toolbar\");\n const signal = this.#editor._uiManager._signal;\n editToolbar.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n editToolbar.addEventListener(\"pointerdown\", EditorToolbar.#pointerDown, {\n signal\n });\n const buttons = this.#buttons = document.createElement(\"div\");\n buttons.className = \"buttons\";\n editToolbar.append(buttons);\n const position = this.#editor.toolbarPosition;\n if (position) {\n const {\n style\n } = editToolbar;\n const x = this.#editor._uiManager.direction === \"ltr\" ? 
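/* Sketches for the display_utils helpers above (illustrative values only):
     PDFDateString.toDateObject("D:20240102030405Z");  // -> Date for 2024-01-02T03:04:05Z
     getRGB("#ff8000");                                 // -> [255, 128, 0]
     getRGB("rgb(12, 34, 56)");                         // -> [12, 34, 56]
*/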
1 - position[0] : position[0];\n style.insetInlineEnd = `${100 * x}%`;\n style.top = `calc(${100 * position[1]}% + var(--editor-toolbar-vert-offset))`;\n }\n this.#addDeleteButton();\n return editToolbar;\n }\n static #pointerDown(e) {\n e.stopPropagation();\n }\n #focusIn(e) {\n this.#editor._focusEventsAllowed = false;\n e.preventDefault();\n e.stopPropagation();\n }\n #focusOut(e) {\n this.#editor._focusEventsAllowed = true;\n e.preventDefault();\n e.stopPropagation();\n }\n #addListenersToElement(element) {\n const signal = this.#editor._uiManager._signal;\n element.addEventListener(\"focusin\", this.#focusIn.bind(this), {\n capture: true,\n signal\n });\n element.addEventListener(\"focusout\", this.#focusOut.bind(this), {\n capture: true,\n signal\n });\n element.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n }\n hide() {\n this.#toolbar.classList.add(\"hidden\");\n this.#colorPicker?.hideDropdown();\n }\n show() {\n this.#toolbar.classList.remove(\"hidden\");\n }\n #addDeleteButton() {\n const button = document.createElement(\"button\");\n button.className = \"delete\";\n button.tabIndex = 0;\n button.setAttribute(\"data-l10n-id\", `pdfjs-editor-remove-${this.#editor.editorType}-button`);\n this.#addListenersToElement(button);\n button.addEventListener(\"click\", e => {\n this.#editor._uiManager.delete();\n }, {\n signal: this.#editor._uiManager._signal\n });\n this.#buttons.append(button);\n }\n get #divider() {\n const divider = document.createElement(\"div\");\n divider.className = \"divider\";\n return divider;\n }\n addAltTextButton(button) {\n this.#addListenersToElement(button);\n this.#buttons.prepend(button, this.#divider);\n }\n addColorPicker(colorPicker) {\n this.#colorPicker = colorPicker;\n const button = colorPicker.renderButton();\n this.#addListenersToElement(button);\n this.#buttons.prepend(button, this.#divider);\n }\n remove() {\n this.#toolbar.remove();\n this.#colorPicker?.destroy();\n this.#colorPicker = null;\n }\n}\nclass HighlightToolbar {\n #buttons = null;\n #toolbar = null;\n #uiManager;\n constructor(uiManager) {\n this.#uiManager = uiManager;\n }\n #render() {\n const editToolbar = this.#toolbar = document.createElement(\"div\");\n editToolbar.className = \"editToolbar\";\n editToolbar.setAttribute(\"role\", \"toolbar\");\n editToolbar.addEventListener(\"contextmenu\", noContextMenu, {\n signal: this.#uiManager._signal\n });\n const buttons = this.#buttons = document.createElement(\"div\");\n buttons.className = \"buttons\";\n editToolbar.append(buttons);\n this.#addHighlightButton();\n return editToolbar;\n }\n #getLastPoint(boxes, isLTR) {\n let lastY = 0;\n let lastX = 0;\n for (const box of boxes) {\n const y = box.y + box.height;\n if (y < lastY) {\n continue;\n }\n const x = box.x + (isLTR ? box.width : 0);\n if (y > lastY) {\n lastX = x;\n lastY = y;\n continue;\n }\n if (isLTR) {\n if (x > lastX) {\n lastX = x;\n }\n } else if (x < lastX) {\n lastX = x;\n }\n }\n return [isLTR ? 
1 - lastX : lastX, lastY];\n }\n show(parent, boxes, isLTR) {\n const [x, y] = this.#getLastPoint(boxes, isLTR);\n const {\n style\n } = this.#toolbar ||= this.#render();\n parent.append(this.#toolbar);\n style.insetInlineEnd = `${100 * x}%`;\n style.top = `calc(${100 * y}% + var(--editor-toolbar-vert-offset))`;\n }\n hide() {\n this.#toolbar.remove();\n }\n #addHighlightButton() {\n const button = document.createElement(\"button\");\n button.className = \"highlightButton\";\n button.tabIndex = 0;\n button.setAttribute(\"data-l10n-id\", `pdfjs-highlight-floating-button1`);\n const span = document.createElement(\"span\");\n button.append(span);\n span.className = \"visuallyHidden\";\n span.setAttribute(\"data-l10n-id\", \"pdfjs-highlight-floating-button-label\");\n const signal = this.#uiManager._signal;\n button.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n button.addEventListener(\"click\", () => {\n this.#uiManager.highlightSelection(\"floating_button\");\n }, {\n signal\n });\n this.#buttons.append(button);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/tools.js\n\n\n\nfunction bindEvents(obj, element, names) {\n for (const name of names) {\n element.addEventListener(name, obj[name].bind(obj));\n }\n}\nfunction opacityToHex(opacity) {\n return Math.round(Math.min(255, Math.max(1, 255 * opacity))).toString(16).padStart(2, \"0\");\n}\nclass IdManager {\n #id = 0;\n get id() {\n return `${AnnotationEditorPrefix}${this.#id++}`;\n }\n}\nclass ImageManager {\n #baseId = getUuid();\n #id = 0;\n #cache = null;\n static get _isSVGFittingCanvas() {\n const svg = `data:image/svg+xml;charset=UTF-8, `;\n const canvas = new OffscreenCanvas(1, 3);\n const ctx = canvas.getContext(\"2d\", {\n willReadFrequently: true\n });\n const image = new Image();\n image.src = svg;\n const promise = image.decode().then(() => {\n ctx.drawImage(image, 0, 0, 1, 1, 0, 0, 1, 3);\n return new Uint32Array(ctx.getImageData(0, 0, 1, 1).data.buffer)[0] === 0;\n });\n return shadow(this, \"_isSVGFittingCanvas\", promise);\n }\n async #get(key, rawData) {\n this.#cache ||= new Map();\n let data = this.#cache.get(key);\n if (data === null) {\n return null;\n }\n if (data?.bitmap) {\n data.refCounter += 1;\n return data;\n }\n try {\n data ||= {\n bitmap: null,\n id: `image_${this.#baseId}_${this.#id++}`,\n refCounter: 0,\n isSvg: false\n };\n let image;\n if (typeof rawData === \"string\") {\n data.url = rawData;\n image = await fetchData(rawData, \"blob\");\n } else {\n image = data.file = rawData;\n }\n if (image.type === \"image/svg+xml\") {\n const mustRemoveAspectRatioPromise = ImageManager._isSVGFittingCanvas;\n const fileReader = new FileReader();\n const imageElement = new Image();\n const imagePromise = new Promise((resolve, reject) => {\n imageElement.onload = () => {\n data.bitmap = imageElement;\n data.isSvg = true;\n resolve();\n };\n fileReader.onload = async () => {\n const url = data.svgUrl = fileReader.result;\n imageElement.src = (await mustRemoveAspectRatioPromise) ? 
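/* Sketch: opacityToHex maps an opacity in (0, 1] to a two-digit hex alpha component.
   Illustrative values:
     opacityToHex(1);    // -> "ff"
     opacityToHex(0.5);  // -> "80"
*/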
`${url}#svgView(preserveAspectRatio(none))` : url;\n };\n imageElement.onerror = fileReader.onerror = reject;\n });\n fileReader.readAsDataURL(image);\n await imagePromise;\n } else {\n data.bitmap = await createImageBitmap(image);\n }\n data.refCounter = 1;\n } catch (e) {\n console.error(e);\n data = null;\n }\n this.#cache.set(key, data);\n if (data) {\n this.#cache.set(data.id, data);\n }\n return data;\n }\n async getFromFile(file) {\n const {\n lastModified,\n name,\n size,\n type\n } = file;\n return this.#get(`${lastModified}_${name}_${size}_${type}`, file);\n }\n async getFromUrl(url) {\n return this.#get(url, url);\n }\n async getFromId(id) {\n this.#cache ||= new Map();\n const data = this.#cache.get(id);\n if (!data) {\n return null;\n }\n if (data.bitmap) {\n data.refCounter += 1;\n return data;\n }\n if (data.file) {\n return this.getFromFile(data.file);\n }\n return this.getFromUrl(data.url);\n }\n getSvgUrl(id) {\n const data = this.#cache.get(id);\n if (!data?.isSvg) {\n return null;\n }\n return data.svgUrl;\n }\n deleteId(id) {\n this.#cache ||= new Map();\n const data = this.#cache.get(id);\n if (!data) {\n return;\n }\n data.refCounter -= 1;\n if (data.refCounter !== 0) {\n return;\n }\n data.bitmap = null;\n }\n isValidId(id) {\n return id.startsWith(`image_${this.#baseId}_`);\n }\n}\nclass CommandManager {\n #commands = [];\n #locked = false;\n #maxSize;\n #position = -1;\n constructor(maxSize = 128) {\n this.#maxSize = maxSize;\n }\n add({\n cmd,\n undo,\n post,\n mustExec,\n type = NaN,\n overwriteIfSameType = false,\n keepUndo = false\n }) {\n if (mustExec) {\n cmd();\n }\n if (this.#locked) {\n return;\n }\n const save = {\n cmd,\n undo,\n post,\n type\n };\n if (this.#position === -1) {\n if (this.#commands.length > 0) {\n this.#commands.length = 0;\n }\n this.#position = 0;\n this.#commands.push(save);\n return;\n }\n if (overwriteIfSameType && this.#commands[this.#position].type === type) {\n if (keepUndo) {\n save.undo = this.#commands[this.#position].undo;\n }\n this.#commands[this.#position] = save;\n return;\n }\n const next = this.#position + 1;\n if (next === this.#maxSize) {\n this.#commands.splice(0, 1);\n } else {\n this.#position = next;\n if (next < this.#commands.length) {\n this.#commands.splice(next);\n }\n }\n this.#commands.push(save);\n }\n undo() {\n if (this.#position === -1) {\n return;\n }\n this.#locked = true;\n const {\n undo,\n post\n } = this.#commands[this.#position];\n undo();\n post?.();\n this.#locked = false;\n this.#position -= 1;\n }\n redo() {\n if (this.#position < this.#commands.length - 1) {\n this.#position += 1;\n this.#locked = true;\n const {\n cmd,\n post\n } = this.#commands[this.#position];\n cmd();\n post?.();\n this.#locked = false;\n }\n }\n hasSomethingToUndo() {\n return this.#position !== -1;\n }\n hasSomethingToRedo() {\n return this.#position < this.#commands.length - 1;\n }\n destroy() {\n this.#commands = null;\n }\n}\nclass KeyboardManager {\n constructor(callbacks) {\n this.buffer = [];\n this.callbacks = new Map();\n this.allKeys = new Set();\n const {\n isMac\n } = util_FeatureTest.platform;\n for (const [keys, callback, options = {}] of callbacks) {\n for (const key of keys) {\n const isMacKey = key.startsWith(\"mac+\");\n if (isMac && isMacKey) {\n this.callbacks.set(key.slice(4), {\n callback,\n options\n });\n this.allKeys.add(key.split(\"+\").at(-1));\n } else if (!isMac && !isMacKey) {\n this.callbacks.set(key, {\n callback,\n options\n });\n this.allKeys.add(key.split(\"+\").at(-1));\n }\n }\n 
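/* Sketch of the CommandManager undo/redo stack above (illustrative commands):
     const manager = new CommandManager();
     let n = 0;
     manager.add({ cmd: () => { n += 1; }, undo: () => { n -= 1; }, mustExec: true });  // n === 1
     manager.undo();  // n === 0
     manager.redo();  // n === 1
*/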
}\n }\n #serialize(event) {\n if (event.altKey) {\n this.buffer.push(\"alt\");\n }\n if (event.ctrlKey) {\n this.buffer.push(\"ctrl\");\n }\n if (event.metaKey) {\n this.buffer.push(\"meta\");\n }\n if (event.shiftKey) {\n this.buffer.push(\"shift\");\n }\n this.buffer.push(event.key);\n const str = this.buffer.join(\"+\");\n this.buffer.length = 0;\n return str;\n }\n exec(self, event) {\n if (!this.allKeys.has(event.key)) {\n return;\n }\n const info = this.callbacks.get(this.#serialize(event));\n if (!info) {\n return;\n }\n const {\n callback,\n options: {\n bubbles = false,\n args = [],\n checker = null\n }\n } = info;\n if (checker && !checker(self, event)) {\n return;\n }\n callback.bind(self, ...args, event)();\n if (!bubbles) {\n event.stopPropagation();\n event.preventDefault();\n }\n }\n}\nclass ColorManager {\n static _colorsMapping = new Map([[\"CanvasText\", [0, 0, 0]], [\"Canvas\", [255, 255, 255]]]);\n get _colors() {\n const colors = new Map([[\"CanvasText\", null], [\"Canvas\", null]]);\n getColorValues(colors);\n return shadow(this, \"_colors\", colors);\n }\n convert(color) {\n const rgb = getRGB(color);\n if (!window.matchMedia(\"(forced-colors: active)\").matches) {\n return rgb;\n }\n for (const [name, RGB] of this._colors) {\n if (RGB.every((x, i) => x === rgb[i])) {\n return ColorManager._colorsMapping.get(name);\n }\n }\n return rgb;\n }\n getHexCode(name) {\n const rgb = this._colors.get(name);\n if (!rgb) {\n return name;\n }\n return Util.makeHexColor(...rgb);\n }\n}\nclass AnnotationEditorUIManager {\n #abortController = new AbortController();\n #activeEditor = null;\n #allEditors = new Map();\n #allLayers = new Map();\n #altTextManager = null;\n #annotationStorage = null;\n #changedExistingAnnotations = null;\n #commandManager = new CommandManager();\n #currentPageIndex = 0;\n #deletedAnnotationsElementIds = new Set();\n #draggingEditors = null;\n #editorTypes = null;\n #editorsToRescale = new Set();\n #enableHighlightFloatingButton = false;\n #filterFactory = null;\n #focusMainContainerTimeoutId = null;\n #highlightColors = null;\n #highlightWhenShiftUp = false;\n #highlightToolbar = null;\n #idManager = new IdManager();\n #isEnabled = false;\n #isWaiting = false;\n #lastActiveElement = null;\n #mainHighlightColorPicker = null;\n #mlManager = null;\n #mode = AnnotationEditorType.NONE;\n #selectedEditors = new Set();\n #selectedTextNode = null;\n #pageColors = null;\n #showAllStates = null;\n #boundBlur = this.blur.bind(this);\n #boundFocus = this.focus.bind(this);\n #boundCopy = this.copy.bind(this);\n #boundCut = this.cut.bind(this);\n #boundPaste = this.paste.bind(this);\n #boundKeydown = this.keydown.bind(this);\n #boundKeyup = this.keyup.bind(this);\n #boundOnEditingAction = this.onEditingAction.bind(this);\n #boundOnPageChanging = this.onPageChanging.bind(this);\n #boundOnScaleChanging = this.onScaleChanging.bind(this);\n #boundOnRotationChanging = this.onRotationChanging.bind(this);\n #previousStates = {\n isEditing: false,\n isEmpty: true,\n hasSomethingToUndo: false,\n hasSomethingToRedo: false,\n hasSelectedEditor: false,\n hasSelectedText: false\n };\n #translation = [0, 0];\n #translationTimeoutId = null;\n #container = null;\n #viewer = null;\n static TRANSLATE_SMALL = 1;\n static TRANSLATE_BIG = 10;\n static get _keyboardManager() {\n const proto = AnnotationEditorUIManager.prototype;\n const arrowChecker = self => self.#container.contains(document.activeElement) && document.activeElement.tagName !== \"BUTTON\" && self.hasSomethingToControl();\n 
const textInputChecker = (_self, {\n target: el\n }) => {\n if (el instanceof HTMLInputElement) {\n const {\n type\n } = el;\n return type !== \"text\" && type !== \"number\";\n }\n return true;\n };\n const small = this.TRANSLATE_SMALL;\n const big = this.TRANSLATE_BIG;\n return shadow(this, \"_keyboardManager\", new KeyboardManager([[[\"ctrl+a\", \"mac+meta+a\"], proto.selectAll, {\n checker: textInputChecker\n }], [[\"ctrl+z\", \"mac+meta+z\"], proto.undo, {\n checker: textInputChecker\n }], [[\"ctrl+y\", \"ctrl+shift+z\", \"mac+meta+shift+z\", \"ctrl+shift+Z\", \"mac+meta+shift+Z\"], proto.redo, {\n checker: textInputChecker\n }], [[\"Backspace\", \"alt+Backspace\", \"ctrl+Backspace\", \"shift+Backspace\", \"mac+Backspace\", \"mac+alt+Backspace\", \"mac+ctrl+Backspace\", \"Delete\", \"ctrl+Delete\", \"shift+Delete\", \"mac+Delete\"], proto.delete, {\n checker: textInputChecker\n }], [[\"Enter\", \"mac+Enter\"], proto.addNewEditorFromKeyboard, {\n checker: (self, {\n target: el\n }) => !(el instanceof HTMLButtonElement) && self.#container.contains(el) && !self.isEnterHandled\n }], [[\" \", \"mac+ \"], proto.addNewEditorFromKeyboard, {\n checker: (self, {\n target: el\n }) => !(el instanceof HTMLButtonElement) && self.#container.contains(document.activeElement)\n }], [[\"Escape\", \"mac+Escape\"], proto.unselectAll], [[\"ArrowLeft\", \"mac+ArrowLeft\"], proto.translateSelectedEditors, {\n args: [-small, 0],\n checker: arrowChecker\n }], [[\"ctrl+ArrowLeft\", \"mac+shift+ArrowLeft\"], proto.translateSelectedEditors, {\n args: [-big, 0],\n checker: arrowChecker\n }], [[\"ArrowRight\", \"mac+ArrowRight\"], proto.translateSelectedEditors, {\n args: [small, 0],\n checker: arrowChecker\n }], [[\"ctrl+ArrowRight\", \"mac+shift+ArrowRight\"], proto.translateSelectedEditors, {\n args: [big, 0],\n checker: arrowChecker\n }], [[\"ArrowUp\", \"mac+ArrowUp\"], proto.translateSelectedEditors, {\n args: [0, -small],\n checker: arrowChecker\n }], [[\"ctrl+ArrowUp\", \"mac+shift+ArrowUp\"], proto.translateSelectedEditors, {\n args: [0, -big],\n checker: arrowChecker\n }], [[\"ArrowDown\", \"mac+ArrowDown\"], proto.translateSelectedEditors, {\n args: [0, small],\n checker: arrowChecker\n }], [[\"ctrl+ArrowDown\", \"mac+shift+ArrowDown\"], proto.translateSelectedEditors, {\n args: [0, big],\n checker: arrowChecker\n }]]));\n }\n constructor(container, viewer, altTextManager, eventBus, pdfDocument, pageColors, highlightColors, enableHighlightFloatingButton, mlManager) {\n this._signal = this.#abortController.signal;\n this.#container = container;\n this.#viewer = viewer;\n this.#altTextManager = altTextManager;\n this._eventBus = eventBus;\n this._eventBus._on(\"editingaction\", this.#boundOnEditingAction);\n this._eventBus._on(\"pagechanging\", this.#boundOnPageChanging);\n this._eventBus._on(\"scalechanging\", this.#boundOnScaleChanging);\n this._eventBus._on(\"rotationchanging\", this.#boundOnRotationChanging);\n this.#addSelectionListener();\n this.#addDragAndDropListeners();\n this.#addKeyboardManager();\n this.#annotationStorage = pdfDocument.annotationStorage;\n this.#filterFactory = pdfDocument.filterFactory;\n this.#pageColors = pageColors;\n this.#highlightColors = highlightColors || null;\n this.#enableHighlightFloatingButton = enableHighlightFloatingButton;\n this.#mlManager = mlManager || null;\n this.viewParameters = {\n realScale: PixelsPerInch.PDF_TO_CSS_UNITS,\n rotation: 0\n };\n this.isShiftKeyDown = false;\n }\n destroy() {\n this.#abortController?.abort();\n this.#abortController = 
null;\n this._signal = null;\n this._eventBus._off(\"editingaction\", this.#boundOnEditingAction);\n this._eventBus._off(\"pagechanging\", this.#boundOnPageChanging);\n this._eventBus._off(\"scalechanging\", this.#boundOnScaleChanging);\n this._eventBus._off(\"rotationchanging\", this.#boundOnRotationChanging);\n for (const layer of this.#allLayers.values()) {\n layer.destroy();\n }\n this.#allLayers.clear();\n this.#allEditors.clear();\n this.#editorsToRescale.clear();\n this.#activeEditor = null;\n this.#selectedEditors.clear();\n this.#commandManager.destroy();\n this.#altTextManager?.destroy();\n this.#highlightToolbar?.hide();\n this.#highlightToolbar = null;\n if (this.#focusMainContainerTimeoutId) {\n clearTimeout(this.#focusMainContainerTimeoutId);\n this.#focusMainContainerTimeoutId = null;\n }\n if (this.#translationTimeoutId) {\n clearTimeout(this.#translationTimeoutId);\n this.#translationTimeoutId = null;\n }\n }\n async mlGuess(data) {\n return this.#mlManager?.guess(data) || null;\n }\n get hasMLManager() {\n return !!this.#mlManager;\n }\n get hcmFilter() {\n return shadow(this, \"hcmFilter\", this.#pageColors ? this.#filterFactory.addHCMFilter(this.#pageColors.foreground, this.#pageColors.background) : \"none\");\n }\n get direction() {\n return shadow(this, \"direction\", getComputedStyle(this.#container).direction);\n }\n get highlightColors() {\n return shadow(this, \"highlightColors\", this.#highlightColors ? new Map(this.#highlightColors.split(\",\").map(pair => pair.split(\"=\").map(x => x.trim()))) : null);\n }\n get highlightColorNames() {\n return shadow(this, \"highlightColorNames\", this.highlightColors ? new Map(Array.from(this.highlightColors, e => e.reverse())) : null);\n }\n setMainHighlightColorPicker(colorPicker) {\n this.#mainHighlightColorPicker = colorPicker;\n }\n editAltText(editor) {\n this.#altTextManager?.editAltText(this, editor);\n }\n onPageChanging({\n pageNumber\n }) {\n this.#currentPageIndex = pageNumber - 1;\n }\n focusMainContainer() {\n this.#container.focus();\n }\n findParent(x, y) {\n for (const layer of this.#allLayers.values()) {\n const {\n x: layerX,\n y: layerY,\n width,\n height\n } = layer.div.getBoundingClientRect();\n if (x >= layerX && x <= layerX + width && y >= layerY && y <= layerY + height) {\n return layer;\n }\n }\n return null;\n }\n disableUserSelect(value = false) {\n this.#viewer.classList.toggle(\"noUserSelect\", value);\n }\n addShouldRescale(editor) {\n this.#editorsToRescale.add(editor);\n }\n removeShouldRescale(editor) {\n this.#editorsToRescale.delete(editor);\n }\n onScaleChanging({\n scale\n }) {\n this.commitOrRemove();\n this.viewParameters.realScale = scale * PixelsPerInch.PDF_TO_CSS_UNITS;\n for (const editor of this.#editorsToRescale) {\n editor.onScaleChanging();\n }\n }\n onRotationChanging({\n pagesRotation\n }) {\n this.commitOrRemove();\n this.viewParameters.rotation = pagesRotation;\n }\n #getAnchorElementForSelection({\n anchorNode\n }) {\n return anchorNode.nodeType === Node.TEXT_NODE ? 
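/* Sketch: the highlightColors getter above parses a comma-separated list of
   name=value pairs (illustrative string):
     "yellow=#FFFF98,green=#53FFBC"  ->  Map { "yellow" => "#FFFF98", "green" => "#53FFBC" }
   and highlightColorNames is the reversed mapping from hex value back to name.
*/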
anchorNode.parentElement : anchorNode;\n }\n highlightSelection(methodOfCreation = \"\") {\n const selection = document.getSelection();\n if (!selection || selection.isCollapsed) {\n return;\n }\n const {\n anchorNode,\n anchorOffset,\n focusNode,\n focusOffset\n } = selection;\n const text = selection.toString();\n const anchorElement = this.#getAnchorElementForSelection(selection);\n const textLayer = anchorElement.closest(\".textLayer\");\n const boxes = this.getSelectionBoxes(textLayer);\n if (!boxes) {\n return;\n }\n selection.empty();\n if (this.#mode === AnnotationEditorType.NONE) {\n this._eventBus.dispatch(\"showannotationeditorui\", {\n source: this,\n mode: AnnotationEditorType.HIGHLIGHT\n });\n this.showAllEditors(\"highlight\", true, true);\n }\n for (const layer of this.#allLayers.values()) {\n if (layer.hasTextLayer(textLayer)) {\n layer.createAndAddNewEditor({\n x: 0,\n y: 0\n }, false, {\n methodOfCreation,\n boxes,\n anchorNode,\n anchorOffset,\n focusNode,\n focusOffset,\n text\n });\n break;\n }\n }\n }\n #displayHighlightToolbar() {\n const selection = document.getSelection();\n if (!selection || selection.isCollapsed) {\n return;\n }\n const anchorElement = this.#getAnchorElementForSelection(selection);\n const textLayer = anchorElement.closest(\".textLayer\");\n const boxes = this.getSelectionBoxes(textLayer);\n if (!boxes) {\n return;\n }\n this.#highlightToolbar ||= new HighlightToolbar(this);\n this.#highlightToolbar.show(textLayer, boxes, this.direction === \"ltr\");\n }\n addToAnnotationStorage(editor) {\n if (!editor.isEmpty() && this.#annotationStorage && !this.#annotationStorage.has(editor.id)) {\n this.#annotationStorage.setValue(editor.id, editor);\n }\n }\n #selectionChange() {\n const selection = document.getSelection();\n if (!selection || selection.isCollapsed) {\n if (this.#selectedTextNode) {\n this.#highlightToolbar?.hide();\n this.#selectedTextNode = null;\n this.#dispatchUpdateStates({\n hasSelectedText: false\n });\n }\n return;\n }\n const {\n anchorNode\n } = selection;\n if (anchorNode === this.#selectedTextNode) {\n return;\n }\n const anchorElement = this.#getAnchorElementForSelection(selection);\n const textLayer = anchorElement.closest(\".textLayer\");\n if (!textLayer) {\n if (this.#selectedTextNode) {\n this.#highlightToolbar?.hide();\n this.#selectedTextNode = null;\n this.#dispatchUpdateStates({\n hasSelectedText: false\n });\n }\n return;\n }\n this.#highlightToolbar?.hide();\n this.#selectedTextNode = anchorNode;\n this.#dispatchUpdateStates({\n hasSelectedText: true\n });\n if (this.#mode !== AnnotationEditorType.HIGHLIGHT && this.#mode !== AnnotationEditorType.NONE) {\n return;\n }\n if (this.#mode === AnnotationEditorType.HIGHLIGHT) {\n this.showAllEditors(\"highlight\", true, true);\n }\n this.#highlightWhenShiftUp = this.isShiftKeyDown;\n if (!this.isShiftKeyDown) {\n const signal = this._signal;\n const pointerup = e => {\n if (e.type === \"pointerup\" && e.button !== 0) {\n return;\n }\n window.removeEventListener(\"pointerup\", pointerup);\n window.removeEventListener(\"blur\", pointerup);\n if (e.type === \"pointerup\") {\n this.#onSelectEnd(\"main_toolbar\");\n }\n };\n window.addEventListener(\"pointerup\", pointerup, {\n signal\n });\n window.addEventListener(\"blur\", pointerup, {\n signal\n });\n }\n }\n #onSelectEnd(methodOfCreation = \"\") {\n if (this.#mode === AnnotationEditorType.HIGHLIGHT) {\n this.highlightSelection(methodOfCreation);\n } else if (this.#enableHighlightFloatingButton) {\n 
this.#displayHighlightToolbar();\n }\n }\n #addSelectionListener() {\n document.addEventListener(\"selectionchange\", this.#selectionChange.bind(this), {\n signal: this._signal\n });\n }\n #addFocusManager() {\n const signal = this._signal;\n window.addEventListener(\"focus\", this.#boundFocus, {\n signal\n });\n window.addEventListener(\"blur\", this.#boundBlur, {\n signal\n });\n }\n #removeFocusManager() {\n window.removeEventListener(\"focus\", this.#boundFocus);\n window.removeEventListener(\"blur\", this.#boundBlur);\n }\n blur() {\n this.isShiftKeyDown = false;\n if (this.#highlightWhenShiftUp) {\n this.#highlightWhenShiftUp = false;\n this.#onSelectEnd(\"main_toolbar\");\n }\n if (!this.hasSelection) {\n return;\n }\n const {\n activeElement\n } = document;\n for (const editor of this.#selectedEditors) {\n if (editor.div.contains(activeElement)) {\n this.#lastActiveElement = [editor, activeElement];\n editor._focusEventsAllowed = false;\n break;\n }\n }\n }\n focus() {\n if (!this.#lastActiveElement) {\n return;\n }\n const [lastEditor, lastActiveElement] = this.#lastActiveElement;\n this.#lastActiveElement = null;\n lastActiveElement.addEventListener(\"focusin\", () => {\n lastEditor._focusEventsAllowed = true;\n }, {\n once: true,\n signal: this._signal\n });\n lastActiveElement.focus();\n }\n #addKeyboardManager() {\n const signal = this._signal;\n window.addEventListener(\"keydown\", this.#boundKeydown, {\n signal\n });\n window.addEventListener(\"keyup\", this.#boundKeyup, {\n signal\n });\n }\n #removeKeyboardManager() {\n window.removeEventListener(\"keydown\", this.#boundKeydown);\n window.removeEventListener(\"keyup\", this.#boundKeyup);\n }\n #addCopyPasteListeners() {\n const signal = this._signal;\n document.addEventListener(\"copy\", this.#boundCopy, {\n signal\n });\n document.addEventListener(\"cut\", this.#boundCut, {\n signal\n });\n document.addEventListener(\"paste\", this.#boundPaste, {\n signal\n });\n }\n #removeCopyPasteListeners() {\n document.removeEventListener(\"copy\", this.#boundCopy);\n document.removeEventListener(\"cut\", this.#boundCut);\n document.removeEventListener(\"paste\", this.#boundPaste);\n }\n #addDragAndDropListeners() {\n const signal = this._signal;\n document.addEventListener(\"dragover\", this.dragOver.bind(this), {\n signal\n });\n document.addEventListener(\"drop\", this.drop.bind(this), {\n signal\n });\n }\n addEditListeners() {\n this.#addKeyboardManager();\n this.#addCopyPasteListeners();\n }\n removeEditListeners() {\n this.#removeKeyboardManager();\n this.#removeCopyPasteListeners();\n }\n dragOver(event) {\n for (const {\n type\n } of event.dataTransfer.items) {\n for (const editorType of this.#editorTypes) {\n if (editorType.isHandlingMimeForPasting(type)) {\n event.dataTransfer.dropEffect = \"copy\";\n event.preventDefault();\n return;\n }\n }\n }\n }\n drop(event) {\n for (const item of event.dataTransfer.items) {\n for (const editorType of this.#editorTypes) {\n if (editorType.isHandlingMimeForPasting(item.type)) {\n editorType.paste(item, this.currentLayer);\n event.preventDefault();\n return;\n }\n }\n }\n }\n copy(event) {\n event.preventDefault();\n this.#activeEditor?.commitOrRemove();\n if (!this.hasSelection) {\n return;\n }\n const editors = [];\n for (const editor of this.#selectedEditors) {\n const serialized = editor.serialize(true);\n if (serialized) {\n editors.push(serialized);\n }\n }\n if (editors.length === 0) {\n return;\n }\n event.clipboardData.setData(\"application/pdfjs\", JSON.stringify(editors));\n 
}\n cut(event) {\n this.copy(event);\n this.delete();\n }\n paste(event) {\n event.preventDefault();\n const {\n clipboardData\n } = event;\n for (const item of clipboardData.items) {\n for (const editorType of this.#editorTypes) {\n if (editorType.isHandlingMimeForPasting(item.type)) {\n editorType.paste(item, this.currentLayer);\n return;\n }\n }\n }\n let data = clipboardData.getData(\"application/pdfjs\");\n if (!data) {\n return;\n }\n try {\n data = JSON.parse(data);\n } catch (ex) {\n warn(`paste: \"${ex.message}\".`);\n return;\n }\n if (!Array.isArray(data)) {\n return;\n }\n this.unselectAll();\n const layer = this.currentLayer;\n try {\n const newEditors = [];\n for (const editor of data) {\n const deserializedEditor = layer.deserialize(editor);\n if (!deserializedEditor) {\n return;\n }\n newEditors.push(deserializedEditor);\n }\n const cmd = () => {\n for (const editor of newEditors) {\n this.#addEditorToLayer(editor);\n }\n this.#selectEditors(newEditors);\n };\n const undo = () => {\n for (const editor of newEditors) {\n editor.remove();\n }\n };\n this.addCommands({\n cmd,\n undo,\n mustExec: true\n });\n } catch (ex) {\n warn(`paste: \"${ex.message}\".`);\n }\n }\n keydown(event) {\n if (!this.isShiftKeyDown && event.key === \"Shift\") {\n this.isShiftKeyDown = true;\n }\n if (this.#mode !== AnnotationEditorType.NONE && !this.isEditorHandlingKeyboard) {\n AnnotationEditorUIManager._keyboardManager.exec(this, event);\n }\n }\n keyup(event) {\n if (this.isShiftKeyDown && event.key === \"Shift\") {\n this.isShiftKeyDown = false;\n if (this.#highlightWhenShiftUp) {\n this.#highlightWhenShiftUp = false;\n this.#onSelectEnd(\"main_toolbar\");\n }\n }\n }\n onEditingAction({\n name\n }) {\n switch (name) {\n case \"undo\":\n case \"redo\":\n case \"delete\":\n case \"selectAll\":\n this[name]();\n break;\n case \"highlightSelection\":\n this.highlightSelection(\"context_menu\");\n break;\n }\n }\n #dispatchUpdateStates(details) {\n const hasChanged = Object.entries(details).some(([key, value]) => this.#previousStates[key] !== value);\n if (hasChanged) {\n this._eventBus.dispatch(\"annotationeditorstateschanged\", {\n source: this,\n details: Object.assign(this.#previousStates, details)\n });\n if (this.#mode === AnnotationEditorType.HIGHLIGHT && details.hasSelectedEditor === false) {\n this.#dispatchUpdateUI([[AnnotationEditorParamsType.HIGHLIGHT_FREE, true]]);\n }\n }\n }\n #dispatchUpdateUI(details) {\n this._eventBus.dispatch(\"annotationeditorparamschanged\", {\n source: this,\n details\n });\n }\n setEditingState(isEditing) {\n if (isEditing) {\n this.#addFocusManager();\n this.#addCopyPasteListeners();\n this.#dispatchUpdateStates({\n isEditing: this.#mode !== AnnotationEditorType.NONE,\n isEmpty: this.#isEmpty(),\n hasSomethingToUndo: this.#commandManager.hasSomethingToUndo(),\n hasSomethingToRedo: this.#commandManager.hasSomethingToRedo(),\n hasSelectedEditor: false\n });\n } else {\n this.#removeFocusManager();\n this.#removeCopyPasteListeners();\n this.#dispatchUpdateStates({\n isEditing: false\n });\n this.disableUserSelect(false);\n }\n }\n registerEditorTypes(types) {\n if (this.#editorTypes) {\n return;\n }\n this.#editorTypes = types;\n for (const editorType of this.#editorTypes) {\n this.#dispatchUpdateUI(editorType.defaultPropertiesToUpdate);\n }\n }\n getId() {\n return this.#idManager.id;\n }\n get currentLayer() {\n return this.#allLayers.get(this.#currentPageIndex);\n }\n getLayer(pageIndex) {\n return this.#allLayers.get(pageIndex);\n }\n get 
currentPageIndex() {\n return this.#currentPageIndex;\n }\n addLayer(layer) {\n this.#allLayers.set(layer.pageIndex, layer);\n if (this.#isEnabled) {\n layer.enable();\n } else {\n layer.disable();\n }\n }\n removeLayer(layer) {\n this.#allLayers.delete(layer.pageIndex);\n }\n updateMode(mode, editId = null, isFromKeyboard = false) {\n if (this.#mode === mode) {\n return;\n }\n this.#mode = mode;\n if (mode === AnnotationEditorType.NONE) {\n this.setEditingState(false);\n this.#disableAll();\n return;\n }\n this.setEditingState(true);\n this.#enableAll();\n this.unselectAll();\n for (const layer of this.#allLayers.values()) {\n layer.updateMode(mode);\n }\n if (!editId && isFromKeyboard) {\n this.addNewEditorFromKeyboard();\n return;\n }\n if (!editId) {\n return;\n }\n for (const editor of this.#allEditors.values()) {\n if (editor.annotationElementId === editId) {\n this.setSelected(editor);\n editor.enterInEditMode();\n break;\n }\n }\n }\n addNewEditorFromKeyboard() {\n if (this.currentLayer.canCreateNewEmptyEditor()) {\n this.currentLayer.addNewEditor();\n }\n }\n updateToolbar(mode) {\n if (mode === this.#mode) {\n return;\n }\n this._eventBus.dispatch(\"switchannotationeditormode\", {\n source: this,\n mode\n });\n }\n updateParams(type, value) {\n if (!this.#editorTypes) {\n return;\n }\n switch (type) {\n case AnnotationEditorParamsType.CREATE:\n this.currentLayer.addNewEditor();\n return;\n case AnnotationEditorParamsType.HIGHLIGHT_DEFAULT_COLOR:\n this.#mainHighlightColorPicker?.updateColor(value);\n break;\n case AnnotationEditorParamsType.HIGHLIGHT_SHOW_ALL:\n this._eventBus.dispatch(\"reporttelemetry\", {\n source: this,\n details: {\n type: \"editing\",\n data: {\n type: \"highlight\",\n action: \"toggle_visibility\"\n }\n }\n });\n (this.#showAllStates ||= new Map()).set(type, value);\n this.showAllEditors(\"highlight\", value);\n break;\n }\n for (const editor of this.#selectedEditors) {\n editor.updateParams(type, value);\n }\n for (const editorType of this.#editorTypes) {\n editorType.updateDefaultParams(type, value);\n }\n }\n showAllEditors(type, visible, updateButton = false) {\n for (const editor of this.#allEditors.values()) {\n if (editor.editorType === type) {\n editor.show(visible);\n }\n }\n const state = this.#showAllStates?.get(AnnotationEditorParamsType.HIGHLIGHT_SHOW_ALL) ?? 
true;\n if (state !== visible) {\n this.#dispatchUpdateUI([[AnnotationEditorParamsType.HIGHLIGHT_SHOW_ALL, visible]]);\n }\n }\n enableWaiting(mustWait = false) {\n if (this.#isWaiting === mustWait) {\n return;\n }\n this.#isWaiting = mustWait;\n for (const layer of this.#allLayers.values()) {\n if (mustWait) {\n layer.disableClick();\n } else {\n layer.enableClick();\n }\n layer.div.classList.toggle(\"waiting\", mustWait);\n }\n }\n #enableAll() {\n if (!this.#isEnabled) {\n this.#isEnabled = true;\n for (const layer of this.#allLayers.values()) {\n layer.enable();\n }\n for (const editor of this.#allEditors.values()) {\n editor.enable();\n }\n }\n }\n #disableAll() {\n this.unselectAll();\n if (this.#isEnabled) {\n this.#isEnabled = false;\n for (const layer of this.#allLayers.values()) {\n layer.disable();\n }\n for (const editor of this.#allEditors.values()) {\n editor.disable();\n }\n }\n }\n getEditors(pageIndex) {\n const editors = [];\n for (const editor of this.#allEditors.values()) {\n if (editor.pageIndex === pageIndex) {\n editors.push(editor);\n }\n }\n return editors;\n }\n getEditor(id) {\n return this.#allEditors.get(id);\n }\n addEditor(editor) {\n this.#allEditors.set(editor.id, editor);\n }\n removeEditor(editor) {\n if (editor.div.contains(document.activeElement)) {\n if (this.#focusMainContainerTimeoutId) {\n clearTimeout(this.#focusMainContainerTimeoutId);\n }\n this.#focusMainContainerTimeoutId = setTimeout(() => {\n this.focusMainContainer();\n this.#focusMainContainerTimeoutId = null;\n }, 0);\n }\n this.#allEditors.delete(editor.id);\n this.unselect(editor);\n if (!editor.annotationElementId || !this.#deletedAnnotationsElementIds.has(editor.annotationElementId)) {\n this.#annotationStorage?.remove(editor.id);\n }\n }\n addDeletedAnnotationElement(editor) {\n this.#deletedAnnotationsElementIds.add(editor.annotationElementId);\n this.addChangedExistingAnnotation(editor);\n editor.deleted = true;\n }\n isDeletedAnnotationElement(annotationElementId) {\n return this.#deletedAnnotationsElementIds.has(annotationElementId);\n }\n removeDeletedAnnotationElement(editor) {\n this.#deletedAnnotationsElementIds.delete(editor.annotationElementId);\n this.removeChangedExistingAnnotation(editor);\n editor.deleted = false;\n }\n #addEditorToLayer(editor) {\n const layer = this.#allLayers.get(editor.pageIndex);\n if (layer) {\n layer.addOrRebuild(editor);\n } else {\n this.addEditor(editor);\n this.addToAnnotationStorage(editor);\n }\n }\n setActiveEditor(editor) {\n if (this.#activeEditor === editor) {\n return;\n }\n this.#activeEditor = editor;\n if (editor) {\n this.#dispatchUpdateUI(editor.propertiesToUpdate);\n }\n }\n get #lastSelectedEditor() {\n let ed = null;\n for (ed of this.#selectedEditors) {}\n return ed;\n }\n updateUI(editor) {\n if (this.#lastSelectedEditor === editor) {\n this.#dispatchUpdateUI(editor.propertiesToUpdate);\n }\n }\n toggleSelected(editor) {\n if (this.#selectedEditors.has(editor)) {\n this.#selectedEditors.delete(editor);\n editor.unselect();\n this.#dispatchUpdateStates({\n hasSelectedEditor: this.hasSelection\n });\n return;\n }\n this.#selectedEditors.add(editor);\n editor.select();\n this.#dispatchUpdateUI(editor.propertiesToUpdate);\n this.#dispatchUpdateStates({\n hasSelectedEditor: true\n });\n }\n setSelected(editor) {\n for (const ed of this.#selectedEditors) {\n if (ed !== editor) {\n ed.unselect();\n }\n }\n this.#selectedEditors.clear();\n this.#selectedEditors.add(editor);\n editor.select();\n 
this.#dispatchUpdateUI(editor.propertiesToUpdate);\n this.#dispatchUpdateStates({\n hasSelectedEditor: true\n });\n }\n isSelected(editor) {\n return this.#selectedEditors.has(editor);\n }\n get firstSelectedEditor() {\n return this.#selectedEditors.values().next().value;\n }\n unselect(editor) {\n editor.unselect();\n this.#selectedEditors.delete(editor);\n this.#dispatchUpdateStates({\n hasSelectedEditor: this.hasSelection\n });\n }\n get hasSelection() {\n return this.#selectedEditors.size !== 0;\n }\n get isEnterHandled() {\n return this.#selectedEditors.size === 1 && this.firstSelectedEditor.isEnterHandled;\n }\n undo() {\n this.#commandManager.undo();\n this.#dispatchUpdateStates({\n hasSomethingToUndo: this.#commandManager.hasSomethingToUndo(),\n hasSomethingToRedo: true,\n isEmpty: this.#isEmpty()\n });\n }\n redo() {\n this.#commandManager.redo();\n this.#dispatchUpdateStates({\n hasSomethingToUndo: true,\n hasSomethingToRedo: this.#commandManager.hasSomethingToRedo(),\n isEmpty: this.#isEmpty()\n });\n }\n addCommands(params) {\n this.#commandManager.add(params);\n this.#dispatchUpdateStates({\n hasSomethingToUndo: true,\n hasSomethingToRedo: false,\n isEmpty: this.#isEmpty()\n });\n }\n #isEmpty() {\n if (this.#allEditors.size === 0) {\n return true;\n }\n if (this.#allEditors.size === 1) {\n for (const editor of this.#allEditors.values()) {\n return editor.isEmpty();\n }\n }\n return false;\n }\n delete() {\n this.commitOrRemove();\n if (!this.hasSelection) {\n return;\n }\n const editors = [...this.#selectedEditors];\n const cmd = () => {\n for (const editor of editors) {\n editor.remove();\n }\n };\n const undo = () => {\n for (const editor of editors) {\n this.#addEditorToLayer(editor);\n }\n };\n this.addCommands({\n cmd,\n undo,\n mustExec: true\n });\n }\n commitOrRemove() {\n this.#activeEditor?.commitOrRemove();\n }\n hasSomethingToControl() {\n return this.#activeEditor || this.hasSelection;\n }\n #selectEditors(editors) {\n for (const editor of this.#selectedEditors) {\n editor.unselect();\n }\n this.#selectedEditors.clear();\n for (const editor of editors) {\n if (editor.isEmpty()) {\n continue;\n }\n this.#selectedEditors.add(editor);\n editor.select();\n }\n this.#dispatchUpdateStates({\n hasSelectedEditor: this.hasSelection\n });\n }\n selectAll() {\n for (const editor of this.#selectedEditors) {\n editor.commit();\n }\n this.#selectEditors(this.#allEditors.values());\n }\n unselectAll() {\n if (this.#activeEditor) {\n this.#activeEditor.commitOrRemove();\n if (this.#mode !== AnnotationEditorType.NONE) {\n return;\n }\n }\n if (!this.hasSelection) {\n return;\n }\n for (const editor of this.#selectedEditors) {\n editor.unselect();\n }\n this.#selectedEditors.clear();\n this.#dispatchUpdateStates({\n hasSelectedEditor: false\n });\n }\n translateSelectedEditors(x, y, noCommit = false) {\n if (!noCommit) {\n this.commitOrRemove();\n }\n if (!this.hasSelection) {\n return;\n }\n this.#translation[0] += x;\n this.#translation[1] += y;\n const [totalX, totalY] = this.#translation;\n const editors = [...this.#selectedEditors];\n const TIME_TO_WAIT = 1000;\n if (this.#translationTimeoutId) {\n clearTimeout(this.#translationTimeoutId);\n }\n this.#translationTimeoutId = setTimeout(() => {\n this.#translationTimeoutId = null;\n this.#translation[0] = this.#translation[1] = 0;\n this.addCommands({\n cmd: () => {\n for (const editor of editors) {\n if (this.#allEditors.has(editor.id)) {\n editor.translateInPage(totalX, totalY);\n }\n }\n },\n undo: () => {\n for (const 
editor of editors) {\n if (this.#allEditors.has(editor.id)) {\n editor.translateInPage(-totalX, -totalY);\n }\n }\n },\n mustExec: false\n });\n }, TIME_TO_WAIT);\n for (const editor of editors) {\n editor.translateInPage(x, y);\n }\n }\n setUpDragSession() {\n if (!this.hasSelection) {\n return;\n }\n this.disableUserSelect(true);\n this.#draggingEditors = new Map();\n for (const editor of this.#selectedEditors) {\n this.#draggingEditors.set(editor, {\n savedX: editor.x,\n savedY: editor.y,\n savedPageIndex: editor.pageIndex,\n newX: 0,\n newY: 0,\n newPageIndex: -1\n });\n }\n }\n endDragSession() {\n if (!this.#draggingEditors) {\n return false;\n }\n this.disableUserSelect(false);\n const map = this.#draggingEditors;\n this.#draggingEditors = null;\n let mustBeAddedInUndoStack = false;\n for (const [{\n x,\n y,\n pageIndex\n }, value] of map) {\n value.newX = x;\n value.newY = y;\n value.newPageIndex = pageIndex;\n mustBeAddedInUndoStack ||= x !== value.savedX || y !== value.savedY || pageIndex !== value.savedPageIndex;\n }\n if (!mustBeAddedInUndoStack) {\n return false;\n }\n const move = (editor, x, y, pageIndex) => {\n if (this.#allEditors.has(editor.id)) {\n const parent = this.#allLayers.get(pageIndex);\n if (parent) {\n editor._setParentAndPosition(parent, x, y);\n } else {\n editor.pageIndex = pageIndex;\n editor.x = x;\n editor.y = y;\n }\n }\n };\n this.addCommands({\n cmd: () => {\n for (const [editor, {\n newX,\n newY,\n newPageIndex\n }] of map) {\n move(editor, newX, newY, newPageIndex);\n }\n },\n undo: () => {\n for (const [editor, {\n savedX,\n savedY,\n savedPageIndex\n }] of map) {\n move(editor, savedX, savedY, savedPageIndex);\n }\n },\n mustExec: true\n });\n return true;\n }\n dragSelectedEditors(tx, ty) {\n if (!this.#draggingEditors) {\n return;\n }\n for (const editor of this.#draggingEditors.keys()) {\n editor.drag(tx, ty);\n }\n }\n rebuild(editor) {\n if (editor.parent === null) {\n const parent = this.getLayer(editor.pageIndex);\n if (parent) {\n parent.changeParent(editor);\n parent.addOrRebuild(editor);\n } else {\n this.addEditor(editor);\n this.addToAnnotationStorage(editor);\n editor.rebuild();\n }\n } else {\n editor.parent.addOrRebuild(editor);\n }\n }\n get isEditorHandlingKeyboard() {\n return this.getActive()?.shouldGetKeyboardEvents() || this.#selectedEditors.size === 1 && this.firstSelectedEditor.shouldGetKeyboardEvents();\n }\n isActive(editor) {\n return this.#activeEditor === editor;\n }\n getActive() {\n return this.#activeEditor;\n }\n getMode() {\n return this.#mode;\n }\n get imageManager() {\n return shadow(this, \"imageManager\", new ImageManager());\n }\n getSelectionBoxes(textLayer) {\n if (!textLayer) {\n return null;\n }\n const selection = document.getSelection();\n for (let i = 0, ii = selection.rangeCount; i < ii; i++) {\n if (!textLayer.contains(selection.getRangeAt(i).commonAncestorContainer)) {\n return null;\n }\n }\n const {\n x: layerX,\n y: layerY,\n width: parentWidth,\n height: parentHeight\n } = textLayer.getBoundingClientRect();\n let rotator;\n switch (textLayer.getAttribute(\"data-main-rotation\")) {\n case \"90\":\n rotator = (x, y, w, h) => ({\n x: (y - layerY) / parentHeight,\n y: 1 - (x + w - layerX) / parentWidth,\n width: h / parentHeight,\n height: w / parentWidth\n });\n break;\n case \"180\":\n rotator = (x, y, w, h) => ({\n x: 1 - (x + w - layerX) / parentWidth,\n y: 1 - (y + h - layerY) / parentHeight,\n width: w / parentWidth,\n height: h / parentHeight\n });\n break;\n case \"270\":\n rotator = (x, y, 
w, h) => ({\n x: 1 - (y + h - layerY) / parentHeight,\n y: (x - layerX) / parentWidth,\n width: h / parentHeight,\n height: w / parentWidth\n });\n break;\n default:\n rotator = (x, y, w, h) => ({\n x: (x - layerX) / parentWidth,\n y: (y - layerY) / parentHeight,\n width: w / parentWidth,\n height: h / parentHeight\n });\n break;\n }\n const boxes = [];\n for (let i = 0, ii = selection.rangeCount; i < ii; i++) {\n const range = selection.getRangeAt(i);\n if (range.collapsed) {\n continue;\n }\n for (const {\n x,\n y,\n width,\n height\n } of range.getClientRects()) {\n if (width === 0 || height === 0) {\n continue;\n }\n boxes.push(rotator(x, y, width, height));\n }\n }\n return boxes.length === 0 ? null : boxes;\n }\n addChangedExistingAnnotation({\n annotationElementId,\n id\n }) {\n (this.#changedExistingAnnotations ||= new Map()).set(annotationElementId, id);\n }\n removeChangedExistingAnnotation({\n annotationElementId\n }) {\n this.#changedExistingAnnotations?.delete(annotationElementId);\n }\n renderAnnotationElement(annotation) {\n const editorId = this.#changedExistingAnnotations?.get(annotation.data.id);\n if (!editorId) {\n return;\n }\n const editor = this.#annotationStorage.getRawValue(editorId);\n if (!editor) {\n return;\n }\n if (this.#mode === AnnotationEditorType.NONE && !editor.hasBeenModified) {\n return;\n }\n editor.renderAnnotationElement(annotation);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/alt_text.js\n\nclass AltText {\n #altText = \"\";\n #altTextDecorative = false;\n #altTextButton = null;\n #altTextTooltip = null;\n #altTextTooltipTimeout = null;\n #altTextWasFromKeyBoard = false;\n #editor = null;\n static _l10nPromise = null;\n constructor(editor) {\n this.#editor = editor;\n }\n static initialize(l10nPromise) {\n AltText._l10nPromise ||= l10nPromise;\n }\n async render() {\n const altText = this.#altTextButton = document.createElement(\"button\");\n altText.className = \"altText\";\n const msg = await AltText._l10nPromise.get(\"pdfjs-editor-alt-text-button-label\");\n altText.textContent = msg;\n altText.setAttribute(\"aria-label\", msg);\n altText.tabIndex = \"0\";\n const signal = this.#editor._uiManager._signal;\n altText.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n altText.addEventListener(\"pointerdown\", event => event.stopPropagation(), {\n signal\n });\n const onClick = event => {\n event.preventDefault();\n this.#editor._uiManager.editAltText(this.#editor);\n };\n altText.addEventListener(\"click\", onClick, {\n capture: true,\n signal\n });\n altText.addEventListener(\"keydown\", event => {\n if (event.target === altText && event.key === \"Enter\") {\n this.#altTextWasFromKeyBoard = true;\n onClick(event);\n }\n }, {\n signal\n });\n await this.#setState();\n return altText;\n }\n finish() {\n if (!this.#altTextButton) {\n return;\n }\n this.#altTextButton.focus({\n focusVisible: this.#altTextWasFromKeyBoard\n });\n this.#altTextWasFromKeyBoard = false;\n }\n isEmpty() {\n return !this.#altText && !this.#altTextDecorative;\n }\n get data() {\n return {\n altText: this.#altText,\n decorative: this.#altTextDecorative\n };\n }\n set data({\n altText,\n decorative\n }) {\n if (this.#altText === altText && this.#altTextDecorative === decorative) {\n return;\n }\n this.#altText = altText;\n this.#altTextDecorative = decorative;\n this.#setState();\n }\n toggle(enabled = false) {\n if (!this.#altTextButton) {\n return;\n }\n if (!enabled && this.#altTextTooltipTimeout) {\n clearTimeout(this.#altTextTooltipTimeout);\n 
this.#altTextTooltipTimeout = null;\n }\n this.#altTextButton.disabled = !enabled;\n }\n destroy() {\n this.#altTextButton?.remove();\n this.#altTextButton = null;\n this.#altTextTooltip = null;\n }\n async #setState() {\n const button = this.#altTextButton;\n if (!button) {\n return;\n }\n if (!this.#altText && !this.#altTextDecorative) {\n button.classList.remove(\"done\");\n this.#altTextTooltip?.remove();\n return;\n }\n button.classList.add(\"done\");\n AltText._l10nPromise.get(\"pdfjs-editor-alt-text-edit-button-label\").then(msg => {\n button.setAttribute(\"aria-label\", msg);\n });\n let tooltip = this.#altTextTooltip;\n if (!tooltip) {\n this.#altTextTooltip = tooltip = document.createElement(\"span\");\n tooltip.className = \"tooltip\";\n tooltip.setAttribute(\"role\", \"tooltip\");\n const id = tooltip.id = `alt-text-tooltip-${this.#editor.id}`;\n button.setAttribute(\"aria-describedby\", id);\n const DELAY_TO_SHOW_TOOLTIP = 100;\n const signal = this.#editor._uiManager._signal;\n signal.addEventListener(\"abort\", () => {\n clearTimeout(this.#altTextTooltipTimeout);\n this.#altTextTooltipTimeout = null;\n }, {\n once: true\n });\n button.addEventListener(\"mouseenter\", () => {\n this.#altTextTooltipTimeout = setTimeout(() => {\n this.#altTextTooltipTimeout = null;\n this.#altTextTooltip.classList.add(\"show\");\n this.#editor._reportTelemetry({\n action: \"alt_text_tooltip\"\n });\n }, DELAY_TO_SHOW_TOOLTIP);\n }, {\n signal\n });\n button.addEventListener(\"mouseleave\", () => {\n if (this.#altTextTooltipTimeout) {\n clearTimeout(this.#altTextTooltipTimeout);\n this.#altTextTooltipTimeout = null;\n }\n this.#altTextTooltip?.classList.remove(\"show\");\n }, {\n signal\n });\n }\n tooltip.innerText = this.#altTextDecorative ? await AltText._l10nPromise.get(\"pdfjs-editor-alt-text-decorative-tooltip\") : this.#altText;\n if (!tooltip.parentNode) {\n button.append(tooltip);\n }\n const element = this.#editor.getImageForAltText();\n element?.setAttribute(\"aria-describedby\", tooltip.id);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/editor.js\n\n\n\n\n\nclass AnnotationEditor {\n #accessibilityData = null;\n #allResizerDivs = null;\n #altText = null;\n #disabled = false;\n #keepAspectRatio = false;\n #resizersDiv = null;\n #savedDimensions = null;\n #boundFocusin = this.focusin.bind(this);\n #boundFocusout = this.focusout.bind(this);\n #editToolbar = null;\n #focusedResizerName = \"\";\n #hasBeenClicked = false;\n #initialPosition = null;\n #isEditing = false;\n #isInEditMode = false;\n #isResizerEnabledForKeyboard = false;\n #moveInDOMTimeout = null;\n #prevDragX = 0;\n #prevDragY = 0;\n #telemetryTimeouts = null;\n _initialOptions = Object.create(null);\n _isVisible = true;\n _uiManager = null;\n _focusEventsAllowed = true;\n _l10nPromise = null;\n #isDraggable = false;\n #zIndex = AnnotationEditor._zIndex++;\n static _borderLineWidth = -1;\n static _colorManager = new ColorManager();\n static _zIndex = 1;\n static _telemetryTimeout = 1000;\n static get _resizerKeyboardManager() {\n const resize = AnnotationEditor.prototype._resizeWithKeyboard;\n const small = AnnotationEditorUIManager.TRANSLATE_SMALL;\n const big = AnnotationEditorUIManager.TRANSLATE_BIG;\n return shadow(this, \"_resizerKeyboardManager\", new KeyboardManager([[[\"ArrowLeft\", \"mac+ArrowLeft\"], resize, {\n args: [-small, 0]\n }], [[\"ctrl+ArrowLeft\", \"mac+shift+ArrowLeft\"], resize, {\n args: [-big, 0]\n }], [[\"ArrowRight\", \"mac+ArrowRight\"], resize, {\n args: [small, 0]\n }], 
[[\"ctrl+ArrowRight\", \"mac+shift+ArrowRight\"], resize, {\n args: [big, 0]\n }], [[\"ArrowUp\", \"mac+ArrowUp\"], resize, {\n args: [0, -small]\n }], [[\"ctrl+ArrowUp\", \"mac+shift+ArrowUp\"], resize, {\n args: [0, -big]\n }], [[\"ArrowDown\", \"mac+ArrowDown\"], resize, {\n args: [0, small]\n }], [[\"ctrl+ArrowDown\", \"mac+shift+ArrowDown\"], resize, {\n args: [0, big]\n }], [[\"Escape\", \"mac+Escape\"], AnnotationEditor.prototype._stopResizingWithKeyboard]]));\n }\n constructor(parameters) {\n if (this.constructor === AnnotationEditor) {\n unreachable(\"Cannot initialize AnnotationEditor.\");\n }\n this.parent = parameters.parent;\n this.id = parameters.id;\n this.width = this.height = null;\n this.pageIndex = parameters.parent.pageIndex;\n this.name = parameters.name;\n this.div = null;\n this._uiManager = parameters.uiManager;\n this.annotationElementId = null;\n this._willKeepAspectRatio = false;\n this._initialOptions.isCentered = parameters.isCentered;\n this._structTreeParentId = null;\n const {\n rotation,\n rawDims: {\n pageWidth,\n pageHeight,\n pageX,\n pageY\n }\n } = this.parent.viewport;\n this.rotation = rotation;\n this.pageRotation = (360 + rotation - this._uiManager.viewParameters.rotation) % 360;\n this.pageDimensions = [pageWidth, pageHeight];\n this.pageTranslation = [pageX, pageY];\n const [width, height] = this.parentDimensions;\n this.x = parameters.x / width;\n this.y = parameters.y / height;\n this.isAttachedToDOM = false;\n this.deleted = false;\n }\n get editorType() {\n return Object.getPrototypeOf(this).constructor._type;\n }\n static get _defaultLineColor() {\n return shadow(this, \"_defaultLineColor\", this._colorManager.getHexCode(\"CanvasText\"));\n }\n static deleteAnnotationElement(editor) {\n const fakeEditor = new FakeEditor({\n id: editor.parent.getNextId(),\n parent: editor.parent,\n uiManager: editor._uiManager\n });\n fakeEditor.annotationElementId = editor.annotationElementId;\n fakeEditor.deleted = true;\n fakeEditor._uiManager.addToAnnotationStorage(fakeEditor);\n }\n static initialize(l10n, _uiManager, options) {\n AnnotationEditor._l10nPromise ||= new Map([\"pdfjs-editor-alt-text-button-label\", \"pdfjs-editor-alt-text-edit-button-label\", \"pdfjs-editor-alt-text-decorative-tooltip\", \"pdfjs-editor-resizer-label-topLeft\", \"pdfjs-editor-resizer-label-topMiddle\", \"pdfjs-editor-resizer-label-topRight\", \"pdfjs-editor-resizer-label-middleRight\", \"pdfjs-editor-resizer-label-bottomRight\", \"pdfjs-editor-resizer-label-bottomMiddle\", \"pdfjs-editor-resizer-label-bottomLeft\", \"pdfjs-editor-resizer-label-middleLeft\"].map(str => [str, l10n.get(str.replaceAll(/([A-Z])/g, c => `-${c.toLowerCase()}`))]));\n if (options?.strings) {\n for (const str of options.strings) {\n AnnotationEditor._l10nPromise.set(str, l10n.get(str));\n }\n }\n if (AnnotationEditor._borderLineWidth !== -1) {\n return;\n }\n const style = getComputedStyle(document.documentElement);\n AnnotationEditor._borderLineWidth = parseFloat(style.getPropertyValue(\"--outline-width\")) || 0;\n }\n static updateDefaultParams(_type, _value) {}\n static get defaultPropertiesToUpdate() {\n return [];\n }\n static isHandlingMimeForPasting(mime) {\n return false;\n }\n static paste(item, parent) {\n unreachable(\"Not implemented\");\n }\n get propertiesToUpdate() {\n return [];\n }\n get _isDraggable() {\n return this.#isDraggable;\n }\n set _isDraggable(value) {\n this.#isDraggable = value;\n this.div?.classList.toggle(\"draggable\", value);\n }\n get isEnterHandled() {\n return 
true;\n }\n center() {\n const [pageWidth, pageHeight] = this.pageDimensions;\n switch (this.parentRotation) {\n case 90:\n this.x -= this.height * pageHeight / (pageWidth * 2);\n this.y += this.width * pageWidth / (pageHeight * 2);\n break;\n case 180:\n this.x += this.width / 2;\n this.y += this.height / 2;\n break;\n case 270:\n this.x += this.height * pageHeight / (pageWidth * 2);\n this.y -= this.width * pageWidth / (pageHeight * 2);\n break;\n default:\n this.x -= this.width / 2;\n this.y -= this.height / 2;\n break;\n }\n this.fixAndSetPosition();\n }\n addCommands(params) {\n this._uiManager.addCommands(params);\n }\n get currentLayer() {\n return this._uiManager.currentLayer;\n }\n setInBackground() {\n this.div.style.zIndex = 0;\n }\n setInForeground() {\n this.div.style.zIndex = this.#zIndex;\n }\n setParent(parent) {\n if (parent !== null) {\n this.pageIndex = parent.pageIndex;\n this.pageDimensions = parent.pageDimensions;\n } else {\n this.#stopResizing();\n }\n this.parent = parent;\n }\n focusin(event) {\n if (!this._focusEventsAllowed) {\n return;\n }\n if (!this.#hasBeenClicked) {\n this.parent.setSelected(this);\n } else {\n this.#hasBeenClicked = false;\n }\n }\n focusout(event) {\n if (!this._focusEventsAllowed) {\n return;\n }\n if (!this.isAttachedToDOM) {\n return;\n }\n const target = event.relatedTarget;\n if (target?.closest(`#${this.id}`)) {\n return;\n }\n event.preventDefault();\n if (!this.parent?.isMultipleSelection) {\n this.commitOrRemove();\n }\n }\n commitOrRemove() {\n if (this.isEmpty()) {\n this.remove();\n } else {\n this.commit();\n }\n }\n commit() {\n this.addToAnnotationStorage();\n }\n addToAnnotationStorage() {\n this._uiManager.addToAnnotationStorage(this);\n }\n setAt(x, y, tx, ty) {\n const [width, height] = this.parentDimensions;\n [tx, ty] = this.screenToPageTranslation(tx, ty);\n this.x = (x + tx) / width;\n this.y = (y + ty) / height;\n this.fixAndSetPosition();\n }\n #translate([width, height], x, y) {\n [x, y] = this.screenToPageTranslation(x, y);\n this.x += x / width;\n this.y += y / height;\n this.fixAndSetPosition();\n }\n translate(x, y) {\n this.#translate(this.parentDimensions, x, y);\n }\n translateInPage(x, y) {\n this.#initialPosition ||= [this.x, this.y];\n this.#translate(this.pageDimensions, x, y);\n this.div.scrollIntoView({\n block: \"nearest\"\n });\n }\n drag(tx, ty) {\n this.#initialPosition ||= [this.x, this.y];\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.x += tx / parentWidth;\n this.y += ty / parentHeight;\n if (this.parent && (this.x < 0 || this.x > 1 || this.y < 0 || this.y > 1)) {\n const {\n x,\n y\n } = this.div.getBoundingClientRect();\n if (this.parent.findNewParent(this, x, y)) {\n this.x -= Math.floor(this.x);\n this.y -= Math.floor(this.y);\n }\n }\n let {\n x,\n y\n } = this;\n const [bx, by] = this.getBaseTranslation();\n x += bx;\n y += by;\n this.div.style.left = `${(100 * x).toFixed(2)}%`;\n this.div.style.top = `${(100 * y).toFixed(2)}%`;\n this.div.scrollIntoView({\n block: \"nearest\"\n });\n }\n get _hasBeenMoved() {\n return !!this.#initialPosition && (this.#initialPosition[0] !== this.x || this.#initialPosition[1] !== this.y);\n }\n getBaseTranslation() {\n const [parentWidth, parentHeight] = this.parentDimensions;\n const {\n _borderLineWidth\n } = AnnotationEditor;\n const x = _borderLineWidth / parentWidth;\n const y = _borderLineWidth / parentHeight;\n switch (this.rotation) {\n case 90:\n return [-x, y];\n case 180:\n return [x, y];\n case 270:\n return [x, -y];\n 
default:\n return [-x, -y];\n }\n }\n get _mustFixPosition() {\n return true;\n }\n fixAndSetPosition(rotation = this.rotation) {\n const [pageWidth, pageHeight] = this.pageDimensions;\n let {\n x,\n y,\n width,\n height\n } = this;\n width *= pageWidth;\n height *= pageHeight;\n x *= pageWidth;\n y *= pageHeight;\n if (this._mustFixPosition) {\n switch (rotation) {\n case 0:\n x = Math.max(0, Math.min(pageWidth - width, x));\n y = Math.max(0, Math.min(pageHeight - height, y));\n break;\n case 90:\n x = Math.max(0, Math.min(pageWidth - height, x));\n y = Math.min(pageHeight, Math.max(width, y));\n break;\n case 180:\n x = Math.min(pageWidth, Math.max(width, x));\n y = Math.min(pageHeight, Math.max(height, y));\n break;\n case 270:\n x = Math.min(pageWidth, Math.max(height, x));\n y = Math.max(0, Math.min(pageHeight - width, y));\n break;\n }\n }\n this.x = x /= pageWidth;\n this.y = y /= pageHeight;\n const [bx, by] = this.getBaseTranslation();\n x += bx;\n y += by;\n const {\n style\n } = this.div;\n style.left = `${(100 * x).toFixed(2)}%`;\n style.top = `${(100 * y).toFixed(2)}%`;\n this.moveInDOM();\n }\n static #rotatePoint(x, y, angle) {\n switch (angle) {\n case 90:\n return [y, -x];\n case 180:\n return [-x, -y];\n case 270:\n return [-y, x];\n default:\n return [x, y];\n }\n }\n screenToPageTranslation(x, y) {\n return AnnotationEditor.#rotatePoint(x, y, this.parentRotation);\n }\n pageTranslationToScreen(x, y) {\n return AnnotationEditor.#rotatePoint(x, y, 360 - this.parentRotation);\n }\n #getRotationMatrix(rotation) {\n switch (rotation) {\n case 90:\n {\n const [pageWidth, pageHeight] = this.pageDimensions;\n return [0, -pageWidth / pageHeight, pageHeight / pageWidth, 0];\n }\n case 180:\n return [-1, 0, 0, -1];\n case 270:\n {\n const [pageWidth, pageHeight] = this.pageDimensions;\n return [0, pageWidth / pageHeight, -pageHeight / pageWidth, 0];\n }\n default:\n return [1, 0, 0, 1];\n }\n }\n get parentScale() {\n return this._uiManager.viewParameters.realScale;\n }\n get parentRotation() {\n return (this._uiManager.viewParameters.rotation + this.pageRotation) % 360;\n }\n get parentDimensions() {\n const {\n parentScale,\n pageDimensions: [pageWidth, pageHeight]\n } = this;\n const scaledWidth = pageWidth * parentScale;\n const scaledHeight = pageHeight * parentScale;\n return util_FeatureTest.isCSSRoundSupported ? [Math.round(scaledWidth), Math.round(scaledHeight)] : [scaledWidth, scaledHeight];\n }\n setDims(width, height) {\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.div.style.width = `${(100 * width / parentWidth).toFixed(2)}%`;\n if (!this.#keepAspectRatio) {\n this.div.style.height = `${(100 * height / parentHeight).toFixed(2)}%`;\n }\n }\n fixDims() {\n const {\n style\n } = this.div;\n const {\n height,\n width\n } = style;\n const widthPercent = width.endsWith(\"%\");\n const heightPercent = !this.#keepAspectRatio && height.endsWith(\"%\");\n if (widthPercent && heightPercent) {\n return;\n }\n const [parentWidth, parentHeight] = this.parentDimensions;\n if (!widthPercent) {\n style.width = `${(100 * parseFloat(width) / parentWidth).toFixed(2)}%`;\n }\n if (!this.#keepAspectRatio && !heightPercent) {\n style.height = `${(100 * parseFloat(height) / parentHeight).toFixed(2)}%`;\n }\n }\n getInitialTranslation() {\n return [0, 0];\n }\n #createResizers() {\n if (this.#resizersDiv) {\n return;\n }\n this.#resizersDiv = document.createElement(\"div\");\n this.#resizersDiv.classList.add(\"resizers\");\n const classes = this._willKeepAspectRatio ? 
[\"topLeft\", \"topRight\", \"bottomRight\", \"bottomLeft\"] : [\"topLeft\", \"topMiddle\", \"topRight\", \"middleRight\", \"bottomRight\", \"bottomMiddle\", \"bottomLeft\", \"middleLeft\"];\n const signal = this._uiManager._signal;\n for (const name of classes) {\n const div = document.createElement(\"div\");\n this.#resizersDiv.append(div);\n div.classList.add(\"resizer\", name);\n div.setAttribute(\"data-resizer-name\", name);\n div.addEventListener(\"pointerdown\", this.#resizerPointerdown.bind(this, name), {\n signal\n });\n div.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n div.tabIndex = -1;\n }\n this.div.prepend(this.#resizersDiv);\n }\n #resizerPointerdown(name, event) {\n event.preventDefault();\n const {\n isMac\n } = util_FeatureTest.platform;\n if (event.button !== 0 || event.ctrlKey && isMac) {\n return;\n }\n this.#altText?.toggle(false);\n const boundResizerPointermove = this.#resizerPointermove.bind(this, name);\n const savedDraggable = this._isDraggable;\n this._isDraggable = false;\n const signal = this._uiManager._signal;\n const pointerMoveOptions = {\n passive: true,\n capture: true,\n signal\n };\n this.parent.togglePointerEvents(false);\n window.addEventListener(\"pointermove\", boundResizerPointermove, pointerMoveOptions);\n window.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n const savedX = this.x;\n const savedY = this.y;\n const savedWidth = this.width;\n const savedHeight = this.height;\n const savedParentCursor = this.parent.div.style.cursor;\n const savedCursor = this.div.style.cursor;\n this.div.style.cursor = this.parent.div.style.cursor = window.getComputedStyle(event.target).cursor;\n const pointerUpCallback = () => {\n this.parent.togglePointerEvents(true);\n this.#altText?.toggle(true);\n this._isDraggable = savedDraggable;\n window.removeEventListener(\"pointerup\", pointerUpCallback);\n window.removeEventListener(\"blur\", pointerUpCallback);\n window.removeEventListener(\"pointermove\", boundResizerPointermove, pointerMoveOptions);\n window.removeEventListener(\"contextmenu\", noContextMenu);\n this.parent.div.style.cursor = savedParentCursor;\n this.div.style.cursor = savedCursor;\n this.#addResizeToUndoStack(savedX, savedY, savedWidth, savedHeight);\n };\n window.addEventListener(\"pointerup\", pointerUpCallback, {\n signal\n });\n window.addEventListener(\"blur\", pointerUpCallback, {\n signal\n });\n }\n #addResizeToUndoStack(savedX, savedY, savedWidth, savedHeight) {\n const newX = this.x;\n const newY = this.y;\n const newWidth = this.width;\n const newHeight = this.height;\n if (newX === savedX && newY === savedY && newWidth === savedWidth && newHeight === savedHeight) {\n return;\n }\n this.addCommands({\n cmd: () => {\n this.width = newWidth;\n this.height = newHeight;\n this.x = newX;\n this.y = newY;\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.setDims(parentWidth * newWidth, parentHeight * newHeight);\n this.fixAndSetPosition();\n },\n undo: () => {\n this.width = savedWidth;\n this.height = savedHeight;\n this.x = savedX;\n this.y = savedY;\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.setDims(parentWidth * savedWidth, parentHeight * savedHeight);\n this.fixAndSetPosition();\n },\n mustExec: true\n });\n }\n #resizerPointermove(name, event) {\n const [parentWidth, parentHeight] = this.parentDimensions;\n const savedX = this.x;\n const savedY = this.y;\n const savedWidth = this.width;\n const savedHeight = this.height;\n const minWidth = 
AnnotationEditor.MIN_SIZE / parentWidth;\n const minHeight = AnnotationEditor.MIN_SIZE / parentHeight;\n const round = x => Math.round(x * 10000) / 10000;\n const rotationMatrix = this.#getRotationMatrix(this.rotation);\n const transf = (x, y) => [rotationMatrix[0] * x + rotationMatrix[2] * y, rotationMatrix[1] * x + rotationMatrix[3] * y];\n const invRotationMatrix = this.#getRotationMatrix(360 - this.rotation);\n const invTransf = (x, y) => [invRotationMatrix[0] * x + invRotationMatrix[2] * y, invRotationMatrix[1] * x + invRotationMatrix[3] * y];\n let getPoint;\n let getOpposite;\n let isDiagonal = false;\n let isHorizontal = false;\n switch (name) {\n case \"topLeft\":\n isDiagonal = true;\n getPoint = (w, h) => [0, 0];\n getOpposite = (w, h) => [w, h];\n break;\n case \"topMiddle\":\n getPoint = (w, h) => [w / 2, 0];\n getOpposite = (w, h) => [w / 2, h];\n break;\n case \"topRight\":\n isDiagonal = true;\n getPoint = (w, h) => [w, 0];\n getOpposite = (w, h) => [0, h];\n break;\n case \"middleRight\":\n isHorizontal = true;\n getPoint = (w, h) => [w, h / 2];\n getOpposite = (w, h) => [0, h / 2];\n break;\n case \"bottomRight\":\n isDiagonal = true;\n getPoint = (w, h) => [w, h];\n getOpposite = (w, h) => [0, 0];\n break;\n case \"bottomMiddle\":\n getPoint = (w, h) => [w / 2, h];\n getOpposite = (w, h) => [w / 2, 0];\n break;\n case \"bottomLeft\":\n isDiagonal = true;\n getPoint = (w, h) => [0, h];\n getOpposite = (w, h) => [w, 0];\n break;\n case \"middleLeft\":\n isHorizontal = true;\n getPoint = (w, h) => [0, h / 2];\n getOpposite = (w, h) => [w, h / 2];\n break;\n }\n const point = getPoint(savedWidth, savedHeight);\n const oppositePoint = getOpposite(savedWidth, savedHeight);\n let transfOppositePoint = transf(...oppositePoint);\n const oppositeX = round(savedX + transfOppositePoint[0]);\n const oppositeY = round(savedY + transfOppositePoint[1]);\n let ratioX = 1;\n let ratioY = 1;\n let [deltaX, deltaY] = this.screenToPageTranslation(event.movementX, event.movementY);\n [deltaX, deltaY] = invTransf(deltaX / parentWidth, deltaY / parentHeight);\n if (isDiagonal) {\n const oldDiag = Math.hypot(savedWidth, savedHeight);\n ratioX = ratioY = Math.max(Math.min(Math.hypot(oppositePoint[0] - point[0] - deltaX, oppositePoint[1] - point[1] - deltaY) / oldDiag, 1 / savedWidth, 1 / savedHeight), minWidth / savedWidth, minHeight / savedHeight);\n } else if (isHorizontal) {\n ratioX = Math.max(minWidth, Math.min(1, Math.abs(oppositePoint[0] - point[0] - deltaX))) / savedWidth;\n } else {\n ratioY = Math.max(minHeight, Math.min(1, Math.abs(oppositePoint[1] - point[1] - deltaY))) / savedHeight;\n }\n const newWidth = round(savedWidth * ratioX);\n const newHeight = round(savedHeight * ratioY);\n transfOppositePoint = transf(...getOpposite(newWidth, newHeight));\n const newX = oppositeX - transfOppositePoint[0];\n const newY = oppositeY - transfOppositePoint[1];\n this.width = newWidth;\n this.height = newHeight;\n this.x = newX;\n this.y = newY;\n this.setDims(parentWidth * newWidth, parentHeight * newHeight);\n this.fixAndSetPosition();\n }\n altTextFinish() {\n this.#altText?.finish();\n }\n async addEditToolbar() {\n if (this.#editToolbar || this.#isInEditMode) {\n return this.#editToolbar;\n }\n this.#editToolbar = new EditorToolbar(this);\n this.div.append(this.#editToolbar.render());\n if (this.#altText) {\n this.#editToolbar.addAltTextButton(await this.#altText.render());\n }\n return this.#editToolbar;\n }\n removeEditToolbar() {\n if (!this.#editToolbar) {\n return;\n }\n 
this.#editToolbar.remove();\n this.#editToolbar = null;\n this.#altText?.destroy();\n }\n getClientDimensions() {\n return this.div.getBoundingClientRect();\n }\n async addAltTextButton() {\n if (this.#altText) {\n return;\n }\n AltText.initialize(AnnotationEditor._l10nPromise);\n this.#altText = new AltText(this);\n if (this.#accessibilityData) {\n this.#altText.data = this.#accessibilityData;\n this.#accessibilityData = null;\n }\n await this.addEditToolbar();\n }\n get altTextData() {\n return this.#altText?.data;\n }\n set altTextData(data) {\n if (!this.#altText) {\n return;\n }\n this.#altText.data = data;\n }\n hasAltText() {\n return !this.#altText?.isEmpty();\n }\n render() {\n this.div = document.createElement(\"div\");\n this.div.setAttribute(\"data-editor-rotation\", (360 - this.rotation) % 360);\n this.div.className = this.name;\n this.div.setAttribute(\"id\", this.id);\n this.div.tabIndex = this.#disabled ? -1 : 0;\n if (!this._isVisible) {\n this.div.classList.add(\"hidden\");\n }\n this.setInForeground();\n const signal = this._uiManager._signal;\n this.div.addEventListener(\"focusin\", this.#boundFocusin, {\n signal\n });\n this.div.addEventListener(\"focusout\", this.#boundFocusout, {\n signal\n });\n const [parentWidth, parentHeight] = this.parentDimensions;\n if (this.parentRotation % 180 !== 0) {\n this.div.style.maxWidth = `${(100 * parentHeight / parentWidth).toFixed(2)}%`;\n this.div.style.maxHeight = `${(100 * parentWidth / parentHeight).toFixed(2)}%`;\n }\n const [tx, ty] = this.getInitialTranslation();\n this.translate(tx, ty);\n bindEvents(this, this.div, [\"pointerdown\"]);\n return this.div;\n }\n pointerdown(event) {\n const {\n isMac\n } = util_FeatureTest.platform;\n if (event.button !== 0 || event.ctrlKey && isMac) {\n event.preventDefault();\n return;\n }\n this.#hasBeenClicked = true;\n if (this._isDraggable) {\n this.#setUpDragSession(event);\n return;\n }\n this.#selectOnPointerEvent(event);\n }\n #selectOnPointerEvent(event) {\n const {\n isMac\n } = util_FeatureTest.platform;\n if (event.ctrlKey && !isMac || event.shiftKey || event.metaKey && isMac) {\n this.parent.toggleSelected(this);\n } else {\n this.parent.setSelected(this);\n }\n }\n #setUpDragSession(event) {\n const isSelected = this._uiManager.isSelected(this);\n this._uiManager.setUpDragSession();\n let pointerMoveOptions, pointerMoveCallback;\n const signal = this._uiManager._signal;\n if (isSelected) {\n this.div.classList.add(\"moving\");\n pointerMoveOptions = {\n passive: true,\n capture: true,\n signal\n };\n this.#prevDragX = event.clientX;\n this.#prevDragY = event.clientY;\n pointerMoveCallback = e => {\n const {\n clientX: x,\n clientY: y\n } = e;\n const [tx, ty] = this.screenToPageTranslation(x - this.#prevDragX, y - this.#prevDragY);\n this.#prevDragX = x;\n this.#prevDragY = y;\n this._uiManager.dragSelectedEditors(tx, ty);\n };\n window.addEventListener(\"pointermove\", pointerMoveCallback, pointerMoveOptions);\n }\n const pointerUpCallback = () => {\n window.removeEventListener(\"pointerup\", pointerUpCallback);\n window.removeEventListener(\"blur\", pointerUpCallback);\n if (isSelected) {\n this.div.classList.remove(\"moving\");\n window.removeEventListener(\"pointermove\", pointerMoveCallback, pointerMoveOptions);\n }\n this.#hasBeenClicked = false;\n if (!this._uiManager.endDragSession()) {\n this.#selectOnPointerEvent(event);\n }\n };\n window.addEventListener(\"pointerup\", pointerUpCallback, {\n signal\n });\n window.addEventListener(\"blur\", pointerUpCallback, {\n 
signal\n });\n }\n moveInDOM() {\n if (this.#moveInDOMTimeout) {\n clearTimeout(this.#moveInDOMTimeout);\n }\n this.#moveInDOMTimeout = setTimeout(() => {\n this.#moveInDOMTimeout = null;\n this.parent?.moveEditorInDOM(this);\n }, 0);\n }\n _setParentAndPosition(parent, x, y) {\n parent.changeParent(this);\n this.x = x;\n this.y = y;\n this.fixAndSetPosition();\n }\n getRect(tx, ty, rotation = this.rotation) {\n const scale = this.parentScale;\n const [pageWidth, pageHeight] = this.pageDimensions;\n const [pageX, pageY] = this.pageTranslation;\n const shiftX = tx / scale;\n const shiftY = ty / scale;\n const x = this.x * pageWidth;\n const y = this.y * pageHeight;\n const width = this.width * pageWidth;\n const height = this.height * pageHeight;\n switch (rotation) {\n case 0:\n return [x + shiftX + pageX, pageHeight - y - shiftY - height + pageY, x + shiftX + width + pageX, pageHeight - y - shiftY + pageY];\n case 90:\n return [x + shiftY + pageX, pageHeight - y + shiftX + pageY, x + shiftY + height + pageX, pageHeight - y + shiftX + width + pageY];\n case 180:\n return [x - shiftX - width + pageX, pageHeight - y + shiftY + pageY, x - shiftX + pageX, pageHeight - y + shiftY + height + pageY];\n case 270:\n return [x - shiftY - height + pageX, pageHeight - y - shiftX - width + pageY, x - shiftY + pageX, pageHeight - y - shiftX + pageY];\n default:\n throw new Error(\"Invalid rotation\");\n }\n }\n getRectInCurrentCoords(rect, pageHeight) {\n const [x1, y1, x2, y2] = rect;\n const width = x2 - x1;\n const height = y2 - y1;\n switch (this.rotation) {\n case 0:\n return [x1, pageHeight - y2, width, height];\n case 90:\n return [x1, pageHeight - y1, height, width];\n case 180:\n return [x2, pageHeight - y1, width, height];\n case 270:\n return [x2, pageHeight - y2, height, width];\n default:\n throw new Error(\"Invalid rotation\");\n }\n }\n onceAdded() {}\n isEmpty() {\n return false;\n }\n enableEditMode() {\n this.#isInEditMode = true;\n }\n disableEditMode() {\n this.#isInEditMode = false;\n }\n isInEditMode() {\n return this.#isInEditMode;\n }\n shouldGetKeyboardEvents() {\n return this.#isResizerEnabledForKeyboard;\n }\n needsToBeRebuilt() {\n return this.div && !this.isAttachedToDOM;\n }\n rebuild() {\n const signal = this._uiManager._signal;\n this.div?.addEventListener(\"focusin\", this.#boundFocusin, {\n signal\n });\n this.div?.addEventListener(\"focusout\", this.#boundFocusout, {\n signal\n });\n }\n rotate(_angle) {}\n serialize(isForCopying = false, context = null) {\n unreachable(\"An editor must be serializable\");\n }\n static deserialize(data, parent, uiManager) {\n const editor = new this.prototype.constructor({\n parent,\n id: parent.getNextId(),\n uiManager\n });\n editor.rotation = data.rotation;\n editor.#accessibilityData = data.accessibilityData;\n const [pageWidth, pageHeight] = editor.pageDimensions;\n const [x, y, width, height] = editor.getRectInCurrentCoords(data.rect, pageHeight);\n editor.x = x / pageWidth;\n editor.y = y / pageHeight;\n editor.width = width / pageWidth;\n editor.height = height / pageHeight;\n return editor;\n }\n get hasBeenModified() {\n return !!this.annotationElementId && (this.deleted || this.serialize() !== null);\n }\n remove() {\n this.div.removeEventListener(\"focusin\", this.#boundFocusin);\n this.div.removeEventListener(\"focusout\", this.#boundFocusout);\n if (!this.isEmpty()) {\n this.commit();\n }\n if (this.parent) {\n this.parent.remove(this);\n } else {\n this._uiManager.removeEditor(this);\n }\n if (this.#moveInDOMTimeout) {\n 
clearTimeout(this.#moveInDOMTimeout);\n this.#moveInDOMTimeout = null;\n }\n this.#stopResizing();\n this.removeEditToolbar();\n if (this.#telemetryTimeouts) {\n for (const timeout of this.#telemetryTimeouts.values()) {\n clearTimeout(timeout);\n }\n this.#telemetryTimeouts = null;\n }\n this.parent = null;\n }\n get isResizable() {\n return false;\n }\n makeResizable() {\n if (this.isResizable) {\n this.#createResizers();\n this.#resizersDiv.classList.remove(\"hidden\");\n bindEvents(this, this.div, [\"keydown\"]);\n }\n }\n get toolbarPosition() {\n return null;\n }\n keydown(event) {\n if (!this.isResizable || event.target !== this.div || event.key !== \"Enter\") {\n return;\n }\n this._uiManager.setSelected(this);\n this.#savedDimensions = {\n savedX: this.x,\n savedY: this.y,\n savedWidth: this.width,\n savedHeight: this.height\n };\n const children = this.#resizersDiv.children;\n if (!this.#allResizerDivs) {\n this.#allResizerDivs = Array.from(children);\n const boundResizerKeydown = this.#resizerKeydown.bind(this);\n const boundResizerBlur = this.#resizerBlur.bind(this);\n const signal = this._uiManager._signal;\n for (const div of this.#allResizerDivs) {\n const name = div.getAttribute(\"data-resizer-name\");\n div.setAttribute(\"role\", \"spinbutton\");\n div.addEventListener(\"keydown\", boundResizerKeydown, {\n signal\n });\n div.addEventListener(\"blur\", boundResizerBlur, {\n signal\n });\n div.addEventListener(\"focus\", this.#resizerFocus.bind(this, name), {\n signal\n });\n AnnotationEditor._l10nPromise.get(`pdfjs-editor-resizer-label-${name}`).then(msg => div.setAttribute(\"aria-label\", msg));\n }\n }\n const first = this.#allResizerDivs[0];\n let firstPosition = 0;\n for (const div of children) {\n if (div === first) {\n break;\n }\n firstPosition++;\n }\n const nextFirstPosition = (360 - this.rotation + this.parentRotation) % 360 / 90 * (this.#allResizerDivs.length / 4);\n if (nextFirstPosition !== firstPosition) {\n if (nextFirstPosition < firstPosition) {\n for (let i = 0; i < firstPosition - nextFirstPosition; i++) {\n this.#resizersDiv.append(this.#resizersDiv.firstChild);\n }\n } else if (nextFirstPosition > firstPosition) {\n for (let i = 0; i < nextFirstPosition - firstPosition; i++) {\n this.#resizersDiv.firstChild.before(this.#resizersDiv.lastChild);\n }\n }\n let i = 0;\n for (const child of children) {\n const div = this.#allResizerDivs[i++];\n const name = div.getAttribute(\"data-resizer-name\");\n AnnotationEditor._l10nPromise.get(`pdfjs-editor-resizer-label-${name}`).then(msg => child.setAttribute(\"aria-label\", msg));\n }\n }\n this.#setResizerTabIndex(0);\n this.#isResizerEnabledForKeyboard = true;\n this.#resizersDiv.firstChild.focus({\n focusVisible: true\n });\n event.preventDefault();\n event.stopImmediatePropagation();\n }\n #resizerKeydown(event) {\n AnnotationEditor._resizerKeyboardManager.exec(this, event);\n }\n #resizerBlur(event) {\n if (this.#isResizerEnabledForKeyboard && event.relatedTarget?.parentNode !== this.#resizersDiv) {\n this.#stopResizing();\n }\n }\n #resizerFocus(name) {\n this.#focusedResizerName = this.#isResizerEnabledForKeyboard ? 
name : \"\";\n }\n #setResizerTabIndex(value) {\n if (!this.#allResizerDivs) {\n return;\n }\n for (const div of this.#allResizerDivs) {\n div.tabIndex = value;\n }\n }\n _resizeWithKeyboard(x, y) {\n if (!this.#isResizerEnabledForKeyboard) {\n return;\n }\n this.#resizerPointermove(this.#focusedResizerName, {\n movementX: x,\n movementY: y\n });\n }\n #stopResizing() {\n this.#isResizerEnabledForKeyboard = false;\n this.#setResizerTabIndex(-1);\n if (this.#savedDimensions) {\n const {\n savedX,\n savedY,\n savedWidth,\n savedHeight\n } = this.#savedDimensions;\n this.#addResizeToUndoStack(savedX, savedY, savedWidth, savedHeight);\n this.#savedDimensions = null;\n }\n }\n _stopResizingWithKeyboard() {\n this.#stopResizing();\n this.div.focus();\n }\n select() {\n this.makeResizable();\n this.div?.classList.add(\"selectedEditor\");\n if (!this.#editToolbar) {\n this.addEditToolbar().then(() => {\n if (this.div?.classList.contains(\"selectedEditor\")) {\n this.#editToolbar?.show();\n }\n });\n return;\n }\n this.#editToolbar?.show();\n }\n unselect() {\n this.#resizersDiv?.classList.add(\"hidden\");\n this.div?.classList.remove(\"selectedEditor\");\n if (this.div?.contains(document.activeElement)) {\n this._uiManager.currentLayer.div.focus({\n preventScroll: true\n });\n }\n this.#editToolbar?.hide();\n }\n updateParams(type, value) {}\n disableEditing() {}\n enableEditing() {}\n enterInEditMode() {}\n getImageForAltText() {\n return null;\n }\n get contentDiv() {\n return this.div;\n }\n get isEditing() {\n return this.#isEditing;\n }\n set isEditing(value) {\n this.#isEditing = value;\n if (!this.parent) {\n return;\n }\n if (value) {\n this.parent.setSelected(this);\n this.parent.setActiveEditor(this);\n } else {\n this.parent.setActiveEditor(null);\n }\n }\n setAspectRatio(width, height) {\n this.#keepAspectRatio = true;\n const aspectRatio = width / height;\n const {\n style\n } = this.div;\n style.aspectRatio = aspectRatio;\n style.height = \"auto\";\n }\n static get MIN_SIZE() {\n return 16;\n }\n static canCreateNewEmptyEditor() {\n return true;\n }\n get telemetryInitialData() {\n return {\n action: \"added\"\n };\n }\n get telemetryFinalData() {\n return null;\n }\n _reportTelemetry(data, mustWait = false) {\n if (mustWait) {\n this.#telemetryTimeouts ||= new Map();\n const {\n action\n } = data;\n let timeout = this.#telemetryTimeouts.get(action);\n if (timeout) {\n clearTimeout(timeout);\n }\n timeout = setTimeout(() => {\n this._reportTelemetry(data);\n this.#telemetryTimeouts.delete(action);\n if (this.#telemetryTimeouts.size === 0) {\n this.#telemetryTimeouts = null;\n }\n }, AnnotationEditor._telemetryTimeout);\n this.#telemetryTimeouts.set(action, timeout);\n return;\n }\n data.type ||= this.editorType;\n this._uiManager._eventBus.dispatch(\"reporttelemetry\", {\n source: this,\n details: {\n type: \"editing\",\n data\n }\n });\n }\n show(visible = this._isVisible) {\n this.div.classList.toggle(\"hidden\", !visible);\n this._isVisible = visible;\n }\n enable() {\n if (this.div) {\n this.div.tabIndex = 0;\n }\n this.#disabled = false;\n }\n disable() {\n if (this.div) {\n this.div.tabIndex = -1;\n }\n this.#disabled = true;\n }\n renderAnnotationElement(annotation) {\n let content = annotation.container.querySelector(\".annotationContent\");\n if (!content) {\n content = document.createElement(\"div\");\n content.classList.add(\"annotationContent\", this.editorType);\n annotation.container.prepend(content);\n } else if (content.nodeName === \"CANVAS\") {\n const canvas = 
content;\n content = document.createElement(\"div\");\n content.classList.add(\"annotationContent\", this.editorType);\n canvas.before(content);\n }\n return content;\n }\n resetAnnotationElement(annotation) {\n const {\n firstChild\n } = annotation.container;\n if (firstChild.nodeName === \"DIV\" && firstChild.classList.contains(\"annotationContent\")) {\n firstChild.remove();\n }\n }\n}\nclass FakeEditor extends AnnotationEditor {\n constructor(params) {\n super(params);\n this.annotationElementId = params.annotationElementId;\n this.deleted = true;\n }\n serialize() {\n return {\n id: this.annotationElementId,\n deleted: true,\n pageIndex: this.pageIndex\n };\n }\n}\n\n;// CONCATENATED MODULE: ./src/shared/murmurhash3.js\nconst SEED = 0xc3d2e1f0;\nconst MASK_HIGH = 0xffff0000;\nconst MASK_LOW = 0xffff;\nclass MurmurHash3_64 {\n constructor(seed) {\n this.h1 = seed ? seed & 0xffffffff : SEED;\n this.h2 = seed ? seed & 0xffffffff : SEED;\n }\n update(input) {\n let data, length;\n if (typeof input === \"string\") {\n data = new Uint8Array(input.length * 2);\n length = 0;\n for (let i = 0, ii = input.length; i < ii; i++) {\n const code = input.charCodeAt(i);\n if (code <= 0xff) {\n data[length++] = code;\n } else {\n data[length++] = code >>> 8;\n data[length++] = code & 0xff;\n }\n }\n } else if (ArrayBuffer.isView(input)) {\n data = input.slice();\n length = data.byteLength;\n } else {\n throw new Error(\"Invalid data format, must be a string or TypedArray.\");\n }\n const blockCounts = length >> 2;\n const tailLength = length - blockCounts * 4;\n const dataUint32 = new Uint32Array(data.buffer, 0, blockCounts);\n let k1 = 0,\n k2 = 0;\n let h1 = this.h1,\n h2 = this.h2;\n const C1 = 0xcc9e2d51,\n C2 = 0x1b873593;\n const C1_LOW = C1 & MASK_LOW,\n C2_LOW = C2 & MASK_LOW;\n for (let i = 0; i < blockCounts; i++) {\n if (i & 1) {\n k1 = dataUint32[i];\n k1 = k1 * C1 & MASK_HIGH | k1 * C1_LOW & MASK_LOW;\n k1 = k1 << 15 | k1 >>> 17;\n k1 = k1 * C2 & MASK_HIGH | k1 * C2_LOW & MASK_LOW;\n h1 ^= k1;\n h1 = h1 << 13 | h1 >>> 19;\n h1 = h1 * 5 + 0xe6546b64;\n } else {\n k2 = dataUint32[i];\n k2 = k2 * C1 & MASK_HIGH | k2 * C1_LOW & MASK_LOW;\n k2 = k2 << 15 | k2 >>> 17;\n k2 = k2 * C2 & MASK_HIGH | k2 * C2_LOW & MASK_LOW;\n h2 ^= k2;\n h2 = h2 << 13 | h2 >>> 19;\n h2 = h2 * 5 + 0xe6546b64;\n }\n }\n k1 = 0;\n switch (tailLength) {\n case 3:\n k1 ^= data[blockCounts * 4 + 2] << 16;\n case 2:\n k1 ^= data[blockCounts * 4 + 1] << 8;\n case 1:\n k1 ^= data[blockCounts * 4];\n k1 = k1 * C1 & MASK_HIGH | k1 * C1_LOW & MASK_LOW;\n k1 = k1 << 15 | k1 >>> 17;\n k1 = k1 * C2 & MASK_HIGH | k1 * C2_LOW & MASK_LOW;\n if (blockCounts & 1) {\n h1 ^= k1;\n } else {\n h2 ^= k1;\n }\n }\n this.h1 = h1;\n this.h2 = h2;\n }\n hexdigest() {\n let h1 = this.h1,\n h2 = this.h2;\n h1 ^= h2 >>> 1;\n h1 = h1 * 0xed558ccd & MASK_HIGH | h1 * 0x8ccd & MASK_LOW;\n h2 = h2 * 0xff51afd7 & MASK_HIGH | ((h2 << 16 | h1 >>> 16) * 0xafd7ed55 & MASK_HIGH) >>> 16;\n h1 ^= h2 >>> 1;\n h1 = h1 * 0x1a85ec53 & MASK_HIGH | h1 * 0xec53 & MASK_LOW;\n h2 = h2 * 0xc4ceb9fe & MASK_HIGH | ((h2 << 16 | h1 >>> 16) * 0xb9fe1a85 & MASK_HIGH) >>> 16;\n h1 ^= h2 >>> 1;\n return (h1 >>> 0).toString(16).padStart(8, \"0\") + (h2 >>> 0).toString(16).padStart(8, \"0\");\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/annotation_storage.js\n\n\n\nconst SerializableEmpty = Object.freeze({\n map: null,\n hash: \"\",\n transfer: undefined\n});\nclass AnnotationStorage {\n #modified = false;\n #storage = new Map();\n constructor() {\n this.onSetModified = 
null;\n this.onResetModified = null;\n this.onAnnotationEditor = null;\n }\n getValue(key, defaultValue) {\n const value = this.#storage.get(key);\n if (value === undefined) {\n return defaultValue;\n }\n return Object.assign(defaultValue, value);\n }\n getRawValue(key) {\n return this.#storage.get(key);\n }\n remove(key) {\n this.#storage.delete(key);\n if (this.#storage.size === 0) {\n this.resetModified();\n }\n if (typeof this.onAnnotationEditor === \"function\") {\n for (const value of this.#storage.values()) {\n if (value instanceof AnnotationEditor) {\n return;\n }\n }\n this.onAnnotationEditor(null);\n }\n }\n setValue(key, value) {\n const obj = this.#storage.get(key);\n let modified = false;\n if (obj !== undefined) {\n for (const [entry, val] of Object.entries(value)) {\n if (obj[entry] !== val) {\n modified = true;\n obj[entry] = val;\n }\n }\n } else {\n modified = true;\n this.#storage.set(key, value);\n }\n if (modified) {\n this.#setModified();\n }\n if (value instanceof AnnotationEditor && typeof this.onAnnotationEditor === \"function\") {\n this.onAnnotationEditor(value.constructor._type);\n }\n }\n has(key) {\n return this.#storage.has(key);\n }\n getAll() {\n return this.#storage.size > 0 ? objectFromMap(this.#storage) : null;\n }\n setAll(obj) {\n for (const [key, val] of Object.entries(obj)) {\n this.setValue(key, val);\n }\n }\n get size() {\n return this.#storage.size;\n }\n #setModified() {\n if (!this.#modified) {\n this.#modified = true;\n if (typeof this.onSetModified === \"function\") {\n this.onSetModified();\n }\n }\n }\n resetModified() {\n if (this.#modified) {\n this.#modified = false;\n if (typeof this.onResetModified === \"function\") {\n this.onResetModified();\n }\n }\n }\n get print() {\n return new PrintAnnotationStorage(this);\n }\n get serializable() {\n if (this.#storage.size === 0) {\n return SerializableEmpty;\n }\n const map = new Map(),\n hash = new MurmurHash3_64(),\n transfer = [];\n const context = Object.create(null);\n let hasBitmap = false;\n for (const [key, val] of this.#storage) {\n const serialized = val instanceof AnnotationEditor ? val.serialize(false, context) : val;\n if (serialized) {\n map.set(key, serialized);\n hash.update(`${key}:${JSON.stringify(serialized)}`);\n hasBitmap ||= !!serialized.bitmap;\n }\n }\n if (hasBitmap) {\n for (const value of map.values()) {\n if (value.bitmap) {\n transfer.push(value.bitmap);\n }\n }\n }\n return map.size > 0 ? {\n map,\n hash: hash.hexdigest(),\n transfer\n } : SerializableEmpty;\n }\n get editorStats() {\n let stats = null;\n const typeToEditor = new Map();\n for (const value of this.#storage.values()) {\n if (!(value instanceof AnnotationEditor)) {\n continue;\n }\n const editorStats = value.telemetryFinalData;\n if (!editorStats) {\n continue;\n }\n const {\n type\n } = editorStats;\n if (!typeToEditor.has(type)) {\n typeToEditor.set(type, Object.getPrototypeOf(value).constructor);\n }\n stats ||= Object.create(null);\n const map = stats[type] ||= new Map();\n for (const [key, val] of Object.entries(editorStats)) {\n if (key === \"type\") {\n continue;\n }\n let counters = map.get(key);\n if (!counters) {\n counters = new Map();\n map.set(key, counters);\n }\n const count = counters.get(val) ?? 
0;\n counters.set(val, count + 1);\n }\n }\n for (const [type, editor] of typeToEditor) {\n stats[type] = editor.computeTelemetryFinalData(stats[type]);\n }\n return stats;\n }\n}\nclass PrintAnnotationStorage extends AnnotationStorage {\n #serializable;\n constructor(parent) {\n super();\n const {\n map,\n hash,\n transfer\n } = parent.serializable;\n const clone = structuredClone(map, transfer ? {\n transfer\n } : null);\n this.#serializable = {\n map: clone,\n hash,\n transfer\n };\n }\n get print() {\n unreachable(\"Should not call PrintAnnotationStorage.print\");\n }\n get serializable() {\n return this.#serializable;\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/font_loader.js\n\nclass FontLoader {\n #systemFonts = new Set();\n constructor({\n ownerDocument = globalThis.document,\n styleElement = null\n }) {\n this._document = ownerDocument;\n this.nativeFontFaces = new Set();\n this.styleElement = null;\n this.loadingRequests = [];\n this.loadTestFontId = 0;\n }\n addNativeFontFace(nativeFontFace) {\n this.nativeFontFaces.add(nativeFontFace);\n this._document.fonts.add(nativeFontFace);\n }\n removeNativeFontFace(nativeFontFace) {\n this.nativeFontFaces.delete(nativeFontFace);\n this._document.fonts.delete(nativeFontFace);\n }\n insertRule(rule) {\n if (!this.styleElement) {\n this.styleElement = this._document.createElement(\"style\");\n this._document.documentElement.getElementsByTagName(\"head\")[0].append(this.styleElement);\n }\n const styleSheet = this.styleElement.sheet;\n styleSheet.insertRule(rule, styleSheet.cssRules.length);\n }\n clear() {\n for (const nativeFontFace of this.nativeFontFaces) {\n this._document.fonts.delete(nativeFontFace);\n }\n this.nativeFontFaces.clear();\n this.#systemFonts.clear();\n if (this.styleElement) {\n this.styleElement.remove();\n this.styleElement = null;\n }\n }\n async loadSystemFont({\n systemFontInfo: info,\n _inspectFont\n }) {\n if (!info || this.#systemFonts.has(info.loadedName)) {\n return;\n }\n assert(!this.disableFontFace, \"loadSystemFont shouldn't be called when `disableFontFace` is set.\");\n if (this.isFontLoadingAPISupported) {\n const {\n loadedName,\n src,\n style\n } = info;\n const fontFace = new FontFace(loadedName, src, style);\n this.addNativeFontFace(fontFace);\n try {\n await fontFace.load();\n this.#systemFonts.add(loadedName);\n _inspectFont?.(info);\n } catch {\n warn(`Cannot load system font: ${info.baseFontName}, installing it could help to improve PDF rendering.`);\n this.removeNativeFontFace(fontFace);\n }\n return;\n }\n unreachable(\"Not implemented: loadSystemFont without the Font Loading API.\");\n }\n async bind(font) {\n if (font.attached || font.missingFile && !font.systemFontInfo) {\n return;\n }\n font.attached = true;\n if (font.systemFontInfo) {\n await this.loadSystemFont(font);\n return;\n }\n if (this.isFontLoadingAPISupported) {\n const nativeFontFace = font.createNativeFontFace();\n if (nativeFontFace) {\n this.addNativeFontFace(nativeFontFace);\n try {\n await nativeFontFace.loaded;\n } catch (ex) {\n warn(`Failed to load font '${nativeFontFace.family}': '${ex}'.`);\n font.disableFontFace = true;\n throw ex;\n }\n }\n return;\n }\n const rule = font.createFontFaceRule();\n if (rule) {\n this.insertRule(rule);\n if (this.isSyncFontLoadingSupported) {\n return;\n }\n await new Promise(resolve => {\n const request = this._queueLoadingCallback(resolve);\n this._prepareFontLoadEvent(font, request);\n });\n }\n }\n get isFontLoadingAPISupported() {\n const hasFonts = !!this._document?.fonts;\n 
return shadow(this, \"isFontLoadingAPISupported\", hasFonts);\n }\n get isSyncFontLoadingSupported() {\n let supported = false;\n if (isNodeJS) {\n supported = true;\n } else if (typeof navigator !== \"undefined\" && typeof navigator?.userAgent === \"string\" && /Mozilla\\/5.0.*?rv:\\d+.*? Gecko/.test(navigator.userAgent)) {\n supported = true;\n }\n return shadow(this, \"isSyncFontLoadingSupported\", supported);\n }\n _queueLoadingCallback(callback) {\n function completeRequest() {\n assert(!request.done, \"completeRequest() cannot be called twice.\");\n request.done = true;\n while (loadingRequests.length > 0 && loadingRequests[0].done) {\n const otherRequest = loadingRequests.shift();\n setTimeout(otherRequest.callback, 0);\n }\n }\n const {\n loadingRequests\n } = this;\n const request = {\n done: false,\n complete: completeRequest,\n callback\n };\n loadingRequests.push(request);\n return request;\n }\n get _loadTestFont() {\n const testFont = atob(\"T1RUTwALAIAAAwAwQ0ZGIDHtZg4AAAOYAAAAgUZGVE1lkzZwAAAEHAAAABxHREVGABQA\" + \"FQAABDgAAAAeT1MvMlYNYwkAAAEgAAAAYGNtYXABDQLUAAACNAAAAUJoZWFk/xVFDQAA\" + \"ALwAAAA2aGhlYQdkA+oAAAD0AAAAJGhtdHgD6AAAAAAEWAAAAAZtYXhwAAJQAAAAARgA\" + \"AAAGbmFtZVjmdH4AAAGAAAAAsXBvc3T/hgAzAAADeAAAACAAAQAAAAEAALZRFsRfDzz1\" + \"AAsD6AAAAADOBOTLAAAAAM4KHDwAAAAAA+gDIQAAAAgAAgAAAAAAAAABAAADIQAAAFoD\" + \"6AAAAAAD6AABAAAAAAAAAAAAAAAAAAAAAQAAUAAAAgAAAAQD6AH0AAUAAAKKArwAAACM\" + \"AooCvAAAAeAAMQECAAACAAYJAAAAAAAAAAAAAQAAAAAAAAAAAAAAAFBmRWQAwAAuAC4D\" + \"IP84AFoDIQAAAAAAAQAAAAAAAAAAACAAIAABAAAADgCuAAEAAAAAAAAAAQAAAAEAAAAA\" + \"AAEAAQAAAAEAAAAAAAIAAQAAAAEAAAAAAAMAAQAAAAEAAAAAAAQAAQAAAAEAAAAAAAUA\" + \"AQAAAAEAAAAAAAYAAQAAAAMAAQQJAAAAAgABAAMAAQQJAAEAAgABAAMAAQQJAAIAAgAB\" + \"AAMAAQQJAAMAAgABAAMAAQQJAAQAAgABAAMAAQQJAAUAAgABAAMAAQQJAAYAAgABWABY\" + \"AAAAAAAAAwAAAAMAAAAcAAEAAAAAADwAAwABAAAAHAAEACAAAAAEAAQAAQAAAC7//wAA\" + \"AC7////TAAEAAAAAAAABBgAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" + \"AAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" + \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" + \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" + \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\" + \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAD/gwAyAAAAAQAAAAAAAAAAAAAAAAAA\" + \"AAABAAQEAAEBAQJYAAEBASH4DwD4GwHEAvgcA/gXBIwMAYuL+nz5tQXkD5j3CBLnEQAC\" + \"AQEBIVhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYAAABAQAADwACAQEEE/t3\" + \"Dov6fAH6fAT+fPp8+nwHDosMCvm1Cvm1DAz6fBQAAAAAAAABAAAAAMmJbzEAAAAAzgTj\" + \"FQAAAADOBOQpAAEAAAAAAAAADAAUAAQAAAABAAAAAgABAAAAAAAAAAAD6AAAAAAAAA==\");\n return shadow(this, \"_loadTestFont\", testFont);\n }\n _prepareFontLoadEvent(font, request) {\n function int32(data, offset) {\n return data.charCodeAt(offset) << 24 | data.charCodeAt(offset + 1) << 16 | data.charCodeAt(offset + 2) << 8 | data.charCodeAt(offset + 3) & 0xff;\n }\n function spliceString(s, offset, remove, insert) {\n const chunk1 = s.substring(0, offset);\n const chunk2 = s.substring(offset + remove);\n return chunk1 + insert + chunk2;\n }\n let i, ii;\n const canvas = this._document.createElement(\"canvas\");\n canvas.width = 1;\n canvas.height = 1;\n const ctx = canvas.getContext(\"2d\");\n let called = 0;\n function isFontReady(name, callback) {\n if (++called > 30) {\n warn(\"Load test font never loaded.\");\n callback();\n return;\n }\n ctx.font = \"30px \" + name;\n ctx.fillText(\".\", 0, 20);\n const imageData = ctx.getImageData(0, 0, 1, 1);\n if (imageData.data[3] > 0) {\n callback();\n return;\n 
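// (A non-transparent pixel means the test glyph was drawn with the injected\n // font face, which is the signal that loading has completed.)\n 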
}\n setTimeout(isFontReady.bind(null, name, callback));\n }\n const loadTestFontId = `lt${Date.now()}${this.loadTestFontId++}`;\n let data = this._loadTestFont;\n const COMMENT_OFFSET = 976;\n data = spliceString(data, COMMENT_OFFSET, loadTestFontId.length, loadTestFontId);\n const CFF_CHECKSUM_OFFSET = 16;\n const XXXX_VALUE = 0x58585858;\n let checksum = int32(data, CFF_CHECKSUM_OFFSET);\n for (i = 0, ii = loadTestFontId.length - 3; i < ii; i += 4) {\n checksum = checksum - XXXX_VALUE + int32(loadTestFontId, i) | 0;\n }\n if (i < loadTestFontId.length) {\n checksum = checksum - XXXX_VALUE + int32(loadTestFontId + \"XXX\", i) | 0;\n }\n data = spliceString(data, CFF_CHECKSUM_OFFSET, 4, string32(checksum));\n const url = `url(data:font/opentype;base64,${btoa(data)});`;\n const rule = `@font-face {font-family:\"${loadTestFontId}\";src:${url}}`;\n this.insertRule(rule);\n const div = this._document.createElement(\"div\");\n div.style.visibility = \"hidden\";\n div.style.width = div.style.height = \"10px\";\n div.style.position = \"absolute\";\n div.style.top = div.style.left = \"0px\";\n for (const name of [font.loadedName, loadTestFontId]) {\n const span = this._document.createElement(\"span\");\n span.textContent = \"Hi\";\n span.style.fontFamily = name;\n div.append(span);\n }\n this._document.body.append(div);\n isFontReady(loadTestFontId, () => {\n div.remove();\n request.complete();\n });\n }\n}\nclass FontFaceObject {\n constructor(translatedData, {\n disableFontFace = false,\n inspectFont = null\n }) {\n this.compiledGlyphs = Object.create(null);\n for (const i in translatedData) {\n this[i] = translatedData[i];\n }\n this.disableFontFace = disableFontFace === true;\n this._inspectFont = inspectFont;\n }\n createNativeFontFace() {\n if (!this.data || this.disableFontFace) {\n return null;\n }\n let nativeFontFace;\n if (!this.cssFontInfo) {\n nativeFontFace = new FontFace(this.loadedName, this.data, {});\n } else {\n const css = {\n weight: this.cssFontInfo.fontWeight\n };\n if (this.cssFontInfo.italicAngle) {\n css.style = `oblique ${this.cssFontInfo.italicAngle}deg`;\n }\n nativeFontFace = new FontFace(this.cssFontInfo.fontFamily, this.data, css);\n }\n this._inspectFont?.(this);\n return nativeFontFace;\n }\n createFontFaceRule() {\n if (!this.data || this.disableFontFace) {\n return null;\n }\n const data = bytesToString(this.data);\n const url = `url(data:${this.mimetype};base64,${btoa(data)});`;\n let rule;\n if (!this.cssFontInfo) {\n rule = `@font-face {font-family:\"${this.loadedName}\";src:${url}}`;\n } else {\n let css = `font-weight: ${this.cssFontInfo.fontWeight};`;\n if (this.cssFontInfo.italicAngle) {\n css += `font-style: oblique ${this.cssFontInfo.italicAngle}deg;`;\n }\n rule = `@font-face {font-family:\"${this.cssFontInfo.fontFamily}\";${css}src:${url}}`;\n }\n this._inspectFont?.(this, url);\n return rule;\n }\n getPathGenerator(objs, character) {\n if (this.compiledGlyphs[character] !== undefined) {\n return this.compiledGlyphs[character];\n }\n let cmds;\n try {\n cmds = objs.get(this.loadedName + \"_path_\" + character);\n } catch (ex) {\n warn(`getPathGenerator - ignoring character: \"${ex}\".`);\n }\n if (!Array.isArray(cmds) || cmds.length === 0) {\n return this.compiledGlyphs[character] = function (c, size) {};\n }\n const commands = [];\n for (let i = 0, ii = cmds.length; i < ii;) {\n switch (cmds[i++]) {\n case FontRenderOps.BEZIER_CURVE_TO:\n {\n const [a, b, c, d, e, f] = cmds.slice(i, i + 6);\n commands.push(ctx => ctx.bezierCurveTo(a, b, c, d, e, 
f));\n i += 6;\n }\n break;\n case FontRenderOps.MOVE_TO:\n {\n const [a, b] = cmds.slice(i, i + 2);\n commands.push(ctx => ctx.moveTo(a, b));\n i += 2;\n }\n break;\n case FontRenderOps.LINE_TO:\n {\n const [a, b] = cmds.slice(i, i + 2);\n commands.push(ctx => ctx.lineTo(a, b));\n i += 2;\n }\n break;\n case FontRenderOps.QUADRATIC_CURVE_TO:\n {\n const [a, b, c, d] = cmds.slice(i, i + 4);\n commands.push(ctx => ctx.quadraticCurveTo(a, b, c, d));\n i += 4;\n }\n break;\n case FontRenderOps.RESTORE:\n commands.push(ctx => ctx.restore());\n break;\n case FontRenderOps.SAVE:\n commands.push(ctx => ctx.save());\n break;\n case FontRenderOps.SCALE:\n assert(commands.length === 2, \"Scale command is only valid at the third position.\");\n break;\n case FontRenderOps.TRANSFORM:\n {\n const [a, b, c, d, e, f] = cmds.slice(i, i + 6);\n commands.push(ctx => ctx.transform(a, b, c, d, e, f));\n i += 6;\n }\n break;\n case FontRenderOps.TRANSLATE:\n {\n const [a, b] = cmds.slice(i, i + 2);\n commands.push(ctx => ctx.translate(a, b));\n i += 2;\n }\n break;\n }\n }\n return this.compiledGlyphs[character] = function glyphDrawer(ctx, size) {\n commands[0](ctx);\n commands[1](ctx);\n ctx.scale(size, -size);\n for (let i = 2, ii = commands.length; i < ii; i++) {\n commands[i](ctx);\n }\n };\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/node_utils.js\n\n\nif (isNodeJS) {\n var packageCapability = Promise.withResolvers();\n var packageMap = null;\n const loadPackages = async () => {\n const fs = await import( /*webpackIgnore: true*/\"fs\"),\n http = await import( /*webpackIgnore: true*/\"http\"),\n https = await import( /*webpackIgnore: true*/\"https\"),\n url = await import( /*webpackIgnore: true*/\"url\");\n let canvas, path2d;\n return new Map(Object.entries({\n fs,\n http,\n https,\n url,\n canvas,\n path2d\n }));\n };\n loadPackages().then(map => {\n packageMap = map;\n packageCapability.resolve();\n }, reason => {\n warn(`loadPackages: ${reason}`);\n packageMap = new Map();\n packageCapability.resolve();\n });\n}\nclass NodePackages {\n static get promise() {\n return packageCapability.promise;\n }\n static get(name) {\n return packageMap?.get(name);\n }\n}\nconst node_utils_fetchData = function (url) {\n const fs = NodePackages.get(\"fs\");\n return fs.promises.readFile(url).then(data => new Uint8Array(data));\n};\nclass NodeFilterFactory extends BaseFilterFactory {}\nclass NodeCanvasFactory extends BaseCanvasFactory {\n _createCanvas(width, height) {\n const canvas = NodePackages.get(\"canvas\");\n return canvas.createCanvas(width, height);\n }\n}\nclass NodeCMapReaderFactory extends BaseCMapReaderFactory {\n _fetchData(url, compressionType) {\n return node_utils_fetchData(url).then(data => ({\n cMapData: data,\n compressionType\n }));\n }\n}\nclass NodeStandardFontDataFactory extends BaseStandardFontDataFactory {\n _fetchData(url) {\n return node_utils_fetchData(url);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/pattern_helper.js\n\n\nconst PathType = {\n FILL: \"Fill\",\n STROKE: \"Stroke\",\n SHADING: \"Shading\"\n};\nfunction applyBoundingBox(ctx, bbox) {\n if (!bbox) {\n return;\n }\n const width = bbox[2] - bbox[0];\n const height = bbox[3] - bbox[1];\n const region = new Path2D();\n region.rect(bbox[0], bbox[1], width, height);\n ctx.clip(region);\n}\nclass BaseShadingPattern {\n constructor() {\n if (this.constructor === BaseShadingPattern) {\n unreachable(\"Cannot initialize BaseShadingPattern.\");\n }\n }\n getPattern() {\n unreachable(\"Abstract method `getPattern` called.\");\n 
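// Concrete subclasses below (RadialAxialShadingPattern, MeshShadingPattern,\n // DummyShadingPattern) override getPattern() to return a canvas gradient, a\n // CanvasPattern or a plain CSS color. Roughly, callers use them as:\n //   const shading = getShadingPattern(IR); // factory defined further down\n //   ctx.fillStyle = shading.getPattern(ctx, owner, inverse, PathType.SHADING);\n 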
}\n}\nclass RadialAxialShadingPattern extends BaseShadingPattern {\n constructor(IR) {\n super();\n this._type = IR[1];\n this._bbox = IR[2];\n this._colorStops = IR[3];\n this._p0 = IR[4];\n this._p1 = IR[5];\n this._r0 = IR[6];\n this._r1 = IR[7];\n this.matrix = null;\n }\n _createGradient(ctx) {\n let grad;\n if (this._type === \"axial\") {\n grad = ctx.createLinearGradient(this._p0[0], this._p0[1], this._p1[0], this._p1[1]);\n } else if (this._type === \"radial\") {\n grad = ctx.createRadialGradient(this._p0[0], this._p0[1], this._r0, this._p1[0], this._p1[1], this._r1);\n }\n for (const colorStop of this._colorStops) {\n grad.addColorStop(colorStop[0], colorStop[1]);\n }\n return grad;\n }\n getPattern(ctx, owner, inverse, pathType) {\n let pattern;\n if (pathType === PathType.STROKE || pathType === PathType.FILL) {\n const ownerBBox = owner.current.getClippedPathBoundingBox(pathType, getCurrentTransform(ctx)) || [0, 0, 0, 0];\n const width = Math.ceil(ownerBBox[2] - ownerBBox[0]) || 1;\n const height = Math.ceil(ownerBBox[3] - ownerBBox[1]) || 1;\n const tmpCanvas = owner.cachedCanvases.getCanvas(\"pattern\", width, height, true);\n const tmpCtx = tmpCanvas.context;\n tmpCtx.clearRect(0, 0, tmpCtx.canvas.width, tmpCtx.canvas.height);\n tmpCtx.beginPath();\n tmpCtx.rect(0, 0, tmpCtx.canvas.width, tmpCtx.canvas.height);\n tmpCtx.translate(-ownerBBox[0], -ownerBBox[1]);\n inverse = Util.transform(inverse, [1, 0, 0, 1, ownerBBox[0], ownerBBox[1]]);\n tmpCtx.transform(...owner.baseTransform);\n if (this.matrix) {\n tmpCtx.transform(...this.matrix);\n }\n applyBoundingBox(tmpCtx, this._bbox);\n tmpCtx.fillStyle = this._createGradient(tmpCtx);\n tmpCtx.fill();\n pattern = ctx.createPattern(tmpCanvas.canvas, \"no-repeat\");\n const domMatrix = new DOMMatrix(inverse);\n pattern.setTransform(domMatrix);\n } else {\n applyBoundingBox(ctx, this._bbox);\n pattern = this._createGradient(ctx);\n }\n return pattern;\n }\n}\nfunction drawTriangle(data, context, p1, p2, p3, c1, c2, c3) {\n const coords = context.coords,\n colors = context.colors;\n const bytes = data.data,\n rowSize = data.width * 4;\n let tmp;\n if (coords[p1 + 1] > coords[p2 + 1]) {\n tmp = p1;\n p1 = p2;\n p2 = tmp;\n tmp = c1;\n c1 = c2;\n c2 = tmp;\n }\n if (coords[p2 + 1] > coords[p3 + 1]) {\n tmp = p2;\n p2 = p3;\n p3 = tmp;\n tmp = c2;\n c2 = c3;\n c3 = tmp;\n }\n if (coords[p1 + 1] > coords[p2 + 1]) {\n tmp = p1;\n p1 = p2;\n p2 = tmp;\n tmp = c1;\n c1 = c2;\n c2 = tmp;\n }\n const x1 = (coords[p1] + context.offsetX) * context.scaleX;\n const y1 = (coords[p1 + 1] + context.offsetY) * context.scaleY;\n const x2 = (coords[p2] + context.offsetX) * context.scaleX;\n const y2 = (coords[p2 + 1] + context.offsetY) * context.scaleY;\n const x3 = (coords[p3] + context.offsetX) * context.scaleX;\n const y3 = (coords[p3 + 1] + context.offsetY) * context.scaleY;\n if (y1 >= y3) {\n return;\n }\n const c1r = colors[c1],\n c1g = colors[c1 + 1],\n c1b = colors[c1 + 2];\n const c2r = colors[c2],\n c2g = colors[c2 + 1],\n c2b = colors[c2 + 2];\n const c3r = colors[c3],\n c3g = colors[c3 + 1],\n c3b = colors[c3 + 2];\n const minY = Math.round(y1),\n maxY = Math.round(y3);\n let xa, car, cag, cab;\n let xb, cbr, cbg, cbb;\n for (let y = minY; y <= maxY; y++) {\n if (y < y2) {\n const k = y < y1 ? 
0 : (y1 - y) / (y1 - y2);\n xa = x1 - (x1 - x2) * k;\n car = c1r - (c1r - c2r) * k;\n cag = c1g - (c1g - c2g) * k;\n cab = c1b - (c1b - c2b) * k;\n } else {\n let k;\n if (y > y3) {\n k = 1;\n } else if (y2 === y3) {\n k = 0;\n } else {\n k = (y2 - y) / (y2 - y3);\n }\n xa = x2 - (x2 - x3) * k;\n car = c2r - (c2r - c3r) * k;\n cag = c2g - (c2g - c3g) * k;\n cab = c2b - (c2b - c3b) * k;\n }\n let k;\n if (y < y1) {\n k = 0;\n } else if (y > y3) {\n k = 1;\n } else {\n k = (y1 - y) / (y1 - y3);\n }\n xb = x1 - (x1 - x3) * k;\n cbr = c1r - (c1r - c3r) * k;\n cbg = c1g - (c1g - c3g) * k;\n cbb = c1b - (c1b - c3b) * k;\n const x1_ = Math.round(Math.min(xa, xb));\n const x2_ = Math.round(Math.max(xa, xb));\n let j = rowSize * y + x1_ * 4;\n for (let x = x1_; x <= x2_; x++) {\n k = (xa - x) / (xa - xb);\n if (k < 0) {\n k = 0;\n } else if (k > 1) {\n k = 1;\n }\n bytes[j++] = car - (car - cbr) * k | 0;\n bytes[j++] = cag - (cag - cbg) * k | 0;\n bytes[j++] = cab - (cab - cbb) * k | 0;\n bytes[j++] = 255;\n }\n }\n}\nfunction drawFigure(data, figure, context) {\n const ps = figure.coords;\n const cs = figure.colors;\n let i, ii;\n switch (figure.type) {\n case \"lattice\":\n const verticesPerRow = figure.verticesPerRow;\n const rows = Math.floor(ps.length / verticesPerRow) - 1;\n const cols = verticesPerRow - 1;\n for (i = 0; i < rows; i++) {\n let q = i * verticesPerRow;\n for (let j = 0; j < cols; j++, q++) {\n drawTriangle(data, context, ps[q], ps[q + 1], ps[q + verticesPerRow], cs[q], cs[q + 1], cs[q + verticesPerRow]);\n drawTriangle(data, context, ps[q + verticesPerRow + 1], ps[q + 1], ps[q + verticesPerRow], cs[q + verticesPerRow + 1], cs[q + 1], cs[q + verticesPerRow]);\n }\n }\n break;\n case \"triangles\":\n for (i = 0, ii = ps.length; i < ii; i += 3) {\n drawTriangle(data, context, ps[i], ps[i + 1], ps[i + 2], cs[i], cs[i + 1], cs[i + 2]);\n }\n break;\n default:\n throw new Error(\"illegal figure\");\n }\n}\nclass MeshShadingPattern extends BaseShadingPattern {\n constructor(IR) {\n super();\n this._coords = IR[2];\n this._colors = IR[3];\n this._figures = IR[4];\n this._bounds = IR[5];\n this._bbox = IR[7];\n this._background = IR[8];\n this.matrix = null;\n }\n _createMeshCanvas(combinedScale, backgroundColor, cachedCanvases) {\n const EXPECTED_SCALE = 1.1;\n const MAX_PATTERN_SIZE = 3000;\n const BORDER_SIZE = 2;\n const offsetX = Math.floor(this._bounds[0]);\n const offsetY = Math.floor(this._bounds[1]);\n const boundsWidth = Math.ceil(this._bounds[2]) - offsetX;\n const boundsHeight = Math.ceil(this._bounds[3]) - offsetY;\n const width = Math.min(Math.ceil(Math.abs(boundsWidth * combinedScale[0] * EXPECTED_SCALE)), MAX_PATTERN_SIZE);\n const height = Math.min(Math.ceil(Math.abs(boundsHeight * combinedScale[1] * EXPECTED_SCALE)), MAX_PATTERN_SIZE);\n const scaleX = boundsWidth / width;\n const scaleY = boundsHeight / height;\n const context = {\n coords: this._coords,\n colors: this._colors,\n offsetX: -offsetX,\n offsetY: -offsetY,\n scaleX: 1 / scaleX,\n scaleY: 1 / scaleY\n };\n const paddedWidth = width + BORDER_SIZE * 2;\n const paddedHeight = height + BORDER_SIZE * 2;\n const tmpCanvas = cachedCanvases.getCanvas(\"mesh\", paddedWidth, paddedHeight, false);\n const tmpCtx = tmpCanvas.context;\n const data = tmpCtx.createImageData(width, height);\n if (backgroundColor) {\n const bytes = data.data;\n for (let i = 0, ii = bytes.length; i < ii; i += 4) {\n bytes[i] = backgroundColor[0];\n bytes[i + 1] = backgroundColor[1];\n bytes[i + 2] = backgroundColor[2];\n bytes[i + 3] = 
255;\n }\n }\n for (const figure of this._figures) {\n drawFigure(data, figure, context);\n }\n tmpCtx.putImageData(data, BORDER_SIZE, BORDER_SIZE);\n const canvas = tmpCanvas.canvas;\n return {\n canvas,\n offsetX: offsetX - BORDER_SIZE * scaleX,\n offsetY: offsetY - BORDER_SIZE * scaleY,\n scaleX,\n scaleY\n };\n }\n getPattern(ctx, owner, inverse, pathType) {\n applyBoundingBox(ctx, this._bbox);\n let scale;\n if (pathType === PathType.SHADING) {\n scale = Util.singularValueDecompose2dScale(getCurrentTransform(ctx));\n } else {\n scale = Util.singularValueDecompose2dScale(owner.baseTransform);\n if (this.matrix) {\n const matrixScale = Util.singularValueDecompose2dScale(this.matrix);\n scale = [scale[0] * matrixScale[0], scale[1] * matrixScale[1]];\n }\n }\n const temporaryPatternCanvas = this._createMeshCanvas(scale, pathType === PathType.SHADING ? null : this._background, owner.cachedCanvases);\n if (pathType !== PathType.SHADING) {\n ctx.setTransform(...owner.baseTransform);\n if (this.matrix) {\n ctx.transform(...this.matrix);\n }\n }\n ctx.translate(temporaryPatternCanvas.offsetX, temporaryPatternCanvas.offsetY);\n ctx.scale(temporaryPatternCanvas.scaleX, temporaryPatternCanvas.scaleY);\n return ctx.createPattern(temporaryPatternCanvas.canvas, \"no-repeat\");\n }\n}\nclass DummyShadingPattern extends BaseShadingPattern {\n getPattern() {\n return \"hotpink\";\n }\n}\nfunction getShadingPattern(IR) {\n switch (IR[0]) {\n case \"RadialAxial\":\n return new RadialAxialShadingPattern(IR);\n case \"Mesh\":\n return new MeshShadingPattern(IR);\n case \"Dummy\":\n return new DummyShadingPattern();\n }\n throw new Error(`Unknown IR type: ${IR[0]}`);\n}\nconst PaintType = {\n COLORED: 1,\n UNCOLORED: 2\n};\nclass TilingPattern {\n static MAX_PATTERN_SIZE = 3000;\n constructor(IR, color, ctx, canvasGraphicsFactory, baseTransform) {\n this.operatorList = IR[2];\n this.matrix = IR[3];\n this.bbox = IR[4];\n this.xstep = IR[5];\n this.ystep = IR[6];\n this.paintType = IR[7];\n this.tilingType = IR[8];\n this.color = color;\n this.ctx = ctx;\n this.canvasGraphicsFactory = canvasGraphicsFactory;\n this.baseTransform = baseTransform;\n }\n createPatternCanvas(owner) {\n const operatorList = this.operatorList;\n const bbox = this.bbox;\n const xstep = this.xstep;\n const ystep = this.ystep;\n const paintType = this.paintType;\n const tilingType = this.tilingType;\n const color = this.color;\n const canvasGraphicsFactory = this.canvasGraphicsFactory;\n info(\"TilingType: \" + tilingType);\n const x0 = bbox[0],\n y0 = bbox[1],\n x1 = bbox[2],\n y1 = bbox[3];\n const matrixScale = Util.singularValueDecompose2dScale(this.matrix);\n const curMatrixScale = Util.singularValueDecompose2dScale(this.baseTransform);\n const combinedScale = [matrixScale[0] * curMatrixScale[0], matrixScale[1] * curMatrixScale[1]];\n const dimx = this.getSizeAndScale(xstep, this.ctx.canvas.width, combinedScale[0]);\n const dimy = this.getSizeAndScale(ystep, this.ctx.canvas.height, combinedScale[1]);\n const tmpCanvas = owner.cachedCanvases.getCanvas(\"pattern\", dimx.size, dimy.size, true);\n const tmpCtx = tmpCanvas.context;\n const graphics = canvasGraphicsFactory.createCanvasGraphics(tmpCtx);\n graphics.groupLevel = owner.groupLevel;\n this.setFillAndStrokeStyleToContext(graphics, paintType, color);\n let adjustedX0 = x0;\n let adjustedY0 = y0;\n let adjustedX1 = x1;\n let adjustedY1 = y1;\n if (x0 < 0) {\n adjustedX0 = 0;\n adjustedX1 += Math.abs(x0);\n }\n if (y0 < 0) {\n adjustedY0 = 0;\n adjustedY1 += Math.abs(y0);\n }\n 
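// The bbox origin was clamped to non-negative coordinates above; now map\n // pattern space onto the cached tile canvas: translate so the adjusted origin\n // lands at (0, 0) and scale by the per-axis factors from getSizeAndScale().\n 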
tmpCtx.translate(-(dimx.scale * adjustedX0), -(dimy.scale * adjustedY0));\n graphics.transform(dimx.scale, 0, 0, dimy.scale, 0, 0);\n tmpCtx.save();\n this.clipBbox(graphics, adjustedX0, adjustedY0, adjustedX1, adjustedY1);\n graphics.baseTransform = getCurrentTransform(graphics.ctx);\n graphics.executeOperatorList(operatorList);\n graphics.endDrawing();\n return {\n canvas: tmpCanvas.canvas,\n scaleX: dimx.scale,\n scaleY: dimy.scale,\n offsetX: adjustedX0,\n offsetY: adjustedY0\n };\n }\n getSizeAndScale(step, realOutputSize, scale) {\n step = Math.abs(step);\n const maxSize = Math.max(TilingPattern.MAX_PATTERN_SIZE, realOutputSize);\n let size = Math.ceil(step * scale);\n if (size >= maxSize) {\n size = maxSize;\n } else {\n scale = size / step;\n }\n return {\n scale,\n size\n };\n }\n clipBbox(graphics, x0, y0, x1, y1) {\n const bboxWidth = x1 - x0;\n const bboxHeight = y1 - y0;\n graphics.ctx.rect(x0, y0, bboxWidth, bboxHeight);\n graphics.current.updateRectMinMax(getCurrentTransform(graphics.ctx), [x0, y0, x1, y1]);\n graphics.clip();\n graphics.endPath();\n }\n setFillAndStrokeStyleToContext(graphics, paintType, color) {\n const context = graphics.ctx,\n current = graphics.current;\n switch (paintType) {\n case PaintType.COLORED:\n const ctx = this.ctx;\n context.fillStyle = ctx.fillStyle;\n context.strokeStyle = ctx.strokeStyle;\n current.fillColor = ctx.fillStyle;\n current.strokeColor = ctx.strokeStyle;\n break;\n case PaintType.UNCOLORED:\n const cssColor = Util.makeHexColor(color[0], color[1], color[2]);\n context.fillStyle = cssColor;\n context.strokeStyle = cssColor;\n current.fillColor = cssColor;\n current.strokeColor = cssColor;\n break;\n default:\n throw new FormatError(`Unsupported paint type: ${paintType}`);\n }\n }\n getPattern(ctx, owner, inverse, pathType) {\n let matrix = inverse;\n if (pathType !== PathType.SHADING) {\n matrix = Util.transform(matrix, owner.baseTransform);\n if (this.matrix) {\n matrix = Util.transform(matrix, this.matrix);\n }\n }\n const temporaryPatternCanvas = this.createPatternCanvas(owner);\n let domMatrix = new DOMMatrix(matrix);\n domMatrix = domMatrix.translate(temporaryPatternCanvas.offsetX, temporaryPatternCanvas.offsetY);\n domMatrix = domMatrix.scale(1 / temporaryPatternCanvas.scaleX, 1 / temporaryPatternCanvas.scaleY);\n const pattern = ctx.createPattern(temporaryPatternCanvas.canvas, \"repeat\");\n pattern.setTransform(domMatrix);\n return pattern;\n }\n}\n\n;// CONCATENATED MODULE: ./src/shared/image_utils.js\n\nfunction convertToRGBA(params) {\n switch (params.kind) {\n case ImageKind.GRAYSCALE_1BPP:\n return convertBlackAndWhiteToRGBA(params);\n case ImageKind.RGB_24BPP:\n return convertRGBToRGBA(params);\n }\n return null;\n}\nfunction convertBlackAndWhiteToRGBA({\n src,\n srcPos = 0,\n dest,\n width,\n height,\n nonBlackColor = 0xffffffff,\n inverseDecode = false\n}) {\n const black = util_FeatureTest.isLittleEndian ? 0xff000000 : 0x000000ff;\n const [zeroMapping, oneMapping] = inverseDecode ? [nonBlackColor, black] : [black, nonBlackColor];\n const widthInSource = width >> 3;\n const widthRemainder = width & 7;\n const srcLength = src.length;\n dest = new Uint32Array(dest.buffer);\n let destPos = 0;\n for (let i = 0; i < height; i++) {\n for (const max = srcPos + widthInSource; srcPos < max; srcPos++) {\n const elem = srcPos < srcLength ? src[srcPos] : 255;\n dest[destPos++] = elem & 0b10000000 ? oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b1000000 ? oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b100000 ? 
oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b10000 ? oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b1000 ? oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b100 ? oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b10 ? oneMapping : zeroMapping;\n dest[destPos++] = elem & 0b1 ? oneMapping : zeroMapping;\n }\n if (widthRemainder === 0) {\n continue;\n }\n const elem = srcPos < srcLength ? src[srcPos++] : 255;\n for (let j = 0; j < widthRemainder; j++) {\n dest[destPos++] = elem & 1 << 7 - j ? oneMapping : zeroMapping;\n }\n }\n return {\n srcPos,\n destPos\n };\n}\nfunction convertRGBToRGBA({\n src,\n srcPos = 0,\n dest,\n destPos = 0,\n width,\n height\n}) {\n let i = 0;\n const len32 = src.length >> 2;\n const src32 = new Uint32Array(src.buffer, srcPos, len32);\n if (FeatureTest.isLittleEndian) {\n for (; i < len32 - 2; i += 3, destPos += 4) {\n const s1 = src32[i];\n const s2 = src32[i + 1];\n const s3 = src32[i + 2];\n dest[destPos] = s1 | 0xff000000;\n dest[destPos + 1] = s1 >>> 24 | s2 << 8 | 0xff000000;\n dest[destPos + 2] = s2 >>> 16 | s3 << 16 | 0xff000000;\n dest[destPos + 3] = s3 >>> 8 | 0xff000000;\n }\n for (let j = i * 4, jj = src.length; j < jj; j += 3) {\n dest[destPos++] = src[j] | src[j + 1] << 8 | src[j + 2] << 16 | 0xff000000;\n }\n } else {\n for (; i < len32 - 2; i += 3, destPos += 4) {\n const s1 = src32[i];\n const s2 = src32[i + 1];\n const s3 = src32[i + 2];\n dest[destPos] = s1 | 0xff;\n dest[destPos + 1] = s1 << 24 | s2 >>> 8 | 0xff;\n dest[destPos + 2] = s2 << 16 | s3 >>> 16 | 0xff;\n dest[destPos + 3] = s3 << 8 | 0xff;\n }\n for (let j = i * 4, jj = src.length; j < jj; j += 3) {\n dest[destPos++] = src[j] << 24 | src[j + 1] << 16 | src[j + 2] << 8 | 0xff;\n }\n }\n return {\n srcPos,\n destPos\n };\n}\nfunction grayToRGBA(src, dest) {\n if (FeatureTest.isLittleEndian) {\n for (let i = 0, ii = src.length; i < ii; i++) {\n dest[i] = src[i] * 0x10101 | 0xff000000;\n }\n } else {\n for (let i = 0, ii = src.length; i < ii; i++) {\n dest[i] = src[i] * 0x1010100 | 0x000000ff;\n }\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/canvas.js\n\n\n\n\nconst MIN_FONT_SIZE = 16;\nconst MAX_FONT_SIZE = 100;\nconst EXECUTION_TIME = 15;\nconst EXECUTION_STEPS = 10;\nconst MAX_SIZE_TO_COMPILE = 1000;\nconst FULL_CHUNK_HEIGHT = 16;\nfunction mirrorContextOperations(ctx, destCtx) {\n if (ctx._removeMirroring) {\n throw new Error(\"Context is already forwarding operations.\");\n }\n ctx.__originalSave = ctx.save;\n ctx.__originalRestore = ctx.restore;\n ctx.__originalRotate = ctx.rotate;\n ctx.__originalScale = ctx.scale;\n ctx.__originalTranslate = ctx.translate;\n ctx.__originalTransform = ctx.transform;\n ctx.__originalSetTransform = ctx.setTransform;\n ctx.__originalResetTransform = ctx.resetTransform;\n ctx.__originalClip = ctx.clip;\n ctx.__originalMoveTo = ctx.moveTo;\n ctx.__originalLineTo = ctx.lineTo;\n ctx.__originalBezierCurveTo = ctx.bezierCurveTo;\n ctx.__originalRect = ctx.rect;\n ctx.__originalClosePath = ctx.closePath;\n ctx.__originalBeginPath = ctx.beginPath;\n ctx._removeMirroring = () => {\n ctx.save = ctx.__originalSave;\n ctx.restore = ctx.__originalRestore;\n ctx.rotate = ctx.__originalRotate;\n ctx.scale = ctx.__originalScale;\n ctx.translate = ctx.__originalTranslate;\n ctx.transform = ctx.__originalTransform;\n ctx.setTransform = ctx.__originalSetTransform;\n ctx.resetTransform = ctx.__originalResetTransform;\n ctx.clip = ctx.__originalClip;\n ctx.moveTo = ctx.__originalMoveTo;\n ctx.lineTo = ctx.__originalLineTo;\n 
ctx.bezierCurveTo = ctx.__originalBezierCurveTo;\n ctx.rect = ctx.__originalRect;\n ctx.closePath = ctx.__originalClosePath;\n ctx.beginPath = ctx.__originalBeginPath;\n delete ctx._removeMirroring;\n };\n ctx.save = function ctxSave() {\n destCtx.save();\n this.__originalSave();\n };\n ctx.restore = function ctxRestore() {\n destCtx.restore();\n this.__originalRestore();\n };\n ctx.translate = function ctxTranslate(x, y) {\n destCtx.translate(x, y);\n this.__originalTranslate(x, y);\n };\n ctx.scale = function ctxScale(x, y) {\n destCtx.scale(x, y);\n this.__originalScale(x, y);\n };\n ctx.transform = function ctxTransform(a, b, c, d, e, f) {\n destCtx.transform(a, b, c, d, e, f);\n this.__originalTransform(a, b, c, d, e, f);\n };\n ctx.setTransform = function ctxSetTransform(a, b, c, d, e, f) {\n destCtx.setTransform(a, b, c, d, e, f);\n this.__originalSetTransform(a, b, c, d, e, f);\n };\n ctx.resetTransform = function ctxResetTransform() {\n destCtx.resetTransform();\n this.__originalResetTransform();\n };\n ctx.rotate = function ctxRotate(angle) {\n destCtx.rotate(angle);\n this.__originalRotate(angle);\n };\n ctx.clip = function ctxRotate(rule) {\n destCtx.clip(rule);\n this.__originalClip(rule);\n };\n ctx.moveTo = function (x, y) {\n destCtx.moveTo(x, y);\n this.__originalMoveTo(x, y);\n };\n ctx.lineTo = function (x, y) {\n destCtx.lineTo(x, y);\n this.__originalLineTo(x, y);\n };\n ctx.bezierCurveTo = function (cp1x, cp1y, cp2x, cp2y, x, y) {\n destCtx.bezierCurveTo(cp1x, cp1y, cp2x, cp2y, x, y);\n this.__originalBezierCurveTo(cp1x, cp1y, cp2x, cp2y, x, y);\n };\n ctx.rect = function (x, y, width, height) {\n destCtx.rect(x, y, width, height);\n this.__originalRect(x, y, width, height);\n };\n ctx.closePath = function () {\n destCtx.closePath();\n this.__originalClosePath();\n };\n ctx.beginPath = function () {\n destCtx.beginPath();\n this.__originalBeginPath();\n };\n}\nclass CachedCanvases {\n constructor(canvasFactory) {\n this.canvasFactory = canvasFactory;\n this.cache = Object.create(null);\n }\n getCanvas(id, width, height) {\n let canvasEntry;\n if (this.cache[id] !== undefined) {\n canvasEntry = this.cache[id];\n this.canvasFactory.reset(canvasEntry, width, height);\n } else {\n canvasEntry = this.canvasFactory.create(width, height);\n this.cache[id] = canvasEntry;\n }\n return canvasEntry;\n }\n delete(id) {\n delete this.cache[id];\n }\n clear() {\n for (const id in this.cache) {\n const canvasEntry = this.cache[id];\n this.canvasFactory.destroy(canvasEntry);\n delete this.cache[id];\n }\n }\n}\nfunction drawImageAtIntegerCoords(ctx, srcImg, srcX, srcY, srcW, srcH, destX, destY, destW, destH) {\n const [a, b, c, d, tx, ty] = getCurrentTransform(ctx);\n if (b === 0 && c === 0) {\n const tlX = destX * a + tx;\n const rTlX = Math.round(tlX);\n const tlY = destY * d + ty;\n const rTlY = Math.round(tlY);\n const brX = (destX + destW) * a + tx;\n const rWidth = Math.abs(Math.round(brX) - rTlX) || 1;\n const brY = (destY + destH) * d + ty;\n const rHeight = Math.abs(Math.round(brY) - rTlY) || 1;\n ctx.setTransform(Math.sign(a), 0, 0, Math.sign(d), rTlX, rTlY);\n ctx.drawImage(srcImg, srcX, srcY, srcW, srcH, 0, 0, rWidth, rHeight);\n ctx.setTransform(a, b, c, d, tx, ty);\n return [rWidth, rHeight];\n }\n if (a === 0 && d === 0) {\n const tlX = destY * c + tx;\n const rTlX = Math.round(tlX);\n const tlY = destX * b + ty;\n const rTlY = Math.round(tlY);\n const brX = (destY + destH) * c + tx;\n const rWidth = Math.abs(Math.round(brX) - rTlX) || 1;\n const brY = (destX + destW) 
* b + ty;\n const rHeight = Math.abs(Math.round(brY) - rTlY) || 1;\n ctx.setTransform(0, Math.sign(b), Math.sign(c), 0, rTlX, rTlY);\n ctx.drawImage(srcImg, srcX, srcY, srcW, srcH, 0, 0, rHeight, rWidth);\n ctx.setTransform(a, b, c, d, tx, ty);\n return [rHeight, rWidth];\n }\n ctx.drawImage(srcImg, srcX, srcY, srcW, srcH, destX, destY, destW, destH);\n const scaleX = Math.hypot(a, b);\n const scaleY = Math.hypot(c, d);\n return [scaleX * destW, scaleY * destH];\n}\nfunction compileType3Glyph(imgData) {\n const {\n width,\n height\n } = imgData;\n if (width > MAX_SIZE_TO_COMPILE || height > MAX_SIZE_TO_COMPILE) {\n return null;\n }\n const POINT_TO_PROCESS_LIMIT = 1000;\n const POINT_TYPES = new Uint8Array([0, 2, 4, 0, 1, 0, 5, 4, 8, 10, 0, 8, 0, 2, 1, 0]);\n const width1 = width + 1;\n let points = new Uint8Array(width1 * (height + 1));\n let i, j, j0;\n const lineSize = width + 7 & ~7;\n let data = new Uint8Array(lineSize * height),\n pos = 0;\n for (const elem of imgData.data) {\n let mask = 128;\n while (mask > 0) {\n data[pos++] = elem & mask ? 0 : 255;\n mask >>= 1;\n }\n }\n let count = 0;\n pos = 0;\n if (data[pos] !== 0) {\n points[0] = 1;\n ++count;\n }\n for (j = 1; j < width; j++) {\n if (data[pos] !== data[pos + 1]) {\n points[j] = data[pos] ? 2 : 1;\n ++count;\n }\n pos++;\n }\n if (data[pos] !== 0) {\n points[j] = 2;\n ++count;\n }\n for (i = 1; i < height; i++) {\n pos = i * lineSize;\n j0 = i * width1;\n if (data[pos - lineSize] !== data[pos]) {\n points[j0] = data[pos] ? 1 : 8;\n ++count;\n }\n let sum = (data[pos] ? 4 : 0) + (data[pos - lineSize] ? 8 : 0);\n for (j = 1; j < width; j++) {\n sum = (sum >> 2) + (data[pos + 1] ? 4 : 0) + (data[pos - lineSize + 1] ? 8 : 0);\n if (POINT_TYPES[sum]) {\n points[j0 + j] = POINT_TYPES[sum];\n ++count;\n }\n pos++;\n }\n if (data[pos - lineSize] !== data[pos]) {\n points[j0 + j] = data[pos] ? 2 : 4;\n ++count;\n }\n if (count > POINT_TO_PROCESS_LIMIT) {\n return null;\n }\n }\n pos = lineSize * (height - 1);\n j0 = i * width1;\n if (data[pos] !== 0) {\n points[j0] = 8;\n ++count;\n }\n for (j = 1; j < width; j++) {\n if (data[pos] !== data[pos + 1]) {\n points[j0 + j] = data[pos] ? 
4 : 8;\n ++count;\n }\n pos++;\n }\n if (data[pos] !== 0) {\n points[j0 + j] = 4;\n ++count;\n }\n if (count > POINT_TO_PROCESS_LIMIT) {\n return null;\n }\n const steps = new Int32Array([0, width1, -1, 0, -width1, 0, 0, 0, 1]);\n const path = new Path2D();\n for (i = 0; count && i <= height; i++) {\n let p = i * width1;\n const end = p + width;\n while (p < end && !points[p]) {\n p++;\n }\n if (p === end) {\n continue;\n }\n path.moveTo(p % width1, i);\n const p0 = p;\n let type = points[p];\n do {\n const step = steps[type];\n do {\n p += step;\n } while (!points[p]);\n const pp = points[p];\n if (pp !== 5 && pp !== 10) {\n type = pp;\n points[p] = 0;\n } else {\n type = pp & 0x33 * type >> 4;\n points[p] &= type >> 2 | type << 2;\n }\n path.lineTo(p % width1, p / width1 | 0);\n if (!points[p]) {\n --count;\n }\n } while (p0 !== p);\n --i;\n }\n data = null;\n points = null;\n const drawOutline = function (c) {\n c.save();\n c.scale(1 / width, -1 / height);\n c.translate(0, -height);\n c.fill(path);\n c.beginPath();\n c.restore();\n };\n return drawOutline;\n}\nclass CanvasExtraState {\n constructor(width, height) {\n this.alphaIsShape = false;\n this.fontSize = 0;\n this.fontSizeScale = 1;\n this.textMatrix = IDENTITY_MATRIX;\n this.textMatrixScale = 1;\n this.fontMatrix = FONT_IDENTITY_MATRIX;\n this.leading = 0;\n this.x = 0;\n this.y = 0;\n this.lineX = 0;\n this.lineY = 0;\n this.charSpacing = 0;\n this.wordSpacing = 0;\n this.textHScale = 1;\n this.textRenderingMode = TextRenderingMode.FILL;\n this.textRise = 0;\n this.fillColor = \"#000000\";\n this.strokeColor = \"#000000\";\n this.patternFill = false;\n this.fillAlpha = 1;\n this.strokeAlpha = 1;\n this.lineWidth = 1;\n this.activeSMask = null;\n this.transferMaps = \"none\";\n this.startNewPathAndClipBox([0, 0, width, height]);\n }\n clone() {\n const clone = Object.create(this);\n clone.clipBox = this.clipBox.slice();\n return clone;\n }\n setCurrentPoint(x, y) {\n this.x = x;\n this.y = y;\n }\n updatePathMinMax(transform, x, y) {\n [x, y] = Util.applyTransform([x, y], transform);\n this.minX = Math.min(this.minX, x);\n this.minY = Math.min(this.minY, y);\n this.maxX = Math.max(this.maxX, x);\n this.maxY = Math.max(this.maxY, y);\n }\n updateRectMinMax(transform, rect) {\n const p1 = Util.applyTransform(rect, transform);\n const p2 = Util.applyTransform(rect.slice(2), transform);\n const p3 = Util.applyTransform([rect[0], rect[3]], transform);\n const p4 = Util.applyTransform([rect[2], rect[1]], transform);\n this.minX = Math.min(this.minX, p1[0], p2[0], p3[0], p4[0]);\n this.minY = Math.min(this.minY, p1[1], p2[1], p3[1], p4[1]);\n this.maxX = Math.max(this.maxX, p1[0], p2[0], p3[0], p4[0]);\n this.maxY = Math.max(this.maxY, p1[1], p2[1], p3[1], p4[1]);\n }\n updateScalingPathMinMax(transform, minMax) {\n Util.scaleMinMax(transform, minMax);\n this.minX = Math.min(this.minX, minMax[0]);\n this.minY = Math.min(this.minY, minMax[1]);\n this.maxX = Math.max(this.maxX, minMax[2]);\n this.maxY = Math.max(this.maxY, minMax[3]);\n }\n updateCurvePathMinMax(transform, x0, y0, x1, y1, x2, y2, x3, y3, minMax) {\n const box = Util.bezierBoundingBox(x0, y0, x1, y1, x2, y2, x3, y3, minMax);\n if (minMax) {\n return;\n }\n this.updateRectMinMax(transform, box);\n }\n getPathBoundingBox(pathType = PathType.FILL, transform = null) {\n const box = [this.minX, this.minY, this.maxX, this.maxY];\n if (pathType === PathType.STROKE) {\n if (!transform) {\n unreachable(\"Stroke bounding box must include transform.\");\n }\n const scale = 
Util.singularValueDecompose2dScale(transform);\n const xStrokePad = scale[0] * this.lineWidth / 2;\n const yStrokePad = scale[1] * this.lineWidth / 2;\n box[0] -= xStrokePad;\n box[1] -= yStrokePad;\n box[2] += xStrokePad;\n box[3] += yStrokePad;\n }\n return box;\n }\n updateClipFromPath() {\n const intersect = Util.intersect(this.clipBox, this.getPathBoundingBox());\n this.startNewPathAndClipBox(intersect || [0, 0, 0, 0]);\n }\n isEmptyClip() {\n return this.minX === Infinity;\n }\n startNewPathAndClipBox(box) {\n this.clipBox = box;\n this.minX = Infinity;\n this.minY = Infinity;\n this.maxX = 0;\n this.maxY = 0;\n }\n getClippedPathBoundingBox(pathType = PathType.FILL, transform = null) {\n return Util.intersect(this.clipBox, this.getPathBoundingBox(pathType, transform));\n }\n}\nfunction putBinaryImageData(ctx, imgData) {\n if (typeof ImageData !== \"undefined\" && imgData instanceof ImageData) {\n ctx.putImageData(imgData, 0, 0);\n return;\n }\n const height = imgData.height,\n width = imgData.width;\n const partialChunkHeight = height % FULL_CHUNK_HEIGHT;\n const fullChunks = (height - partialChunkHeight) / FULL_CHUNK_HEIGHT;\n const totalChunks = partialChunkHeight === 0 ? fullChunks : fullChunks + 1;\n const chunkImgData = ctx.createImageData(width, FULL_CHUNK_HEIGHT);\n let srcPos = 0,\n destPos;\n const src = imgData.data;\n const dest = chunkImgData.data;\n let i, j, thisChunkHeight, elemsInThisChunk;\n if (imgData.kind === util_ImageKind.GRAYSCALE_1BPP) {\n const srcLength = src.byteLength;\n const dest32 = new Uint32Array(dest.buffer, 0, dest.byteLength >> 2);\n const dest32DataLength = dest32.length;\n const fullSrcDiff = width + 7 >> 3;\n const white = 0xffffffff;\n const black = util_FeatureTest.isLittleEndian ? 0xff000000 : 0x000000ff;\n for (i = 0; i < totalChunks; i++) {\n thisChunkHeight = i < fullChunks ? FULL_CHUNK_HEIGHT : partialChunkHeight;\n destPos = 0;\n for (j = 0; j < thisChunkHeight; j++) {\n const srcDiff = srcLength - srcPos;\n let k = 0;\n const kEnd = srcDiff > fullSrcDiff ? width : srcDiff * 8 - 7;\n const kEndUnrolled = kEnd & ~7;\n let mask = 0;\n let srcByte = 0;\n for (; k < kEndUnrolled; k += 8) {\n srcByte = src[srcPos++];\n dest32[destPos++] = srcByte & 128 ? white : black;\n dest32[destPos++] = srcByte & 64 ? white : black;\n dest32[destPos++] = srcByte & 32 ? white : black;\n dest32[destPos++] = srcByte & 16 ? white : black;\n dest32[destPos++] = srcByte & 8 ? white : black;\n dest32[destPos++] = srcByte & 4 ? white : black;\n dest32[destPos++] = srcByte & 2 ? white : black;\n dest32[destPos++] = srcByte & 1 ? white : black;\n }\n for (; k < kEnd; k++) {\n if (mask === 0) {\n srcByte = src[srcPos++];\n mask = 128;\n }\n dest32[destPos++] = srcByte & mask ? 
white : black;\n mask >>= 1;\n }\n }\n while (destPos < dest32DataLength) {\n dest32[destPos++] = 0;\n }\n ctx.putImageData(chunkImgData, 0, i * FULL_CHUNK_HEIGHT);\n }\n } else if (imgData.kind === util_ImageKind.RGBA_32BPP) {\n j = 0;\n elemsInThisChunk = width * FULL_CHUNK_HEIGHT * 4;\n for (i = 0; i < fullChunks; i++) {\n dest.set(src.subarray(srcPos, srcPos + elemsInThisChunk));\n srcPos += elemsInThisChunk;\n ctx.putImageData(chunkImgData, 0, j);\n j += FULL_CHUNK_HEIGHT;\n }\n if (i < totalChunks) {\n elemsInThisChunk = width * partialChunkHeight * 4;\n dest.set(src.subarray(srcPos, srcPos + elemsInThisChunk));\n ctx.putImageData(chunkImgData, 0, j);\n }\n } else if (imgData.kind === util_ImageKind.RGB_24BPP) {\n thisChunkHeight = FULL_CHUNK_HEIGHT;\n elemsInThisChunk = width * thisChunkHeight;\n for (i = 0; i < totalChunks; i++) {\n if (i >= fullChunks) {\n thisChunkHeight = partialChunkHeight;\n elemsInThisChunk = width * thisChunkHeight;\n }\n destPos = 0;\n for (j = elemsInThisChunk; j--;) {\n dest[destPos++] = src[srcPos++];\n dest[destPos++] = src[srcPos++];\n dest[destPos++] = src[srcPos++];\n dest[destPos++] = 255;\n }\n ctx.putImageData(chunkImgData, 0, i * FULL_CHUNK_HEIGHT);\n }\n } else {\n throw new Error(`bad image kind: ${imgData.kind}`);\n }\n}\nfunction putBinaryImageMask(ctx, imgData) {\n if (imgData.bitmap) {\n ctx.drawImage(imgData.bitmap, 0, 0);\n return;\n }\n const height = imgData.height,\n width = imgData.width;\n const partialChunkHeight = height % FULL_CHUNK_HEIGHT;\n const fullChunks = (height - partialChunkHeight) / FULL_CHUNK_HEIGHT;\n const totalChunks = partialChunkHeight === 0 ? fullChunks : fullChunks + 1;\n const chunkImgData = ctx.createImageData(width, FULL_CHUNK_HEIGHT);\n let srcPos = 0;\n const src = imgData.data;\n const dest = chunkImgData.data;\n for (let i = 0; i < totalChunks; i++) {\n const thisChunkHeight = i < fullChunks ? 
FULL_CHUNK_HEIGHT : partialChunkHeight;\n ({\n srcPos\n } = convertBlackAndWhiteToRGBA({\n src,\n srcPos,\n dest,\n width,\n height: thisChunkHeight,\n nonBlackColor: 0\n }));\n ctx.putImageData(chunkImgData, 0, i * FULL_CHUNK_HEIGHT);\n }\n}\nfunction copyCtxState(sourceCtx, destCtx) {\n const properties = [\"strokeStyle\", \"fillStyle\", \"fillRule\", \"globalAlpha\", \"lineWidth\", \"lineCap\", \"lineJoin\", \"miterLimit\", \"globalCompositeOperation\", \"font\", \"filter\"];\n for (const property of properties) {\n if (sourceCtx[property] !== undefined) {\n destCtx[property] = sourceCtx[property];\n }\n }\n if (sourceCtx.setLineDash !== undefined) {\n destCtx.setLineDash(sourceCtx.getLineDash());\n destCtx.lineDashOffset = sourceCtx.lineDashOffset;\n }\n}\nfunction resetCtxToDefault(ctx) {\n ctx.strokeStyle = ctx.fillStyle = \"#000000\";\n ctx.fillRule = \"nonzero\";\n ctx.globalAlpha = 1;\n ctx.lineWidth = 1;\n ctx.lineCap = \"butt\";\n ctx.lineJoin = \"miter\";\n ctx.miterLimit = 10;\n ctx.globalCompositeOperation = \"source-over\";\n ctx.font = \"10px sans-serif\";\n if (ctx.setLineDash !== undefined) {\n ctx.setLineDash([]);\n ctx.lineDashOffset = 0;\n }\n if (!isNodeJS) {\n const {\n filter\n } = ctx;\n if (filter !== \"none\" && filter !== \"\") {\n ctx.filter = \"none\";\n }\n }\n}\nfunction getImageSmoothingEnabled(transform, interpolate) {\n if (interpolate) {\n return true;\n }\n const scale = Util.singularValueDecompose2dScale(transform);\n scale[0] = Math.fround(scale[0]);\n scale[1] = Math.fround(scale[1]);\n const actualScale = Math.fround((globalThis.devicePixelRatio || 1) * PixelsPerInch.PDF_TO_CSS_UNITS);\n return scale[0] <= actualScale && scale[1] <= actualScale;\n}\nconst LINE_CAP_STYLES = [\"butt\", \"round\", \"square\"];\nconst LINE_JOIN_STYLES = [\"miter\", \"round\", \"bevel\"];\nconst NORMAL_CLIP = {};\nconst EO_CLIP = {};\nclass CanvasGraphics {\n constructor(canvasCtx, commonObjs, objs, canvasFactory, filterFactory, {\n optionalContentConfig,\n markedContentStack = null\n }, annotationCanvasMap, pageColors) {\n this.ctx = canvasCtx;\n this.current = new CanvasExtraState(this.ctx.canvas.width, this.ctx.canvas.height);\n this.stateStack = [];\n this.pendingClip = null;\n this.pendingEOFill = false;\n this.res = null;\n this.xobjs = null;\n this.commonObjs = commonObjs;\n this.objs = objs;\n this.canvasFactory = canvasFactory;\n this.filterFactory = filterFactory;\n this.groupStack = [];\n this.processingType3 = null;\n this.baseTransform = null;\n this.baseTransformStack = [];\n this.groupLevel = 0;\n this.smaskStack = [];\n this.smaskCounter = 0;\n this.tempSMask = null;\n this.suspendedCtx = null;\n this.contentVisible = true;\n this.markedContentStack = markedContentStack || [];\n this.optionalContentConfig = optionalContentConfig;\n this.cachedCanvases = new CachedCanvases(this.canvasFactory);\n this.cachedPatterns = new Map();\n this.annotationCanvasMap = annotationCanvasMap;\n this.viewportScale = 1;\n this.outputScaleX = 1;\n this.outputScaleY = 1;\n this.pageColors = pageColors;\n this._cachedScaleForStroking = [-1, 0];\n this._cachedGetSinglePixelWidth = null;\n this._cachedBitmapsMap = new Map();\n }\n getObject(data, fallback = null) {\n if (typeof data === \"string\") {\n return data.startsWith(\"g_\") ? 
this.commonObjs.get(data) : this.objs.get(data);\n }\n return fallback;\n }\n beginDrawing({\n transform,\n viewport,\n transparency = false,\n background = null\n }) {\n const width = this.ctx.canvas.width;\n const height = this.ctx.canvas.height;\n const savedFillStyle = this.ctx.fillStyle;\n this.ctx.fillStyle = background || \"#ffffff\";\n this.ctx.fillRect(0, 0, width, height);\n this.ctx.fillStyle = savedFillStyle;\n if (transparency) {\n const transparentCanvas = this.cachedCanvases.getCanvas(\"transparent\", width, height);\n this.compositeCtx = this.ctx;\n this.transparentCanvas = transparentCanvas.canvas;\n this.ctx = transparentCanvas.context;\n this.ctx.save();\n this.ctx.transform(...getCurrentTransform(this.compositeCtx));\n }\n this.ctx.save();\n resetCtxToDefault(this.ctx);\n if (transform) {\n this.ctx.transform(...transform);\n this.outputScaleX = transform[0];\n this.outputScaleY = transform[0];\n }\n this.ctx.transform(...viewport.transform);\n this.viewportScale = viewport.scale;\n this.baseTransform = getCurrentTransform(this.ctx);\n }\n executeOperatorList(operatorList, executionStartIdx, continueCallback, stepper) {\n const argsArray = operatorList.argsArray;\n const fnArray = operatorList.fnArray;\n let i = executionStartIdx || 0;\n const argsArrayLen = argsArray.length;\n if (argsArrayLen === i) {\n return i;\n }\n const chunkOperations = argsArrayLen - i > EXECUTION_STEPS && typeof continueCallback === \"function\";\n const endTime = chunkOperations ? Date.now() + EXECUTION_TIME : 0;\n let steps = 0;\n const commonObjs = this.commonObjs;\n const objs = this.objs;\n let fnId;\n while (true) {\n if (stepper !== undefined && i === stepper.nextBreakPoint) {\n stepper.breakIt(i, continueCallback);\n return i;\n }\n fnId = fnArray[i];\n if (fnId !== OPS.dependency) {\n this[fnId].apply(this, argsArray[i]);\n } else {\n for (const depObjId of argsArray[i]) {\n const objsPool = depObjId.startsWith(\"g_\") ? 
commonObjs : objs;\n if (!objsPool.has(depObjId)) {\n objsPool.get(depObjId, continueCallback);\n return i;\n }\n }\n }\n i++;\n if (i === argsArrayLen) {\n return i;\n }\n if (chunkOperations && ++steps > EXECUTION_STEPS) {\n if (Date.now() > endTime) {\n continueCallback();\n return i;\n }\n steps = 0;\n }\n }\n }\n #restoreInitialState() {\n while (this.stateStack.length || this.inSMaskMode) {\n this.restore();\n }\n this.ctx.restore();\n if (this.transparentCanvas) {\n this.ctx = this.compositeCtx;\n this.ctx.save();\n this.ctx.setTransform(1, 0, 0, 1, 0, 0);\n this.ctx.drawImage(this.transparentCanvas, 0, 0);\n this.ctx.restore();\n this.transparentCanvas = null;\n }\n }\n endDrawing() {\n this.#restoreInitialState();\n this.cachedCanvases.clear();\n this.cachedPatterns.clear();\n for (const cache of this._cachedBitmapsMap.values()) {\n for (const canvas of cache.values()) {\n if (typeof HTMLCanvasElement !== \"undefined\" && canvas instanceof HTMLCanvasElement) {\n canvas.width = canvas.height = 0;\n }\n }\n cache.clear();\n }\n this._cachedBitmapsMap.clear();\n this.#drawFilter();\n }\n #drawFilter() {\n if (this.pageColors) {\n const hcmFilterId = this.filterFactory.addHCMFilter(this.pageColors.foreground, this.pageColors.background);\n if (hcmFilterId !== \"none\") {\n const savedFilter = this.ctx.filter;\n this.ctx.filter = hcmFilterId;\n this.ctx.drawImage(this.ctx.canvas, 0, 0);\n this.ctx.filter = savedFilter;\n }\n }\n }\n _scaleImage(img, inverseTransform) {\n const width = img.width;\n const height = img.height;\n let widthScale = Math.max(Math.hypot(inverseTransform[0], inverseTransform[1]), 1);\n let heightScale = Math.max(Math.hypot(inverseTransform[2], inverseTransform[3]), 1);\n let paintWidth = width,\n paintHeight = height;\n let tmpCanvasId = \"prescale1\";\n let tmpCanvas, tmpCtx;\n while (widthScale > 2 && paintWidth > 1 || heightScale > 2 && paintHeight > 1) {\n let newWidth = paintWidth,\n newHeight = paintHeight;\n if (widthScale > 2 && paintWidth > 1) {\n newWidth = paintWidth >= 16384 ? Math.floor(paintWidth / 2) - 1 || 1 : Math.ceil(paintWidth / 2);\n widthScale /= paintWidth / newWidth;\n }\n if (heightScale > 2 && paintHeight > 1) {\n newHeight = paintHeight >= 16384 ? Math.floor(paintHeight / 2) - 1 || 1 : Math.ceil(paintHeight) / 2;\n heightScale /= paintHeight / newHeight;\n }\n tmpCanvas = this.cachedCanvases.getCanvas(tmpCanvasId, newWidth, newHeight);\n tmpCtx = tmpCanvas.context;\n tmpCtx.clearRect(0, 0, newWidth, newHeight);\n tmpCtx.drawImage(img, 0, 0, paintWidth, paintHeight, 0, 0, newWidth, newHeight);\n img = tmpCanvas.canvas;\n paintWidth = newWidth;\n paintHeight = newHeight;\n tmpCanvasId = tmpCanvasId === \"prescale1\" ? \"prescale2\" : \"prescale1\";\n }\n return {\n img,\n paintWidth,\n paintHeight\n };\n }\n _createMaskCanvas(img) {\n const ctx = this.ctx;\n const {\n width,\n height\n } = img;\n const fillColor = this.current.fillColor;\n const isPatternFill = this.current.patternFill;\n const currentTransform = getCurrentTransform(ctx);\n let cache, cacheKey, scaled, maskCanvas;\n if ((img.bitmap || img.data) && img.count > 1) {\n const mainKey = img.bitmap || img.data.buffer;\n cacheKey = JSON.stringify(isPatternFill ? 
currentTransform : [currentTransform.slice(0, 4), fillColor]);\n cache = this._cachedBitmapsMap.get(mainKey);\n if (!cache) {\n cache = new Map();\n this._cachedBitmapsMap.set(mainKey, cache);\n }\n const cachedImage = cache.get(cacheKey);\n if (cachedImage && !isPatternFill) {\n const offsetX = Math.round(Math.min(currentTransform[0], currentTransform[2]) + currentTransform[4]);\n const offsetY = Math.round(Math.min(currentTransform[1], currentTransform[3]) + currentTransform[5]);\n return {\n canvas: cachedImage,\n offsetX,\n offsetY\n };\n }\n scaled = cachedImage;\n }\n if (!scaled) {\n maskCanvas = this.cachedCanvases.getCanvas(\"maskCanvas\", width, height);\n putBinaryImageMask(maskCanvas.context, img);\n }\n let maskToCanvas = Util.transform(currentTransform, [1 / width, 0, 0, -1 / height, 0, 0]);\n maskToCanvas = Util.transform(maskToCanvas, [1, 0, 0, 1, 0, -height]);\n const [minX, minY, maxX, maxY] = Util.getAxialAlignedBoundingBox([0, 0, width, height], maskToCanvas);\n const drawnWidth = Math.round(maxX - minX) || 1;\n const drawnHeight = Math.round(maxY - minY) || 1;\n const fillCanvas = this.cachedCanvases.getCanvas(\"fillCanvas\", drawnWidth, drawnHeight);\n const fillCtx = fillCanvas.context;\n const offsetX = minX;\n const offsetY = minY;\n fillCtx.translate(-offsetX, -offsetY);\n fillCtx.transform(...maskToCanvas);\n if (!scaled) {\n scaled = this._scaleImage(maskCanvas.canvas, getCurrentTransformInverse(fillCtx));\n scaled = scaled.img;\n if (cache && isPatternFill) {\n cache.set(cacheKey, scaled);\n }\n }\n fillCtx.imageSmoothingEnabled = getImageSmoothingEnabled(getCurrentTransform(fillCtx), img.interpolate);\n drawImageAtIntegerCoords(fillCtx, scaled, 0, 0, scaled.width, scaled.height, 0, 0, width, height);\n fillCtx.globalCompositeOperation = \"source-in\";\n const inverse = Util.transform(getCurrentTransformInverse(fillCtx), [1, 0, 0, 1, -offsetX, -offsetY]);\n fillCtx.fillStyle = isPatternFill ? 
fillColor.getPattern(ctx, this, inverse, PathType.FILL) : fillColor;\n fillCtx.fillRect(0, 0, width, height);\n if (cache && !isPatternFill) {\n this.cachedCanvases.delete(\"fillCanvas\");\n cache.set(cacheKey, fillCanvas.canvas);\n }\n return {\n canvas: fillCanvas.canvas,\n offsetX: Math.round(offsetX),\n offsetY: Math.round(offsetY)\n };\n }\n setLineWidth(width) {\n if (width !== this.current.lineWidth) {\n this._cachedScaleForStroking[0] = -1;\n }\n this.current.lineWidth = width;\n this.ctx.lineWidth = width;\n }\n setLineCap(style) {\n this.ctx.lineCap = LINE_CAP_STYLES[style];\n }\n setLineJoin(style) {\n this.ctx.lineJoin = LINE_JOIN_STYLES[style];\n }\n setMiterLimit(limit) {\n this.ctx.miterLimit = limit;\n }\n setDash(dashArray, dashPhase) {\n const ctx = this.ctx;\n if (ctx.setLineDash !== undefined) {\n ctx.setLineDash(dashArray);\n ctx.lineDashOffset = dashPhase;\n }\n }\n setRenderingIntent(intent) {}\n setFlatness(flatness) {}\n setGState(states) {\n for (const [key, value] of states) {\n switch (key) {\n case \"LW\":\n this.setLineWidth(value);\n break;\n case \"LC\":\n this.setLineCap(value);\n break;\n case \"LJ\":\n this.setLineJoin(value);\n break;\n case \"ML\":\n this.setMiterLimit(value);\n break;\n case \"D\":\n this.setDash(value[0], value[1]);\n break;\n case \"RI\":\n this.setRenderingIntent(value);\n break;\n case \"FL\":\n this.setFlatness(value);\n break;\n case \"Font\":\n this.setFont(value[0], value[1]);\n break;\n case \"CA\":\n this.current.strokeAlpha = value;\n break;\n case \"ca\":\n this.current.fillAlpha = value;\n this.ctx.globalAlpha = value;\n break;\n case \"BM\":\n this.ctx.globalCompositeOperation = value;\n break;\n case \"SMask\":\n this.current.activeSMask = value ? this.tempSMask : null;\n this.tempSMask = null;\n this.checkSMaskState();\n break;\n case \"TR\":\n this.ctx.filter = this.current.transferMaps = this.filterFactory.addFilter(value);\n break;\n }\n }\n }\n get inSMaskMode() {\n return !!this.suspendedCtx;\n }\n checkSMaskState() {\n const inSMaskMode = this.inSMaskMode;\n if (this.current.activeSMask && !inSMaskMode) {\n this.beginSMaskMode();\n } else if (!this.current.activeSMask && inSMaskMode) {\n this.endSMaskMode();\n }\n }\n beginSMaskMode() {\n if (this.inSMaskMode) {\n throw new Error(\"beginSMaskMode called while already in smask mode\");\n }\n const drawnWidth = this.ctx.canvas.width;\n const drawnHeight = this.ctx.canvas.height;\n const cacheId = \"smaskGroupAt\" + this.groupLevel;\n const scratchCanvas = this.cachedCanvases.getCanvas(cacheId, drawnWidth, drawnHeight);\n this.suspendedCtx = this.ctx;\n this.ctx = scratchCanvas.context;\n const ctx = this.ctx;\n ctx.setTransform(...getCurrentTransform(this.suspendedCtx));\n copyCtxState(this.suspendedCtx, ctx);\n mirrorContextOperations(ctx, this.suspendedCtx);\n this.setGState([[\"BM\", \"source-over\"], [\"ca\", 1], [\"CA\", 1]]);\n }\n endSMaskMode() {\n if (!this.inSMaskMode) {\n throw new Error(\"endSMaskMode called while not in smask mode\");\n }\n this.ctx._removeMirroring();\n copyCtxState(this.ctx, this.suspendedCtx);\n this.ctx = this.suspendedCtx;\n this.suspendedCtx = null;\n }\n compose(dirtyBox) {\n if (!this.current.activeSMask) {\n return;\n }\n if (!dirtyBox) {\n dirtyBox = [0, 0, this.ctx.canvas.width, this.ctx.canvas.height];\n } else {\n dirtyBox[0] = Math.floor(dirtyBox[0]);\n dirtyBox[1] = Math.floor(dirtyBox[1]);\n dirtyBox[2] = Math.ceil(dirtyBox[2]);\n dirtyBox[3] = Math.ceil(dirtyBox[3]);\n }\n const smask = this.current.activeSMask;\n const 
suspendedCtx = this.suspendedCtx;\n this.composeSMask(suspendedCtx, smask, this.ctx, dirtyBox);\n this.ctx.save();\n this.ctx.setTransform(1, 0, 0, 1, 0, 0);\n this.ctx.clearRect(0, 0, this.ctx.canvas.width, this.ctx.canvas.height);\n this.ctx.restore();\n }\n composeSMask(ctx, smask, layerCtx, layerBox) {\n const layerOffsetX = layerBox[0];\n const layerOffsetY = layerBox[1];\n const layerWidth = layerBox[2] - layerOffsetX;\n const layerHeight = layerBox[3] - layerOffsetY;\n if (layerWidth === 0 || layerHeight === 0) {\n return;\n }\n this.genericComposeSMask(smask.context, layerCtx, layerWidth, layerHeight, smask.subtype, smask.backdrop, smask.transferMap, layerOffsetX, layerOffsetY, smask.offsetX, smask.offsetY);\n ctx.save();\n ctx.globalAlpha = 1;\n ctx.globalCompositeOperation = \"source-over\";\n ctx.setTransform(1, 0, 0, 1, 0, 0);\n ctx.drawImage(layerCtx.canvas, 0, 0);\n ctx.restore();\n }\n genericComposeSMask(maskCtx, layerCtx, width, height, subtype, backdrop, transferMap, layerOffsetX, layerOffsetY, maskOffsetX, maskOffsetY) {\n let maskCanvas = maskCtx.canvas;\n let maskX = layerOffsetX - maskOffsetX;\n let maskY = layerOffsetY - maskOffsetY;\n if (backdrop) {\n if (maskX < 0 || maskY < 0 || maskX + width > maskCanvas.width || maskY + height > maskCanvas.height) {\n const canvas = this.cachedCanvases.getCanvas(\"maskExtension\", width, height);\n const ctx = canvas.context;\n ctx.drawImage(maskCanvas, -maskX, -maskY);\n if (backdrop.some(c => c !== 0)) {\n ctx.globalCompositeOperation = \"destination-atop\";\n ctx.fillStyle = Util.makeHexColor(...backdrop);\n ctx.fillRect(0, 0, width, height);\n ctx.globalCompositeOperation = \"source-over\";\n }\n maskCanvas = canvas.canvas;\n maskX = maskY = 0;\n } else if (backdrop.some(c => c !== 0)) {\n maskCtx.save();\n maskCtx.globalAlpha = 1;\n maskCtx.setTransform(1, 0, 0, 1, 0, 0);\n const clip = new Path2D();\n clip.rect(maskX, maskY, width, height);\n maskCtx.clip(clip);\n maskCtx.globalCompositeOperation = \"destination-atop\";\n maskCtx.fillStyle = Util.makeHexColor(...backdrop);\n maskCtx.fillRect(maskX, maskY, width, height);\n maskCtx.restore();\n }\n }\n layerCtx.save();\n layerCtx.globalAlpha = 1;\n layerCtx.setTransform(1, 0, 0, 1, 0, 0);\n if (subtype === \"Alpha\" && transferMap) {\n layerCtx.filter = this.filterFactory.addAlphaFilter(transferMap);\n } else if (subtype === \"Luminosity\") {\n layerCtx.filter = this.filterFactory.addLuminosityFilter(transferMap);\n }\n const clip = new Path2D();\n clip.rect(layerOffsetX, layerOffsetY, width, height);\n layerCtx.clip(clip);\n layerCtx.globalCompositeOperation = \"destination-in\";\n layerCtx.drawImage(maskCanvas, maskX, maskY, width, height, layerOffsetX, layerOffsetY, width, height);\n layerCtx.restore();\n }\n save() {\n if (this.inSMaskMode) {\n copyCtxState(this.ctx, this.suspendedCtx);\n this.suspendedCtx.save();\n } else {\n this.ctx.save();\n }\n const old = this.current;\n this.stateStack.push(old);\n this.current = old.clone();\n }\n restore() {\n if (this.stateStack.length === 0 && this.inSMaskMode) {\n this.endSMaskMode();\n }\n if (this.stateStack.length !== 0) {\n this.current = this.stateStack.pop();\n if (this.inSMaskMode) {\n this.suspendedCtx.restore();\n copyCtxState(this.suspendedCtx, this.ctx);\n } else {\n this.ctx.restore();\n }\n this.checkSMaskState();\n this.pendingClip = null;\n this._cachedScaleForStroking[0] = -1;\n this._cachedGetSinglePixelWidth = null;\n }\n }\n transform(a, b, c, d, e, f) {\n this.ctx.transform(a, b, c, d, e, f);\n 
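/* Both caches below are derived from the canvas' current transformation matrix, so they are reset whenever the CTM changes (here, and likewise in restore()): _cachedScaleForStroking feeds getScaleForStroking()/rescaleAndStroke(), and _cachedGetSinglePixelWidth holds the user-space width of one device pixel -- for a CTM without skew (m[1] === 0 && m[2] === 0) that is 1 / min(|m[0]|, |m[3]|), see getSinglePixelWidth() further below. */\n    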
this._cachedScaleForStroking[0] = -1;\n this._cachedGetSinglePixelWidth = null;\n }\n constructPath(ops, args, minMax) {\n const ctx = this.ctx;\n const current = this.current;\n let x = current.x,\n y = current.y;\n let startX, startY;\n const currentTransform = getCurrentTransform(ctx);\n const isScalingMatrix = currentTransform[0] === 0 && currentTransform[3] === 0 || currentTransform[1] === 0 && currentTransform[2] === 0;\n const minMaxForBezier = isScalingMatrix ? minMax.slice(0) : null;\n for (let i = 0, j = 0, ii = ops.length; i < ii; i++) {\n switch (ops[i] | 0) {\n case OPS.rectangle:\n x = args[j++];\n y = args[j++];\n const width = args[j++];\n const height = args[j++];\n const xw = x + width;\n const yh = y + height;\n ctx.moveTo(x, y);\n if (width === 0 || height === 0) {\n ctx.lineTo(xw, yh);\n } else {\n ctx.lineTo(xw, y);\n ctx.lineTo(xw, yh);\n ctx.lineTo(x, yh);\n }\n if (!isScalingMatrix) {\n current.updateRectMinMax(currentTransform, [x, y, xw, yh]);\n }\n ctx.closePath();\n break;\n case OPS.moveTo:\n x = args[j++];\n y = args[j++];\n ctx.moveTo(x, y);\n if (!isScalingMatrix) {\n current.updatePathMinMax(currentTransform, x, y);\n }\n break;\n case OPS.lineTo:\n x = args[j++];\n y = args[j++];\n ctx.lineTo(x, y);\n if (!isScalingMatrix) {\n current.updatePathMinMax(currentTransform, x, y);\n }\n break;\n case OPS.curveTo:\n startX = x;\n startY = y;\n x = args[j + 4];\n y = args[j + 5];\n ctx.bezierCurveTo(args[j], args[j + 1], args[j + 2], args[j + 3], x, y);\n current.updateCurvePathMinMax(currentTransform, startX, startY, args[j], args[j + 1], args[j + 2], args[j + 3], x, y, minMaxForBezier);\n j += 6;\n break;\n case OPS.curveTo2:\n startX = x;\n startY = y;\n ctx.bezierCurveTo(x, y, args[j], args[j + 1], args[j + 2], args[j + 3]);\n current.updateCurvePathMinMax(currentTransform, startX, startY, x, y, args[j], args[j + 1], args[j + 2], args[j + 3], minMaxForBezier);\n x = args[j + 2];\n y = args[j + 3];\n j += 4;\n break;\n case OPS.curveTo3:\n startX = x;\n startY = y;\n x = args[j + 2];\n y = args[j + 3];\n ctx.bezierCurveTo(args[j], args[j + 1], x, y, x, y);\n current.updateCurvePathMinMax(currentTransform, startX, startY, args[j], args[j + 1], x, y, x, y, minMaxForBezier);\n j += 4;\n break;\n case OPS.closePath:\n ctx.closePath();\n break;\n }\n }\n if (isScalingMatrix) {\n current.updateScalingPathMinMax(currentTransform, minMaxForBezier);\n }\n current.setCurrentPoint(x, y);\n }\n closePath() {\n this.ctx.closePath();\n }\n stroke(consumePath = true) {\n const ctx = this.ctx;\n const strokeColor = this.current.strokeColor;\n ctx.globalAlpha = this.current.strokeAlpha;\n if (this.contentVisible) {\n if (typeof strokeColor === \"object\" && strokeColor?.getPattern) {\n ctx.save();\n ctx.strokeStyle = strokeColor.getPattern(ctx, this, getCurrentTransformInverse(ctx), PathType.STROKE);\n this.rescaleAndStroke(false);\n ctx.restore();\n } else {\n this.rescaleAndStroke(true);\n }\n }\n if (consumePath) {\n this.consumePath(this.current.getClippedPathBoundingBox());\n }\n ctx.globalAlpha = this.current.fillAlpha;\n }\n closeStroke() {\n this.closePath();\n this.stroke();\n }\n fill(consumePath = true) {\n const ctx = this.ctx;\n const fillColor = this.current.fillColor;\n const isPatternFill = this.current.patternFill;\n let needRestore = false;\n if (isPatternFill) {\n ctx.save();\n ctx.fillStyle = fillColor.getPattern(ctx, this, getCurrentTransformInverse(ctx), PathType.FILL);\n needRestore = true;\n }\n const intersect = 
this.current.getClippedPathBoundingBox();\n if (this.contentVisible && intersect !== null) {\n if (this.pendingEOFill) {\n ctx.fill(\"evenodd\");\n this.pendingEOFill = false;\n } else {\n ctx.fill();\n }\n }\n if (needRestore) {\n ctx.restore();\n }\n if (consumePath) {\n this.consumePath(intersect);\n }\n }\n eoFill() {\n this.pendingEOFill = true;\n this.fill();\n }\n fillStroke() {\n this.fill(false);\n this.stroke(false);\n this.consumePath();\n }\n eoFillStroke() {\n this.pendingEOFill = true;\n this.fillStroke();\n }\n closeFillStroke() {\n this.closePath();\n this.fillStroke();\n }\n closeEOFillStroke() {\n this.pendingEOFill = true;\n this.closePath();\n this.fillStroke();\n }\n endPath() {\n this.consumePath();\n }\n clip() {\n this.pendingClip = NORMAL_CLIP;\n }\n eoClip() {\n this.pendingClip = EO_CLIP;\n }\n beginText() {\n this.current.textMatrix = IDENTITY_MATRIX;\n this.current.textMatrixScale = 1;\n this.current.x = this.current.lineX = 0;\n this.current.y = this.current.lineY = 0;\n }\n endText() {\n const paths = this.pendingTextPaths;\n const ctx = this.ctx;\n if (paths === undefined) {\n ctx.beginPath();\n return;\n }\n ctx.save();\n ctx.beginPath();\n for (const path of paths) {\n ctx.setTransform(...path.transform);\n ctx.translate(path.x, path.y);\n path.addToPath(ctx, path.fontSize);\n }\n ctx.restore();\n ctx.clip();\n ctx.beginPath();\n delete this.pendingTextPaths;\n }\n setCharSpacing(spacing) {\n this.current.charSpacing = spacing;\n }\n setWordSpacing(spacing) {\n this.current.wordSpacing = spacing;\n }\n setHScale(scale) {\n this.current.textHScale = scale / 100;\n }\n setLeading(leading) {\n this.current.leading = -leading;\n }\n setFont(fontRefName, size) {\n const fontObj = this.commonObjs.get(fontRefName);\n const current = this.current;\n if (!fontObj) {\n throw new Error(`Can't find font for ${fontRefName}`);\n }\n current.fontMatrix = fontObj.fontMatrix || FONT_IDENTITY_MATRIX;\n if (current.fontMatrix[0] === 0 || current.fontMatrix[3] === 0) {\n warn(\"Invalid font matrix for font \" + fontRefName);\n }\n if (size < 0) {\n size = -size;\n current.fontDirection = -1;\n } else {\n current.fontDirection = 1;\n }\n this.current.font = fontObj;\n this.current.fontSize = size;\n if (fontObj.isType3Font) {\n return;\n }\n const name = fontObj.loadedName || \"sans-serif\";\n const typeface = fontObj.systemFontInfo?.css || `\"${name}\", ${fontObj.fallbackName}`;\n let bold = \"normal\";\n if (fontObj.black) {\n bold = \"900\";\n } else if (fontObj.bold) {\n bold = \"bold\";\n }\n const italic = fontObj.italic ? 
\"italic\" : \"normal\";\n let browserFontSize = size;\n if (size < MIN_FONT_SIZE) {\n browserFontSize = MIN_FONT_SIZE;\n } else if (size > MAX_FONT_SIZE) {\n browserFontSize = MAX_FONT_SIZE;\n }\n this.current.fontSizeScale = size / browserFontSize;\n this.ctx.font = `${italic} ${bold} ${browserFontSize}px ${typeface}`;\n }\n setTextRenderingMode(mode) {\n this.current.textRenderingMode = mode;\n }\n setTextRise(rise) {\n this.current.textRise = rise;\n }\n moveText(x, y) {\n this.current.x = this.current.lineX += x;\n this.current.y = this.current.lineY += y;\n }\n setLeadingMoveText(x, y) {\n this.setLeading(-y);\n this.moveText(x, y);\n }\n setTextMatrix(a, b, c, d, e, f) {\n this.current.textMatrix = [a, b, c, d, e, f];\n this.current.textMatrixScale = Math.hypot(a, b);\n this.current.x = this.current.lineX = 0;\n this.current.y = this.current.lineY = 0;\n }\n nextLine() {\n this.moveText(0, this.current.leading);\n }\n paintChar(character, x, y, patternTransform) {\n const ctx = this.ctx;\n const current = this.current;\n const font = current.font;\n const textRenderingMode = current.textRenderingMode;\n const fontSize = current.fontSize / current.fontSizeScale;\n const fillStrokeMode = textRenderingMode & TextRenderingMode.FILL_STROKE_MASK;\n const isAddToPathSet = !!(textRenderingMode & TextRenderingMode.ADD_TO_PATH_FLAG);\n const patternFill = current.patternFill && !font.missingFile;\n let addToPath;\n if (font.disableFontFace || isAddToPathSet || patternFill) {\n addToPath = font.getPathGenerator(this.commonObjs, character);\n }\n if (font.disableFontFace || patternFill) {\n ctx.save();\n ctx.translate(x, y);\n ctx.beginPath();\n addToPath(ctx, fontSize);\n if (patternTransform) {\n ctx.setTransform(...patternTransform);\n }\n if (fillStrokeMode === TextRenderingMode.FILL || fillStrokeMode === TextRenderingMode.FILL_STROKE) {\n ctx.fill();\n }\n if (fillStrokeMode === TextRenderingMode.STROKE || fillStrokeMode === TextRenderingMode.FILL_STROKE) {\n ctx.stroke();\n }\n ctx.restore();\n } else {\n if (fillStrokeMode === TextRenderingMode.FILL || fillStrokeMode === TextRenderingMode.FILL_STROKE) {\n ctx.fillText(character, x, y);\n }\n if (fillStrokeMode === TextRenderingMode.STROKE || fillStrokeMode === TextRenderingMode.FILL_STROKE) {\n ctx.strokeText(character, x, y);\n }\n }\n if (isAddToPathSet) {\n const paths = this.pendingTextPaths ||= [];\n paths.push({\n transform: getCurrentTransform(ctx),\n x,\n y,\n fontSize,\n addToPath\n });\n }\n }\n get isFontSubpixelAAEnabled() {\n const {\n context: ctx\n } = this.cachedCanvases.getCanvas(\"isFontSubpixelAAEnabled\", 10, 10);\n ctx.scale(1.5, 1);\n ctx.fillText(\"I\", 0, 10);\n const data = ctx.getImageData(0, 0, 10, 10).data;\n let enabled = false;\n for (let i = 3; i < data.length; i += 4) {\n if (data[i] > 0 && data[i] < 255) {\n enabled = true;\n break;\n }\n }\n return shadow(this, \"isFontSubpixelAAEnabled\", enabled);\n }\n showText(glyphs) {\n const current = this.current;\n const font = current.font;\n if (font.isType3Font) {\n return this.showType3Text(glyphs);\n }\n const fontSize = current.fontSize;\n if (fontSize === 0) {\n return undefined;\n }\n const ctx = this.ctx;\n const fontSizeScale = current.fontSizeScale;\n const charSpacing = current.charSpacing;\n const wordSpacing = current.wordSpacing;\n const fontDirection = current.fontDirection;\n const textHScale = current.textHScale * fontDirection;\n const glyphsLength = glyphs.length;\n const vertical = font.vertical;\n const spacingDir = vertical ? 
1 : -1;\n const defaultVMetrics = font.defaultVMetrics;\n const widthAdvanceScale = fontSize * current.fontMatrix[0];\n const simpleFillText = current.textRenderingMode === TextRenderingMode.FILL && !font.disableFontFace && !current.patternFill;\n ctx.save();\n ctx.transform(...current.textMatrix);\n ctx.translate(current.x, current.y + current.textRise);\n if (fontDirection > 0) {\n ctx.scale(textHScale, -1);\n } else {\n ctx.scale(textHScale, 1);\n }\n let patternTransform;\n if (current.patternFill) {\n ctx.save();\n const pattern = current.fillColor.getPattern(ctx, this, getCurrentTransformInverse(ctx), PathType.FILL);\n patternTransform = getCurrentTransform(ctx);\n ctx.restore();\n ctx.fillStyle = pattern;\n }\n let lineWidth = current.lineWidth;\n const scale = current.textMatrixScale;\n if (scale === 0 || lineWidth === 0) {\n const fillStrokeMode = current.textRenderingMode & TextRenderingMode.FILL_STROKE_MASK;\n if (fillStrokeMode === TextRenderingMode.STROKE || fillStrokeMode === TextRenderingMode.FILL_STROKE) {\n lineWidth = this.getSinglePixelWidth();\n }\n } else {\n lineWidth /= scale;\n }\n if (fontSizeScale !== 1.0) {\n ctx.scale(fontSizeScale, fontSizeScale);\n lineWidth /= fontSizeScale;\n }\n ctx.lineWidth = lineWidth;\n if (font.isInvalidPDFjsFont) {\n const chars = [];\n let width = 0;\n for (const glyph of glyphs) {\n chars.push(glyph.unicode);\n width += glyph.width;\n }\n ctx.fillText(chars.join(\"\"), 0, 0);\n current.x += width * widthAdvanceScale * textHScale;\n ctx.restore();\n this.compose();\n return undefined;\n }\n let x = 0,\n i;\n for (i = 0; i < glyphsLength; ++i) {\n const glyph = glyphs[i];\n if (typeof glyph === \"number\") {\n x += spacingDir * glyph * fontSize / 1000;\n continue;\n }\n let restoreNeeded = false;\n const spacing = (glyph.isSpace ? wordSpacing : 0) + charSpacing;\n const character = glyph.fontChar;\n const accent = glyph.accent;\n let scaledX, scaledY;\n let width = glyph.width;\n if (vertical) {\n const vmetric = glyph.vmetric || defaultVMetrics;\n const vx = -(glyph.vmetric ? vmetric[1] : width * 0.5) * widthAdvanceScale;\n const vy = vmetric[2] * widthAdvanceScale;\n width = vmetric ? -vmetric[0] : width;\n scaledX = vx / fontSizeScale;\n scaledY = (x + vy) / fontSizeScale;\n } else {\n scaledX = x / fontSizeScale;\n scaledY = 0;\n }\n if (font.remeasure && width > 0) {\n const measuredWidth = ctx.measureText(character).width * 1000 / fontSize * fontSizeScale;\n if (width < measuredWidth && this.isFontSubpixelAAEnabled) {\n const characterScaleX = width / measuredWidth;\n restoreNeeded = true;\n ctx.save();\n ctx.scale(characterScaleX, 1);\n scaledX /= characterScaleX;\n } else if (width !== measuredWidth) {\n scaledX += (width - measuredWidth) / 2000 * fontSize / fontSizeScale;\n }\n }\n if (this.contentVisible && (glyph.isInFont || font.missingFile)) {\n if (simpleFillText && !accent) {\n ctx.fillText(character, scaledX, scaledY);\n } else {\n this.paintChar(character, scaledX, scaledY, patternTransform);\n if (accent) {\n const scaledAccentX = scaledX + fontSize * accent.offset.x / fontSizeScale;\n const scaledAccentY = scaledY - fontSize * accent.offset.y / fontSizeScale;\n this.paintChar(accent.fontChar, scaledAccentX, scaledAccentY, patternTransform);\n }\n }\n }\n const charWidth = vertical ? 
width * widthAdvanceScale - spacing * fontDirection : width * widthAdvanceScale + spacing * fontDirection;\n x += charWidth;\n if (restoreNeeded) {\n ctx.restore();\n }\n }\n if (vertical) {\n current.y -= x;\n } else {\n current.x += x * textHScale;\n }\n ctx.restore();\n this.compose();\n return undefined;\n }\n showType3Text(glyphs) {\n const ctx = this.ctx;\n const current = this.current;\n const font = current.font;\n const fontSize = current.fontSize;\n const fontDirection = current.fontDirection;\n const spacingDir = font.vertical ? 1 : -1;\n const charSpacing = current.charSpacing;\n const wordSpacing = current.wordSpacing;\n const textHScale = current.textHScale * fontDirection;\n const fontMatrix = current.fontMatrix || FONT_IDENTITY_MATRIX;\n const glyphsLength = glyphs.length;\n const isTextInvisible = current.textRenderingMode === TextRenderingMode.INVISIBLE;\n let i, glyph, width, spacingLength;\n if (isTextInvisible || fontSize === 0) {\n return;\n }\n this._cachedScaleForStroking[0] = -1;\n this._cachedGetSinglePixelWidth = null;\n ctx.save();\n ctx.transform(...current.textMatrix);\n ctx.translate(current.x, current.y);\n ctx.scale(textHScale, fontDirection);\n for (i = 0; i < glyphsLength; ++i) {\n glyph = glyphs[i];\n if (typeof glyph === \"number\") {\n spacingLength = spacingDir * glyph * fontSize / 1000;\n this.ctx.translate(spacingLength, 0);\n current.x += spacingLength * textHScale;\n continue;\n }\n const spacing = (glyph.isSpace ? wordSpacing : 0) + charSpacing;\n const operatorList = font.charProcOperatorList[glyph.operatorListId];\n if (!operatorList) {\n warn(`Type3 character \"${glyph.operatorListId}\" is not available.`);\n continue;\n }\n if (this.contentVisible) {\n this.processingType3 = glyph;\n this.save();\n ctx.scale(fontSize, fontSize);\n ctx.transform(...fontMatrix);\n this.executeOperatorList(operatorList);\n this.restore();\n }\n const transformed = Util.applyTransform([glyph.width, 0], fontMatrix);\n width = transformed[0] * fontSize + spacing;\n ctx.translate(width, 0);\n current.x += width * textHScale;\n }\n ctx.restore();\n this.processingType3 = null;\n }\n setCharWidth(xWidth, yWidth) {}\n setCharWidthAndBounds(xWidth, yWidth, llx, lly, urx, ury) {\n this.ctx.rect(llx, lly, urx - llx, ury - lly);\n this.ctx.clip();\n this.endPath();\n }\n getColorN_Pattern(IR) {\n let pattern;\n if (IR[0] === \"TilingPattern\") {\n const color = IR[1];\n const baseTransform = this.baseTransform || getCurrentTransform(this.ctx);\n const canvasGraphicsFactory = {\n createCanvasGraphics: ctx => new CanvasGraphics(ctx, this.commonObjs, this.objs, this.canvasFactory, this.filterFactory, {\n optionalContentConfig: this.optionalContentConfig,\n markedContentStack: this.markedContentStack\n })\n };\n pattern = new TilingPattern(IR, color, this.ctx, canvasGraphicsFactory, baseTransform);\n } else {\n pattern = this._getPattern(IR[1], IR[2]);\n }\n return pattern;\n }\n setStrokeColorN() {\n this.current.strokeColor = this.getColorN_Pattern(arguments);\n }\n setFillColorN() {\n this.current.fillColor = this.getColorN_Pattern(arguments);\n this.current.patternFill = true;\n }\n setStrokeRGBColor(r, g, b) {\n const color = Util.makeHexColor(r, g, b);\n this.ctx.strokeStyle = color;\n this.current.strokeColor = color;\n }\n setFillRGBColor(r, g, b) {\n const color = Util.makeHexColor(r, g, b);\n this.ctx.fillStyle = color;\n this.current.fillColor = color;\n this.current.patternFill = false;\n }\n _getPattern(objId, matrix = null) {\n let pattern;\n if 
(this.cachedPatterns.has(objId)) {\n pattern = this.cachedPatterns.get(objId);\n } else {\n pattern = getShadingPattern(this.getObject(objId));\n this.cachedPatterns.set(objId, pattern);\n }\n if (matrix) {\n pattern.matrix = matrix;\n }\n return pattern;\n }\n shadingFill(objId) {\n if (!this.contentVisible) {\n return;\n }\n const ctx = this.ctx;\n this.save();\n const pattern = this._getPattern(objId);\n ctx.fillStyle = pattern.getPattern(ctx, this, getCurrentTransformInverse(ctx), PathType.SHADING);\n const inv = getCurrentTransformInverse(ctx);\n if (inv) {\n const {\n width,\n height\n } = ctx.canvas;\n const [x0, y0, x1, y1] = Util.getAxialAlignedBoundingBox([0, 0, width, height], inv);\n this.ctx.fillRect(x0, y0, x1 - x0, y1 - y0);\n } else {\n this.ctx.fillRect(-1e10, -1e10, 2e10, 2e10);\n }\n this.compose(this.current.getClippedPathBoundingBox());\n this.restore();\n }\n beginInlineImage() {\n unreachable(\"Should not call beginInlineImage\");\n }\n beginImageData() {\n unreachable(\"Should not call beginImageData\");\n }\n paintFormXObjectBegin(matrix, bbox) {\n if (!this.contentVisible) {\n return;\n }\n this.save();\n this.baseTransformStack.push(this.baseTransform);\n if (matrix) {\n this.transform(...matrix);\n }\n this.baseTransform = getCurrentTransform(this.ctx);\n if (bbox) {\n const width = bbox[2] - bbox[0];\n const height = bbox[3] - bbox[1];\n this.ctx.rect(bbox[0], bbox[1], width, height);\n this.current.updateRectMinMax(getCurrentTransform(this.ctx), bbox);\n this.clip();\n this.endPath();\n }\n }\n paintFormXObjectEnd() {\n if (!this.contentVisible) {\n return;\n }\n this.restore();\n this.baseTransform = this.baseTransformStack.pop();\n }\n beginGroup(group) {\n if (!this.contentVisible) {\n return;\n }\n this.save();\n if (this.inSMaskMode) {\n this.endSMaskMode();\n this.current.activeSMask = null;\n }\n const currentCtx = this.ctx;\n if (!group.isolated) {\n info(\"TODO: Support non-isolated groups.\");\n }\n if (group.knockout) {\n warn(\"Knockout groups not supported.\");\n }\n const currentTransform = getCurrentTransform(currentCtx);\n if (group.matrix) {\n currentCtx.transform(...group.matrix);\n }\n if (!group.bbox) {\n throw new Error(\"Bounding box is required.\");\n }\n let bounds = Util.getAxialAlignedBoundingBox(group.bbox, getCurrentTransform(currentCtx));\n const canvasBounds = [0, 0, currentCtx.canvas.width, currentCtx.canvas.height];\n bounds = Util.intersect(bounds, canvasBounds) || [0, 0, 0, 0];\n const offsetX = Math.floor(bounds[0]);\n const offsetY = Math.floor(bounds[1]);\n const drawnWidth = Math.max(Math.ceil(bounds[2]) - offsetX, 1);\n const drawnHeight = Math.max(Math.ceil(bounds[3]) - offsetY, 1);\n this.current.startNewPathAndClipBox([0, 0, drawnWidth, drawnHeight]);\n let cacheId = \"groupAt\" + this.groupLevel;\n if (group.smask) {\n cacheId += \"_smask_\" + this.smaskCounter++ % 2;\n }\n const scratchCanvas = this.cachedCanvases.getCanvas(cacheId, drawnWidth, drawnHeight);\n const groupCtx = scratchCanvas.context;\n groupCtx.translate(-offsetX, -offsetY);\n groupCtx.transform(...currentTransform);\n if (group.smask) {\n this.smaskStack.push({\n canvas: scratchCanvas.canvas,\n context: groupCtx,\n offsetX,\n offsetY,\n subtype: group.smask.subtype,\n backdrop: group.smask.backdrop,\n transferMap: group.smask.transferMap || null,\n startTransformInverse: null\n });\n } else {\n currentCtx.setTransform(1, 0, 0, 1, 0, 0);\n currentCtx.translate(offsetX, offsetY);\n currentCtx.save();\n }\n copyCtxState(currentCtx, groupCtx);\n this.ctx 
= groupCtx;\n this.setGState([[\"BM\", \"source-over\"], [\"ca\", 1], [\"CA\", 1]]);\n this.groupStack.push(currentCtx);\n this.groupLevel++;\n }\n endGroup(group) {\n if (!this.contentVisible) {\n return;\n }\n this.groupLevel--;\n const groupCtx = this.ctx;\n const ctx = this.groupStack.pop();\n this.ctx = ctx;\n this.ctx.imageSmoothingEnabled = false;\n if (group.smask) {\n this.tempSMask = this.smaskStack.pop();\n this.restore();\n } else {\n this.ctx.restore();\n const currentMtx = getCurrentTransform(this.ctx);\n this.restore();\n this.ctx.save();\n this.ctx.setTransform(...currentMtx);\n const dirtyBox = Util.getAxialAlignedBoundingBox([0, 0, groupCtx.canvas.width, groupCtx.canvas.height], currentMtx);\n this.ctx.drawImage(groupCtx.canvas, 0, 0);\n this.ctx.restore();\n this.compose(dirtyBox);\n }\n }\n beginAnnotation(id, rect, transform, matrix, hasOwnCanvas) {\n this.#restoreInitialState();\n resetCtxToDefault(this.ctx);\n this.ctx.save();\n this.save();\n if (this.baseTransform) {\n this.ctx.setTransform(...this.baseTransform);\n }\n if (rect) {\n const width = rect[2] - rect[0];\n const height = rect[3] - rect[1];\n if (hasOwnCanvas && this.annotationCanvasMap) {\n transform = transform.slice();\n transform[4] -= rect[0];\n transform[5] -= rect[1];\n rect = rect.slice();\n rect[0] = rect[1] = 0;\n rect[2] = width;\n rect[3] = height;\n const [scaleX, scaleY] = Util.singularValueDecompose2dScale(getCurrentTransform(this.ctx));\n const {\n viewportScale\n } = this;\n const canvasWidth = Math.ceil(width * this.outputScaleX * viewportScale);\n const canvasHeight = Math.ceil(height * this.outputScaleY * viewportScale);\n this.annotationCanvas = this.canvasFactory.create(canvasWidth, canvasHeight);\n const {\n canvas,\n context\n } = this.annotationCanvas;\n this.annotationCanvasMap.set(id, canvas);\n this.annotationCanvas.savedCtx = this.ctx;\n this.ctx = context;\n this.ctx.save();\n this.ctx.setTransform(scaleX, 0, 0, -scaleY, 0, height * scaleY);\n resetCtxToDefault(this.ctx);\n } else {\n resetCtxToDefault(this.ctx);\n this.ctx.rect(rect[0], rect[1], width, height);\n this.ctx.clip();\n this.endPath();\n }\n }\n this.current = new CanvasExtraState(this.ctx.canvas.width, this.ctx.canvas.height);\n this.transform(...transform);\n this.transform(...matrix);\n }\n endAnnotation() {\n if (this.annotationCanvas) {\n this.ctx.restore();\n this.#drawFilter();\n this.ctx = this.annotationCanvas.savedCtx;\n delete this.annotationCanvas.savedCtx;\n delete this.annotationCanvas;\n }\n }\n paintImageMaskXObject(img) {\n if (!this.contentVisible) {\n return;\n }\n const count = img.count;\n img = this.getObject(img.data, img);\n img.count = count;\n const ctx = this.ctx;\n const glyph = this.processingType3;\n if (glyph) {\n if (glyph.compiled === undefined) {\n glyph.compiled = compileType3Glyph(img);\n }\n if (glyph.compiled) {\n glyph.compiled(ctx);\n return;\n }\n }\n const mask = this._createMaskCanvas(img);\n const maskCanvas = mask.canvas;\n ctx.save();\n ctx.setTransform(1, 0, 0, 1, 0, 0);\n ctx.drawImage(maskCanvas, mask.offsetX, mask.offsetY);\n ctx.restore();\n this.compose();\n }\n paintImageMaskXObjectRepeat(img, scaleX, skewX = 0, skewY = 0, scaleY, positions) {\n if (!this.contentVisible) {\n return;\n }\n img = this.getObject(img.data, img);\n const ctx = this.ctx;\n ctx.save();\n const currentTransform = getCurrentTransform(ctx);\n ctx.transform(scaleX, skewX, skewY, scaleY, 0, 0);\n const mask = this._createMaskCanvas(img);\n ctx.setTransform(1, 0, 0, 1, mask.offsetX - 
currentTransform[4], mask.offsetY - currentTransform[5]);\n for (let i = 0, ii = positions.length; i < ii; i += 2) {\n const trans = Util.transform(currentTransform, [scaleX, skewX, skewY, scaleY, positions[i], positions[i + 1]]);\n const [x, y] = Util.applyTransform([0, 0], trans);\n ctx.drawImage(mask.canvas, x, y);\n }\n ctx.restore();\n this.compose();\n }\n paintImageMaskXObjectGroup(images) {\n if (!this.contentVisible) {\n return;\n }\n const ctx = this.ctx;\n const fillColor = this.current.fillColor;\n const isPatternFill = this.current.patternFill;\n for (const image of images) {\n const {\n data,\n width,\n height,\n transform\n } = image;\n const maskCanvas = this.cachedCanvases.getCanvas(\"maskCanvas\", width, height);\n const maskCtx = maskCanvas.context;\n maskCtx.save();\n const img = this.getObject(data, image);\n putBinaryImageMask(maskCtx, img);\n maskCtx.globalCompositeOperation = \"source-in\";\n maskCtx.fillStyle = isPatternFill ? fillColor.getPattern(maskCtx, this, getCurrentTransformInverse(ctx), PathType.FILL) : fillColor;\n maskCtx.fillRect(0, 0, width, height);\n maskCtx.restore();\n ctx.save();\n ctx.transform(...transform);\n ctx.scale(1, -1);\n drawImageAtIntegerCoords(ctx, maskCanvas.canvas, 0, 0, width, height, 0, -1, 1, 1);\n ctx.restore();\n }\n this.compose();\n }\n paintImageXObject(objId) {\n if (!this.contentVisible) {\n return;\n }\n const imgData = this.getObject(objId);\n if (!imgData) {\n warn(\"Dependent image isn't ready yet\");\n return;\n }\n this.paintInlineImageXObject(imgData);\n }\n paintImageXObjectRepeat(objId, scaleX, scaleY, positions) {\n if (!this.contentVisible) {\n return;\n }\n const imgData = this.getObject(objId);\n if (!imgData) {\n warn(\"Dependent image isn't ready yet\");\n return;\n }\n const width = imgData.width;\n const height = imgData.height;\n const map = [];\n for (let i = 0, ii = positions.length; i < ii; i += 2) {\n map.push({\n transform: [scaleX, 0, 0, scaleY, positions[i], positions[i + 1]],\n x: 0,\n y: 0,\n w: width,\n h: height\n });\n }\n this.paintInlineImageXObjectGroup(imgData, map);\n }\n applyTransferMapsToCanvas(ctx) {\n if (this.current.transferMaps !== \"none\") {\n ctx.filter = this.current.transferMaps;\n ctx.drawImage(ctx.canvas, 0, 0);\n ctx.filter = \"none\";\n }\n return ctx.canvas;\n }\n applyTransferMapsToBitmap(imgData) {\n if (this.current.transferMaps === \"none\") {\n return imgData.bitmap;\n }\n const {\n bitmap,\n width,\n height\n } = imgData;\n const tmpCanvas = this.cachedCanvases.getCanvas(\"inlineImage\", width, height);\n const tmpCtx = tmpCanvas.context;\n tmpCtx.filter = this.current.transferMaps;\n tmpCtx.drawImage(bitmap, 0, 0);\n tmpCtx.filter = \"none\";\n return tmpCanvas.canvas;\n }\n paintInlineImageXObject(imgData) {\n if (!this.contentVisible) {\n return;\n }\n const width = imgData.width;\n const height = imgData.height;\n const ctx = this.ctx;\n this.save();\n if (!isNodeJS) {\n const {\n filter\n } = ctx;\n if (filter !== \"none\" && filter !== \"\") {\n ctx.filter = \"none\";\n }\n }\n ctx.scale(1 / width, -1 / height);\n let imgToPaint;\n if (imgData.bitmap) {\n imgToPaint = this.applyTransferMapsToBitmap(imgData);\n } else if (typeof HTMLElement === \"function\" && imgData instanceof HTMLElement || !imgData.data) {\n imgToPaint = imgData;\n } else {\n const tmpCanvas = this.cachedCanvases.getCanvas(\"inlineImage\", width, height);\n const tmpCtx = tmpCanvas.context;\n putBinaryImageData(tmpCtx, imgData);\n imgToPaint = this.applyTransferMapsToCanvas(tmpCtx);\n }\n 
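/* The inverse CTM tells _scaleImage() how strongly the image is shrunk on the canvas. While either axis still needs more than a 2x reduction (and is larger than 1px), the image is halved into the cached \"prescale1\"/\"prescale2\" canvases, so no single drawImage() step downscales by much more than 2x; e.g. a 4000px-wide image drawn at 1/8 of its pixel size is prescaled to 2000px and then 1000px, leaving the final 2x to drawImageAtIntegerCoords() below. Smoothing is then set via getImageSmoothingEnabled(): always on when the image requests interpolation, otherwise only while the per-pixel scale stays at or below devicePixelRatio * PixelsPerInch.PDF_TO_CSS_UNITS, so heavily magnified images keep hard pixel edges. */\n    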
const scaled = this._scaleImage(imgToPaint, getCurrentTransformInverse(ctx));\n ctx.imageSmoothingEnabled = getImageSmoothingEnabled(getCurrentTransform(ctx), imgData.interpolate);\n drawImageAtIntegerCoords(ctx, scaled.img, 0, 0, scaled.paintWidth, scaled.paintHeight, 0, -height, width, height);\n this.compose();\n this.restore();\n }\n paintInlineImageXObjectGroup(imgData, map) {\n if (!this.contentVisible) {\n return;\n }\n const ctx = this.ctx;\n let imgToPaint;\n if (imgData.bitmap) {\n imgToPaint = imgData.bitmap;\n } else {\n const w = imgData.width;\n const h = imgData.height;\n const tmpCanvas = this.cachedCanvases.getCanvas(\"inlineImage\", w, h);\n const tmpCtx = tmpCanvas.context;\n putBinaryImageData(tmpCtx, imgData);\n imgToPaint = this.applyTransferMapsToCanvas(tmpCtx);\n }\n for (const entry of map) {\n ctx.save();\n ctx.transform(...entry.transform);\n ctx.scale(1, -1);\n drawImageAtIntegerCoords(ctx, imgToPaint, entry.x, entry.y, entry.w, entry.h, 0, -1, 1, 1);\n ctx.restore();\n }\n this.compose();\n }\n paintSolidColorImageMask() {\n if (!this.contentVisible) {\n return;\n }\n this.ctx.fillRect(0, 0, 1, 1);\n this.compose();\n }\n markPoint(tag) {}\n markPointProps(tag, properties) {}\n beginMarkedContent(tag) {\n this.markedContentStack.push({\n visible: true\n });\n }\n beginMarkedContentProps(tag, properties) {\n if (tag === \"OC\") {\n this.markedContentStack.push({\n visible: this.optionalContentConfig.isVisible(properties)\n });\n } else {\n this.markedContentStack.push({\n visible: true\n });\n }\n this.contentVisible = this.isContentVisible();\n }\n endMarkedContent() {\n this.markedContentStack.pop();\n this.contentVisible = this.isContentVisible();\n }\n beginCompat() {}\n endCompat() {}\n consumePath(clipBox) {\n const isEmpty = this.current.isEmptyClip();\n if (this.pendingClip) {\n this.current.updateClipFromPath();\n }\n if (!this.pendingClip) {\n this.compose(clipBox);\n }\n const ctx = this.ctx;\n if (this.pendingClip) {\n if (!isEmpty) {\n if (this.pendingClip === EO_CLIP) {\n ctx.clip(\"evenodd\");\n } else {\n ctx.clip();\n }\n }\n this.pendingClip = null;\n }\n this.current.startNewPathAndClipBox(this.current.clipBox);\n ctx.beginPath();\n }\n getSinglePixelWidth() {\n if (!this._cachedGetSinglePixelWidth) {\n const m = getCurrentTransform(this.ctx);\n if (m[1] === 0 && m[2] === 0) {\n this._cachedGetSinglePixelWidth = 1 / Math.min(Math.abs(m[0]), Math.abs(m[3]));\n } else {\n const absDet = Math.abs(m[0] * m[3] - m[2] * m[1]);\n const normX = Math.hypot(m[0], m[2]);\n const normY = Math.hypot(m[1], m[3]);\n this._cachedGetSinglePixelWidth = Math.max(normX, normY) / absDet;\n }\n }\n return this._cachedGetSinglePixelWidth;\n }\n getScaleForStroking() {\n if (this._cachedScaleForStroking[0] === -1) {\n const {\n lineWidth\n } = this.current;\n const {\n a,\n b,\n c,\n d\n } = this.ctx.getTransform();\n let scaleX, scaleY;\n if (b === 0 && c === 0) {\n const normX = Math.abs(a);\n const normY = Math.abs(d);\n if (normX === normY) {\n if (lineWidth === 0) {\n scaleX = scaleY = 1 / normX;\n } else {\n const scaledLineWidth = normX * lineWidth;\n scaleX = scaleY = scaledLineWidth < 1 ? 1 / scaledLineWidth : 1;\n }\n } else if (lineWidth === 0) {\n scaleX = 1 / normX;\n scaleY = 1 / normY;\n } else {\n const scaledXLineWidth = normX * lineWidth;\n const scaledYLineWidth = normY * lineWidth;\n scaleX = scaledXLineWidth < 1 ? 1 / scaledXLineWidth : 1;\n scaleY = scaledYLineWidth < 1 ? 
1 / scaledYLineWidth : 1;\n }\n } else {\n const absDet = Math.abs(a * d - b * c);\n const normX = Math.hypot(a, b);\n const normY = Math.hypot(c, d);\n if (lineWidth === 0) {\n scaleX = normY / absDet;\n scaleY = normX / absDet;\n } else {\n const baseArea = lineWidth * absDet;\n scaleX = normY > baseArea ? normY / baseArea : 1;\n scaleY = normX > baseArea ? normX / baseArea : 1;\n }\n }\n this._cachedScaleForStroking[0] = scaleX;\n this._cachedScaleForStroking[1] = scaleY;\n }\n return this._cachedScaleForStroking;\n }\n rescaleAndStroke(saveRestore) {\n const {\n ctx\n } = this;\n const {\n lineWidth\n } = this.current;\n const [scaleX, scaleY] = this.getScaleForStroking();\n ctx.lineWidth = lineWidth || 1;\n if (scaleX === 1 && scaleY === 1) {\n ctx.stroke();\n return;\n }\n const dashes = ctx.getLineDash();\n if (saveRestore) {\n ctx.save();\n }\n ctx.scale(scaleX, scaleY);\n if (dashes.length > 0) {\n const scale = Math.max(scaleX, scaleY);\n ctx.setLineDash(dashes.map(x => x / scale));\n ctx.lineDashOffset /= scale;\n }\n ctx.stroke();\n if (saveRestore) {\n ctx.restore();\n }\n }\n isContentVisible() {\n for (let i = this.markedContentStack.length - 1; i >= 0; i--) {\n if (!this.markedContentStack[i].visible) {\n return false;\n }\n }\n return true;\n }\n}\nfor (const op in OPS) {\n if (CanvasGraphics.prototype[op] !== undefined) {\n CanvasGraphics.prototype[OPS[op]] = CanvasGraphics.prototype[op];\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/worker_options.js\nclass GlobalWorkerOptions {\n static #port = null;\n static #src = \"\";\n static get workerPort() {\n return this.#port;\n }\n static set workerPort(val) {\n if (!(typeof Worker !== \"undefined\" && val instanceof Worker) && val !== null) {\n throw new Error(\"Invalid `workerPort` type.\");\n }\n this.#port = val;\n }\n static get workerSrc() {\n return this.#src;\n }\n static set workerSrc(val) {\n if (typeof val !== \"string\") {\n throw new Error(\"Invalid `workerSrc` type.\");\n }\n this.#src = val;\n }\n}\n\n;// CONCATENATED MODULE: ./src/shared/message_handler.js\n\nconst CallbackKind = {\n UNKNOWN: 0,\n DATA: 1,\n ERROR: 2\n};\nconst StreamKind = {\n UNKNOWN: 0,\n CANCEL: 1,\n CANCEL_COMPLETE: 2,\n CLOSE: 3,\n ENQUEUE: 4,\n ERROR: 5,\n PULL: 6,\n PULL_COMPLETE: 7,\n START_COMPLETE: 8\n};\nfunction wrapReason(reason) {\n if (!(reason instanceof Error || typeof reason === \"object\" && reason !== null)) {\n unreachable('wrapReason: Expected \"reason\" to be a (possibly cloned) Error.');\n }\n switch (reason.name) {\n case \"AbortException\":\n return new AbortException(reason.message);\n case \"MissingPDFException\":\n return new MissingPDFException(reason.message);\n case \"PasswordException\":\n return new PasswordException(reason.message, reason.code);\n case \"UnexpectedResponseException\":\n return new UnexpectedResponseException(reason.message, reason.status);\n case \"UnknownErrorException\":\n return new UnknownErrorException(reason.message, reason.details);\n default:\n return new UnknownErrorException(reason.message, reason.toString());\n }\n}\nclass MessageHandler {\n constructor(sourceName, targetName, comObj) {\n this.sourceName = sourceName;\n this.targetName = targetName;\n this.comObj = comObj;\n this.callbackId = 1;\n this.streamId = 1;\n this.streamSinks = Object.create(null);\n this.streamControllers = Object.create(null);\n this.callbackCapabilities = Object.create(null);\n this.actionHandler = Object.create(null);\n this._onComObjOnMessage = event => {\n const data = event.data;\n if 
(data.targetName !== this.sourceName) {\n return;\n }\n if (data.stream) {\n this.#processStreamMessage(data);\n return;\n }\n if (data.callback) {\n const callbackId = data.callbackId;\n const capability = this.callbackCapabilities[callbackId];\n if (!capability) {\n throw new Error(`Cannot resolve callback ${callbackId}`);\n }\n delete this.callbackCapabilities[callbackId];\n if (data.callback === CallbackKind.DATA) {\n capability.resolve(data.data);\n } else if (data.callback === CallbackKind.ERROR) {\n capability.reject(wrapReason(data.reason));\n } else {\n throw new Error(\"Unexpected callback case\");\n }\n return;\n }\n const action = this.actionHandler[data.action];\n if (!action) {\n throw new Error(`Unknown action from worker: ${data.action}`);\n }\n if (data.callbackId) {\n const cbSourceName = this.sourceName;\n const cbTargetName = data.sourceName;\n new Promise(function (resolve) {\n resolve(action(data.data));\n }).then(function (result) {\n comObj.postMessage({\n sourceName: cbSourceName,\n targetName: cbTargetName,\n callback: CallbackKind.DATA,\n callbackId: data.callbackId,\n data: result\n });\n }, function (reason) {\n comObj.postMessage({\n sourceName: cbSourceName,\n targetName: cbTargetName,\n callback: CallbackKind.ERROR,\n callbackId: data.callbackId,\n reason: wrapReason(reason)\n });\n });\n return;\n }\n if (data.streamId) {\n this.#createStreamSink(data);\n return;\n }\n action(data.data);\n };\n comObj.addEventListener(\"message\", this._onComObjOnMessage);\n }\n on(actionName, handler) {\n const ah = this.actionHandler;\n if (ah[actionName]) {\n throw new Error(`There is already an actionName called \"${actionName}\"`);\n }\n ah[actionName] = handler;\n }\n send(actionName, data, transfers) {\n this.comObj.postMessage({\n sourceName: this.sourceName,\n targetName: this.targetName,\n action: actionName,\n data\n }, transfers);\n }\n sendWithPromise(actionName, data, transfers) {\n const callbackId = this.callbackId++;\n const capability = Promise.withResolvers();\n this.callbackCapabilities[callbackId] = capability;\n try {\n this.comObj.postMessage({\n sourceName: this.sourceName,\n targetName: this.targetName,\n action: actionName,\n callbackId,\n data\n }, transfers);\n } catch (ex) {\n capability.reject(ex);\n }\n return capability.promise;\n }\n sendWithStream(actionName, data, queueingStrategy, transfers) {\n const streamId = this.streamId++,\n sourceName = this.sourceName,\n targetName = this.targetName,\n comObj = this.comObj;\n return new ReadableStream({\n start: controller => {\n const startCapability = Promise.withResolvers();\n this.streamControllers[streamId] = {\n controller,\n startCall: startCapability,\n pullCall: null,\n cancelCall: null,\n isClosed: false\n };\n comObj.postMessage({\n sourceName,\n targetName,\n action: actionName,\n streamId,\n data,\n desiredSize: controller.desiredSize\n }, transfers);\n return startCapability.promise;\n },\n pull: controller => {\n const pullCapability = Promise.withResolvers();\n this.streamControllers[streamId].pullCall = pullCapability;\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.PULL,\n streamId,\n desiredSize: controller.desiredSize\n });\n return pullCapability.promise;\n },\n cancel: reason => {\n assert(reason instanceof Error, \"cancel must have a valid reason\");\n const cancelCapability = Promise.withResolvers();\n this.streamControllers[streamId].cancelCall = cancelCapability;\n this.streamControllers[streamId].isClosed = true;\n comObj.postMessage({\n 
sourceName,\n targetName,\n stream: StreamKind.CANCEL,\n streamId,\n reason: wrapReason(reason)\n });\n return cancelCapability.promise;\n }\n }, queueingStrategy);\n }\n #createStreamSink(data) {\n const streamId = data.streamId,\n sourceName = this.sourceName,\n targetName = data.sourceName,\n comObj = this.comObj;\n const self = this,\n action = this.actionHandler[data.action];\n const streamSink = {\n enqueue(chunk, size = 1, transfers) {\n if (this.isCancelled) {\n return;\n }\n const lastDesiredSize = this.desiredSize;\n this.desiredSize -= size;\n if (lastDesiredSize > 0 && this.desiredSize <= 0) {\n this.sinkCapability = Promise.withResolvers();\n this.ready = this.sinkCapability.promise;\n }\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.ENQUEUE,\n streamId,\n chunk\n }, transfers);\n },\n close() {\n if (this.isCancelled) {\n return;\n }\n this.isCancelled = true;\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.CLOSE,\n streamId\n });\n delete self.streamSinks[streamId];\n },\n error(reason) {\n assert(reason instanceof Error, \"error must have a valid reason\");\n if (this.isCancelled) {\n return;\n }\n this.isCancelled = true;\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.ERROR,\n streamId,\n reason: wrapReason(reason)\n });\n },\n sinkCapability: Promise.withResolvers(),\n onPull: null,\n onCancel: null,\n isCancelled: false,\n desiredSize: data.desiredSize,\n ready: null\n };\n streamSink.sinkCapability.resolve();\n streamSink.ready = streamSink.sinkCapability.promise;\n this.streamSinks[streamId] = streamSink;\n new Promise(function (resolve) {\n resolve(action(data.data, streamSink));\n }).then(function () {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.START_COMPLETE,\n streamId,\n success: true\n });\n }, function (reason) {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.START_COMPLETE,\n streamId,\n reason: wrapReason(reason)\n });\n });\n }\n #processStreamMessage(data) {\n const streamId = data.streamId,\n sourceName = this.sourceName,\n targetName = data.sourceName,\n comObj = this.comObj;\n const streamController = this.streamControllers[streamId],\n streamSink = this.streamSinks[streamId];\n switch (data.stream) {\n case StreamKind.START_COMPLETE:\n if (data.success) {\n streamController.startCall.resolve();\n } else {\n streamController.startCall.reject(wrapReason(data.reason));\n }\n break;\n case StreamKind.PULL_COMPLETE:\n if (data.success) {\n streamController.pullCall.resolve();\n } else {\n streamController.pullCall.reject(wrapReason(data.reason));\n }\n break;\n case StreamKind.PULL:\n if (!streamSink) {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.PULL_COMPLETE,\n streamId,\n success: true\n });\n break;\n }\n if (streamSink.desiredSize <= 0 && data.desiredSize > 0) {\n streamSink.sinkCapability.resolve();\n }\n streamSink.desiredSize = data.desiredSize;\n new Promise(function (resolve) {\n resolve(streamSink.onPull?.());\n }).then(function () {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.PULL_COMPLETE,\n streamId,\n success: true\n });\n }, function (reason) {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.PULL_COMPLETE,\n streamId,\n reason: wrapReason(reason)\n });\n });\n break;\n case StreamKind.ENQUEUE:\n assert(streamController, \"enqueue should have stream controller\");\n if (streamController.isClosed) {\n break;\n }\n 
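/* This switch is the controller-side half of the stream protocol: sendWithStream() creates the ReadableStream and registers streamControllers[streamId], while #createStreamSink() on the receiving side of the comObj channel posts ENQUEUE/CLOSE/ERROR from its sink. An ENQUEUE message simply forwards the posted chunk to the local controller (closed streams were skipped just above). Hypothetical consumer of such a stream (handler is an existing MessageHandler; the action name and handleChunk are invented for illustration; run inside an async function):\n           const stream = handler.sendWithStream(\"GetSomething\", data);\n           const reader = stream.getReader();\n           for (let r = await reader.read(); !r.done; r = await reader.read()) {\n             handleChunk(r.value); // each chunk arrives through this ENQUEUE branch\n           }\n        */\n        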
streamController.controller.enqueue(data.chunk);\n break;\n case StreamKind.CLOSE:\n assert(streamController, \"close should have stream controller\");\n if (streamController.isClosed) {\n break;\n }\n streamController.isClosed = true;\n streamController.controller.close();\n this.#deleteStreamController(streamController, streamId);\n break;\n case StreamKind.ERROR:\n assert(streamController, \"error should have stream controller\");\n streamController.controller.error(wrapReason(data.reason));\n this.#deleteStreamController(streamController, streamId);\n break;\n case StreamKind.CANCEL_COMPLETE:\n if (data.success) {\n streamController.cancelCall.resolve();\n } else {\n streamController.cancelCall.reject(wrapReason(data.reason));\n }\n this.#deleteStreamController(streamController, streamId);\n break;\n case StreamKind.CANCEL:\n if (!streamSink) {\n break;\n }\n new Promise(function (resolve) {\n resolve(streamSink.onCancel?.(wrapReason(data.reason)));\n }).then(function () {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.CANCEL_COMPLETE,\n streamId,\n success: true\n });\n }, function (reason) {\n comObj.postMessage({\n sourceName,\n targetName,\n stream: StreamKind.CANCEL_COMPLETE,\n streamId,\n reason: wrapReason(reason)\n });\n });\n streamSink.sinkCapability.reject(wrapReason(data.reason));\n streamSink.isCancelled = true;\n delete this.streamSinks[streamId];\n break;\n default:\n throw new Error(\"Unexpected stream case\");\n }\n }\n async #deleteStreamController(streamController, streamId) {\n await Promise.allSettled([streamController.startCall?.promise, streamController.pullCall?.promise, streamController.cancelCall?.promise]);\n delete this.streamControllers[streamId];\n }\n destroy() {\n this.comObj.removeEventListener(\"message\", this._onComObjOnMessage);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/metadata.js\n\nclass Metadata {\n #metadataMap;\n #data;\n constructor({\n parsedData,\n rawData\n }) {\n this.#metadataMap = parsedData;\n this.#data = rawData;\n }\n getRaw() {\n return this.#data;\n }\n get(name) {\n return this.#metadataMap.get(name) ?? 
null;\n }\n getAll() {\n return objectFromMap(this.#metadataMap);\n }\n has(name) {\n return this.#metadataMap.has(name);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/optional_content_config.js\n\n\nconst INTERNAL = Symbol(\"INTERNAL\");\nclass OptionalContentGroup {\n #isDisplay = false;\n #isPrint = false;\n #userSet = false;\n #visible = true;\n constructor(renderingIntent, {\n name,\n intent,\n usage\n }) {\n this.#isDisplay = !!(renderingIntent & RenderingIntentFlag.DISPLAY);\n this.#isPrint = !!(renderingIntent & RenderingIntentFlag.PRINT);\n this.name = name;\n this.intent = intent;\n this.usage = usage;\n }\n get visible() {\n if (this.#userSet) {\n return this.#visible;\n }\n if (!this.#visible) {\n return false;\n }\n const {\n print,\n view\n } = this.usage;\n if (this.#isDisplay) {\n return view?.viewState !== \"OFF\";\n } else if (this.#isPrint) {\n return print?.printState !== \"OFF\";\n }\n return true;\n }\n _setVisible(internal, visible, userSet = false) {\n if (internal !== INTERNAL) {\n unreachable(\"Internal method `_setVisible` called.\");\n }\n this.#userSet = userSet;\n this.#visible = visible;\n }\n}\nclass OptionalContentConfig {\n #cachedGetHash = null;\n #groups = new Map();\n #initialHash = null;\n #order = null;\n constructor(data, renderingIntent = RenderingIntentFlag.DISPLAY) {\n this.renderingIntent = renderingIntent;\n this.name = null;\n this.creator = null;\n if (data === null) {\n return;\n }\n this.name = data.name;\n this.creator = data.creator;\n this.#order = data.order;\n for (const group of data.groups) {\n this.#groups.set(group.id, new OptionalContentGroup(renderingIntent, group));\n }\n if (data.baseState === \"OFF\") {\n for (const group of this.#groups.values()) {\n group._setVisible(INTERNAL, false);\n }\n }\n for (const on of data.on) {\n this.#groups.get(on)._setVisible(INTERNAL, true);\n }\n for (const off of data.off) {\n this.#groups.get(off)._setVisible(INTERNAL, false);\n }\n this.#initialHash = this.getHash();\n }\n #evaluateVisibilityExpression(array) {\n const length = array.length;\n if (length < 2) {\n return true;\n }\n const operator = array[0];\n for (let i = 1; i < length; i++) {\n const element = array[i];\n let state;\n if (Array.isArray(element)) {\n state = this.#evaluateVisibilityExpression(element);\n } else if (this.#groups.has(element)) {\n state = this.#groups.get(element).visible;\n } else {\n warn(`Optional content group not found: ${element}`);\n return true;\n }\n switch (operator) {\n case \"And\":\n if (!state) {\n return false;\n }\n break;\n case \"Or\":\n if (state) {\n return true;\n }\n break;\n case \"Not\":\n return !state;\n default:\n return true;\n }\n }\n return operator === \"And\";\n }\n isVisible(group) {\n if (this.#groups.size === 0) {\n return true;\n }\n if (!group) {\n info(\"Optional content group not defined.\");\n return true;\n }\n if (group.type === \"OCG\") {\n if (!this.#groups.has(group.id)) {\n warn(`Optional content group not found: ${group.id}`);\n return true;\n }\n return this.#groups.get(group.id).visible;\n } else if (group.type === \"OCMD\") {\n if (group.expression) {\n return this.#evaluateVisibilityExpression(group.expression);\n }\n if (!group.policy || group.policy === \"AnyOn\") {\n for (const id of group.ids) {\n if (!this.#groups.has(id)) {\n warn(`Optional content group not found: ${id}`);\n return true;\n }\n if (this.#groups.get(id).visible) {\n return true;\n }\n }\n return false;\n } else if (group.policy === \"AllOn\") {\n for (const id of group.ids) {\n if 
(!this.#groups.has(id)) {\n warn(`Optional content group not found: ${id}`);\n return true;\n }\n if (!this.#groups.get(id).visible) {\n return false;\n }\n }\n return true;\n } else if (group.policy === \"AnyOff\") {\n for (const id of group.ids) {\n if (!this.#groups.has(id)) {\n warn(`Optional content group not found: ${id}`);\n return true;\n }\n if (!this.#groups.get(id).visible) {\n return true;\n }\n }\n return false;\n } else if (group.policy === \"AllOff\") {\n for (const id of group.ids) {\n if (!this.#groups.has(id)) {\n warn(`Optional content group not found: ${id}`);\n return true;\n }\n if (this.#groups.get(id).visible) {\n return false;\n }\n }\n return true;\n }\n warn(`Unknown optional content policy ${group.policy}.`);\n return true;\n }\n warn(`Unknown group type ${group.type}.`);\n return true;\n }\n setVisibility(id, visible = true) {\n const group = this.#groups.get(id);\n if (!group) {\n warn(`Optional content group not found: ${id}`);\n return;\n }\n group._setVisible(INTERNAL, !!visible, true);\n this.#cachedGetHash = null;\n }\n setOCGState({\n state,\n preserveRB\n }) {\n let operator;\n for (const elem of state) {\n switch (elem) {\n case \"ON\":\n case \"OFF\":\n case \"Toggle\":\n operator = elem;\n continue;\n }\n const group = this.#groups.get(elem);\n if (!group) {\n continue;\n }\n switch (operator) {\n case \"ON\":\n group._setVisible(INTERNAL, true);\n break;\n case \"OFF\":\n group._setVisible(INTERNAL, false);\n break;\n case \"Toggle\":\n group._setVisible(INTERNAL, !group.visible);\n break;\n }\n }\n this.#cachedGetHash = null;\n }\n get hasInitialVisibility() {\n return this.#initialHash === null || this.getHash() === this.#initialHash;\n }\n getOrder() {\n if (!this.#groups.size) {\n return null;\n }\n if (this.#order) {\n return this.#order.slice();\n }\n return [...this.#groups.keys()];\n }\n getGroups() {\n return this.#groups.size > 0 ? objectFromMap(this.#groups) : null;\n }\n getGroup(id) {\n return this.#groups.get(id) || null;\n }\n getHash() {\n if (this.#cachedGetHash !== null) {\n return this.#cachedGetHash;\n }\n const hash = new MurmurHash3_64();\n for (const [id, group] of this.#groups) {\n hash.update(`${id}:${group.visible}`);\n }\n return this.#cachedGetHash = hash.hexdigest();\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/transport_stream.js\n\n\nclass PDFDataTransportStream {\n constructor(pdfDataRangeTransport, {\n disableRange = false,\n disableStream = false\n }) {\n assert(pdfDataRangeTransport, 'PDFDataTransportStream - missing required \"pdfDataRangeTransport\" argument.');\n const {\n length,\n initialData,\n progressiveDone,\n contentDispositionFilename\n } = pdfDataRangeTransport;\n this._queuedChunks = [];\n this._progressiveDone = progressiveDone;\n this._contentDispositionFilename = contentDispositionFilename;\n if (initialData?.length > 0) {\n const buffer = initialData instanceof Uint8Array && initialData.byteLength === initialData.buffer.byteLength ? 
initialData.buffer : new Uint8Array(initialData).buffer;\n this._queuedChunks.push(buffer);\n }\n this._pdfDataRangeTransport = pdfDataRangeTransport;\n this._isStreamingSupported = !disableStream;\n this._isRangeSupported = !disableRange;\n this._contentLength = length;\n this._fullRequestReader = null;\n this._rangeReaders = [];\n pdfDataRangeTransport.addRangeListener((begin, chunk) => {\n this._onReceiveData({\n begin,\n chunk\n });\n });\n pdfDataRangeTransport.addProgressListener((loaded, total) => {\n this._onProgress({\n loaded,\n total\n });\n });\n pdfDataRangeTransport.addProgressiveReadListener(chunk => {\n this._onReceiveData({\n chunk\n });\n });\n pdfDataRangeTransport.addProgressiveDoneListener(() => {\n this._onProgressiveDone();\n });\n pdfDataRangeTransport.transportReady();\n }\n _onReceiveData({\n begin,\n chunk\n }) {\n const buffer = chunk instanceof Uint8Array && chunk.byteLength === chunk.buffer.byteLength ? chunk.buffer : new Uint8Array(chunk).buffer;\n if (begin === undefined) {\n if (this._fullRequestReader) {\n this._fullRequestReader._enqueue(buffer);\n } else {\n this._queuedChunks.push(buffer);\n }\n } else {\n const found = this._rangeReaders.some(function (rangeReader) {\n if (rangeReader._begin !== begin) {\n return false;\n }\n rangeReader._enqueue(buffer);\n return true;\n });\n assert(found, \"_onReceiveData - no `PDFDataTransportStreamRangeReader` instance found.\");\n }\n }\n get _progressiveDataLength() {\n return this._fullRequestReader?._loaded ?? 0;\n }\n _onProgress(evt) {\n if (evt.total === undefined) {\n this._rangeReaders[0]?.onProgress?.({\n loaded: evt.loaded\n });\n } else {\n this._fullRequestReader?.onProgress?.({\n loaded: evt.loaded,\n total: evt.total\n });\n }\n }\n _onProgressiveDone() {\n this._fullRequestReader?.progressiveDone();\n this._progressiveDone = true;\n }\n _removeRangeReader(reader) {\n const i = this._rangeReaders.indexOf(reader);\n if (i >= 0) {\n this._rangeReaders.splice(i, 1);\n }\n }\n getFullReader() {\n assert(!this._fullRequestReader, \"PDFDataTransportStream.getFullReader can only be called once.\");\n const queuedChunks = this._queuedChunks;\n this._queuedChunks = null;\n return new PDFDataTransportStreamReader(this, queuedChunks, this._progressiveDone, this._contentDispositionFilename);\n }\n getRangeReader(begin, end) {\n if (end <= this._progressiveDataLength) {\n return null;\n }\n const reader = new PDFDataTransportStreamRangeReader(this, begin, end);\n this._pdfDataRangeTransport.requestDataRange(begin, end);\n this._rangeReaders.push(reader);\n return reader;\n }\n cancelAllRequests(reason) {\n this._fullRequestReader?.cancel(reason);\n for (const reader of this._rangeReaders.slice(0)) {\n reader.cancel(reason);\n }\n this._pdfDataRangeTransport.abort();\n }\n}\nclass PDFDataTransportStreamReader {\n constructor(stream, queuedChunks, progressiveDone = false, contentDispositionFilename = null) {\n this._stream = stream;\n this._done = progressiveDone || false;\n this._filename = isPdfFile(contentDispositionFilename) ? 
contentDispositionFilename : null;\n this._queuedChunks = queuedChunks || [];\n this._loaded = 0;\n for (const chunk of this._queuedChunks) {\n this._loaded += chunk.byteLength;\n }\n this._requests = [];\n this._headersReady = Promise.resolve();\n stream._fullRequestReader = this;\n this.onProgress = null;\n }\n _enqueue(chunk) {\n if (this._done) {\n return;\n }\n if (this._requests.length > 0) {\n const requestCapability = this._requests.shift();\n requestCapability.resolve({\n value: chunk,\n done: false\n });\n } else {\n this._queuedChunks.push(chunk);\n }\n this._loaded += chunk.byteLength;\n }\n get headersReady() {\n return this._headersReady;\n }\n get filename() {\n return this._filename;\n }\n get isRangeSupported() {\n return this._stream._isRangeSupported;\n }\n get isStreamingSupported() {\n return this._stream._isStreamingSupported;\n }\n get contentLength() {\n return this._stream._contentLength;\n }\n async read() {\n if (this._queuedChunks.length > 0) {\n const chunk = this._queuedChunks.shift();\n return {\n value: chunk,\n done: false\n };\n }\n if (this._done) {\n return {\n value: undefined,\n done: true\n };\n }\n const requestCapability = Promise.withResolvers();\n this._requests.push(requestCapability);\n return requestCapability.promise;\n }\n cancel(reason) {\n this._done = true;\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n }\n progressiveDone() {\n if (this._done) {\n return;\n }\n this._done = true;\n }\n}\nclass PDFDataTransportStreamRangeReader {\n constructor(stream, begin, end) {\n this._stream = stream;\n this._begin = begin;\n this._end = end;\n this._queuedChunk = null;\n this._requests = [];\n this._done = false;\n this.onProgress = null;\n }\n _enqueue(chunk) {\n if (this._done) {\n return;\n }\n if (this._requests.length === 0) {\n this._queuedChunk = chunk;\n } else {\n const requestsCapability = this._requests.shift();\n requestsCapability.resolve({\n value: chunk,\n done: false\n });\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n }\n this._done = true;\n this._stream._removeRangeReader(this);\n }\n get isStreamingSupported() {\n return false;\n }\n async read() {\n if (this._queuedChunk) {\n const chunk = this._queuedChunk;\n this._queuedChunk = null;\n return {\n value: chunk,\n done: false\n };\n }\n if (this._done) {\n return {\n value: undefined,\n done: true\n };\n }\n const requestCapability = Promise.withResolvers();\n this._requests.push(requestCapability);\n return requestCapability.promise;\n }\n cancel(reason) {\n this._done = true;\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n this._stream._removeRangeReader(this);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/content_disposition.js\n\nfunction getFilenameFromContentDispositionHeader(contentDisposition) {\n let needsEncodingFixup = true;\n let tmp = toParamRegExp(\"filename\\\\*\", \"i\").exec(contentDisposition);\n if (tmp) {\n tmp = tmp[1];\n let filename = rfc2616unquote(tmp);\n filename = unescape(filename);\n filename = rfc5987decode(filename);\n filename = rfc2047decode(filename);\n return fixupEncoding(filename);\n }\n tmp = rfc2231getparam(contentDisposition);\n if (tmp) {\n const filename = rfc2047decode(tmp);\n return fixupEncoding(filename);\n }\n 
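// Editor's note (illustrative example, not from the upstream source): a header such as\n //   attachment; filename*=UTF-8''na%C3%AFve.pdf\n // is handled by the extended filename* branches above (rfc5987decode / rfc2231getparam),\n // while a plain\n //   attachment; filename=\"report.pdf\"\n // only matches the unadorned \"filename\" parameter that is tried last, below.\n 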
tmp = toParamRegExp(\"filename\", \"i\").exec(contentDisposition);\n if (tmp) {\n tmp = tmp[1];\n let filename = rfc2616unquote(tmp);\n filename = rfc2047decode(filename);\n return fixupEncoding(filename);\n }\n function toParamRegExp(attributePattern, flags) {\n return new RegExp(\"(?:^|;)\\\\s*\" + attributePattern + \"\\\\s*=\\\\s*\" + \"(\" + '[^\";\\\\s][^;\\\\s]*' + \"|\" + '\"(?:[^\"\\\\\\\\]|\\\\\\\\\"?)+\"?' + \")\", flags);\n }\n function textdecode(encoding, value) {\n if (encoding) {\n if (!/^[\\x00-\\xFF]+$/.test(value)) {\n return value;\n }\n try {\n const decoder = new TextDecoder(encoding, {\n fatal: true\n });\n const buffer = stringToBytes(value);\n value = decoder.decode(buffer);\n needsEncodingFixup = false;\n } catch {}\n }\n return value;\n }\n function fixupEncoding(value) {\n if (needsEncodingFixup && /[\\x80-\\xff]/.test(value)) {\n value = textdecode(\"utf-8\", value);\n if (needsEncodingFixup) {\n value = textdecode(\"iso-8859-1\", value);\n }\n }\n return value;\n }\n function rfc2231getparam(contentDispositionStr) {\n const matches = [];\n let match;\n const iter = toParamRegExp(\"filename\\\\*((?!0\\\\d)\\\\d+)(\\\\*?)\", \"ig\");\n while ((match = iter.exec(contentDispositionStr)) !== null) {\n let [, n, quot, part] = match;\n n = parseInt(n, 10);\n if (n in matches) {\n if (n === 0) {\n break;\n }\n continue;\n }\n matches[n] = [quot, part];\n }\n const parts = [];\n for (let n = 0; n < matches.length; ++n) {\n if (!(n in matches)) {\n break;\n }\n let [quot, part] = matches[n];\n part = rfc2616unquote(part);\n if (quot) {\n part = unescape(part);\n if (n === 0) {\n part = rfc5987decode(part);\n }\n }\n parts.push(part);\n }\n return parts.join(\"\");\n }\n function rfc2616unquote(value) {\n if (value.startsWith('\"')) {\n const parts = value.slice(1).split('\\\\\"');\n for (let i = 0; i < parts.length; ++i) {\n const quotindex = parts[i].indexOf('\"');\n if (quotindex !== -1) {\n parts[i] = parts[i].slice(0, quotindex);\n parts.length = i + 1;\n }\n parts[i] = parts[i].replaceAll(/\\\\(.)/g, \"$1\");\n }\n value = parts.join('\"');\n }\n return value;\n }\n function rfc5987decode(extvalue) {\n const encodingend = extvalue.indexOf(\"'\");\n if (encodingend === -1) {\n return extvalue;\n }\n const encoding = extvalue.slice(0, encodingend);\n const langvalue = extvalue.slice(encodingend + 1);\n const value = langvalue.replace(/^[^']*'/, \"\");\n return textdecode(encoding, value);\n }\n function rfc2047decode(value) {\n if (!value.startsWith(\"=?\") || /[\\x00-\\x19\\x80-\\xff]/.test(value)) {\n return value;\n }\n return value.replaceAll(/=\\?([\\w-]*)\\?([QqBb])\\?((?:[^?]|\\?(?!=))*)\\?=/g, function (matches, charset, encoding, text) {\n if (encoding === \"q\" || encoding === \"Q\") {\n text = text.replaceAll(\"_\", \" \");\n text = text.replaceAll(/=([0-9a-fA-F]{2})/g, function (match, hex) {\n return String.fromCharCode(parseInt(hex, 16));\n });\n return textdecode(charset, text);\n }\n try {\n text = atob(text);\n } catch {}\n return textdecode(charset, text);\n });\n }\n return \"\";\n}\n\n;// CONCATENATED MODULE: ./src/display/network_utils.js\n\n\n\nfunction validateRangeRequestCapabilities({\n getResponseHeader,\n isHttp,\n rangeChunkSize,\n disableRange\n}) {\n const returnValues = {\n allowRangeRequests: false,\n suggestedLength: undefined\n };\n const length = parseInt(getResponseHeader(\"Content-Length\"), 10);\n if (!Number.isInteger(length)) {\n return returnValues;\n }\n returnValues.suggestedLength = length;\n if (length <= 2 * 
rangeChunkSize) {\n return returnValues;\n }\n if (disableRange || !isHttp) {\n return returnValues;\n }\n if (getResponseHeader(\"Accept-Ranges\") !== \"bytes\") {\n return returnValues;\n }\n const contentEncoding = getResponseHeader(\"Content-Encoding\") || \"identity\";\n if (contentEncoding !== \"identity\") {\n return returnValues;\n }\n returnValues.allowRangeRequests = true;\n return returnValues;\n}\nfunction extractFilenameFromHeader(getResponseHeader) {\n const contentDisposition = getResponseHeader(\"Content-Disposition\");\n if (contentDisposition) {\n let filename = getFilenameFromContentDispositionHeader(contentDisposition);\n if (filename.includes(\"%\")) {\n try {\n filename = decodeURIComponent(filename);\n } catch {}\n }\n if (isPdfFile(filename)) {\n return filename;\n }\n }\n return null;\n}\nfunction createResponseStatusError(status, url) {\n if (status === 404 || status === 0 && url.startsWith(\"file:\")) {\n return new MissingPDFException('Missing PDF \"' + url + '\".');\n }\n return new UnexpectedResponseException(`Unexpected server response (${status}) while retrieving PDF \"${url}\".`, status);\n}\nfunction validateResponseStatus(status) {\n return status === 200 || status === 206;\n}\n\n;// CONCATENATED MODULE: ./src/display/fetch_stream.js\n\n\nfunction createFetchOptions(headers, withCredentials, abortController) {\n return {\n method: \"GET\",\n headers,\n signal: abortController.signal,\n mode: \"cors\",\n credentials: withCredentials ? \"include\" : \"same-origin\",\n redirect: \"follow\"\n };\n}\nfunction createHeaders(httpHeaders) {\n const headers = new Headers();\n for (const property in httpHeaders) {\n const value = httpHeaders[property];\n if (value === undefined) {\n continue;\n }\n headers.append(property, value);\n }\n return headers;\n}\nfunction getArrayBuffer(val) {\n if (val instanceof Uint8Array) {\n return val.buffer;\n }\n if (val instanceof ArrayBuffer) {\n return val;\n }\n warn(`getArrayBuffer - unexpected data format: ${val}`);\n return new Uint8Array(val).buffer;\n}\nclass PDFFetchStream {\n constructor(source) {\n this.source = source;\n this.isHttp = /^https?:/i.test(source.url);\n this.httpHeaders = this.isHttp && source.httpHeaders || {};\n this._fullRequestReader = null;\n this._rangeRequestReaders = [];\n }\n get _progressiveDataLength() {\n return this._fullRequestReader?._loaded ?? 
0;\n }\n getFullReader() {\n assert(!this._fullRequestReader, \"PDFFetchStream.getFullReader can only be called once.\");\n this._fullRequestReader = new PDFFetchStreamReader(this);\n return this._fullRequestReader;\n }\n getRangeReader(begin, end) {\n if (end <= this._progressiveDataLength) {\n return null;\n }\n const reader = new PDFFetchStreamRangeReader(this, begin, end);\n this._rangeRequestReaders.push(reader);\n return reader;\n }\n cancelAllRequests(reason) {\n this._fullRequestReader?.cancel(reason);\n for (const reader of this._rangeRequestReaders.slice(0)) {\n reader.cancel(reason);\n }\n }\n}\nclass PDFFetchStreamReader {\n constructor(stream) {\n this._stream = stream;\n this._reader = null;\n this._loaded = 0;\n this._filename = null;\n const source = stream.source;\n this._withCredentials = source.withCredentials || false;\n this._contentLength = source.length;\n this._headersCapability = Promise.withResolvers();\n this._disableRange = source.disableRange || false;\n this._rangeChunkSize = source.rangeChunkSize;\n if (!this._rangeChunkSize && !this._disableRange) {\n this._disableRange = true;\n }\n this._abortController = new AbortController();\n this._isStreamingSupported = !source.disableStream;\n this._isRangeSupported = !source.disableRange;\n this._headers = createHeaders(this._stream.httpHeaders);\n const url = source.url;\n fetch(url, createFetchOptions(this._headers, this._withCredentials, this._abortController)).then(response => {\n if (!validateResponseStatus(response.status)) {\n throw createResponseStatusError(response.status, url);\n }\n this._reader = response.body.getReader();\n this._headersCapability.resolve();\n const getResponseHeader = name => response.headers.get(name);\n const {\n allowRangeRequests,\n suggestedLength\n } = validateRangeRequestCapabilities({\n getResponseHeader,\n isHttp: this._stream.isHttp,\n rangeChunkSize: this._rangeChunkSize,\n disableRange: this._disableRange\n });\n this._isRangeSupported = allowRangeRequests;\n this._contentLength = suggestedLength || this._contentLength;\n this._filename = extractFilenameFromHeader(getResponseHeader);\n if (!this._isStreamingSupported && this._isRangeSupported) {\n this.cancel(new AbortException(\"Streaming is disabled.\"));\n }\n }).catch(this._headersCapability.reject);\n this.onProgress = null;\n }\n get headersReady() {\n return this._headersCapability.promise;\n }\n get filename() {\n return this._filename;\n }\n get contentLength() {\n return this._contentLength;\n }\n get isRangeSupported() {\n return this._isRangeSupported;\n }\n get isStreamingSupported() {\n return this._isStreamingSupported;\n }\n async read() {\n await this._headersCapability.promise;\n const {\n value,\n done\n } = await this._reader.read();\n if (done) {\n return {\n value,\n done\n };\n }\n this._loaded += value.byteLength;\n this.onProgress?.({\n loaded: this._loaded,\n total: this._contentLength\n });\n return {\n value: getArrayBuffer(value),\n done: false\n };\n }\n cancel(reason) {\n this._reader?.cancel(reason);\n this._abortController.abort();\n }\n}\nclass PDFFetchStreamRangeReader {\n constructor(stream, begin, end) {\n this._stream = stream;\n this._reader = null;\n this._loaded = 0;\n const source = stream.source;\n this._withCredentials = source.withCredentials || false;\n this._readCapability = Promise.withResolvers();\n this._isStreamingSupported = !source.disableStream;\n this._abortController = new AbortController();\n this._headers = createHeaders(this._stream.httpHeaders);\n 
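// Editor's note (hedged example, not part of the upstream source): for begin = 0 and\n // end = 65536 (the DEFAULT_RANGE_CHUNK_SIZE that getDocument falls back to), the header\n // appended below is \"Range: bytes=0-65535\", i.e. an inclusive byte range ending at end - 1.\n 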
this._headers.append(\"Range\", `bytes=${begin}-${end - 1}`);\n const url = source.url;\n fetch(url, createFetchOptions(this._headers, this._withCredentials, this._abortController)).then(response => {\n if (!validateResponseStatus(response.status)) {\n throw createResponseStatusError(response.status, url);\n }\n this._readCapability.resolve();\n this._reader = response.body.getReader();\n }).catch(this._readCapability.reject);\n this.onProgress = null;\n }\n get isStreamingSupported() {\n return this._isStreamingSupported;\n }\n async read() {\n await this._readCapability.promise;\n const {\n value,\n done\n } = await this._reader.read();\n if (done) {\n return {\n value,\n done\n };\n }\n this._loaded += value.byteLength;\n this.onProgress?.({\n loaded: this._loaded\n });\n return {\n value: getArrayBuffer(value),\n done: false\n };\n }\n cancel(reason) {\n this._reader?.cancel(reason);\n this._abortController.abort();\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/network.js\n\n\nconst OK_RESPONSE = 200;\nconst PARTIAL_CONTENT_RESPONSE = 206;\nfunction network_getArrayBuffer(xhr) {\n const data = xhr.response;\n if (typeof data !== \"string\") {\n return data;\n }\n return stringToBytes(data).buffer;\n}\nclass NetworkManager {\n constructor(url, args = {}) {\n this.url = url;\n this.isHttp = /^https?:/i.test(url);\n this.httpHeaders = this.isHttp && args.httpHeaders || Object.create(null);\n this.withCredentials = args.withCredentials || false;\n this.currXhrId = 0;\n this.pendingRequests = Object.create(null);\n }\n requestRange(begin, end, listeners) {\n const args = {\n begin,\n end\n };\n for (const prop in listeners) {\n args[prop] = listeners[prop];\n }\n return this.request(args);\n }\n requestFull(listeners) {\n return this.request(listeners);\n }\n request(args) {\n const xhr = new XMLHttpRequest();\n const xhrId = this.currXhrId++;\n const pendingRequest = this.pendingRequests[xhrId] = {\n xhr\n };\n xhr.open(\"GET\", this.url);\n xhr.withCredentials = this.withCredentials;\n for (const property in this.httpHeaders) {\n const value = this.httpHeaders[property];\n if (value === undefined) {\n continue;\n }\n xhr.setRequestHeader(property, value);\n }\n if (this.isHttp && \"begin\" in args && \"end\" in args) {\n xhr.setRequestHeader(\"Range\", `bytes=${args.begin}-${args.end - 1}`);\n pendingRequest.expectedStatus = PARTIAL_CONTENT_RESPONSE;\n } else {\n pendingRequest.expectedStatus = OK_RESPONSE;\n }\n xhr.responseType = \"arraybuffer\";\n if (args.onError) {\n xhr.onerror = function (evt) {\n args.onError(xhr.status);\n };\n }\n xhr.onreadystatechange = this.onStateChange.bind(this, xhrId);\n xhr.onprogress = this.onProgress.bind(this, xhrId);\n pendingRequest.onHeadersReceived = args.onHeadersReceived;\n pendingRequest.onDone = args.onDone;\n pendingRequest.onError = args.onError;\n pendingRequest.onProgress = args.onProgress;\n xhr.send(null);\n return xhrId;\n }\n onProgress(xhrId, evt) {\n const pendingRequest = this.pendingRequests[xhrId];\n if (!pendingRequest) {\n return;\n }\n pendingRequest.onProgress?.(evt);\n }\n onStateChange(xhrId, evt) {\n const pendingRequest = this.pendingRequests[xhrId];\n if (!pendingRequest) {\n return;\n }\n const xhr = pendingRequest.xhr;\n if (xhr.readyState >= 2 && pendingRequest.onHeadersReceived) {\n pendingRequest.onHeadersReceived();\n delete pendingRequest.onHeadersReceived;\n }\n if (xhr.readyState !== 4) {\n return;\n }\n if (!(xhrId in this.pendingRequests)) {\n return;\n }\n delete this.pendingRequests[xhrId];\n if 
(xhr.status === 0 && this.isHttp) {\n pendingRequest.onError?.(xhr.status);\n return;\n }\n const xhrStatus = xhr.status || OK_RESPONSE;\n const ok_response_on_range_request = xhrStatus === OK_RESPONSE && pendingRequest.expectedStatus === PARTIAL_CONTENT_RESPONSE;\n if (!ok_response_on_range_request && xhrStatus !== pendingRequest.expectedStatus) {\n pendingRequest.onError?.(xhr.status);\n return;\n }\n const chunk = network_getArrayBuffer(xhr);\n if (xhrStatus === PARTIAL_CONTENT_RESPONSE) {\n const rangeHeader = xhr.getResponseHeader(\"Content-Range\");\n const matches = /bytes (\\d+)-(\\d+)\\/(\\d+)/.exec(rangeHeader);\n pendingRequest.onDone({\n begin: parseInt(matches[1], 10),\n chunk\n });\n } else if (chunk) {\n pendingRequest.onDone({\n begin: 0,\n chunk\n });\n } else {\n pendingRequest.onError?.(xhr.status);\n }\n }\n getRequestXhr(xhrId) {\n return this.pendingRequests[xhrId].xhr;\n }\n isPendingRequest(xhrId) {\n return xhrId in this.pendingRequests;\n }\n abortRequest(xhrId) {\n const xhr = this.pendingRequests[xhrId].xhr;\n delete this.pendingRequests[xhrId];\n xhr.abort();\n }\n}\nclass PDFNetworkStream {\n constructor(source) {\n this._source = source;\n this._manager = new NetworkManager(source.url, {\n httpHeaders: source.httpHeaders,\n withCredentials: source.withCredentials\n });\n this._rangeChunkSize = source.rangeChunkSize;\n this._fullRequestReader = null;\n this._rangeRequestReaders = [];\n }\n _onRangeRequestReaderClosed(reader) {\n const i = this._rangeRequestReaders.indexOf(reader);\n if (i >= 0) {\n this._rangeRequestReaders.splice(i, 1);\n }\n }\n getFullReader() {\n assert(!this._fullRequestReader, \"PDFNetworkStream.getFullReader can only be called once.\");\n this._fullRequestReader = new PDFNetworkStreamFullRequestReader(this._manager, this._source);\n return this._fullRequestReader;\n }\n getRangeReader(begin, end) {\n const reader = new PDFNetworkStreamRangeRequestReader(this._manager, begin, end);\n reader.onClosed = this._onRangeRequestReaderClosed.bind(this);\n this._rangeRequestReaders.push(reader);\n return reader;\n }\n cancelAllRequests(reason) {\n this._fullRequestReader?.cancel(reason);\n for (const reader of this._rangeRequestReaders.slice(0)) {\n reader.cancel(reason);\n }\n }\n}\nclass PDFNetworkStreamFullRequestReader {\n constructor(manager, source) {\n this._manager = manager;\n const args = {\n onHeadersReceived: this._onHeadersReceived.bind(this),\n onDone: this._onDone.bind(this),\n onError: this._onError.bind(this),\n onProgress: this._onProgress.bind(this)\n };\n this._url = source.url;\n this._fullRequestId = manager.requestFull(args);\n this._headersReceivedCapability = Promise.withResolvers();\n this._disableRange = source.disableRange || false;\n this._contentLength = source.length;\n this._rangeChunkSize = source.rangeChunkSize;\n if (!this._rangeChunkSize && !this._disableRange) {\n this._disableRange = true;\n }\n this._isStreamingSupported = false;\n this._isRangeSupported = false;\n this._cachedChunks = [];\n this._requests = [];\n this._done = false;\n this._storedError = undefined;\n this._filename = null;\n this.onProgress = null;\n }\n _onHeadersReceived() {\n const fullRequestXhrId = this._fullRequestId;\n const fullRequestXhr = this._manager.getRequestXhr(fullRequestXhrId);\n const getResponseHeader = name => fullRequestXhr.getResponseHeader(name);\n const {\n allowRangeRequests,\n suggestedLength\n } = validateRangeRequestCapabilities({\n getResponseHeader,\n isHttp: this._manager.isHttp,\n rangeChunkSize: 
this._rangeChunkSize,\n disableRange: this._disableRange\n });\n if (allowRangeRequests) {\n this._isRangeSupported = true;\n }\n this._contentLength = suggestedLength || this._contentLength;\n this._filename = extractFilenameFromHeader(getResponseHeader);\n if (this._isRangeSupported) {\n this._manager.abortRequest(fullRequestXhrId);\n }\n this._headersReceivedCapability.resolve();\n }\n _onDone(data) {\n if (data) {\n if (this._requests.length > 0) {\n const requestCapability = this._requests.shift();\n requestCapability.resolve({\n value: data.chunk,\n done: false\n });\n } else {\n this._cachedChunks.push(data.chunk);\n }\n }\n this._done = true;\n if (this._cachedChunks.length > 0) {\n return;\n }\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n }\n _onError(status) {\n this._storedError = createResponseStatusError(status, this._url);\n this._headersReceivedCapability.reject(this._storedError);\n for (const requestCapability of this._requests) {\n requestCapability.reject(this._storedError);\n }\n this._requests.length = 0;\n this._cachedChunks.length = 0;\n }\n _onProgress(evt) {\n this.onProgress?.({\n loaded: evt.loaded,\n total: evt.lengthComputable ? evt.total : this._contentLength\n });\n }\n get filename() {\n return this._filename;\n }\n get isRangeSupported() {\n return this._isRangeSupported;\n }\n get isStreamingSupported() {\n return this._isStreamingSupported;\n }\n get contentLength() {\n return this._contentLength;\n }\n get headersReady() {\n return this._headersReceivedCapability.promise;\n }\n async read() {\n if (this._storedError) {\n throw this._storedError;\n }\n if (this._cachedChunks.length > 0) {\n const chunk = this._cachedChunks.shift();\n return {\n value: chunk,\n done: false\n };\n }\n if (this._done) {\n return {\n value: undefined,\n done: true\n };\n }\n const requestCapability = Promise.withResolvers();\n this._requests.push(requestCapability);\n return requestCapability.promise;\n }\n cancel(reason) {\n this._done = true;\n this._headersReceivedCapability.reject(reason);\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n if (this._manager.isPendingRequest(this._fullRequestId)) {\n this._manager.abortRequest(this._fullRequestId);\n }\n this._fullRequestReader = null;\n }\n}\nclass PDFNetworkStreamRangeRequestReader {\n constructor(manager, begin, end) {\n this._manager = manager;\n const args = {\n onDone: this._onDone.bind(this),\n onError: this._onError.bind(this),\n onProgress: this._onProgress.bind(this)\n };\n this._url = manager.url;\n this._requestId = manager.requestRange(begin, end, args);\n this._requests = [];\n this._queuedChunk = null;\n this._done = false;\n this._storedError = undefined;\n this.onProgress = null;\n this.onClosed = null;\n }\n _close() {\n this.onClosed?.(this);\n }\n _onDone(data) {\n const chunk = data.chunk;\n if (this._requests.length > 0) {\n const requestCapability = this._requests.shift();\n requestCapability.resolve({\n value: chunk,\n done: false\n });\n } else {\n this._queuedChunk = chunk;\n }\n this._done = true;\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n this._close();\n }\n _onError(status) {\n this._storedError = createResponseStatusError(status, this._url);\n for (const 
requestCapability of this._requests) {\n requestCapability.reject(this._storedError);\n }\n this._requests.length = 0;\n this._queuedChunk = null;\n }\n _onProgress(evt) {\n if (!this.isStreamingSupported) {\n this.onProgress?.({\n loaded: evt.loaded\n });\n }\n }\n get isStreamingSupported() {\n return false;\n }\n async read() {\n if (this._storedError) {\n throw this._storedError;\n }\n if (this._queuedChunk !== null) {\n const chunk = this._queuedChunk;\n this._queuedChunk = null;\n return {\n value: chunk,\n done: false\n };\n }\n if (this._done) {\n return {\n value: undefined,\n done: true\n };\n }\n const requestCapability = Promise.withResolvers();\n this._requests.push(requestCapability);\n return requestCapability.promise;\n }\n cancel(reason) {\n this._done = true;\n for (const requestCapability of this._requests) {\n requestCapability.resolve({\n value: undefined,\n done: true\n });\n }\n this._requests.length = 0;\n if (this._manager.isPendingRequest(this._requestId)) {\n this._manager.abortRequest(this._requestId);\n }\n this._close();\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/node_stream.js\n\n\n\nconst fileUriRegex = /^file:\\/\\/\\/[a-zA-Z]:\\//;\nfunction parseUrl(sourceUrl) {\n const url = NodePackages.get(\"url\");\n const parsedUrl = url.parse(sourceUrl);\n if (parsedUrl.protocol === \"file:\" || parsedUrl.host) {\n return parsedUrl;\n }\n if (/^[a-z]:[/\\\\]/i.test(sourceUrl)) {\n return url.parse(`file:///${sourceUrl}`);\n }\n if (!parsedUrl.host) {\n parsedUrl.protocol = \"file:\";\n }\n return parsedUrl;\n}\nclass PDFNodeStream {\n constructor(source) {\n this.source = source;\n this.url = parseUrl(source.url);\n this.isHttp = this.url.protocol === \"http:\" || this.url.protocol === \"https:\";\n this.isFsUrl = this.url.protocol === \"file:\";\n this.httpHeaders = this.isHttp && source.httpHeaders || {};\n this._fullRequestReader = null;\n this._rangeRequestReaders = [];\n }\n get _progressiveDataLength() {\n return this._fullRequestReader?._loaded ?? 0;\n }\n getFullReader() {\n assert(!this._fullRequestReader, \"PDFNodeStream.getFullReader can only be called once.\");\n this._fullRequestReader = this.isFsUrl ? new PDFNodeStreamFsFullReader(this) : new PDFNodeStreamFullReader(this);\n return this._fullRequestReader;\n }\n getRangeReader(start, end) {\n if (end <= this._progressiveDataLength) {\n return null;\n }\n const rangeReader = this.isFsUrl ? 
new PDFNodeStreamFsRangeReader(this, start, end) : new PDFNodeStreamRangeReader(this, start, end);\n this._rangeRequestReaders.push(rangeReader);\n return rangeReader;\n }\n cancelAllRequests(reason) {\n this._fullRequestReader?.cancel(reason);\n for (const reader of this._rangeRequestReaders.slice(0)) {\n reader.cancel(reason);\n }\n }\n}\nclass BaseFullReader {\n constructor(stream) {\n this._url = stream.url;\n this._done = false;\n this._storedError = null;\n this.onProgress = null;\n const source = stream.source;\n this._contentLength = source.length;\n this._loaded = 0;\n this._filename = null;\n this._disableRange = source.disableRange || false;\n this._rangeChunkSize = source.rangeChunkSize;\n if (!this._rangeChunkSize && !this._disableRange) {\n this._disableRange = true;\n }\n this._isStreamingSupported = !source.disableStream;\n this._isRangeSupported = !source.disableRange;\n this._readableStream = null;\n this._readCapability = Promise.withResolvers();\n this._headersCapability = Promise.withResolvers();\n }\n get headersReady() {\n return this._headersCapability.promise;\n }\n get filename() {\n return this._filename;\n }\n get contentLength() {\n return this._contentLength;\n }\n get isRangeSupported() {\n return this._isRangeSupported;\n }\n get isStreamingSupported() {\n return this._isStreamingSupported;\n }\n async read() {\n await this._readCapability.promise;\n if (this._done) {\n return {\n value: undefined,\n done: true\n };\n }\n if (this._storedError) {\n throw this._storedError;\n }\n const chunk = this._readableStream.read();\n if (chunk === null) {\n this._readCapability = Promise.withResolvers();\n return this.read();\n }\n this._loaded += chunk.length;\n this.onProgress?.({\n loaded: this._loaded,\n total: this._contentLength\n });\n const buffer = new Uint8Array(chunk).buffer;\n return {\n value: buffer,\n done: false\n };\n }\n cancel(reason) {\n if (!this._readableStream) {\n this._error(reason);\n return;\n }\n this._readableStream.destroy(reason);\n }\n _error(reason) {\n this._storedError = reason;\n this._readCapability.resolve();\n }\n _setReadableStream(readableStream) {\n this._readableStream = readableStream;\n readableStream.on(\"readable\", () => {\n this._readCapability.resolve();\n });\n readableStream.on(\"end\", () => {\n readableStream.destroy();\n this._done = true;\n this._readCapability.resolve();\n });\n readableStream.on(\"error\", reason => {\n this._error(reason);\n });\n if (!this._isStreamingSupported && this._isRangeSupported) {\n this._error(new AbortException(\"streaming is disabled\"));\n }\n if (this._storedError) {\n this._readableStream.destroy(this._storedError);\n }\n }\n}\nclass BaseRangeReader {\n constructor(stream) {\n this._url = stream.url;\n this._done = false;\n this._storedError = null;\n this.onProgress = null;\n this._loaded = 0;\n this._readableStream = null;\n this._readCapability = Promise.withResolvers();\n const source = stream.source;\n this._isStreamingSupported = !source.disableStream;\n }\n get isStreamingSupported() {\n return this._isStreamingSupported;\n }\n async read() {\n await this._readCapability.promise;\n if (this._done) {\n return {\n value: undefined,\n done: true\n };\n }\n if (this._storedError) {\n throw this._storedError;\n }\n const chunk = this._readableStream.read();\n if (chunk === null) {\n this._readCapability = Promise.withResolvers();\n return this.read();\n }\n this._loaded += chunk.length;\n this.onProgress?.({\n loaded: this._loaded\n });\n const buffer = new 
Uint8Array(chunk).buffer;\n return {\n value: buffer,\n done: false\n };\n }\n cancel(reason) {\n if (!this._readableStream) {\n this._error(reason);\n return;\n }\n this._readableStream.destroy(reason);\n }\n _error(reason) {\n this._storedError = reason;\n this._readCapability.resolve();\n }\n _setReadableStream(readableStream) {\n this._readableStream = readableStream;\n readableStream.on(\"readable\", () => {\n this._readCapability.resolve();\n });\n readableStream.on(\"end\", () => {\n readableStream.destroy();\n this._done = true;\n this._readCapability.resolve();\n });\n readableStream.on(\"error\", reason => {\n this._error(reason);\n });\n if (this._storedError) {\n this._readableStream.destroy(this._storedError);\n }\n }\n}\nfunction createRequestOptions(parsedUrl, headers) {\n return {\n protocol: parsedUrl.protocol,\n auth: parsedUrl.auth,\n host: parsedUrl.hostname,\n port: parsedUrl.port,\n path: parsedUrl.path,\n method: \"GET\",\n headers\n };\n}\nclass PDFNodeStreamFullReader extends BaseFullReader {\n constructor(stream) {\n super(stream);\n const handleResponse = response => {\n if (response.statusCode === 404) {\n const error = new MissingPDFException(`Missing PDF \"${this._url}\".`);\n this._storedError = error;\n this._headersCapability.reject(error);\n return;\n }\n this._headersCapability.resolve();\n this._setReadableStream(response);\n const getResponseHeader = name => this._readableStream.headers[name.toLowerCase()];\n const {\n allowRangeRequests,\n suggestedLength\n } = validateRangeRequestCapabilities({\n getResponseHeader,\n isHttp: stream.isHttp,\n rangeChunkSize: this._rangeChunkSize,\n disableRange: this._disableRange\n });\n this._isRangeSupported = allowRangeRequests;\n this._contentLength = suggestedLength || this._contentLength;\n this._filename = extractFilenameFromHeader(getResponseHeader);\n };\n this._request = null;\n if (this._url.protocol === \"http:\") {\n const http = NodePackages.get(\"http\");\n this._request = http.request(createRequestOptions(this._url, stream.httpHeaders), handleResponse);\n } else {\n const https = NodePackages.get(\"https\");\n this._request = https.request(createRequestOptions(this._url, stream.httpHeaders), handleResponse);\n }\n this._request.on(\"error\", reason => {\n this._storedError = reason;\n this._headersCapability.reject(reason);\n });\n this._request.end();\n }\n}\nclass PDFNodeStreamRangeReader extends BaseRangeReader {\n constructor(stream, start, end) {\n super(stream);\n this._httpHeaders = {};\n for (const property in stream.httpHeaders) {\n const value = stream.httpHeaders[property];\n if (value === undefined) {\n continue;\n }\n this._httpHeaders[property] = value;\n }\n this._httpHeaders.Range = `bytes=${start}-${end - 1}`;\n const handleResponse = response => {\n if (response.statusCode === 404) {\n const error = new MissingPDFException(`Missing PDF \"${this._url}\".`);\n this._storedError = error;\n return;\n }\n this._setReadableStream(response);\n };\n this._request = null;\n if (this._url.protocol === \"http:\") {\n const http = NodePackages.get(\"http\");\n this._request = http.request(createRequestOptions(this._url, this._httpHeaders), handleResponse);\n } else {\n const https = NodePackages.get(\"https\");\n this._request = https.request(createRequestOptions(this._url, this._httpHeaders), handleResponse);\n }\n this._request.on(\"error\", reason => {\n this._storedError = reason;\n });\n this._request.end();\n }\n}\nclass PDFNodeStreamFsFullReader extends BaseFullReader {\n 
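// Editor's note (hedged sketch, not from the upstream source): PDFNodeStream.getFullReader()\n // routes file: URLs to this reader; for an illustrative Windows-style\n // \"file:///C:/docs/example.pdf\" the fileUriRegex above matches and the leading \"/\" is\n // stripped from the decoded path before fs.createReadStream() is called.\n 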
constructor(stream) {\n super(stream);\n let path = decodeURIComponent(this._url.path);\n if (fileUriRegex.test(this._url.href)) {\n path = path.replace(/^\\//, \"\");\n }\n const fs = NodePackages.get(\"fs\");\n fs.promises.lstat(path).then(stat => {\n this._contentLength = stat.size;\n this._setReadableStream(fs.createReadStream(path));\n this._headersCapability.resolve();\n }, error => {\n if (error.code === \"ENOENT\") {\n error = new MissingPDFException(`Missing PDF \"${path}\".`);\n }\n this._storedError = error;\n this._headersCapability.reject(error);\n });\n }\n}\nclass PDFNodeStreamFsRangeReader extends BaseRangeReader {\n constructor(stream, start, end) {\n super(stream);\n let path = decodeURIComponent(this._url.path);\n if (fileUriRegex.test(this._url.href)) {\n path = path.replace(/^\\//, \"\");\n }\n const fs = NodePackages.get(\"fs\");\n this._setReadableStream(fs.createReadStream(path, {\n start,\n end: end - 1\n }));\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/text_layer.js\n\n\nconst MAX_TEXT_DIVS_TO_RENDER = 100000;\nconst DEFAULT_FONT_SIZE = 30;\nconst DEFAULT_FONT_ASCENT = 0.8;\nclass TextLayer {\n #capability = Promise.withResolvers();\n #container = null;\n #disableProcessItems = false;\n #fontInspectorEnabled = !!globalThis.FontInspector?.enabled;\n #lang = null;\n #layoutTextParams = null;\n #pageHeight = 0;\n #pageWidth = 0;\n #reader = null;\n #rootContainer = null;\n #rotation = 0;\n #scale = 0;\n #styleCache = Object.create(null);\n #textContentItemsStr = [];\n #textContentSource = null;\n #textDivs = [];\n #textDivProperties = new WeakMap();\n #transform = null;\n static #ascentCache = new Map();\n static #canvasContexts = new Map();\n static #minFontSize = null;\n static #pendingTextLayers = new Set();\n constructor({\n textContentSource,\n container,\n viewport\n }) {\n if (textContentSource instanceof ReadableStream) {\n this.#textContentSource = textContentSource;\n } else if (typeof textContentSource === \"object\") {\n this.#textContentSource = new ReadableStream({\n start(controller) {\n controller.enqueue(textContentSource);\n controller.close();\n }\n });\n } else {\n throw new Error('No \"textContentSource\" parameter specified.');\n }\n this.#container = this.#rootContainer = container;\n this.#scale = viewport.scale * (globalThis.devicePixelRatio || 1);\n this.#rotation = viewport.rotation;\n this.#layoutTextParams = {\n prevFontSize: null,\n prevFontFamily: null,\n div: null,\n properties: null,\n ctx: null\n };\n const {\n pageWidth,\n pageHeight,\n pageX,\n pageY\n } = viewport.rawDims;\n this.#transform = [1, 0, 0, -1, -pageX, pageY + pageHeight];\n this.#pageWidth = pageWidth;\n this.#pageHeight = pageHeight;\n TextLayer.#ensureMinFontSizeComputed();\n setLayerDimensions(container, viewport);\n this.#capability.promise.catch(() => {}).then(() => {\n TextLayer.#pendingTextLayers.delete(this);\n this.#layoutTextParams = null;\n this.#styleCache = null;\n });\n }\n render() {\n const pump = () => {\n this.#reader.read().then(({\n value,\n done\n }) => {\n if (done) {\n this.#capability.resolve();\n return;\n }\n this.#lang ??= value.lang;\n Object.assign(this.#styleCache, value.styles);\n this.#processItems(value.items);\n pump();\n }, this.#capability.reject);\n };\n this.#reader = this.#textContentSource.getReader();\n TextLayer.#pendingTextLayers.add(this);\n pump();\n return this.#capability.promise;\n }\n update({\n viewport,\n onBefore = null\n }) {\n const scale = viewport.scale * (globalThis.devicePixelRatio || 1);\n const rotation 
= viewport.rotation;\n if (rotation !== this.#rotation) {\n onBefore?.();\n this.#rotation = rotation;\n setLayerDimensions(this.#rootContainer, {\n rotation\n });\n }\n if (scale !== this.#scale) {\n onBefore?.();\n this.#scale = scale;\n const params = {\n prevFontSize: null,\n prevFontFamily: null,\n div: null,\n properties: null,\n ctx: TextLayer.#getCtx(this.#lang)\n };\n for (const div of this.#textDivs) {\n params.properties = this.#textDivProperties.get(div);\n params.div = div;\n this.#layout(params);\n }\n }\n }\n cancel() {\n const abortEx = new AbortException(\"TextLayer task cancelled.\");\n this.#reader?.cancel(abortEx).catch(() => {});\n this.#reader = null;\n this.#capability.reject(abortEx);\n }\n get textDivs() {\n return this.#textDivs;\n }\n get textContentItemsStr() {\n return this.#textContentItemsStr;\n }\n #processItems(items) {\n if (this.#disableProcessItems) {\n return;\n }\n this.#layoutTextParams.ctx ??= TextLayer.#getCtx(this.#lang);\n const textDivs = this.#textDivs,\n textContentItemsStr = this.#textContentItemsStr;\n for (const item of items) {\n if (textDivs.length > MAX_TEXT_DIVS_TO_RENDER) {\n warn(\"Ignoring additional textDivs for performance reasons.\");\n this.#disableProcessItems = true;\n return;\n }\n if (item.str === undefined) {\n if (item.type === \"beginMarkedContentProps\" || item.type === \"beginMarkedContent\") {\n const parent = this.#container;\n this.#container = document.createElement(\"span\");\n this.#container.classList.add(\"markedContent\");\n if (item.id !== null) {\n this.#container.setAttribute(\"id\", `${item.id}`);\n }\n parent.append(this.#container);\n } else if (item.type === \"endMarkedContent\") {\n this.#container = this.#container.parentNode;\n }\n continue;\n }\n textContentItemsStr.push(item.str);\n this.#appendText(item);\n }\n }\n #appendText(geom) {\n const textDiv = document.createElement(\"span\");\n const textDivProperties = {\n angle: 0,\n canvasWidth: 0,\n hasText: geom.str !== \"\",\n hasEOL: geom.hasEOL,\n fontSize: 0\n };\n this.#textDivs.push(textDiv);\n const tx = Util.transform(this.#transform, geom.transform);\n let angle = Math.atan2(tx[1], tx[0]);\n const style = this.#styleCache[geom.fontName];\n if (style.vertical) {\n angle += Math.PI / 2;\n }\n const fontFamily = this.#fontInspectorEnabled && style.fontSubstitution || style.fontFamily;\n const fontHeight = Math.hypot(tx[2], tx[3]);\n const fontAscent = fontHeight * TextLayer.#getAscent(fontFamily, this.#lang);\n let left, top;\n if (angle === 0) {\n left = tx[4];\n top = tx[5] - fontAscent;\n } else {\n left = tx[4] + fontAscent * Math.sin(angle);\n top = tx[5] - fontAscent * Math.cos(angle);\n }\n const scaleFactorStr = \"calc(var(--scale-factor)*\";\n const divStyle = textDiv.style;\n if (this.#container === this.#rootContainer) {\n divStyle.left = `${(100 * left / this.#pageWidth).toFixed(2)}%`;\n divStyle.top = `${(100 * top / this.#pageHeight).toFixed(2)}%`;\n } else {\n divStyle.left = `${scaleFactorStr}${left.toFixed(2)}px)`;\n divStyle.top = `${scaleFactorStr}${top.toFixed(2)}px)`;\n }\n divStyle.fontSize = `${scaleFactorStr}${(TextLayer.#minFontSize * fontHeight).toFixed(2)}px)`;\n divStyle.fontFamily = fontFamily;\n textDivProperties.fontSize = fontHeight;\n textDiv.setAttribute(\"role\", \"presentation\");\n textDiv.textContent = geom.str;\n textDiv.dir = geom.dir;\n if (this.#fontInspectorEnabled) {\n textDiv.dataset.fontName = style.fontSubstitutionLoadedName || geom.fontName;\n }\n if (angle !== 0) {\n textDivProperties.angle = angle 
* (180 / Math.PI);\n }\n let shouldScaleText = false;\n if (geom.str.length > 1) {\n shouldScaleText = true;\n } else if (geom.str !== \" \" && geom.transform[0] !== geom.transform[3]) {\n const absScaleX = Math.abs(geom.transform[0]),\n absScaleY = Math.abs(geom.transform[3]);\n if (absScaleX !== absScaleY && Math.max(absScaleX, absScaleY) / Math.min(absScaleX, absScaleY) > 1.5) {\n shouldScaleText = true;\n }\n }\n if (shouldScaleText) {\n textDivProperties.canvasWidth = style.vertical ? geom.height : geom.width;\n }\n this.#textDivProperties.set(textDiv, textDivProperties);\n this.#layoutTextParams.div = textDiv;\n this.#layoutTextParams.properties = textDivProperties;\n this.#layout(this.#layoutTextParams);\n if (textDivProperties.hasText) {\n this.#container.append(textDiv);\n }\n if (textDivProperties.hasEOL) {\n const br = document.createElement(\"br\");\n br.setAttribute(\"role\", \"presentation\");\n this.#container.append(br);\n }\n }\n #layout(params) {\n const {\n div,\n properties,\n ctx,\n prevFontSize,\n prevFontFamily\n } = params;\n const {\n style\n } = div;\n let transform = \"\";\n if (TextLayer.#minFontSize > 1) {\n transform = `scale(${1 / TextLayer.#minFontSize})`;\n }\n if (properties.canvasWidth !== 0 && properties.hasText) {\n const {\n fontFamily\n } = style;\n const {\n canvasWidth,\n fontSize\n } = properties;\n if (prevFontSize !== fontSize || prevFontFamily !== fontFamily) {\n ctx.font = `${fontSize * this.#scale}px ${fontFamily}`;\n params.prevFontSize = fontSize;\n params.prevFontFamily = fontFamily;\n }\n const {\n width\n } = ctx.measureText(div.textContent);\n if (width > 0) {\n transform = `scaleX(${canvasWidth * this.#scale / width}) ${transform}`;\n }\n }\n if (properties.angle !== 0) {\n transform = `rotate(${properties.angle}deg) ${transform}`;\n }\n if (transform.length > 0) {\n style.transform = transform;\n }\n }\n static cleanup() {\n if (this.#pendingTextLayers.size > 0) {\n return;\n }\n this.#ascentCache.clear();\n for (const {\n canvas\n } of this.#canvasContexts.values()) {\n canvas.remove();\n }\n this.#canvasContexts.clear();\n }\n static #getCtx(lang = null) {\n let canvasContext = this.#canvasContexts.get(lang ||= \"\");\n if (!canvasContext) {\n const canvas = document.createElement(\"canvas\");\n canvas.className = \"hiddenCanvasElement\";\n canvas.lang = lang;\n document.body.append(canvas);\n canvasContext = canvas.getContext(\"2d\", {\n alpha: false,\n willReadFrequently: true\n });\n this.#canvasContexts.set(lang, canvasContext);\n }\n return canvasContext;\n }\n static #ensureMinFontSizeComputed() {\n if (this.#minFontSize !== null) {\n return;\n }\n const div = document.createElement(\"div\");\n div.style.opacity = 0;\n div.style.lineHeight = 1;\n div.style.fontSize = \"1px\";\n div.textContent = \"X\";\n document.body.append(div);\n this.#minFontSize = div.getBoundingClientRect().height;\n div.remove();\n }\n static #getAscent(fontFamily, lang) {\n const cachedAscent = this.#ascentCache.get(fontFamily);\n if (cachedAscent) {\n return cachedAscent;\n }\n const ctx = this.#getCtx(lang);\n const savedFont = ctx.font;\n ctx.canvas.width = ctx.canvas.height = DEFAULT_FONT_SIZE;\n ctx.font = `${DEFAULT_FONT_SIZE}px ${fontFamily}`;\n const metrics = ctx.measureText(\"\");\n let ascent = metrics.fontBoundingBoxAscent;\n let descent = Math.abs(metrics.fontBoundingBoxDescent);\n if (ascent) {\n const ratio = ascent / (ascent + descent);\n this.#ascentCache.set(fontFamily, ratio);\n ctx.canvas.width = ctx.canvas.height = 0;\n ctx.font = 
savedFont;\n return ratio;\n }\n ctx.strokeStyle = \"red\";\n ctx.clearRect(0, 0, DEFAULT_FONT_SIZE, DEFAULT_FONT_SIZE);\n ctx.strokeText(\"g\", 0, 0);\n let pixels = ctx.getImageData(0, 0, DEFAULT_FONT_SIZE, DEFAULT_FONT_SIZE).data;\n descent = 0;\n for (let i = pixels.length - 1 - 3; i >= 0; i -= 4) {\n if (pixels[i] > 0) {\n descent = Math.ceil(i / 4 / DEFAULT_FONT_SIZE);\n break;\n }\n }\n ctx.clearRect(0, 0, DEFAULT_FONT_SIZE, DEFAULT_FONT_SIZE);\n ctx.strokeText(\"A\", 0, DEFAULT_FONT_SIZE);\n pixels = ctx.getImageData(0, 0, DEFAULT_FONT_SIZE, DEFAULT_FONT_SIZE).data;\n ascent = 0;\n for (let i = 0, ii = pixels.length; i < ii; i += 4) {\n if (pixels[i] > 0) {\n ascent = DEFAULT_FONT_SIZE - Math.floor(i / 4 / DEFAULT_FONT_SIZE);\n break;\n }\n }\n ctx.canvas.width = ctx.canvas.height = 0;\n ctx.font = savedFont;\n const ratio = ascent ? ascent / (ascent + descent) : DEFAULT_FONT_ASCENT;\n this.#ascentCache.set(fontFamily, ratio);\n return ratio;\n }\n}\nfunction renderTextLayer() {\n deprecated(\"`renderTextLayer`, please use `TextLayer` instead.\");\n const {\n textContentSource,\n container,\n viewport,\n ...rest\n } = arguments[0];\n const restKeys = Object.keys(rest);\n if (restKeys.length > 0) {\n warn(\"Ignoring `renderTextLayer` parameters: \" + restKeys.join(\", \"));\n }\n const textLayer = new TextLayer({\n textContentSource,\n container,\n viewport\n });\n const {\n textDivs,\n textContentItemsStr\n } = textLayer;\n const promise = textLayer.render();\n return {\n promise,\n textDivs,\n textContentItemsStr\n };\n}\nfunction updateTextLayer() {\n deprecated(\"`updateTextLayer`, please use `TextLayer` instead.\");\n}\n\n;// CONCATENATED MODULE: ./src/display/xfa_text.js\nclass XfaText {\n static textContent(xfa) {\n const items = [];\n const output = {\n items,\n styles: Object.create(null)\n };\n function walk(node) {\n if (!node) {\n return;\n }\n let str = null;\n const name = node.name;\n if (name === \"#text\") {\n str = node.value;\n } else if (!XfaText.shouldBuildText(name)) {\n return;\n } else if (node?.attributes?.textContent) {\n str = node.attributes.textContent;\n } else if (node.value) {\n str = node.value;\n }\n if (str !== null) {\n items.push({\n str\n });\n }\n if (!node.children) {\n return;\n }\n for (const child of node.children) {\n walk(child);\n }\n }\n walk(xfa);\n return output;\n }\n static shouldBuildText(name) {\n return !(name === \"textarea\" || name === \"input\" || name === \"option\" || name === \"select\");\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/api.js\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nconst DEFAULT_RANGE_CHUNK_SIZE = 65536;\nconst RENDERING_CANCELLED_TIMEOUT = 100;\nconst DELAYED_CLEANUP_TIMEOUT = 5000;\nconst DefaultCanvasFactory = isNodeJS ? NodeCanvasFactory : DOMCanvasFactory;\nconst DefaultCMapReaderFactory = isNodeJS ? NodeCMapReaderFactory : DOMCMapReaderFactory;\nconst DefaultFilterFactory = isNodeJS ? NodeFilterFactory : DOMFilterFactory;\nconst DefaultStandardFontDataFactory = isNodeJS ? NodeStandardFontDataFactory : DOMStandardFontDataFactory;\nfunction getDocument(src = {}) {\n if (typeof src === \"string\" || src instanceof URL) {\n src = {\n url: src\n };\n } else if (src instanceof ArrayBuffer || ArrayBuffer.isView(src)) {\n src = {\n data: src\n };\n }\n const task = new PDFDocumentLoadingTask();\n const {\n docId\n } = task;\n const url = src.url ? getUrlProp(src.url) : null;\n const data = src.data ? 
getDataProp(src.data) : null;\n const httpHeaders = src.httpHeaders || null;\n const withCredentials = src.withCredentials === true;\n const password = src.password ?? null;\n const rangeTransport = src.range instanceof PDFDataRangeTransport ? src.range : null;\n const rangeChunkSize = Number.isInteger(src.rangeChunkSize) && src.rangeChunkSize > 0 ? src.rangeChunkSize : DEFAULT_RANGE_CHUNK_SIZE;\n let worker = src.worker instanceof PDFWorker ? src.worker : null;\n const verbosity = src.verbosity;\n const docBaseUrl = typeof src.docBaseUrl === \"string\" && !isDataScheme(src.docBaseUrl) ? src.docBaseUrl : null;\n const cMapUrl = typeof src.cMapUrl === \"string\" ? src.cMapUrl : null;\n const cMapPacked = src.cMapPacked !== false;\n const CMapReaderFactory = src.CMapReaderFactory || DefaultCMapReaderFactory;\n const standardFontDataUrl = typeof src.standardFontDataUrl === \"string\" ? src.standardFontDataUrl : null;\n const StandardFontDataFactory = src.StandardFontDataFactory || DefaultStandardFontDataFactory;\n const ignoreErrors = src.stopAtErrors !== true;\n const maxImageSize = Number.isInteger(src.maxImageSize) && src.maxImageSize > -1 ? src.maxImageSize : -1;\n const isEvalSupported = src.isEvalSupported !== false;\n const isOffscreenCanvasSupported = typeof src.isOffscreenCanvasSupported === \"boolean\" ? src.isOffscreenCanvasSupported : !isNodeJS;\n const canvasMaxAreaInBytes = Number.isInteger(src.canvasMaxAreaInBytes) ? src.canvasMaxAreaInBytes : -1;\n const disableFontFace = typeof src.disableFontFace === \"boolean\" ? src.disableFontFace : isNodeJS;\n const fontExtraProperties = src.fontExtraProperties === true;\n const enableXfa = src.enableXfa === true;\n const ownerDocument = src.ownerDocument || globalThis.document;\n const disableRange = src.disableRange === true;\n const disableStream = src.disableStream === true;\n const disableAutoFetch = src.disableAutoFetch === true;\n const pdfBug = src.pdfBug === true;\n const enableHWA = src.enableHWA === true;\n const length = rangeTransport ? rangeTransport.length : src.length ?? NaN;\n const useSystemFonts = typeof src.useSystemFonts === \"boolean\" ? src.useSystemFonts : !isNodeJS && !disableFontFace;\n const useWorkerFetch = typeof src.useWorkerFetch === \"boolean\" ? src.useWorkerFetch : CMapReaderFactory === DOMCMapReaderFactory && StandardFontDataFactory === DOMStandardFontDataFactory && cMapUrl && standardFontDataUrl && isValidFetchUrl(cMapUrl, document.baseURI) && isValidFetchUrl(standardFontDataUrl, document.baseURI);\n const canvasFactory = src.canvasFactory || new DefaultCanvasFactory({\n ownerDocument,\n enableHWA\n });\n const filterFactory = src.filterFactory || new DefaultFilterFactory({\n docId,\n ownerDocument\n });\n const styleElement = null;\n setVerbosityLevel(verbosity);\n const transportFactory = {\n canvasFactory,\n filterFactory\n };\n if (!useWorkerFetch) {\n transportFactory.cMapReaderFactory = new CMapReaderFactory({\n baseUrl: cMapUrl,\n isCompressed: cMapPacked\n });\n transportFactory.standardFontDataFactory = new StandardFontDataFactory({\n baseUrl: standardFontDataUrl\n });\n }\n if (!worker) {\n const workerParams = {\n verbosity,\n port: GlobalWorkerOptions.workerPort\n };\n worker = workerParams.port ? 
PDFWorker.fromPort(workerParams) : new PDFWorker(workerParams);\n task._worker = worker;\n }\n const docParams = {\n docId,\n apiVersion: \"4.4.168\",\n data,\n password,\n disableAutoFetch,\n rangeChunkSize,\n length,\n docBaseUrl,\n enableXfa,\n evaluatorOptions: {\n maxImageSize,\n disableFontFace,\n ignoreErrors,\n isEvalSupported,\n isOffscreenCanvasSupported,\n canvasMaxAreaInBytes,\n fontExtraProperties,\n useSystemFonts,\n cMapUrl: useWorkerFetch ? cMapUrl : null,\n standardFontDataUrl: useWorkerFetch ? standardFontDataUrl : null\n }\n };\n const transportParams = {\n disableFontFace,\n fontExtraProperties,\n ownerDocument,\n pdfBug,\n styleElement,\n loadingParams: {\n disableAutoFetch,\n enableXfa\n }\n };\n worker.promise.then(function () {\n if (task.destroyed) {\n throw new Error(\"Loading aborted\");\n }\n if (worker.destroyed) {\n throw new Error(\"Worker was destroyed\");\n }\n const workerIdPromise = worker.messageHandler.sendWithPromise(\"GetDocRequest\", docParams, data ? [data.buffer] : null);\n let networkStream;\n if (rangeTransport) {\n networkStream = new PDFDataTransportStream(rangeTransport, {\n disableRange,\n disableStream\n });\n } else if (!data) {\n if (!url) {\n throw new Error(\"getDocument - no `url` parameter provided.\");\n }\n const createPDFNetworkStream = params => {\n if (isNodeJS) {\n const isFetchSupported = function () {\n return typeof fetch !== \"undefined\" && typeof Response !== \"undefined\" && \"body\" in Response.prototype;\n };\n return isFetchSupported() && isValidFetchUrl(params.url) ? new PDFFetchStream(params) : new PDFNodeStream(params);\n }\n return isValidFetchUrl(params.url) ? new PDFFetchStream(params) : new PDFNetworkStream(params);\n };\n networkStream = createPDFNetworkStream({\n url,\n length,\n httpHeaders,\n withCredentials,\n rangeChunkSize,\n disableRange,\n disableStream\n });\n }\n return workerIdPromise.then(workerId => {\n if (task.destroyed) {\n throw new Error(\"Loading aborted\");\n }\n if (worker.destroyed) {\n throw new Error(\"Worker was destroyed\");\n }\n const messageHandler = new MessageHandler(docId, workerId, worker.port);\n const transport = new WorkerTransport(messageHandler, task, networkStream, transportParams, transportFactory);\n task._transport = transport;\n messageHandler.send(\"Ready\", null);\n });\n }).catch(task._capability.reject);\n return task;\n}\nfunction getUrlProp(val) {\n if (val instanceof URL) {\n return val.href;\n }\n try {\n return new URL(val, window.location).href;\n } catch {\n if (isNodeJS && typeof val === \"string\") {\n return val;\n }\n }\n throw new Error(\"Invalid PDF url data: \" + \"either string or URL-object is expected in the url property.\");\n}\nfunction getDataProp(val) {\n if (isNodeJS && typeof Buffer !== \"undefined\" && val instanceof Buffer) {\n throw new Error(\"Please provide binary data as `Uint8Array`, rather than `Buffer`.\");\n }\n if (val instanceof Uint8Array && val.byteLength === val.buffer.byteLength) {\n return val;\n }\n if (typeof val === \"string\") {\n return stringToBytes(val);\n }\n if (val instanceof ArrayBuffer || ArrayBuffer.isView(val) || typeof val === \"object\" && !isNaN(val?.length)) {\n return new Uint8Array(val);\n }\n throw new Error(\"Invalid PDF binary data: either TypedArray, \" + \"string, or array-like object is expected in the data property.\");\n}\nfunction isRefProxy(ref) {\n return typeof ref === \"object\" && Number.isInteger(ref?.num) && ref.num >= 0 && Number.isInteger(ref?.gen) && ref.gen >= 0;\n}\nclass 
PDFDocumentLoadingTask {\n static #docId = 0;\n constructor() {\n this._capability = Promise.withResolvers();\n this._transport = null;\n this._worker = null;\n this.docId = `d${PDFDocumentLoadingTask.#docId++}`;\n this.destroyed = false;\n this.onPassword = null;\n this.onProgress = null;\n }\n get promise() {\n return this._capability.promise;\n }\n async destroy() {\n this.destroyed = true;\n try {\n if (this._worker?.port) {\n this._worker._pendingDestroy = true;\n }\n await this._transport?.destroy();\n } catch (ex) {\n if (this._worker?.port) {\n delete this._worker._pendingDestroy;\n }\n throw ex;\n }\n this._transport = null;\n if (this._worker) {\n this._worker.destroy();\n this._worker = null;\n }\n }\n}\nclass PDFDataRangeTransport {\n constructor(length, initialData, progressiveDone = false, contentDispositionFilename = null) {\n this.length = length;\n this.initialData = initialData;\n this.progressiveDone = progressiveDone;\n this.contentDispositionFilename = contentDispositionFilename;\n this._rangeListeners = [];\n this._progressListeners = [];\n this._progressiveReadListeners = [];\n this._progressiveDoneListeners = [];\n this._readyCapability = Promise.withResolvers();\n }\n addRangeListener(listener) {\n this._rangeListeners.push(listener);\n }\n addProgressListener(listener) {\n this._progressListeners.push(listener);\n }\n addProgressiveReadListener(listener) {\n this._progressiveReadListeners.push(listener);\n }\n addProgressiveDoneListener(listener) {\n this._progressiveDoneListeners.push(listener);\n }\n onDataRange(begin, chunk) {\n for (const listener of this._rangeListeners) {\n listener(begin, chunk);\n }\n }\n onDataProgress(loaded, total) {\n this._readyCapability.promise.then(() => {\n for (const listener of this._progressListeners) {\n listener(loaded, total);\n }\n });\n }\n onDataProgressiveRead(chunk) {\n this._readyCapability.promise.then(() => {\n for (const listener of this._progressiveReadListeners) {\n listener(chunk);\n }\n });\n }\n onDataProgressiveDone() {\n this._readyCapability.promise.then(() => {\n for (const listener of this._progressiveDoneListeners) {\n listener();\n }\n });\n }\n transportReady() {\n this._readyCapability.resolve();\n }\n requestDataRange(begin, end) {\n unreachable(\"Abstract method PDFDataRangeTransport.requestDataRange\");\n }\n abort() {}\n}\nclass PDFDocumentProxy {\n constructor(pdfInfo, transport) {\n this._pdfInfo = pdfInfo;\n this._transport = transport;\n }\n get annotationStorage() {\n return this._transport.annotationStorage;\n }\n get filterFactory() {\n return this._transport.filterFactory;\n }\n get numPages() {\n return this._pdfInfo.numPages;\n }\n get fingerprints() {\n return this._pdfInfo.fingerprints;\n }\n get isPureXfa() {\n return shadow(this, \"isPureXfa\", !!this._transport._htmlForXfa);\n }\n get allXfaHtml() {\n return this._transport._htmlForXfa;\n }\n getPage(pageNumber) {\n return this._transport.getPage(pageNumber);\n }\n getPageIndex(ref) {\n return this._transport.getPageIndex(ref);\n }\n getDestinations() {\n return this._transport.getDestinations();\n }\n getDestination(id) {\n return this._transport.getDestination(id);\n }\n getPageLabels() {\n return this._transport.getPageLabels();\n }\n getPageLayout() {\n return this._transport.getPageLayout();\n }\n getPageMode() {\n return this._transport.getPageMode();\n }\n getViewerPreferences() {\n return this._transport.getViewerPreferences();\n }\n getOpenAction() {\n return this._transport.getOpenAction();\n }\n getAttachments() {\n return 
this._transport.getAttachments();\n }\n getJSActions() {\n return this._transport.getDocJSActions();\n }\n getOutline() {\n return this._transport.getOutline();\n }\n getOptionalContentConfig({\n intent = \"display\"\n } = {}) {\n const {\n renderingIntent\n } = this._transport.getRenderingIntent(intent);\n return this._transport.getOptionalContentConfig(renderingIntent);\n }\n getPermissions() {\n return this._transport.getPermissions();\n }\n getMetadata() {\n return this._transport.getMetadata();\n }\n getMarkInfo() {\n return this._transport.getMarkInfo();\n }\n getData() {\n return this._transport.getData();\n }\n saveDocument() {\n return this._transport.saveDocument();\n }\n getDownloadInfo() {\n return this._transport.downloadInfoCapability.promise;\n }\n cleanup(keepLoadedFonts = false) {\n return this._transport.startCleanup(keepLoadedFonts || this.isPureXfa);\n }\n destroy() {\n return this.loadingTask.destroy();\n }\n cachedPageNumber(ref) {\n return this._transport.cachedPageNumber(ref);\n }\n get loadingParams() {\n return this._transport.loadingParams;\n }\n get loadingTask() {\n return this._transport.loadingTask;\n }\n getFieldObjects() {\n return this._transport.getFieldObjects();\n }\n hasJSActions() {\n return this._transport.hasJSActions();\n }\n getCalculationOrderIds() {\n return this._transport.getCalculationOrderIds();\n }\n}\nclass PDFPageProxy {\n #delayedCleanupTimeout = null;\n #pendingCleanup = false;\n constructor(pageIndex, pageInfo, transport, pdfBug = false) {\n this._pageIndex = pageIndex;\n this._pageInfo = pageInfo;\n this._transport = transport;\n this._stats = pdfBug ? new StatTimer() : null;\n this._pdfBug = pdfBug;\n this.commonObjs = transport.commonObjs;\n this.objs = new PDFObjects();\n this._maybeCleanupAfterRender = false;\n this._intentStates = new Map();\n this.destroyed = false;\n }\n get pageNumber() {\n return this._pageIndex + 1;\n }\n get rotate() {\n return this._pageInfo.rotate;\n }\n get ref() {\n return this._pageInfo.ref;\n }\n get userUnit() {\n return this._pageInfo.userUnit;\n }\n get view() {\n return this._pageInfo.view;\n }\n getViewport({\n scale,\n rotation = this.rotate,\n offsetX = 0,\n offsetY = 0,\n dontFlip = false\n } = {}) {\n return new PageViewport({\n viewBox: this.view,\n scale,\n rotation,\n offsetX,\n offsetY,\n dontFlip\n });\n }\n getAnnotations({\n intent = \"display\"\n } = {}) {\n const {\n renderingIntent\n } = this._transport.getRenderingIntent(intent);\n return this._transport.getAnnotations(this._pageIndex, renderingIntent);\n }\n getJSActions() {\n return this._transport.getPageJSActions(this._pageIndex);\n }\n get filterFactory() {\n return this._transport.filterFactory;\n }\n get isPureXfa() {\n return shadow(this, \"isPureXfa\", !!this._transport._htmlForXfa);\n }\n async getXfa() {\n return this._transport._htmlForXfa?.children[this._pageIndex] || null;\n }\n render({\n canvasContext,\n viewport,\n intent = \"display\",\n annotationMode = AnnotationMode.ENABLE,\n transform = null,\n background = null,\n optionalContentConfigPromise = null,\n annotationCanvasMap = null,\n pageColors = null,\n printAnnotationStorage = null\n }) {\n this._stats?.time(\"Overall\");\n const intentArgs = this._transport.getRenderingIntent(intent, annotationMode, printAnnotationStorage);\n const {\n renderingIntent,\n cacheKey\n } = intentArgs;\n this.#pendingCleanup = false;\n this.#abortDelayedCleanup();\n optionalContentConfigPromise ||= this._transport.getOptionalContentConfig(renderingIntent);\n let intentState = 
this._intentStates.get(cacheKey);\n if (!intentState) {\n intentState = Object.create(null);\n this._intentStates.set(cacheKey, intentState);\n }\n if (intentState.streamReaderCancelTimeout) {\n clearTimeout(intentState.streamReaderCancelTimeout);\n intentState.streamReaderCancelTimeout = null;\n }\n const intentPrint = !!(renderingIntent & RenderingIntentFlag.PRINT);\n if (!intentState.displayReadyCapability) {\n intentState.displayReadyCapability = Promise.withResolvers();\n intentState.operatorList = {\n fnArray: [],\n argsArray: [],\n lastChunk: false,\n separateAnnots: null\n };\n this._stats?.time(\"Page Request\");\n this._pumpOperatorList(intentArgs);\n }\n const complete = error => {\n intentState.renderTasks.delete(internalRenderTask);\n if (this._maybeCleanupAfterRender || intentPrint) {\n this.#pendingCleanup = true;\n }\n this.#tryCleanup(!intentPrint);\n if (error) {\n internalRenderTask.capability.reject(error);\n this._abortOperatorList({\n intentState,\n reason: error instanceof Error ? error : new Error(error)\n });\n } else {\n internalRenderTask.capability.resolve();\n }\n if (this._stats) {\n this._stats.timeEnd(\"Rendering\");\n this._stats.timeEnd(\"Overall\");\n if (globalThis.Stats?.enabled) {\n globalThis.Stats.add(this.pageNumber, this._stats);\n }\n }\n };\n const internalRenderTask = new InternalRenderTask({\n callback: complete,\n params: {\n canvasContext,\n viewport,\n transform,\n background\n },\n objs: this.objs,\n commonObjs: this.commonObjs,\n annotationCanvasMap,\n operatorList: intentState.operatorList,\n pageIndex: this._pageIndex,\n canvasFactory: this._transport.canvasFactory,\n filterFactory: this._transport.filterFactory,\n useRequestAnimationFrame: !intentPrint,\n pdfBug: this._pdfBug,\n pageColors\n });\n (intentState.renderTasks ||= new Set()).add(internalRenderTask);\n const renderTask = internalRenderTask.task;\n Promise.all([intentState.displayReadyCapability.promise, optionalContentConfigPromise]).then(([transparency, optionalContentConfig]) => {\n if (this.destroyed) {\n complete();\n return;\n }\n this._stats?.time(\"Rendering\");\n if (!(optionalContentConfig.renderingIntent & renderingIntent)) {\n throw new Error(\"Must use the same `intent`-argument when calling the `PDFPageProxy.render` \" + \"and `PDFDocumentProxy.getOptionalContentConfig` methods.\");\n }\n internalRenderTask.initializeGraphics({\n transparency,\n optionalContentConfig\n });\n internalRenderTask.operatorListChanged();\n }).catch(complete);\n return renderTask;\n }\n getOperatorList({\n intent = \"display\",\n annotationMode = AnnotationMode.ENABLE,\n printAnnotationStorage = null\n } = {}) {\n function operatorListChanged() {\n if (intentState.operatorList.lastChunk) {\n intentState.opListReadCapability.resolve(intentState.operatorList);\n intentState.renderTasks.delete(opListTask);\n }\n }\n const intentArgs = this._transport.getRenderingIntent(intent, annotationMode, printAnnotationStorage, true);\n let intentState = this._intentStates.get(intentArgs.cacheKey);\n if (!intentState) {\n intentState = Object.create(null);\n this._intentStates.set(intentArgs.cacheKey, intentState);\n }\n let opListTask;\n if (!intentState.opListReadCapability) {\n opListTask = Object.create(null);\n opListTask.operatorListChanged = operatorListChanged;\n intentState.opListReadCapability = Promise.withResolvers();\n (intentState.renderTasks ||= new Set()).add(opListTask);\n intentState.operatorList = {\n fnArray: [],\n argsArray: [],\n lastChunk: false,\n separateAnnots: null\n };\n 
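/* Descriptive note: the operator list is streamed from the worker by _pumpOperatorList below; operatorListChanged() (defined at the top of getOperatorList) resolves opListReadCapability once the final chunk arrives with lastChunk set. */\n      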
this._stats?.time(\"Page Request\");\n this._pumpOperatorList(intentArgs);\n }\n return intentState.opListReadCapability.promise;\n }\n streamTextContent({\n includeMarkedContent = false,\n disableNormalization = false\n } = {}) {\n const TEXT_CONTENT_CHUNK_SIZE = 100;\n return this._transport.messageHandler.sendWithStream(\"GetTextContent\", {\n pageIndex: this._pageIndex,\n includeMarkedContent: includeMarkedContent === true,\n disableNormalization: disableNormalization === true\n }, {\n highWaterMark: TEXT_CONTENT_CHUNK_SIZE,\n size(textContent) {\n return textContent.items.length;\n }\n });\n }\n getTextContent(params = {}) {\n if (this._transport._htmlForXfa) {\n return this.getXfa().then(xfa => XfaText.textContent(xfa));\n }\n const readableStream = this.streamTextContent(params);\n return new Promise(function (resolve, reject) {\n function pump() {\n reader.read().then(function ({\n value,\n done\n }) {\n if (done) {\n resolve(textContent);\n return;\n }\n textContent.lang ??= value.lang;\n Object.assign(textContent.styles, value.styles);\n textContent.items.push(...value.items);\n pump();\n }, reject);\n }\n const reader = readableStream.getReader();\n const textContent = {\n items: [],\n styles: Object.create(null),\n lang: null\n };\n pump();\n });\n }\n getStructTree() {\n return this._transport.getStructTree(this._pageIndex);\n }\n _destroy() {\n this.destroyed = true;\n const waitOn = [];\n for (const intentState of this._intentStates.values()) {\n this._abortOperatorList({\n intentState,\n reason: new Error(\"Page was destroyed.\"),\n force: true\n });\n if (intentState.opListReadCapability) {\n continue;\n }\n for (const internalRenderTask of intentState.renderTasks) {\n waitOn.push(internalRenderTask.completed);\n internalRenderTask.cancel();\n }\n }\n this.objs.clear();\n this.#pendingCleanup = false;\n this.#abortDelayedCleanup();\n return Promise.all(waitOn);\n }\n cleanup(resetStats = false) {\n this.#pendingCleanup = true;\n const success = this.#tryCleanup(false);\n if (resetStats && success) {\n this._stats &&= new StatTimer();\n }\n return success;\n }\n #tryCleanup(delayed = false) {\n this.#abortDelayedCleanup();\n if (!this.#pendingCleanup || this.destroyed) {\n return false;\n }\n if (delayed) {\n this.#delayedCleanupTimeout = setTimeout(() => {\n this.#delayedCleanupTimeout = null;\n this.#tryCleanup(false);\n }, DELAYED_CLEANUP_TIMEOUT);\n return false;\n }\n for (const {\n renderTasks,\n operatorList\n } of this._intentStates.values()) {\n if (renderTasks.size > 0 || !operatorList.lastChunk) {\n return false;\n }\n }\n this._intentStates.clear();\n this.objs.clear();\n this.#pendingCleanup = false;\n return true;\n }\n #abortDelayedCleanup() {\n if (this.#delayedCleanupTimeout) {\n clearTimeout(this.#delayedCleanupTimeout);\n this.#delayedCleanupTimeout = null;\n }\n }\n _startRenderPage(transparency, cacheKey) {\n const intentState = this._intentStates.get(cacheKey);\n if (!intentState) {\n return;\n }\n this._stats?.timeEnd(\"Page Request\");\n intentState.displayReadyCapability?.resolve(transparency);\n }\n _renderPageChunk(operatorListChunk, intentState) {\n for (let i = 0, ii = operatorListChunk.length; i < ii; i++) {\n intentState.operatorList.fnArray.push(operatorListChunk.fnArray[i]);\n intentState.operatorList.argsArray.push(operatorListChunk.argsArray[i]);\n }\n intentState.operatorList.lastChunk = operatorListChunk.lastChunk;\n intentState.operatorList.separateAnnots = operatorListChunk.separateAnnots;\n for (const internalRenderTask of 
intentState.renderTasks) {\n internalRenderTask.operatorListChanged();\n }\n if (operatorListChunk.lastChunk) {\n this.#tryCleanup(true);\n }\n }\n _pumpOperatorList({\n renderingIntent,\n cacheKey,\n annotationStorageSerializable\n }) {\n const {\n map,\n transfer\n } = annotationStorageSerializable;\n const readableStream = this._transport.messageHandler.sendWithStream(\"GetOperatorList\", {\n pageIndex: this._pageIndex,\n intent: renderingIntent,\n cacheKey,\n annotationStorage: map\n }, transfer);\n const reader = readableStream.getReader();\n const intentState = this._intentStates.get(cacheKey);\n intentState.streamReader = reader;\n const pump = () => {\n reader.read().then(({\n value,\n done\n }) => {\n if (done) {\n intentState.streamReader = null;\n return;\n }\n if (this._transport.destroyed) {\n return;\n }\n this._renderPageChunk(value, intentState);\n pump();\n }, reason => {\n intentState.streamReader = null;\n if (this._transport.destroyed) {\n return;\n }\n if (intentState.operatorList) {\n intentState.operatorList.lastChunk = true;\n for (const internalRenderTask of intentState.renderTasks) {\n internalRenderTask.operatorListChanged();\n }\n this.#tryCleanup(true);\n }\n if (intentState.displayReadyCapability) {\n intentState.displayReadyCapability.reject(reason);\n } else if (intentState.opListReadCapability) {\n intentState.opListReadCapability.reject(reason);\n } else {\n throw reason;\n }\n });\n };\n pump();\n }\n _abortOperatorList({\n intentState,\n reason,\n force = false\n }) {\n if (!intentState.streamReader) {\n return;\n }\n if (intentState.streamReaderCancelTimeout) {\n clearTimeout(intentState.streamReaderCancelTimeout);\n intentState.streamReaderCancelTimeout = null;\n }\n if (!force) {\n if (intentState.renderTasks.size > 0) {\n return;\n }\n if (reason instanceof RenderingCancelledException) {\n let delay = RENDERING_CANCELLED_TIMEOUT;\n if (reason.extraDelay > 0 && reason.extraDelay < 1000) {\n delay += reason.extraDelay;\n }\n intentState.streamReaderCancelTimeout = setTimeout(() => {\n intentState.streamReaderCancelTimeout = null;\n this._abortOperatorList({\n intentState,\n reason,\n force: true\n });\n }, delay);\n return;\n }\n }\n intentState.streamReader.cancel(new AbortException(reason.message)).catch(() => {});\n intentState.streamReader = null;\n if (this._transport.destroyed) {\n return;\n }\n for (const [curCacheKey, curIntentState] of this._intentStates) {\n if (curIntentState === intentState) {\n this._intentStates.delete(curCacheKey);\n break;\n }\n }\n this.cleanup();\n }\n get stats() {\n return this._stats;\n }\n}\nclass LoopbackPort {\n #listeners = new Set();\n #deferred = Promise.resolve();\n postMessage(obj, transfer) {\n const event = {\n data: structuredClone(obj, transfer ? 
{\n transfer\n } : null)\n };\n this.#deferred.then(() => {\n for (const listener of this.#listeners) {\n listener.call(this, event);\n }\n });\n }\n addEventListener(name, listener) {\n this.#listeners.add(listener);\n }\n removeEventListener(name, listener) {\n this.#listeners.delete(listener);\n }\n terminate() {\n this.#listeners.clear();\n }\n}\nconst PDFWorkerUtil = {\n isWorkerDisabled: false,\n fakeWorkerId: 0\n};\n{\n if (isNodeJS) {\n PDFWorkerUtil.isWorkerDisabled = true;\n GlobalWorkerOptions.workerSrc ||= \"./pdf.worker.mjs\";\n }\n PDFWorkerUtil.isSameOrigin = function (baseUrl, otherUrl) {\n let base;\n try {\n base = new URL(baseUrl);\n if (!base.origin || base.origin === \"null\") {\n return false;\n }\n } catch {\n return false;\n }\n const other = new URL(otherUrl, base);\n return base.origin === other.origin;\n };\n PDFWorkerUtil.createCDNWrapper = function (url) {\n const wrapper = `await import(\"${url}\");`;\n return URL.createObjectURL(new Blob([wrapper], {\n type: \"text/javascript\"\n }));\n };\n}\nclass PDFWorker {\n static #workerPorts;\n constructor({\n name = null,\n port = null,\n verbosity = getVerbosityLevel()\n } = {}) {\n this.name = name;\n this.destroyed = false;\n this.verbosity = verbosity;\n this._readyCapability = Promise.withResolvers();\n this._port = null;\n this._webWorker = null;\n this._messageHandler = null;\n if (port) {\n if (PDFWorker.#workerPorts?.has(port)) {\n throw new Error(\"Cannot use more than one PDFWorker per port.\");\n }\n (PDFWorker.#workerPorts ||= new WeakMap()).set(port, this);\n this._initializeFromPort(port);\n return;\n }\n this._initialize();\n }\n get promise() {\n if (isNodeJS) {\n return Promise.all([NodePackages.promise, this._readyCapability.promise]);\n }\n return this._readyCapability.promise;\n }\n #resolve() {\n this._readyCapability.resolve();\n this._messageHandler.send(\"configure\", {\n verbosity: this.verbosity\n });\n }\n get port() {\n return this._port;\n }\n get messageHandler() {\n return this._messageHandler;\n }\n _initializeFromPort(port) {\n this._port = port;\n this._messageHandler = new MessageHandler(\"main\", \"worker\", port);\n this._messageHandler.on(\"ready\", function () {});\n this.#resolve();\n }\n _initialize() {\n if (PDFWorkerUtil.isWorkerDisabled || PDFWorker.#mainThreadWorkerMessageHandler) {\n this._setupFakeWorker();\n return;\n }\n let {\n workerSrc\n } = PDFWorker;\n try {\n if (!PDFWorkerUtil.isSameOrigin(window.location.href, workerSrc)) {\n workerSrc = PDFWorkerUtil.createCDNWrapper(new URL(workerSrc, window.location).href);\n }\n const worker = new Worker(workerSrc, {\n type: \"module\"\n });\n const messageHandler = new MessageHandler(\"main\", \"worker\", worker);\n const terminateEarly = () => {\n ac.abort();\n messageHandler.destroy();\n worker.terminate();\n if (this.destroyed) {\n this._readyCapability.reject(new Error(\"Worker was destroyed\"));\n } else {\n this._setupFakeWorker();\n }\n };\n const ac = new AbortController();\n worker.addEventListener(\"error\", () => {\n if (!this._webWorker) {\n terminateEarly();\n }\n }, {\n signal: ac.signal\n });\n messageHandler.on(\"test\", data => {\n ac.abort();\n if (this.destroyed || !data) {\n terminateEarly();\n return;\n }\n this._messageHandler = messageHandler;\n this._port = worker;\n this._webWorker = worker;\n this.#resolve();\n });\n messageHandler.on(\"ready\", data => {\n ac.abort();\n if (this.destroyed) {\n terminateEarly();\n return;\n }\n try {\n sendTest();\n } catch {\n this._setupFakeWorker();\n }\n 
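/* Descriptive note: reaching this point means the test message has been posted to the module worker; if posting threw, the catch above already fell back to the main-thread fake worker via _setupFakeWorker(). */\n      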
});\n const sendTest = () => {\n const testObj = new Uint8Array();\n messageHandler.send(\"test\", testObj, [testObj.buffer]);\n };\n sendTest();\n return;\n } catch {\n info(\"The worker has been disabled.\");\n }\n this._setupFakeWorker();\n }\n _setupFakeWorker() {\n if (!PDFWorkerUtil.isWorkerDisabled) {\n warn(\"Setting up fake worker.\");\n PDFWorkerUtil.isWorkerDisabled = true;\n }\n PDFWorker._setupFakeWorkerGlobal.then(WorkerMessageHandler => {\n if (this.destroyed) {\n this._readyCapability.reject(new Error(\"Worker was destroyed\"));\n return;\n }\n const port = new LoopbackPort();\n this._port = port;\n const id = `fake${PDFWorkerUtil.fakeWorkerId++}`;\n const workerHandler = new MessageHandler(id + \"_worker\", id, port);\n WorkerMessageHandler.setup(workerHandler, port);\n this._messageHandler = new MessageHandler(id, id + \"_worker\", port);\n this.#resolve();\n }).catch(reason => {\n this._readyCapability.reject(new Error(`Setting up fake worker failed: \"${reason.message}\".`));\n });\n }\n destroy() {\n this.destroyed = true;\n if (this._webWorker) {\n this._webWorker.terminate();\n this._webWorker = null;\n }\n PDFWorker.#workerPorts?.delete(this._port);\n this._port = null;\n if (this._messageHandler) {\n this._messageHandler.destroy();\n this._messageHandler = null;\n }\n }\n static fromPort(params) {\n if (!params?.port) {\n throw new Error(\"PDFWorker.fromPort - invalid method signature.\");\n }\n const cachedPort = this.#workerPorts?.get(params.port);\n if (cachedPort) {\n if (cachedPort._pendingDestroy) {\n throw new Error(\"PDFWorker.fromPort - the worker is being destroyed.\\n\" + \"Please remember to await `PDFDocumentLoadingTask.destroy()`-calls.\");\n }\n return cachedPort;\n }\n return new PDFWorker(params);\n }\n static get workerSrc() {\n if (GlobalWorkerOptions.workerSrc) {\n return GlobalWorkerOptions.workerSrc;\n }\n throw new Error('No \"GlobalWorkerOptions.workerSrc\" specified.');\n }\n static get #mainThreadWorkerMessageHandler() {\n try {\n return globalThis.pdfjsWorker?.WorkerMessageHandler || null;\n } catch {\n return null;\n }\n }\n static get _setupFakeWorkerGlobal() {\n const loader = async () => {\n if (this.#mainThreadWorkerMessageHandler) {\n return this.#mainThreadWorkerMessageHandler;\n }\n const worker = await import( /*webpackIgnore: true*/this.workerSrc);\n return worker.WorkerMessageHandler;\n };\n return shadow(this, \"_setupFakeWorkerGlobal\", loader());\n }\n}\nclass WorkerTransport {\n #methodPromises = new Map();\n #pageCache = new Map();\n #pagePromises = new Map();\n #pageRefCache = new Map();\n #passwordCapability = null;\n constructor(messageHandler, loadingTask, networkStream, params, factory) {\n this.messageHandler = messageHandler;\n this.loadingTask = loadingTask;\n this.commonObjs = new PDFObjects();\n this.fontLoader = new FontLoader({\n ownerDocument: params.ownerDocument,\n styleElement: params.styleElement\n });\n this.loadingParams = params.loadingParams;\n this._params = params;\n this.canvasFactory = factory.canvasFactory;\n this.filterFactory = factory.filterFactory;\n this.cMapReaderFactory = factory.cMapReaderFactory;\n this.standardFontDataFactory = factory.standardFontDataFactory;\n this.destroyed = false;\n this.destroyCapability = null;\n this._networkStream = networkStream;\n this._fullReader = null;\n this._lastProgress = null;\n this.downloadInfoCapability = Promise.withResolvers();\n this.setupMessageHandler();\n }\n #cacheSimpleMethod(name, data = null) {\n const cachedPromise = 
this.#methodPromises.get(name);\n if (cachedPromise) {\n return cachedPromise;\n }\n const promise = this.messageHandler.sendWithPromise(name, data);\n this.#methodPromises.set(name, promise);\n return promise;\n }\n get annotationStorage() {\n return shadow(this, \"annotationStorage\", new AnnotationStorage());\n }\n getRenderingIntent(intent, annotationMode = AnnotationMode.ENABLE, printAnnotationStorage = null, isOpList = false) {\n let renderingIntent = RenderingIntentFlag.DISPLAY;\n let annotationStorageSerializable = SerializableEmpty;\n switch (intent) {\n case \"any\":\n renderingIntent = RenderingIntentFlag.ANY;\n break;\n case \"display\":\n break;\n case \"print\":\n renderingIntent = RenderingIntentFlag.PRINT;\n break;\n default:\n warn(`getRenderingIntent - invalid intent: ${intent}`);\n }\n switch (annotationMode) {\n case AnnotationMode.DISABLE:\n renderingIntent += RenderingIntentFlag.ANNOTATIONS_DISABLE;\n break;\n case AnnotationMode.ENABLE:\n break;\n case AnnotationMode.ENABLE_FORMS:\n renderingIntent += RenderingIntentFlag.ANNOTATIONS_FORMS;\n break;\n case AnnotationMode.ENABLE_STORAGE:\n renderingIntent += RenderingIntentFlag.ANNOTATIONS_STORAGE;\n const annotationStorage = renderingIntent & RenderingIntentFlag.PRINT && printAnnotationStorage instanceof PrintAnnotationStorage ? printAnnotationStorage : this.annotationStorage;\n annotationStorageSerializable = annotationStorage.serializable;\n break;\n default:\n warn(`getRenderingIntent - invalid annotationMode: ${annotationMode}`);\n }\n if (isOpList) {\n renderingIntent += RenderingIntentFlag.OPLIST;\n }\n return {\n renderingIntent,\n cacheKey: `${renderingIntent}_${annotationStorageSerializable.hash}`,\n annotationStorageSerializable\n };\n }\n destroy() {\n if (this.destroyCapability) {\n return this.destroyCapability.promise;\n }\n this.destroyed = true;\n this.destroyCapability = Promise.withResolvers();\n this.#passwordCapability?.reject(new Error(\"Worker was destroyed during onPassword callback\"));\n const waitOn = [];\n for (const page of this.#pageCache.values()) {\n waitOn.push(page._destroy());\n }\n this.#pageCache.clear();\n this.#pagePromises.clear();\n this.#pageRefCache.clear();\n if (this.hasOwnProperty(\"annotationStorage\")) {\n this.annotationStorage.resetModified();\n }\n const terminated = this.messageHandler.sendWithPromise(\"Terminate\", null);\n waitOn.push(terminated);\n Promise.all(waitOn).then(() => {\n this.commonObjs.clear();\n this.fontLoader.clear();\n this.#methodPromises.clear();\n this.filterFactory.destroy();\n TextLayer.cleanup();\n this._networkStream?.cancelAllRequests(new AbortException(\"Worker was terminated.\"));\n if (this.messageHandler) {\n this.messageHandler.destroy();\n this.messageHandler = null;\n }\n this.destroyCapability.resolve();\n }, this.destroyCapability.reject);\n return this.destroyCapability.promise;\n }\n setupMessageHandler() {\n const {\n messageHandler,\n loadingTask\n } = this;\n messageHandler.on(\"GetReader\", (data, sink) => {\n assert(this._networkStream, \"GetReader - no `IPDFStream` instance available.\");\n this._fullReader = this._networkStream.getFullReader();\n this._fullReader.onProgress = evt => {\n this._lastProgress = {\n loaded: evt.loaded,\n total: evt.total\n };\n };\n sink.onPull = () => {\n this._fullReader.read().then(function ({\n value,\n done\n }) {\n if (done) {\n sink.close();\n return;\n }\n assert(value instanceof ArrayBuffer, \"GetReader - expected an ArrayBuffer.\");\n sink.enqueue(new Uint8Array(value), 1, [value]);\n 
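/* Descriptive note: the chunk's underlying ArrayBuffer is listed in the transfer list above, so it is moved to the worker rather than copied. */\n        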
}).catch(reason => {\n sink.error(reason);\n });\n };\n sink.onCancel = reason => {\n this._fullReader.cancel(reason);\n sink.ready.catch(readyReason => {\n if (this.destroyed) {\n return;\n }\n throw readyReason;\n });\n };\n });\n messageHandler.on(\"ReaderHeadersReady\", data => {\n const headersCapability = Promise.withResolvers();\n const fullReader = this._fullReader;\n fullReader.headersReady.then(() => {\n if (!fullReader.isStreamingSupported || !fullReader.isRangeSupported) {\n if (this._lastProgress) {\n loadingTask.onProgress?.(this._lastProgress);\n }\n fullReader.onProgress = evt => {\n loadingTask.onProgress?.({\n loaded: evt.loaded,\n total: evt.total\n });\n };\n }\n headersCapability.resolve({\n isStreamingSupported: fullReader.isStreamingSupported,\n isRangeSupported: fullReader.isRangeSupported,\n contentLength: fullReader.contentLength\n });\n }, headersCapability.reject);\n return headersCapability.promise;\n });\n messageHandler.on(\"GetRangeReader\", (data, sink) => {\n assert(this._networkStream, \"GetRangeReader - no `IPDFStream` instance available.\");\n const rangeReader = this._networkStream.getRangeReader(data.begin, data.end);\n if (!rangeReader) {\n sink.close();\n return;\n }\n sink.onPull = () => {\n rangeReader.read().then(function ({\n value,\n done\n }) {\n if (done) {\n sink.close();\n return;\n }\n assert(value instanceof ArrayBuffer, \"GetRangeReader - expected an ArrayBuffer.\");\n sink.enqueue(new Uint8Array(value), 1, [value]);\n }).catch(reason => {\n sink.error(reason);\n });\n };\n sink.onCancel = reason => {\n rangeReader.cancel(reason);\n sink.ready.catch(readyReason => {\n if (this.destroyed) {\n return;\n }\n throw readyReason;\n });\n };\n });\n messageHandler.on(\"GetDoc\", ({\n pdfInfo\n }) => {\n this._numPages = pdfInfo.numPages;\n this._htmlForXfa = pdfInfo.htmlForXfa;\n delete pdfInfo.htmlForXfa;\n loadingTask._capability.resolve(new PDFDocumentProxy(pdfInfo, this));\n });\n messageHandler.on(\"DocException\", function (ex) {\n let reason;\n switch (ex.name) {\n case \"PasswordException\":\n reason = new PasswordException(ex.message, ex.code);\n break;\n case \"InvalidPDFException\":\n reason = new InvalidPDFException(ex.message);\n break;\n case \"MissingPDFException\":\n reason = new MissingPDFException(ex.message);\n break;\n case \"UnexpectedResponseException\":\n reason = new UnexpectedResponseException(ex.message, ex.status);\n break;\n case \"UnknownErrorException\":\n reason = new UnknownErrorException(ex.message, ex.details);\n break;\n default:\n unreachable(\"DocException - expected a valid Error.\");\n }\n loadingTask._capability.reject(reason);\n });\n messageHandler.on(\"PasswordRequest\", exception => {\n this.#passwordCapability = Promise.withResolvers();\n if (loadingTask.onPassword) {\n const updatePassword = password => {\n if (password instanceof Error) {\n this.#passwordCapability.reject(password);\n } else {\n this.#passwordCapability.resolve({\n password\n });\n }\n };\n try {\n loadingTask.onPassword(updatePassword, exception.code);\n } catch (ex) {\n this.#passwordCapability.reject(ex);\n }\n } else {\n this.#passwordCapability.reject(new PasswordException(exception.message, exception.code));\n }\n return this.#passwordCapability.promise;\n });\n messageHandler.on(\"DataLoaded\", data => {\n loadingTask.onProgress?.({\n loaded: data.length,\n total: data.length\n });\n this.downloadInfoCapability.resolve(data);\n });\n messageHandler.on(\"StartRenderPage\", data => {\n if (this.destroyed) {\n return;\n }\n 
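/* Descriptive note: _startRenderPage resolves the page's displayReadyCapability, letting any pending render() calls begin drawing with the reported transparency. */\n      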
const page = this.#pageCache.get(data.pageIndex);\n page._startRenderPage(data.transparency, data.cacheKey);\n });\n messageHandler.on(\"commonobj\", ([id, type, exportedData]) => {\n if (this.destroyed) {\n return null;\n }\n if (this.commonObjs.has(id)) {\n return null;\n }\n switch (type) {\n case \"Font\":\n const {\n disableFontFace,\n fontExtraProperties,\n pdfBug\n } = this._params;\n if (\"error\" in exportedData) {\n const exportedError = exportedData.error;\n warn(`Error during font loading: ${exportedError}`);\n this.commonObjs.resolve(id, exportedError);\n break;\n }\n const inspectFont = pdfBug && globalThis.FontInspector?.enabled ? (font, url) => globalThis.FontInspector.fontAdded(font, url) : null;\n const font = new FontFaceObject(exportedData, {\n disableFontFace,\n inspectFont\n });\n this.fontLoader.bind(font).catch(() => messageHandler.sendWithPromise(\"FontFallback\", {\n id\n })).finally(() => {\n if (!fontExtraProperties && font.data) {\n font.data = null;\n }\n this.commonObjs.resolve(id, font);\n });\n break;\n case \"CopyLocalImage\":\n const {\n imageRef\n } = exportedData;\n assert(imageRef, \"The imageRef must be defined.\");\n for (const pageProxy of this.#pageCache.values()) {\n for (const [, data] of pageProxy.objs) {\n if (data?.ref !== imageRef) {\n continue;\n }\n if (!data.dataLen) {\n return null;\n }\n this.commonObjs.resolve(id, structuredClone(data));\n return data.dataLen;\n }\n }\n break;\n case \"FontPath\":\n case \"Image\":\n case \"Pattern\":\n this.commonObjs.resolve(id, exportedData);\n break;\n default:\n throw new Error(`Got unknown common object type ${type}`);\n }\n return null;\n });\n messageHandler.on(\"obj\", ([id, pageIndex, type, imageData]) => {\n if (this.destroyed) {\n return;\n }\n const pageProxy = this.#pageCache.get(pageIndex);\n if (pageProxy.objs.has(id)) {\n return;\n }\n if (pageProxy._intentStates.size === 0) {\n imageData?.bitmap?.close();\n return;\n }\n switch (type) {\n case \"Image\":\n pageProxy.objs.resolve(id, imageData);\n if (imageData?.dataLen > MAX_IMAGE_SIZE_TO_CACHE) {\n pageProxy._maybeCleanupAfterRender = true;\n }\n break;\n case \"Pattern\":\n pageProxy.objs.resolve(id, imageData);\n break;\n default:\n throw new Error(`Got unknown object type ${type}`);\n }\n });\n messageHandler.on(\"DocProgress\", data => {\n if (this.destroyed) {\n return;\n }\n loadingTask.onProgress?.({\n loaded: data.loaded,\n total: data.total\n });\n });\n messageHandler.on(\"FetchBuiltInCMap\", data => {\n if (this.destroyed) {\n return Promise.reject(new Error(\"Worker was destroyed.\"));\n }\n if (!this.cMapReaderFactory) {\n return Promise.reject(new Error(\"CMapReaderFactory not initialized, see the `useWorkerFetch` parameter.\"));\n }\n return this.cMapReaderFactory.fetch(data);\n });\n messageHandler.on(\"FetchStandardFontData\", data => {\n if (this.destroyed) {\n return Promise.reject(new Error(\"Worker was destroyed.\"));\n }\n if (!this.standardFontDataFactory) {\n return Promise.reject(new Error(\"StandardFontDataFactory not initialized, see the `useWorkerFetch` parameter.\"));\n }\n return this.standardFontDataFactory.fetch(data);\n });\n }\n getData() {\n return this.messageHandler.sendWithPromise(\"GetData\", null);\n }\n saveDocument() {\n if (this.annotationStorage.size <= 0) {\n warn(\"saveDocument called while `annotationStorage` is empty, \" + \"please use the getData-method instead.\");\n }\n const {\n map,\n transfer\n } = this.annotationStorage.serializable;\n return 
this.messageHandler.sendWithPromise(\"SaveDocument\", {\n isPureXfa: !!this._htmlForXfa,\n numPages: this._numPages,\n annotationStorage: map,\n filename: this._fullReader?.filename ?? null\n }, transfer).finally(() => {\n this.annotationStorage.resetModified();\n });\n }\n getPage(pageNumber) {\n if (!Number.isInteger(pageNumber) || pageNumber <= 0 || pageNumber > this._numPages) {\n return Promise.reject(new Error(\"Invalid page request.\"));\n }\n const pageIndex = pageNumber - 1,\n cachedPromise = this.#pagePromises.get(pageIndex);\n if (cachedPromise) {\n return cachedPromise;\n }\n const promise = this.messageHandler.sendWithPromise(\"GetPage\", {\n pageIndex\n }).then(pageInfo => {\n if (this.destroyed) {\n throw new Error(\"Transport destroyed\");\n }\n if (pageInfo.refStr) {\n this.#pageRefCache.set(pageInfo.refStr, pageNumber);\n }\n const page = new PDFPageProxy(pageIndex, pageInfo, this, this._params.pdfBug);\n this.#pageCache.set(pageIndex, page);\n return page;\n });\n this.#pagePromises.set(pageIndex, promise);\n return promise;\n }\n getPageIndex(ref) {\n if (!isRefProxy(ref)) {\n return Promise.reject(new Error(\"Invalid pageIndex request.\"));\n }\n return this.messageHandler.sendWithPromise(\"GetPageIndex\", {\n num: ref.num,\n gen: ref.gen\n });\n }\n getAnnotations(pageIndex, intent) {\n return this.messageHandler.sendWithPromise(\"GetAnnotations\", {\n pageIndex,\n intent\n });\n }\n getFieldObjects() {\n return this.#cacheSimpleMethod(\"GetFieldObjects\");\n }\n hasJSActions() {\n return this.#cacheSimpleMethod(\"HasJSActions\");\n }\n getCalculationOrderIds() {\n return this.messageHandler.sendWithPromise(\"GetCalculationOrderIds\", null);\n }\n getDestinations() {\n return this.messageHandler.sendWithPromise(\"GetDestinations\", null);\n }\n getDestination(id) {\n if (typeof id !== \"string\") {\n return Promise.reject(new Error(\"Invalid destination request.\"));\n }\n return this.messageHandler.sendWithPromise(\"GetDestination\", {\n id\n });\n }\n getPageLabels() {\n return this.messageHandler.sendWithPromise(\"GetPageLabels\", null);\n }\n getPageLayout() {\n return this.messageHandler.sendWithPromise(\"GetPageLayout\", null);\n }\n getPageMode() {\n return this.messageHandler.sendWithPromise(\"GetPageMode\", null);\n }\n getViewerPreferences() {\n return this.messageHandler.sendWithPromise(\"GetViewerPreferences\", null);\n }\n getOpenAction() {\n return this.messageHandler.sendWithPromise(\"GetOpenAction\", null);\n }\n getAttachments() {\n return this.messageHandler.sendWithPromise(\"GetAttachments\", null);\n }\n getDocJSActions() {\n return this.#cacheSimpleMethod(\"GetDocJSActions\");\n }\n getPageJSActions(pageIndex) {\n return this.messageHandler.sendWithPromise(\"GetPageJSActions\", {\n pageIndex\n });\n }\n getStructTree(pageIndex) {\n return this.messageHandler.sendWithPromise(\"GetStructTree\", {\n pageIndex\n });\n }\n getOutline() {\n return this.messageHandler.sendWithPromise(\"GetOutline\", null);\n }\n getOptionalContentConfig(renderingIntent) {\n return this.#cacheSimpleMethod(\"GetOptionalContentConfig\").then(data => new OptionalContentConfig(data, renderingIntent));\n }\n getPermissions() {\n return this.messageHandler.sendWithPromise(\"GetPermissions\", null);\n }\n getMetadata() {\n const name = \"GetMetadata\",\n cachedPromise = this.#methodPromises.get(name);\n if (cachedPromise) {\n return cachedPromise;\n }\n const promise = this.messageHandler.sendWithPromise(name, null).then(results => ({\n info: results[0],\n metadata: results[1] ? 
new Metadata(results[1]) : null,\n contentDispositionFilename: this._fullReader?.filename ?? null,\n contentLength: this._fullReader?.contentLength ?? null\n }));\n this.#methodPromises.set(name, promise);\n return promise;\n }\n getMarkInfo() {\n return this.messageHandler.sendWithPromise(\"GetMarkInfo\", null);\n }\n async startCleanup(keepLoadedFonts = false) {\n if (this.destroyed) {\n return;\n }\n await this.messageHandler.sendWithPromise(\"Cleanup\", null);\n for (const page of this.#pageCache.values()) {\n const cleanupSuccessful = page.cleanup();\n if (!cleanupSuccessful) {\n throw new Error(`startCleanup: Page ${page.pageNumber} is currently rendering.`);\n }\n }\n this.commonObjs.clear();\n if (!keepLoadedFonts) {\n this.fontLoader.clear();\n }\n this.#methodPromises.clear();\n this.filterFactory.destroy(true);\n TextLayer.cleanup();\n }\n cachedPageNumber(ref) {\n if (!isRefProxy(ref)) {\n return null;\n }\n const refStr = ref.gen === 0 ? `${ref.num}R` : `${ref.num}R${ref.gen}`;\n return this.#pageRefCache.get(refStr) ?? null;\n }\n}\nconst INITIAL_DATA = Symbol(\"INITIAL_DATA\");\nclass PDFObjects {\n #objs = Object.create(null);\n #ensureObj(objId) {\n return this.#objs[objId] ||= {\n ...Promise.withResolvers(),\n data: INITIAL_DATA\n };\n }\n get(objId, callback = null) {\n if (callback) {\n const obj = this.#ensureObj(objId);\n obj.promise.then(() => callback(obj.data));\n return null;\n }\n const obj = this.#objs[objId];\n if (!obj || obj.data === INITIAL_DATA) {\n throw new Error(`Requesting object that isn't resolved yet ${objId}.`);\n }\n return obj.data;\n }\n has(objId) {\n const obj = this.#objs[objId];\n return !!obj && obj.data !== INITIAL_DATA;\n }\n resolve(objId, data = null) {\n const obj = this.#ensureObj(objId);\n obj.data = data;\n obj.resolve();\n }\n clear() {\n for (const objId in this.#objs) {\n const {\n data\n } = this.#objs[objId];\n data?.bitmap?.close();\n }\n this.#objs = Object.create(null);\n }\n *[Symbol.iterator]() {\n for (const objId in this.#objs) {\n const {\n data\n } = this.#objs[objId];\n if (data === INITIAL_DATA) {\n continue;\n }\n yield [objId, data];\n }\n }\n}\nclass RenderTask {\n #internalRenderTask = null;\n constructor(internalRenderTask) {\n this.#internalRenderTask = internalRenderTask;\n this.onContinue = null;\n }\n get promise() {\n return this.#internalRenderTask.capability.promise;\n }\n cancel(extraDelay = 0) {\n this.#internalRenderTask.cancel(null, extraDelay);\n }\n get separateAnnots() {\n const {\n separateAnnots\n } = this.#internalRenderTask.operatorList;\n if (!separateAnnots) {\n return false;\n }\n const {\n annotationCanvasMap\n } = this.#internalRenderTask;\n return separateAnnots.form || separateAnnots.canvas && annotationCanvasMap?.size > 0;\n }\n}\nclass InternalRenderTask {\n #rAF = null;\n static #canvasInUse = new WeakSet();\n constructor({\n callback,\n params,\n objs,\n commonObjs,\n annotationCanvasMap,\n operatorList,\n pageIndex,\n canvasFactory,\n filterFactory,\n useRequestAnimationFrame = false,\n pdfBug = false,\n pageColors = null\n }) {\n this.callback = callback;\n this.params = params;\n this.objs = objs;\n this.commonObjs = commonObjs;\n this.annotationCanvasMap = annotationCanvasMap;\n this.operatorListIdx = null;\n this.operatorList = operatorList;\n this._pageIndex = pageIndex;\n this.canvasFactory = canvasFactory;\n this.filterFactory = filterFactory;\n this._pdfBug = pdfBug;\n this.pageColors = pageColors;\n this.running = false;\n this.graphicsReadyCallback = null;\n 
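/* Descriptive note: graphicsReady flips to true in initializeGraphics(); until then operatorListChanged() only stores _continueBound as graphicsReadyCallback instead of executing operators. */\n    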
this.graphicsReady = false;\n this._useRequestAnimationFrame = useRequestAnimationFrame === true && typeof window !== \"undefined\";\n this.cancelled = false;\n this.capability = Promise.withResolvers();\n this.task = new RenderTask(this);\n this._cancelBound = this.cancel.bind(this);\n this._continueBound = this._continue.bind(this);\n this._scheduleNextBound = this._scheduleNext.bind(this);\n this._nextBound = this._next.bind(this);\n this._canvas = params.canvasContext.canvas;\n }\n get completed() {\n return this.capability.promise.catch(function () {});\n }\n initializeGraphics({\n transparency = false,\n optionalContentConfig\n }) {\n if (this.cancelled) {\n return;\n }\n if (this._canvas) {\n if (InternalRenderTask.#canvasInUse.has(this._canvas)) {\n throw new Error(\"Cannot use the same canvas during multiple render() operations. \" + \"Use different canvas or ensure previous operations were \" + \"cancelled or completed.\");\n }\n InternalRenderTask.#canvasInUse.add(this._canvas);\n }\n if (this._pdfBug && globalThis.StepperManager?.enabled) {\n this.stepper = globalThis.StepperManager.create(this._pageIndex);\n this.stepper.init(this.operatorList);\n this.stepper.nextBreakPoint = this.stepper.getNextBreakPoint();\n }\n const {\n canvasContext,\n viewport,\n transform,\n background\n } = this.params;\n this.gfx = new CanvasGraphics(canvasContext, this.commonObjs, this.objs, this.canvasFactory, this.filterFactory, {\n optionalContentConfig\n }, this.annotationCanvasMap, this.pageColors);\n this.gfx.beginDrawing({\n transform,\n viewport,\n transparency,\n background\n });\n this.operatorListIdx = 0;\n this.graphicsReady = true;\n this.graphicsReadyCallback?.();\n }\n cancel(error = null, extraDelay = 0) {\n this.running = false;\n this.cancelled = true;\n this.gfx?.endDrawing();\n if (this.#rAF) {\n window.cancelAnimationFrame(this.#rAF);\n this.#rAF = null;\n }\n InternalRenderTask.#canvasInUse.delete(this._canvas);\n this.callback(error || new RenderingCancelledException(`Rendering cancelled, page ${this._pageIndex + 1}`, extraDelay));\n }\n operatorListChanged() {\n if (!this.graphicsReady) {\n this.graphicsReadyCallback ||= this._continueBound;\n return;\n }\n this.stepper?.updateOperatorList(this.operatorList);\n if (this.running) {\n return;\n }\n this._continue();\n }\n _continue() {\n this.running = true;\n if (this.cancelled) {\n return;\n }\n if (this.task.onContinue) {\n this.task.onContinue(this._scheduleNextBound);\n } else {\n this._scheduleNext();\n }\n }\n _scheduleNext() {\n if (this._useRequestAnimationFrame) {\n this.#rAF = window.requestAnimationFrame(() => {\n this.#rAF = null;\n this._nextBound().catch(this._cancelBound);\n });\n } else {\n Promise.resolve().then(this._nextBound).catch(this._cancelBound);\n }\n }\n async _next() {\n if (this.cancelled) {\n return;\n }\n this.operatorListIdx = this.gfx.executeOperatorList(this.operatorList, this.operatorListIdx, this._continueBound, this.stepper);\n if (this.operatorListIdx === this.operatorList.argsArray.length) {\n this.running = false;\n if (this.operatorList.lastChunk) {\n this.gfx.endDrawing();\n InternalRenderTask.#canvasInUse.delete(this._canvas);\n this.callback();\n }\n }\n }\n}\nconst version = \"4.4.168\";\nconst build = \"19fbc8998\";\n\n;// CONCATENATED MODULE: ./src/shared/scripting_utils.js\nfunction makeColorComp(n) {\n return Math.floor(Math.max(0, Math.min(1, n)) * 255).toString(16).padStart(2, \"0\");\n}\nfunction scaleAndClamp(x) {\n return Math.max(0, Math.min(255, 255 * x));\n}\nclass 
ColorConverters {\n static CMYK_G([c, y, m, k]) {\n return [\"G\", 1 - Math.min(1, 0.3 * c + 0.59 * m + 0.11 * y + k)];\n }\n static G_CMYK([g]) {\n return [\"CMYK\", 0, 0, 0, 1 - g];\n }\n static G_RGB([g]) {\n return [\"RGB\", g, g, g];\n }\n static G_rgb([g]) {\n g = scaleAndClamp(g);\n return [g, g, g];\n }\n static G_HTML([g]) {\n const G = makeColorComp(g);\n return `#${G}${G}${G}`;\n }\n static RGB_G([r, g, b]) {\n return [\"G\", 0.3 * r + 0.59 * g + 0.11 * b];\n }\n static RGB_rgb(color) {\n return color.map(scaleAndClamp);\n }\n static RGB_HTML(color) {\n return `#${color.map(makeColorComp).join(\"\")}`;\n }\n static T_HTML() {\n return \"#00000000\";\n }\n static T_rgb() {\n return [null];\n }\n static CMYK_RGB([c, y, m, k]) {\n return [\"RGB\", 1 - Math.min(1, c + k), 1 - Math.min(1, m + k), 1 - Math.min(1, y + k)];\n }\n static CMYK_rgb([c, y, m, k]) {\n return [scaleAndClamp(1 - Math.min(1, c + k)), scaleAndClamp(1 - Math.min(1, m + k)), scaleAndClamp(1 - Math.min(1, y + k))];\n }\n static CMYK_HTML(components) {\n const rgb = this.CMYK_RGB(components).slice(1);\n return this.RGB_HTML(rgb);\n }\n static RGB_CMYK([r, g, b]) {\n const c = 1 - r;\n const m = 1 - g;\n const y = 1 - b;\n const k = Math.min(c, m, y);\n return [\"CMYK\", c, m, y, k];\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/xfa_layer.js\n\nclass XfaLayer {\n static setupStorage(html, id, element, storage, intent) {\n const storedData = storage.getValue(id, {\n value: null\n });\n switch (element.name) {\n case \"textarea\":\n if (storedData.value !== null) {\n html.textContent = storedData.value;\n }\n if (intent === \"print\") {\n break;\n }\n html.addEventListener(\"input\", event => {\n storage.setValue(id, {\n value: event.target.value\n });\n });\n break;\n case \"input\":\n if (element.attributes.type === \"radio\" || element.attributes.type === \"checkbox\") {\n if (storedData.value === element.attributes.xfaOn) {\n html.setAttribute(\"checked\", true);\n } else if (storedData.value === element.attributes.xfaOff) {\n html.removeAttribute(\"checked\");\n }\n if (intent === \"print\") {\n break;\n }\n html.addEventListener(\"change\", event => {\n storage.setValue(id, {\n value: event.target.checked ? event.target.getAttribute(\"xfaOn\") : event.target.getAttribute(\"xfaOff\")\n });\n });\n } else {\n if (storedData.value !== null) {\n html.setAttribute(\"value\", storedData.value);\n }\n if (intent === \"print\") {\n break;\n }\n html.addEventListener(\"input\", event => {\n storage.setValue(id, {\n value: event.target.value\n });\n });\n }\n break;\n case \"select\":\n if (storedData.value !== null) {\n html.setAttribute(\"value\", storedData.value);\n for (const option of element.children) {\n if (option.attributes.value === storedData.value) {\n option.attributes.selected = true;\n } else if (option.attributes.hasOwnProperty(\"selected\")) {\n delete option.attributes.selected;\n }\n }\n }\n html.addEventListener(\"input\", event => {\n const options = event.target.options;\n const value = options.selectedIndex === -1 ? 
\"\" : options[options.selectedIndex].value;\n storage.setValue(id, {\n value\n });\n });\n break;\n }\n }\n static setAttributes({\n html,\n element,\n storage = null,\n intent,\n linkService\n }) {\n const {\n attributes\n } = element;\n const isHTMLAnchorElement = html instanceof HTMLAnchorElement;\n if (attributes.type === \"radio\") {\n attributes.name = `${attributes.name}-${intent}`;\n }\n for (const [key, value] of Object.entries(attributes)) {\n if (value === null || value === undefined) {\n continue;\n }\n switch (key) {\n case \"class\":\n if (value.length) {\n html.setAttribute(key, value.join(\" \"));\n }\n break;\n case \"dataId\":\n break;\n case \"id\":\n html.setAttribute(\"data-element-id\", value);\n break;\n case \"style\":\n Object.assign(html.style, value);\n break;\n case \"textContent\":\n html.textContent = value;\n break;\n default:\n if (!isHTMLAnchorElement || key !== \"href\" && key !== \"newWindow\") {\n html.setAttribute(key, value);\n }\n }\n }\n if (isHTMLAnchorElement) {\n linkService.addLinkAttributes(html, attributes.href, attributes.newWindow);\n }\n if (storage && attributes.dataId) {\n this.setupStorage(html, attributes.dataId, element, storage);\n }\n }\n static render(parameters) {\n const storage = parameters.annotationStorage;\n const linkService = parameters.linkService;\n const root = parameters.xfaHtml;\n const intent = parameters.intent || \"display\";\n const rootHtml = document.createElement(root.name);\n if (root.attributes) {\n this.setAttributes({\n html: rootHtml,\n element: root,\n intent,\n linkService\n });\n }\n const isNotForRichText = intent !== \"richText\";\n const rootDiv = parameters.div;\n rootDiv.append(rootHtml);\n if (parameters.viewport) {\n const transform = `matrix(${parameters.viewport.transform.join(\",\")})`;\n rootDiv.style.transform = transform;\n }\n if (isNotForRichText) {\n rootDiv.setAttribute(\"class\", \"xfaLayer xfaFont\");\n }\n const textDivs = [];\n if (root.children.length === 0) {\n if (root.value) {\n const node = document.createTextNode(root.value);\n rootHtml.append(node);\n if (isNotForRichText && XfaText.shouldBuildText(root.name)) {\n textDivs.push(node);\n }\n }\n return {\n textDivs\n };\n }\n const stack = [[root, -1, rootHtml]];\n while (stack.length > 0) {\n const [parent, i, html] = stack.at(-1);\n if (i + 1 === parent.children.length) {\n stack.pop();\n continue;\n }\n const child = parent.children[++stack.at(-1)[1]];\n if (child === null) {\n continue;\n }\n const {\n name\n } = child;\n if (name === \"#text\") {\n const node = document.createTextNode(child.value);\n textDivs.push(node);\n html.append(node);\n continue;\n }\n const childHtml = child?.attributes?.xmlns ? 
document.createElementNS(child.attributes.xmlns, name) : document.createElement(name);\n html.append(childHtml);\n if (child.attributes) {\n this.setAttributes({\n html: childHtml,\n element: child,\n storage,\n intent,\n linkService\n });\n }\n if (child.children?.length > 0) {\n stack.push([child, -1, childHtml]);\n } else if (child.value) {\n const node = document.createTextNode(child.value);\n if (isNotForRichText && XfaText.shouldBuildText(name)) {\n textDivs.push(node);\n }\n childHtml.append(node);\n }\n }\n for (const el of rootDiv.querySelectorAll(\".xfaNonInteractive input, .xfaNonInteractive textarea\")) {\n el.setAttribute(\"readOnly\", true);\n }\n return {\n textDivs\n };\n }\n static update(parameters) {\n const transform = `matrix(${parameters.viewport.transform.join(\",\")})`;\n parameters.div.style.transform = transform;\n parameters.div.hidden = false;\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/annotation_layer.js\n\n\n\n\n\nconst DEFAULT_TAB_INDEX = 1000;\nconst annotation_layer_DEFAULT_FONT_SIZE = 9;\nconst GetElementsByNameSet = new WeakSet();\nfunction getRectDims(rect) {\n return {\n width: rect[2] - rect[0],\n height: rect[3] - rect[1]\n };\n}\nclass AnnotationElementFactory {\n static create(parameters) {\n const subtype = parameters.data.annotationType;\n switch (subtype) {\n case AnnotationType.LINK:\n return new LinkAnnotationElement(parameters);\n case AnnotationType.TEXT:\n return new TextAnnotationElement(parameters);\n case AnnotationType.WIDGET:\n const fieldType = parameters.data.fieldType;\n switch (fieldType) {\n case \"Tx\":\n return new TextWidgetAnnotationElement(parameters);\n case \"Btn\":\n if (parameters.data.radioButton) {\n return new RadioButtonWidgetAnnotationElement(parameters);\n } else if (parameters.data.checkBox) {\n return new CheckboxWidgetAnnotationElement(parameters);\n }\n return new PushButtonWidgetAnnotationElement(parameters);\n case \"Ch\":\n return new ChoiceWidgetAnnotationElement(parameters);\n case \"Sig\":\n return new SignatureWidgetAnnotationElement(parameters);\n }\n return new WidgetAnnotationElement(parameters);\n case AnnotationType.POPUP:\n return new PopupAnnotationElement(parameters);\n case AnnotationType.FREETEXT:\n return new FreeTextAnnotationElement(parameters);\n case AnnotationType.LINE:\n return new LineAnnotationElement(parameters);\n case AnnotationType.SQUARE:\n return new SquareAnnotationElement(parameters);\n case AnnotationType.CIRCLE:\n return new CircleAnnotationElement(parameters);\n case AnnotationType.POLYLINE:\n return new PolylineAnnotationElement(parameters);\n case AnnotationType.CARET:\n return new CaretAnnotationElement(parameters);\n case AnnotationType.INK:\n return new InkAnnotationElement(parameters);\n case AnnotationType.POLYGON:\n return new PolygonAnnotationElement(parameters);\n case AnnotationType.HIGHLIGHT:\n return new HighlightAnnotationElement(parameters);\n case AnnotationType.UNDERLINE:\n return new UnderlineAnnotationElement(parameters);\n case AnnotationType.SQUIGGLY:\n return new SquigglyAnnotationElement(parameters);\n case AnnotationType.STRIKEOUT:\n return new StrikeOutAnnotationElement(parameters);\n case AnnotationType.STAMP:\n return new StampAnnotationElement(parameters);\n case AnnotationType.FILEATTACHMENT:\n return new FileAttachmentAnnotationElement(parameters);\n default:\n return new AnnotationElement(parameters);\n }\n }\n}\nclass AnnotationElement {\n #updates = null;\n #hasBorder = false;\n #popupElement = null;\n constructor(parameters, {\n 
isRenderable = false,\n ignoreBorder = false,\n createQuadrilaterals = false\n } = {}) {\n this.isRenderable = isRenderable;\n this.data = parameters.data;\n this.layer = parameters.layer;\n this.linkService = parameters.linkService;\n this.downloadManager = parameters.downloadManager;\n this.imageResourcesPath = parameters.imageResourcesPath;\n this.renderForms = parameters.renderForms;\n this.svgFactory = parameters.svgFactory;\n this.annotationStorage = parameters.annotationStorage;\n this.enableScripting = parameters.enableScripting;\n this.hasJSActions = parameters.hasJSActions;\n this._fieldObjects = parameters.fieldObjects;\n this.parent = parameters.parent;\n if (isRenderable) {\n this.container = this._createContainer(ignoreBorder);\n }\n if (createQuadrilaterals) {\n this._createQuadrilaterals();\n }\n }\n static _hasPopupData({\n titleObj,\n contentsObj,\n richText\n }) {\n return !!(titleObj?.str || contentsObj?.str || richText?.str);\n }\n get hasPopupData() {\n return AnnotationElement._hasPopupData(this.data);\n }\n updateEdited(params) {\n if (!this.container) {\n return;\n }\n this.#updates ||= {\n rect: this.data.rect.slice(0)\n };\n const {\n rect\n } = params;\n if (rect) {\n this.#setRectEdited(rect);\n }\n this.#popupElement?.popup.updateEdited(params);\n }\n resetEdited() {\n if (!this.#updates) {\n return;\n }\n this.#setRectEdited(this.#updates.rect);\n this.#popupElement?.popup.resetEdited();\n this.#updates = null;\n }\n #setRectEdited(rect) {\n const {\n container: {\n style\n },\n data: {\n rect: currentRect,\n rotation\n },\n parent: {\n viewport: {\n rawDims: {\n pageWidth,\n pageHeight,\n pageX,\n pageY\n }\n }\n }\n } = this;\n currentRect?.splice(0, 4, ...rect);\n const {\n width,\n height\n } = getRectDims(rect);\n style.left = `${100 * (rect[0] - pageX) / pageWidth}%`;\n style.top = `${100 * (pageHeight - rect[3] + pageY) / pageHeight}%`;\n if (rotation === 0) {\n style.width = `${100 * width / pageWidth}%`;\n style.height = `${100 * height / pageHeight}%`;\n } else {\n this.setRotation(rotation);\n }\n }\n _createContainer(ignoreBorder) {\n const {\n data,\n parent: {\n page,\n viewport\n }\n } = this;\n const container = document.createElement(\"section\");\n container.setAttribute(\"data-annotation-id\", data.id);\n if (!(this instanceof WidgetAnnotationElement)) {\n container.tabIndex = DEFAULT_TAB_INDEX;\n }\n const {\n style\n } = container;\n style.zIndex = this.parent.zIndex++;\n if (data.popupRef) {\n container.setAttribute(\"aria-haspopup\", \"dialog\");\n }\n if (data.alternativeText) {\n container.title = data.alternativeText;\n }\n if (data.noRotate) {\n container.classList.add(\"norotate\");\n }\n if (!data.rect || this instanceof PopupAnnotationElement) {\n const {\n rotation\n } = data;\n if (!data.hasOwnCanvas && rotation !== 0) {\n this.setRotation(rotation, container);\n }\n return container;\n }\n const {\n width,\n height\n } = getRectDims(data.rect);\n if (!ignoreBorder && data.borderStyle.width > 0) {\n style.borderWidth = `${data.borderStyle.width}px`;\n const horizontalRadius = data.borderStyle.horizontalCornerRadius;\n const verticalRadius = data.borderStyle.verticalCornerRadius;\n if (horizontalRadius > 0 || verticalRadius > 0) {\n const radius = `calc(${horizontalRadius}px * var(--scale-factor)) / calc(${verticalRadius}px * var(--scale-factor))`;\n style.borderRadius = radius;\n } else if (this instanceof RadioButtonWidgetAnnotationElement) {\n const radius = `calc(${width}px * var(--scale-factor)) / calc(${height}px * 
var(--scale-factor))`;\n style.borderRadius = radius;\n }\n switch (data.borderStyle.style) {\n case AnnotationBorderStyleType.SOLID:\n style.borderStyle = \"solid\";\n break;\n case AnnotationBorderStyleType.DASHED:\n style.borderStyle = \"dashed\";\n break;\n case AnnotationBorderStyleType.BEVELED:\n warn(\"Unimplemented border style: beveled\");\n break;\n case AnnotationBorderStyleType.INSET:\n warn(\"Unimplemented border style: inset\");\n break;\n case AnnotationBorderStyleType.UNDERLINE:\n style.borderBottomStyle = \"solid\";\n break;\n default:\n break;\n }\n const borderColor = data.borderColor || null;\n if (borderColor) {\n this.#hasBorder = true;\n style.borderColor = Util.makeHexColor(borderColor[0] | 0, borderColor[1] | 0, borderColor[2] | 0);\n } else {\n style.borderWidth = 0;\n }\n }\n const rect = Util.normalizeRect([data.rect[0], page.view[3] - data.rect[1] + page.view[1], data.rect[2], page.view[3] - data.rect[3] + page.view[1]]);\n const {\n pageWidth,\n pageHeight,\n pageX,\n pageY\n } = viewport.rawDims;\n style.left = `${100 * (rect[0] - pageX) / pageWidth}%`;\n style.top = `${100 * (rect[1] - pageY) / pageHeight}%`;\n const {\n rotation\n } = data;\n if (data.hasOwnCanvas || rotation === 0) {\n style.width = `${100 * width / pageWidth}%`;\n style.height = `${100 * height / pageHeight}%`;\n } else {\n this.setRotation(rotation, container);\n }\n return container;\n }\n setRotation(angle, container = this.container) {\n if (!this.data.rect) {\n return;\n }\n const {\n pageWidth,\n pageHeight\n } = this.parent.viewport.rawDims;\n const {\n width,\n height\n } = getRectDims(this.data.rect);\n let elementWidth, elementHeight;\n if (angle % 180 === 0) {\n elementWidth = 100 * width / pageWidth;\n elementHeight = 100 * height / pageHeight;\n } else {\n elementWidth = 100 * height / pageWidth;\n elementHeight = 100 * width / pageHeight;\n }\n container.style.width = `${elementWidth}%`;\n container.style.height = `${elementHeight}%`;\n container.setAttribute(\"data-main-rotation\", (360 - angle) % 360);\n }\n get _commonActions() {\n const setColor = (jsName, styleName, event) => {\n const color = event.detail[jsName];\n const colorType = color[0];\n const colorArray = color.slice(1);\n event.target.style[styleName] = ColorConverters[`${colorType}_HTML`](colorArray);\n this.annotationStorage.setValue(this.data.id, {\n [styleName]: ColorConverters[`${colorType}_rgb`](colorArray)\n });\n };\n return shadow(this, \"_commonActions\", {\n display: event => {\n const {\n display\n } = event.detail;\n const hidden = display % 2 === 1;\n this.container.style.visibility = hidden ? \"hidden\" : \"visible\";\n this.annotationStorage.setValue(this.data.id, {\n noView: hidden,\n noPrint: display === 1 || display === 2\n });\n },\n print: event => {\n this.annotationStorage.setValue(this.data.id, {\n noPrint: !event.detail.print\n });\n },\n hidden: event => {\n const {\n hidden\n } = event.detail;\n this.container.style.visibility = hidden ? 
\"hidden\" : \"visible\";\n this.annotationStorage.setValue(this.data.id, {\n noPrint: hidden,\n noView: hidden\n });\n },\n focus: event => {\n setTimeout(() => event.target.focus({\n preventScroll: false\n }), 0);\n },\n userName: event => {\n event.target.title = event.detail.userName;\n },\n readonly: event => {\n event.target.disabled = event.detail.readonly;\n },\n required: event => {\n this._setRequired(event.target, event.detail.required);\n },\n bgColor: event => {\n setColor(\"bgColor\", \"backgroundColor\", event);\n },\n fillColor: event => {\n setColor(\"fillColor\", \"backgroundColor\", event);\n },\n fgColor: event => {\n setColor(\"fgColor\", \"color\", event);\n },\n textColor: event => {\n setColor(\"textColor\", \"color\", event);\n },\n borderColor: event => {\n setColor(\"borderColor\", \"borderColor\", event);\n },\n strokeColor: event => {\n setColor(\"strokeColor\", \"borderColor\", event);\n },\n rotation: event => {\n const angle = event.detail.rotation;\n this.setRotation(angle);\n this.annotationStorage.setValue(this.data.id, {\n rotation: angle\n });\n }\n });\n }\n _dispatchEventFromSandbox(actions, jsEvent) {\n const commonActions = this._commonActions;\n for (const name of Object.keys(jsEvent.detail)) {\n const action = actions[name] || commonActions[name];\n action?.(jsEvent);\n }\n }\n _setDefaultPropertiesFromJS(element) {\n if (!this.enableScripting) {\n return;\n }\n const storedData = this.annotationStorage.getRawValue(this.data.id);\n if (!storedData) {\n return;\n }\n const commonActions = this._commonActions;\n for (const [actionName, detail] of Object.entries(storedData)) {\n const action = commonActions[actionName];\n if (action) {\n const eventProxy = {\n detail: {\n [actionName]: detail\n },\n target: element\n };\n action(eventProxy);\n delete storedData[actionName];\n }\n }\n }\n _createQuadrilaterals() {\n if (!this.container) {\n return;\n }\n const {\n quadPoints\n } = this.data;\n if (!quadPoints) {\n return;\n }\n const [rectBlX, rectBlY, rectTrX, rectTrY] = this.data.rect.map(x => Math.fround(x));\n if (quadPoints.length === 8) {\n const [trX, trY, blX, blY] = quadPoints.subarray(2, 6);\n if (rectTrX === trX && rectTrY === trY && rectBlX === blX && rectBlY === blY) {\n return;\n }\n }\n const {\n style\n } = this.container;\n let svgBuffer;\n if (this.#hasBorder) {\n const {\n borderColor,\n borderWidth\n } = style;\n style.borderWidth = 0;\n svgBuffer = [\"url('data:image/svg+xml;utf8,\", ``, ``];\n this.container.classList.add(\"hasBorder\");\n }\n const width = rectTrX - rectBlX;\n const height = rectTrY - rectBlY;\n const {\n svgFactory\n } = this;\n const svg = svgFactory.createElement(\"svg\");\n svg.classList.add(\"quadrilateralsContainer\");\n svg.setAttribute(\"width\", 0);\n svg.setAttribute(\"height\", 0);\n const defs = svgFactory.createElement(\"defs\");\n svg.append(defs);\n const clipPath = svgFactory.createElement(\"clipPath\");\n const id = `clippath_${this.data.id}`;\n clipPath.setAttribute(\"id\", id);\n clipPath.setAttribute(\"clipPathUnits\", \"objectBoundingBox\");\n defs.append(clipPath);\n for (let i = 2, ii = quadPoints.length; i < ii; i += 8) {\n const trX = quadPoints[i];\n const trY = quadPoints[i + 1];\n const blX = quadPoints[i + 2];\n const blY = quadPoints[i + 3];\n const rect = svgFactory.createElement(\"rect\");\n const x = (blX - rectBlX) / width;\n const y = (rectTrY - trY) / height;\n const rectWidth = (trX - blX) / width;\n const rectHeight = (trY - blY) / height;\n rect.setAttribute(\"x\", x);\n 
rect.setAttribute(\"y\", y);\n rect.setAttribute(\"width\", rectWidth);\n rect.setAttribute(\"height\", rectHeight);\n clipPath.append(rect);\n svgBuffer?.push(``);\n }\n if (this.#hasBorder) {\n svgBuffer.push(` ')`);\n style.backgroundImage = svgBuffer.join(\"\");\n }\n this.container.append(svg);\n this.container.style.clipPath = `url(#${id})`;\n }\n _createPopup() {\n const {\n container,\n data\n } = this;\n container.setAttribute(\"aria-haspopup\", \"dialog\");\n const popup = this.#popupElement = new PopupAnnotationElement({\n data: {\n color: data.color,\n titleObj: data.titleObj,\n modificationDate: data.modificationDate,\n contentsObj: data.contentsObj,\n richText: data.richText,\n parentRect: data.rect,\n borderStyle: 0,\n id: `popup_${data.id}`,\n rotation: data.rotation\n },\n parent: this.parent,\n elements: [this]\n });\n this.parent.div.append(popup.render());\n }\n render() {\n unreachable(\"Abstract method `AnnotationElement.render` called\");\n }\n _getElementsByName(name, skipId = null) {\n const fields = [];\n if (this._fieldObjects) {\n const fieldObj = this._fieldObjects[name];\n if (fieldObj) {\n for (const {\n page,\n id,\n exportValues\n } of fieldObj) {\n if (page === -1) {\n continue;\n }\n if (id === skipId) {\n continue;\n }\n const exportValue = typeof exportValues === \"string\" ? exportValues : null;\n const domElement = document.querySelector(`[data-element-id=\"${id}\"]`);\n if (domElement && !GetElementsByNameSet.has(domElement)) {\n warn(`_getElementsByName - element not allowed: ${id}`);\n continue;\n }\n fields.push({\n id,\n exportValue,\n domElement\n });\n }\n }\n return fields;\n }\n for (const domElement of document.getElementsByName(name)) {\n const {\n exportValue\n } = domElement;\n const id = domElement.getAttribute(\"data-element-id\");\n if (id === skipId) {\n continue;\n }\n if (!GetElementsByNameSet.has(domElement)) {\n continue;\n }\n fields.push({\n id,\n exportValue,\n domElement\n });\n }\n return fields;\n }\n show() {\n if (this.container) {\n this.container.hidden = false;\n }\n this.popup?.maybeShow();\n }\n hide() {\n if (this.container) {\n this.container.hidden = true;\n }\n this.popup?.forceHide();\n }\n getElementsToTriggerPopup() {\n return this.container;\n }\n addHighlightArea() {\n const triggers = this.getElementsToTriggerPopup();\n if (Array.isArray(triggers)) {\n for (const element of triggers) {\n element.classList.add(\"highlightArea\");\n }\n } else {\n triggers.classList.add(\"highlightArea\");\n }\n }\n get _isEditable() {\n return false;\n }\n _editOnDoubleClick() {\n if (!this._isEditable) {\n return;\n }\n const {\n annotationEditorType: mode,\n data: {\n id: editId\n }\n } = this;\n this.container.addEventListener(\"dblclick\", () => {\n this.linkService.eventBus?.dispatch(\"switchannotationeditormode\", {\n source: this,\n mode,\n editId\n });\n });\n }\n}\nclass LinkAnnotationElement extends AnnotationElement {\n constructor(parameters, options = null) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: !!options?.ignoreBorder,\n createQuadrilaterals: true\n });\n this.isTooltipOnly = parameters.data.isTooltipOnly;\n }\n render() {\n const {\n data,\n linkService\n } = this;\n const link = document.createElement(\"a\");\n link.setAttribute(\"data-element-id\", data.id);\n let isBound = false;\n if (data.url) {\n linkService.addLinkAttributes(link, data.url, data.newWindow);\n isBound = true;\n } else if (data.action) {\n this._bindNamedAction(link, data.action);\n isBound = true;\n } else if 
(data.attachment) {\n this.#bindAttachment(link, data.attachment, data.attachmentDest);\n isBound = true;\n } else if (data.setOCGState) {\n this.#bindSetOCGState(link, data.setOCGState);\n isBound = true;\n } else if (data.dest) {\n this._bindLink(link, data.dest);\n isBound = true;\n } else {\n if (data.actions && (data.actions.Action || data.actions[\"Mouse Up\"] || data.actions[\"Mouse Down\"]) && this.enableScripting && this.hasJSActions) {\n this._bindJSAction(link, data);\n isBound = true;\n }\n if (data.resetForm) {\n this._bindResetFormAction(link, data.resetForm);\n isBound = true;\n } else if (this.isTooltipOnly && !isBound) {\n this._bindLink(link, \"\");\n isBound = true;\n }\n }\n this.container.classList.add(\"linkAnnotation\");\n if (isBound) {\n this.container.append(link);\n }\n return this.container;\n }\n #setInternalLink() {\n this.container.setAttribute(\"data-internal-link\", \"\");\n }\n _bindLink(link, destination) {\n link.href = this.linkService.getDestinationHash(destination);\n link.onclick = () => {\n if (destination) {\n this.linkService.goToDestination(destination);\n }\n return false;\n };\n if (destination || destination === \"\") {\n this.#setInternalLink();\n }\n }\n _bindNamedAction(link, action) {\n link.href = this.linkService.getAnchorUrl(\"\");\n link.onclick = () => {\n this.linkService.executeNamedAction(action);\n return false;\n };\n this.#setInternalLink();\n }\n #bindAttachment(link, attachment, dest = null) {\n link.href = this.linkService.getAnchorUrl(\"\");\n if (attachment.description) {\n link.title = attachment.description;\n }\n link.onclick = () => {\n this.downloadManager?.openOrDownloadData(attachment.content, attachment.filename, dest);\n return false;\n };\n this.#setInternalLink();\n }\n #bindSetOCGState(link, action) {\n link.href = this.linkService.getAnchorUrl(\"\");\n link.onclick = () => {\n this.linkService.executeSetOCGState(action);\n return false;\n };\n this.#setInternalLink();\n }\n _bindJSAction(link, data) {\n link.href = this.linkService.getAnchorUrl(\"\");\n const map = new Map([[\"Action\", \"onclick\"], [\"Mouse Up\", \"onmouseup\"], [\"Mouse Down\", \"onmousedown\"]]);\n for (const name of Object.keys(data.actions)) {\n const jsName = map.get(name);\n if (!jsName) {\n continue;\n }\n link[jsName] = () => {\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id: data.id,\n name\n }\n });\n return false;\n };\n }\n if (!link.onclick) {\n link.onclick = () => false;\n }\n this.#setInternalLink();\n }\n _bindResetFormAction(link, resetForm) {\n const otherClickAction = link.onclick;\n if (!otherClickAction) {\n link.href = this.linkService.getAnchorUrl(\"\");\n }\n this.#setInternalLink();\n if (!this._fieldObjects) {\n warn(`_bindResetFormAction - \"resetForm\" action not supported, ` + \"ensure that the `fieldObjects` parameter is provided.\");\n if (!otherClickAction) {\n link.onclick = () => false;\n }\n return;\n }\n link.onclick = () => {\n otherClickAction?.();\n const {\n fields: resetFormFields,\n refs: resetFormRefs,\n include\n } = resetForm;\n const allFields = [];\n if (resetFormFields.length !== 0 || resetFormRefs.length !== 0) {\n const fieldIds = new Set(resetFormRefs);\n for (const fieldName of resetFormFields) {\n const fields = this._fieldObjects[fieldName] || [];\n for (const {\n id\n } of fields) {\n fieldIds.add(id);\n }\n }\n for (const fields of Object.values(this._fieldObjects)) {\n for (const field of fields) {\n if (fieldIds.has(field.id) === 
include) {\n allFields.push(field);\n }\n }\n }\n } else {\n for (const fields of Object.values(this._fieldObjects)) {\n allFields.push(...fields);\n }\n }\n const storage = this.annotationStorage;\n const allIds = [];\n for (const field of allFields) {\n const {\n id\n } = field;\n allIds.push(id);\n switch (field.type) {\n case \"text\":\n {\n const value = field.defaultValue || \"\";\n storage.setValue(id, {\n value\n });\n break;\n }\n case \"checkbox\":\n case \"radiobutton\":\n {\n const value = field.defaultValue === field.exportValues;\n storage.setValue(id, {\n value\n });\n break;\n }\n case \"combobox\":\n case \"listbox\":\n {\n const value = field.defaultValue || \"\";\n storage.setValue(id, {\n value\n });\n break;\n }\n default:\n continue;\n }\n const domElement = document.querySelector(`[data-element-id=\"${id}\"]`);\n if (!domElement) {\n continue;\n } else if (!GetElementsByNameSet.has(domElement)) {\n warn(`_bindResetFormAction - element not allowed: ${id}`);\n continue;\n }\n domElement.dispatchEvent(new Event(\"resetform\"));\n }\n if (this.enableScripting) {\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id: \"app\",\n ids: allIds,\n name: \"ResetForm\"\n }\n });\n }\n return false;\n };\n }\n}\nclass TextAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true\n });\n }\n render() {\n this.container.classList.add(\"textAnnotation\");\n const image = document.createElement(\"img\");\n image.src = this.imageResourcesPath + \"annotation-\" + this.data.name.toLowerCase() + \".svg\";\n image.setAttribute(\"data-l10n-id\", \"pdfjs-text-annotation-type\");\n image.setAttribute(\"data-l10n-args\", JSON.stringify({\n type: this.data.name\n }));\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n this.container.append(image);\n return this.container;\n }\n}\nclass WidgetAnnotationElement extends AnnotationElement {\n render() {\n return this.container;\n }\n showElementAndHideCanvas(element) {\n if (this.data.hasOwnCanvas) {\n if (element.previousSibling?.nodeName === \"CANVAS\") {\n element.previousSibling.hidden = true;\n }\n element.hidden = false;\n }\n }\n _getKeyModifier(event) {\n return util_FeatureTest.platform.isMac ? 
event.metaKey : event.ctrlKey;\n }\n _setEventListener(element, elementData, baseName, eventName, valueGetter) {\n if (baseName.includes(\"mouse\")) {\n element.addEventListener(baseName, event => {\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id: this.data.id,\n name: eventName,\n value: valueGetter(event),\n shift: event.shiftKey,\n modifier: this._getKeyModifier(event)\n }\n });\n });\n } else {\n element.addEventListener(baseName, event => {\n if (baseName === \"blur\") {\n if (!elementData.focused || !event.relatedTarget) {\n return;\n }\n elementData.focused = false;\n } else if (baseName === \"focus\") {\n if (elementData.focused) {\n return;\n }\n elementData.focused = true;\n }\n if (!valueGetter) {\n return;\n }\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id: this.data.id,\n name: eventName,\n value: valueGetter(event)\n }\n });\n });\n }\n }\n _setEventListeners(element, elementData, names, getter) {\n for (const [baseName, eventName] of names) {\n if (eventName === \"Action\" || this.data.actions?.[eventName]) {\n if (eventName === \"Focus\" || eventName === \"Blur\") {\n elementData ||= {\n focused: false\n };\n }\n this._setEventListener(element, elementData, baseName, eventName, getter);\n if (eventName === \"Focus\" && !this.data.actions?.Blur) {\n this._setEventListener(element, elementData, \"blur\", \"Blur\", null);\n } else if (eventName === \"Blur\" && !this.data.actions?.Focus) {\n this._setEventListener(element, elementData, \"focus\", \"Focus\", null);\n }\n }\n }\n }\n _setBackgroundColor(element) {\n const color = this.data.backgroundColor || null;\n element.style.backgroundColor = color === null ? \"transparent\" : Util.makeHexColor(color[0], color[1], color[2]);\n }\n _setTextStyle(element) {\n const TEXT_ALIGNMENT = [\"left\", \"center\", \"right\"];\n const {\n fontColor\n } = this.data.defaultAppearanceData;\n const fontSize = this.data.defaultAppearanceData.fontSize || annotation_layer_DEFAULT_FONT_SIZE;\n const style = element.style;\n let computedFontSize;\n const BORDER_SIZE = 2;\n const roundToOneDecimal = x => Math.round(10 * x) / 10;\n if (this.data.multiLine) {\n const height = Math.abs(this.data.rect[3] - this.data.rect[1] - BORDER_SIZE);\n const numberOfLines = Math.round(height / (LINE_FACTOR * fontSize)) || 1;\n const lineHeight = height / numberOfLines;\n computedFontSize = Math.min(fontSize, roundToOneDecimal(lineHeight / LINE_FACTOR));\n } else {\n const height = Math.abs(this.data.rect[3] - this.data.rect[1] - BORDER_SIZE);\n computedFontSize = Math.min(fontSize, roundToOneDecimal(height / LINE_FACTOR));\n }\n style.fontSize = `calc(${computedFontSize}px * var(--scale-factor))`;\n style.color = Util.makeHexColor(fontColor[0], fontColor[1], fontColor[2]);\n if (this.data.textAlignment !== null) {\n style.textAlign = TEXT_ALIGNMENT[this.data.textAlignment];\n }\n }\n _setRequired(element, isRequired) {\n if (isRequired) {\n element.setAttribute(\"required\", true);\n } else {\n element.removeAttribute(\"required\");\n }\n element.setAttribute(\"aria-required\", isRequired);\n }\n}\nclass TextWidgetAnnotationElement extends WidgetAnnotationElement {\n constructor(parameters) {\n const isRenderable = parameters.renderForms || parameters.data.hasOwnCanvas || !parameters.data.hasAppearance && !!parameters.data.fieldValue;\n super(parameters, {\n isRenderable\n });\n }\n setPropertyOnSiblings(base, key, value, keyInStorage) {\n const storage = 
this.annotationStorage;\n for (const element of this._getElementsByName(base.name, base.id)) {\n if (element.domElement) {\n element.domElement[key] = value;\n }\n storage.setValue(element.id, {\n [keyInStorage]: value\n });\n }\n }\n render() {\n const storage = this.annotationStorage;\n const id = this.data.id;\n this.container.classList.add(\"textWidgetAnnotation\");\n let element = null;\n if (this.renderForms) {\n const storedData = storage.getValue(id, {\n value: this.data.fieldValue\n });\n let textContent = storedData.value || \"\";\n const maxLen = storage.getValue(id, {\n charLimit: this.data.maxLen\n }).charLimit;\n if (maxLen && textContent.length > maxLen) {\n textContent = textContent.slice(0, maxLen);\n }\n let fieldFormattedValues = storedData.formattedValue || this.data.textContent?.join(\"\\n\") || null;\n if (fieldFormattedValues && this.data.comb) {\n fieldFormattedValues = fieldFormattedValues.replaceAll(/\\s+/g, \"\");\n }\n const elementData = {\n userValue: textContent,\n formattedValue: fieldFormattedValues,\n lastCommittedValue: null,\n commitKey: 1,\n focused: false\n };\n if (this.data.multiLine) {\n element = document.createElement(\"textarea\");\n element.textContent = fieldFormattedValues ?? textContent;\n if (this.data.doNotScroll) {\n element.style.overflowY = \"hidden\";\n }\n } else {\n element = document.createElement(\"input\");\n element.type = \"text\";\n element.setAttribute(\"value\", fieldFormattedValues ?? textContent);\n if (this.data.doNotScroll) {\n element.style.overflowX = \"hidden\";\n }\n }\n if (this.data.hasOwnCanvas) {\n element.hidden = true;\n }\n GetElementsByNameSet.add(element);\n element.setAttribute(\"data-element-id\", id);\n element.disabled = this.data.readOnly;\n element.name = this.data.fieldName;\n element.tabIndex = DEFAULT_TAB_INDEX;\n this._setRequired(element, this.data.required);\n if (maxLen) {\n element.maxLength = maxLen;\n }\n element.addEventListener(\"input\", event => {\n storage.setValue(id, {\n value: event.target.value\n });\n this.setPropertyOnSiblings(element, \"value\", event.target.value, \"value\");\n elementData.formattedValue = null;\n });\n element.addEventListener(\"resetform\", event => {\n const defaultValue = this.data.defaultFieldValue ?? \"\";\n element.value = elementData.userValue = defaultValue;\n elementData.formattedValue = null;\n });\n let blurListener = event => {\n const {\n formattedValue\n } = elementData;\n if (formattedValue !== null && formattedValue !== undefined) {\n event.target.value = formattedValue;\n }\n event.target.scrollLeft = 0;\n };\n if (this.enableScripting && this.hasJSActions) {\n element.addEventListener(\"focus\", event => {\n if (elementData.focused) {\n return;\n }\n const {\n target\n } = event;\n if (elementData.userValue) {\n target.value = elementData.userValue;\n }\n elementData.lastCommittedValue = target.value;\n elementData.commitKey = 1;\n if (!this.data.actions?.Focus) {\n elementData.focused = true;\n }\n });\n element.addEventListener(\"updatefromsandbox\", jsEvent => {\n this.showElementAndHideCanvas(jsEvent.target);\n const actions = {\n value(event) {\n elementData.userValue = event.detail.value ?? 
\"\";\n storage.setValue(id, {\n value: elementData.userValue.toString()\n });\n event.target.value = elementData.userValue;\n },\n formattedValue(event) {\n const {\n formattedValue\n } = event.detail;\n elementData.formattedValue = formattedValue;\n if (formattedValue !== null && formattedValue !== undefined && event.target !== document.activeElement) {\n event.target.value = formattedValue;\n }\n storage.setValue(id, {\n formattedValue\n });\n },\n selRange(event) {\n event.target.setSelectionRange(...event.detail.selRange);\n },\n charLimit: event => {\n const {\n charLimit\n } = event.detail;\n const {\n target\n } = event;\n if (charLimit === 0) {\n target.removeAttribute(\"maxLength\");\n return;\n }\n target.setAttribute(\"maxLength\", charLimit);\n let value = elementData.userValue;\n if (!value || value.length <= charLimit) {\n return;\n }\n value = value.slice(0, charLimit);\n target.value = elementData.userValue = value;\n storage.setValue(id, {\n value\n });\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id,\n name: \"Keystroke\",\n value,\n willCommit: true,\n commitKey: 1,\n selStart: target.selectionStart,\n selEnd: target.selectionEnd\n }\n });\n }\n };\n this._dispatchEventFromSandbox(actions, jsEvent);\n });\n element.addEventListener(\"keydown\", event => {\n elementData.commitKey = 1;\n let commitKey = -1;\n if (event.key === \"Escape\") {\n commitKey = 0;\n } else if (event.key === \"Enter\" && !this.data.multiLine) {\n commitKey = 2;\n } else if (event.key === \"Tab\") {\n elementData.commitKey = 3;\n }\n if (commitKey === -1) {\n return;\n }\n const {\n value\n } = event.target;\n if (elementData.lastCommittedValue === value) {\n return;\n }\n elementData.lastCommittedValue = value;\n elementData.userValue = value;\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id,\n name: \"Keystroke\",\n value,\n willCommit: true,\n commitKey,\n selStart: event.target.selectionStart,\n selEnd: event.target.selectionEnd\n }\n });\n });\n const _blurListener = blurListener;\n blurListener = null;\n element.addEventListener(\"blur\", event => {\n if (!elementData.focused || !event.relatedTarget) {\n return;\n }\n if (!this.data.actions?.Blur) {\n elementData.focused = false;\n }\n const {\n value\n } = event.target;\n elementData.userValue = value;\n if (elementData.lastCommittedValue !== value) {\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id,\n name: \"Keystroke\",\n value,\n willCommit: true,\n commitKey: elementData.commitKey,\n selStart: event.target.selectionStart,\n selEnd: event.target.selectionEnd\n }\n });\n }\n _blurListener(event);\n });\n if (this.data.actions?.Keystroke) {\n element.addEventListener(\"beforeinput\", event => {\n elementData.lastCommittedValue = null;\n const {\n data,\n target\n } = event;\n const {\n value,\n selectionStart,\n selectionEnd\n } = target;\n let selStart = selectionStart,\n selEnd = selectionEnd;\n switch (event.inputType) {\n case \"deleteWordBackward\":\n {\n const match = value.substring(0, selectionStart).match(/\\w*[^\\w]*$/);\n if (match) {\n selStart -= match[0].length;\n }\n break;\n }\n case \"deleteWordForward\":\n {\n const match = value.substring(selectionStart).match(/^[^\\w]*\\w*/);\n if (match) {\n selEnd += match[0].length;\n }\n break;\n }\n case \"deleteContentBackward\":\n if (selectionStart === selectionEnd) {\n selStart -= 1;\n }\n break;\n case 
\"deleteContentForward\":\n if (selectionStart === selectionEnd) {\n selEnd += 1;\n }\n break;\n }\n event.preventDefault();\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id,\n name: \"Keystroke\",\n value,\n change: data || \"\",\n willCommit: false,\n selStart,\n selEnd\n }\n });\n });\n }\n this._setEventListeners(element, elementData, [[\"focus\", \"Focus\"], [\"blur\", \"Blur\"], [\"mousedown\", \"Mouse Down\"], [\"mouseenter\", \"Mouse Enter\"], [\"mouseleave\", \"Mouse Exit\"], [\"mouseup\", \"Mouse Up\"]], event => event.target.value);\n }\n if (blurListener) {\n element.addEventListener(\"blur\", blurListener);\n }\n if (this.data.comb) {\n const fieldWidth = this.data.rect[2] - this.data.rect[0];\n const combWidth = fieldWidth / maxLen;\n element.classList.add(\"comb\");\n element.style.letterSpacing = `calc(${combWidth}px * var(--scale-factor) - 1ch)`;\n }\n } else {\n element = document.createElement(\"div\");\n element.textContent = this.data.fieldValue;\n element.style.verticalAlign = \"middle\";\n element.style.display = \"table-cell\";\n if (this.data.hasOwnCanvas) {\n element.hidden = true;\n }\n }\n this._setTextStyle(element);\n this._setBackgroundColor(element);\n this._setDefaultPropertiesFromJS(element);\n this.container.append(element);\n return this.container;\n }\n}\nclass SignatureWidgetAnnotationElement extends WidgetAnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: !!parameters.data.hasOwnCanvas\n });\n }\n}\nclass CheckboxWidgetAnnotationElement extends WidgetAnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: parameters.renderForms\n });\n }\n render() {\n const storage = this.annotationStorage;\n const data = this.data;\n const id = data.id;\n let value = storage.getValue(id, {\n value: data.exportValue === data.fieldValue\n }).value;\n if (typeof value === \"string\") {\n value = value !== \"Off\";\n storage.setValue(id, {\n value\n });\n }\n this.container.classList.add(\"buttonWidgetAnnotation\", \"checkBox\");\n const element = document.createElement(\"input\");\n GetElementsByNameSet.add(element);\n element.setAttribute(\"data-element-id\", id);\n element.disabled = data.readOnly;\n this._setRequired(element, this.data.required);\n element.type = \"checkbox\";\n element.name = data.fieldName;\n if (value) {\n element.setAttribute(\"checked\", true);\n }\n element.setAttribute(\"exportValue\", data.exportValue);\n element.tabIndex = DEFAULT_TAB_INDEX;\n element.addEventListener(\"change\", event => {\n const {\n name,\n checked\n } = event.target;\n for (const checkbox of this._getElementsByName(name, id)) {\n const curChecked = checked && checkbox.exportValue === data.exportValue;\n if (checkbox.domElement) {\n checkbox.domElement.checked = curChecked;\n }\n storage.setValue(checkbox.id, {\n value: curChecked\n });\n }\n storage.setValue(id, {\n value: checked\n });\n });\n element.addEventListener(\"resetform\", event => {\n const defaultValue = data.defaultFieldValue || \"Off\";\n event.target.checked = defaultValue === data.exportValue;\n });\n if (this.enableScripting && this.hasJSActions) {\n element.addEventListener(\"updatefromsandbox\", jsEvent => {\n const actions = {\n value(event) {\n event.target.checked = event.detail.value !== \"Off\";\n storage.setValue(id, {\n value: event.target.checked\n });\n }\n };\n this._dispatchEventFromSandbox(actions, jsEvent);\n });\n this._setEventListeners(element, null, [[\"change\", 
\"Validate\"], [\"change\", \"Action\"], [\"focus\", \"Focus\"], [\"blur\", \"Blur\"], [\"mousedown\", \"Mouse Down\"], [\"mouseenter\", \"Mouse Enter\"], [\"mouseleave\", \"Mouse Exit\"], [\"mouseup\", \"Mouse Up\"]], event => event.target.checked);\n }\n this._setBackgroundColor(element);\n this._setDefaultPropertiesFromJS(element);\n this.container.append(element);\n return this.container;\n }\n}\nclass RadioButtonWidgetAnnotationElement extends WidgetAnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: parameters.renderForms\n });\n }\n render() {\n this.container.classList.add(\"buttonWidgetAnnotation\", \"radioButton\");\n const storage = this.annotationStorage;\n const data = this.data;\n const id = data.id;\n let value = storage.getValue(id, {\n value: data.fieldValue === data.buttonValue\n }).value;\n if (typeof value === \"string\") {\n value = value !== data.buttonValue;\n storage.setValue(id, {\n value\n });\n }\n if (value) {\n for (const radio of this._getElementsByName(data.fieldName, id)) {\n storage.setValue(radio.id, {\n value: false\n });\n }\n }\n const element = document.createElement(\"input\");\n GetElementsByNameSet.add(element);\n element.setAttribute(\"data-element-id\", id);\n element.disabled = data.readOnly;\n this._setRequired(element, this.data.required);\n element.type = \"radio\";\n element.name = data.fieldName;\n if (value) {\n element.setAttribute(\"checked\", true);\n }\n element.tabIndex = DEFAULT_TAB_INDEX;\n element.addEventListener(\"change\", event => {\n const {\n name,\n checked\n } = event.target;\n for (const radio of this._getElementsByName(name, id)) {\n storage.setValue(radio.id, {\n value: false\n });\n }\n storage.setValue(id, {\n value: checked\n });\n });\n element.addEventListener(\"resetform\", event => {\n const defaultValue = data.defaultFieldValue;\n event.target.checked = defaultValue !== null && defaultValue !== undefined && defaultValue === data.buttonValue;\n });\n if (this.enableScripting && this.hasJSActions) {\n const pdfButtonValue = data.buttonValue;\n element.addEventListener(\"updatefromsandbox\", jsEvent => {\n const actions = {\n value: event => {\n const checked = pdfButtonValue === event.detail.value;\n for (const radio of this._getElementsByName(event.target.name)) {\n const curChecked = checked && radio.id === id;\n if (radio.domElement) {\n radio.domElement.checked = curChecked;\n }\n storage.setValue(radio.id, {\n value: curChecked\n });\n }\n }\n };\n this._dispatchEventFromSandbox(actions, jsEvent);\n });\n this._setEventListeners(element, null, [[\"change\", \"Validate\"], [\"change\", \"Action\"], [\"focus\", \"Focus\"], [\"blur\", \"Blur\"], [\"mousedown\", \"Mouse Down\"], [\"mouseenter\", \"Mouse Enter\"], [\"mouseleave\", \"Mouse Exit\"], [\"mouseup\", \"Mouse Up\"]], event => event.target.checked);\n }\n this._setBackgroundColor(element);\n this._setDefaultPropertiesFromJS(element);\n this.container.append(element);\n return this.container;\n }\n}\nclass PushButtonWidgetAnnotationElement extends LinkAnnotationElement {\n constructor(parameters) {\n super(parameters, {\n ignoreBorder: parameters.data.hasAppearance\n });\n }\n render() {\n const container = super.render();\n container.classList.add(\"buttonWidgetAnnotation\", \"pushButton\");\n const linkElement = container.lastChild;\n if (this.enableScripting && this.hasJSActions && linkElement) {\n this._setDefaultPropertiesFromJS(linkElement);\n linkElement.addEventListener(\"updatefromsandbox\", jsEvent => {\n 
this._dispatchEventFromSandbox({}, jsEvent);\n });\n }\n return container;\n }\n}\nclass ChoiceWidgetAnnotationElement extends WidgetAnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: parameters.renderForms\n });\n }\n render() {\n this.container.classList.add(\"choiceWidgetAnnotation\");\n const storage = this.annotationStorage;\n const id = this.data.id;\n const storedData = storage.getValue(id, {\n value: this.data.fieldValue\n });\n const selectElement = document.createElement(\"select\");\n GetElementsByNameSet.add(selectElement);\n selectElement.setAttribute(\"data-element-id\", id);\n selectElement.disabled = this.data.readOnly;\n this._setRequired(selectElement, this.data.required);\n selectElement.name = this.data.fieldName;\n selectElement.tabIndex = DEFAULT_TAB_INDEX;\n let addAnEmptyEntry = this.data.combo && this.data.options.length > 0;\n if (!this.data.combo) {\n selectElement.size = this.data.options.length;\n if (this.data.multiSelect) {\n selectElement.multiple = true;\n }\n }\n selectElement.addEventListener(\"resetform\", event => {\n const defaultValue = this.data.defaultFieldValue;\n for (const option of selectElement.options) {\n option.selected = option.value === defaultValue;\n }\n });\n for (const option of this.data.options) {\n const optionElement = document.createElement(\"option\");\n optionElement.textContent = option.displayValue;\n optionElement.value = option.exportValue;\n if (storedData.value.includes(option.exportValue)) {\n optionElement.setAttribute(\"selected\", true);\n addAnEmptyEntry = false;\n }\n selectElement.append(optionElement);\n }\n let removeEmptyEntry = null;\n if (addAnEmptyEntry) {\n const noneOptionElement = document.createElement(\"option\");\n noneOptionElement.value = \" \";\n noneOptionElement.setAttribute(\"hidden\", true);\n noneOptionElement.setAttribute(\"selected\", true);\n selectElement.prepend(noneOptionElement);\n removeEmptyEntry = () => {\n noneOptionElement.remove();\n selectElement.removeEventListener(\"input\", removeEmptyEntry);\n removeEmptyEntry = null;\n };\n selectElement.addEventListener(\"input\", removeEmptyEntry);\n }\n const getValue = isExport => {\n const name = isExport ? \"value\" : \"textContent\";\n const {\n options,\n multiple\n } = selectElement;\n if (!multiple) {\n return options.selectedIndex === -1 ? null : options[options.selectedIndex][name];\n }\n return Array.prototype.filter.call(options, option => option.selected).map(option => option[name]);\n };\n let selectedValues = getValue(false);\n const getItems = event => {\n const options = event.target.options;\n return Array.prototype.map.call(options, option => ({\n displayValue: option.textContent,\n exportValue: option.value\n }));\n };\n if (this.enableScripting && this.hasJSActions) {\n selectElement.addEventListener(\"updatefromsandbox\", jsEvent => {\n const actions = {\n value(event) {\n removeEmptyEntry?.();\n const value = event.detail.value;\n const values = new Set(Array.isArray(value) ? 
value : [value]);\n for (const option of selectElement.options) {\n option.selected = values.has(option.value);\n }\n storage.setValue(id, {\n value: getValue(true)\n });\n selectedValues = getValue(false);\n },\n multipleSelection(event) {\n selectElement.multiple = true;\n },\n remove(event) {\n const options = selectElement.options;\n const index = event.detail.remove;\n options[index].selected = false;\n selectElement.remove(index);\n if (options.length > 0) {\n const i = Array.prototype.findIndex.call(options, option => option.selected);\n if (i === -1) {\n options[0].selected = true;\n }\n }\n storage.setValue(id, {\n value: getValue(true),\n items: getItems(event)\n });\n selectedValues = getValue(false);\n },\n clear(event) {\n while (selectElement.length !== 0) {\n selectElement.remove(0);\n }\n storage.setValue(id, {\n value: null,\n items: []\n });\n selectedValues = getValue(false);\n },\n insert(event) {\n const {\n index,\n displayValue,\n exportValue\n } = event.detail.insert;\n const selectChild = selectElement.children[index];\n const optionElement = document.createElement(\"option\");\n optionElement.textContent = displayValue;\n optionElement.value = exportValue;\n if (selectChild) {\n selectChild.before(optionElement);\n } else {\n selectElement.append(optionElement);\n }\n storage.setValue(id, {\n value: getValue(true),\n items: getItems(event)\n });\n selectedValues = getValue(false);\n },\n items(event) {\n const {\n items\n } = event.detail;\n while (selectElement.length !== 0) {\n selectElement.remove(0);\n }\n for (const item of items) {\n const {\n displayValue,\n exportValue\n } = item;\n const optionElement = document.createElement(\"option\");\n optionElement.textContent = displayValue;\n optionElement.value = exportValue;\n selectElement.append(optionElement);\n }\n if (selectElement.options.length > 0) {\n selectElement.options[0].selected = true;\n }\n storage.setValue(id, {\n value: getValue(true),\n items: getItems(event)\n });\n selectedValues = getValue(false);\n },\n indices(event) {\n const indices = new Set(event.detail.indices);\n for (const option of event.target.options) {\n option.selected = indices.has(option.index);\n }\n storage.setValue(id, {\n value: getValue(true)\n });\n selectedValues = getValue(false);\n },\n editable(event) {\n event.target.disabled = !event.detail.editable;\n }\n };\n this._dispatchEventFromSandbox(actions, jsEvent);\n });\n selectElement.addEventListener(\"input\", event => {\n const exportValue = getValue(true);\n const change = getValue(false);\n storage.setValue(id, {\n value: exportValue\n });\n event.preventDefault();\n this.linkService.eventBus?.dispatch(\"dispatcheventinsandbox\", {\n source: this,\n detail: {\n id,\n name: \"Keystroke\",\n value: selectedValues,\n change,\n changeEx: exportValue,\n willCommit: false,\n commitKey: 1,\n keyDown: false\n }\n });\n });\n this._setEventListeners(selectElement, null, [[\"focus\", \"Focus\"], [\"blur\", \"Blur\"], [\"mousedown\", \"Mouse Down\"], [\"mouseenter\", \"Mouse Enter\"], [\"mouseleave\", \"Mouse Exit\"], [\"mouseup\", \"Mouse Up\"], [\"input\", \"Action\"], [\"input\", \"Validate\"]], event => event.target.value);\n } else {\n selectElement.addEventListener(\"input\", function (event) {\n storage.setValue(id, {\n value: getValue(true)\n });\n });\n }\n if (this.data.combo) {\n this._setTextStyle(selectElement);\n } else {}\n this._setBackgroundColor(selectElement);\n this._setDefaultPropertiesFromJS(selectElement);\n this.container.append(selectElement);\n 
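// Every change handled above (direct user input or a sandbox update event) has already been mirrored into annotationStorage under this field id.\n 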
return this.container;\n }\n}\nclass PopupAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n const {\n data,\n elements\n } = parameters;\n super(parameters, {\n isRenderable: AnnotationElement._hasPopupData(data)\n });\n this.elements = elements;\n this.popup = null;\n }\n render() {\n this.container.classList.add(\"popupAnnotation\");\n const popup = this.popup = new PopupElement({\n container: this.container,\n color: this.data.color,\n titleObj: this.data.titleObj,\n modificationDate: this.data.modificationDate,\n contentsObj: this.data.contentsObj,\n richText: this.data.richText,\n rect: this.data.rect,\n parentRect: this.data.parentRect || null,\n parent: this.parent,\n elements: this.elements,\n open: this.data.open\n });\n const elementIds = [];\n for (const element of this.elements) {\n element.popup = popup;\n elementIds.push(element.data.id);\n element.addHighlightArea();\n }\n this.container.setAttribute(\"aria-controls\", elementIds.map(id => `${AnnotationPrefix}${id}`).join(\",\"));\n return this.container;\n }\n}\nclass PopupElement {\n #boundKeyDown = this.#keyDown.bind(this);\n #boundHide = this.#hide.bind(this);\n #boundShow = this.#show.bind(this);\n #boundToggle = this.#toggle.bind(this);\n #color = null;\n #container = null;\n #contentsObj = null;\n #dateObj = null;\n #elements = null;\n #parent = null;\n #parentRect = null;\n #pinned = false;\n #popup = null;\n #position = null;\n #rect = null;\n #richText = null;\n #titleObj = null;\n #updates = null;\n #wasVisible = false;\n constructor({\n container,\n color,\n elements,\n titleObj,\n modificationDate,\n contentsObj,\n richText,\n parent,\n rect,\n parentRect,\n open\n }) {\n this.#container = container;\n this.#titleObj = titleObj;\n this.#contentsObj = contentsObj;\n this.#richText = richText;\n this.#parent = parent;\n this.#color = color;\n this.#rect = rect;\n this.#parentRect = parentRect;\n this.#elements = elements;\n this.#dateObj = PDFDateString.toDateObject(modificationDate);\n this.trigger = elements.flatMap(e => e.getElementsToTriggerPopup());\n for (const element of this.trigger) {\n element.addEventListener(\"click\", this.#boundToggle);\n element.addEventListener(\"mouseenter\", this.#boundShow);\n element.addEventListener(\"mouseleave\", this.#boundHide);\n element.classList.add(\"popupTriggerArea\");\n }\n for (const element of elements) {\n element.container?.addEventListener(\"keydown\", this.#boundKeyDown);\n }\n this.#container.hidden = true;\n if (open) {\n this.#toggle();\n }\n }\n render() {\n if (this.#popup) {\n return;\n }\n const popup = this.#popup = document.createElement(\"div\");\n popup.className = \"popup\";\n if (this.#color) {\n const baseColor = popup.style.outlineColor = Util.makeHexColor(...this.#color);\n if (CSS.supports(\"background-color\", \"color-mix(in srgb, red 30%, white)\")) {\n popup.style.backgroundColor = `color-mix(in srgb, ${baseColor} 30%, white)`;\n } else {\n const BACKGROUND_ENLIGHT = 0.7;\n popup.style.backgroundColor = Util.makeHexColor(...this.#color.map(c => Math.floor(BACKGROUND_ENLIGHT * (255 - c) + c)));\n }\n }\n const header = document.createElement(\"span\");\n header.className = \"header\";\n const title = document.createElement(\"h1\");\n header.append(title);\n ({\n dir: title.dir,\n str: title.textContent\n } = this.#titleObj);\n popup.append(header);\n if (this.#dateObj) {\n const modificationDate = document.createElement(\"span\");\n modificationDate.classList.add(\"popupDate\");\n 
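// The date span is labelled with data-l10n-id / data-l10n-args so the viewer localization layer can format the modification date and time.\n 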
modificationDate.setAttribute(\"data-l10n-id\", \"pdfjs-annotation-date-string\");\n modificationDate.setAttribute(\"data-l10n-args\", JSON.stringify({\n date: this.#dateObj.toLocaleDateString(),\n time: this.#dateObj.toLocaleTimeString()\n }));\n header.append(modificationDate);\n }\n const html = this.#html;\n if (html) {\n XfaLayer.render({\n xfaHtml: html,\n intent: \"richText\",\n div: popup\n });\n popup.lastChild.classList.add(\"richText\", \"popupContent\");\n } else {\n const contents = this._formatContents(this.#contentsObj);\n popup.append(contents);\n }\n this.#container.append(popup);\n }\n get #html() {\n const richText = this.#richText;\n const contentsObj = this.#contentsObj;\n if (richText?.str && (!contentsObj?.str || contentsObj.str === richText.str)) {\n return this.#richText.html || null;\n }\n return null;\n }\n get #fontSize() {\n return this.#html?.attributes?.style?.fontSize || 0;\n }\n get #fontColor() {\n return this.#html?.attributes?.style?.color || null;\n }\n #makePopupContent(text) {\n const popupLines = [];\n const popupContent = {\n str: text,\n html: {\n name: \"div\",\n attributes: {\n dir: \"auto\"\n },\n children: [{\n name: \"p\",\n children: popupLines\n }]\n }\n };\n const lineAttributes = {\n style: {\n color: this.#fontColor,\n fontSize: this.#fontSize ? `calc(${this.#fontSize}px * var(--scale-factor))` : \"\"\n }\n };\n for (const line of text.split(\"\\n\")) {\n popupLines.push({\n name: \"span\",\n value: line,\n attributes: lineAttributes\n });\n }\n return popupContent;\n }\n _formatContents({\n str,\n dir\n }) {\n const p = document.createElement(\"p\");\n p.classList.add(\"popupContent\");\n p.dir = dir;\n const lines = str.split(/(?:\\r\\n?|\\n)/);\n for (let i = 0, ii = lines.length; i < ii; ++i) {\n const line = lines[i];\n p.append(document.createTextNode(line));\n if (i < ii - 1) {\n p.append(document.createElement(\"br\"));\n }\n }\n return p;\n }\n #keyDown(event) {\n if (event.altKey || event.shiftKey || event.ctrlKey || event.metaKey) {\n return;\n }\n if (event.key === \"Enter\" || event.key === \"Escape\" && this.#pinned) {\n this.#toggle();\n }\n }\n updateEdited({\n rect,\n popupContent\n }) {\n this.#updates ||= {\n contentsObj: this.#contentsObj,\n richText: this.#richText\n };\n if (rect) {\n this.#position = null;\n }\n if (popupContent) {\n this.#richText = this.#makePopupContent(popupContent);\n this.#contentsObj = null;\n }\n this.#popup?.remove();\n this.#popup = null;\n }\n resetEdited() {\n if (!this.#updates) {\n return;\n }\n ({\n contentsObj: this.#contentsObj,\n richText: this.#richText\n } = this.#updates);\n this.#updates = null;\n this.#popup?.remove();\n this.#popup = null;\n this.#position = null;\n }\n #setPosition() {\n if (this.#position !== null) {\n return;\n }\n const {\n page: {\n view\n },\n viewport: {\n rawDims: {\n pageWidth,\n pageHeight,\n pageX,\n pageY\n }\n }\n } = this.#parent;\n let useParentRect = !!this.#parentRect;\n let rect = useParentRect ? this.#parentRect : this.#rect;\n for (const element of this.#elements) {\n if (!rect || Util.intersect(element.data.rect, rect) !== null) {\n rect = element.data.rect;\n useParentRect = true;\n break;\n }\n }\n const normalizedRect = Util.normalizeRect([rect[0], view[3] - rect[1] + view[1], rect[2], view[3] - rect[3] + view[1]]);\n const HORIZONTAL_SPACE_AFTER_ANNOTATION = 5;\n const parentWidth = useParentRect ? 
rect[2] - rect[0] + HORIZONTAL_SPACE_AFTER_ANNOTATION : 0;\n const popupLeft = normalizedRect[0] + parentWidth;\n const popupTop = normalizedRect[1];\n this.#position = [100 * (popupLeft - pageX) / pageWidth, 100 * (popupTop - pageY) / pageHeight];\n const {\n style\n } = this.#container;\n style.left = `${this.#position[0]}%`;\n style.top = `${this.#position[1]}%`;\n }\n #toggle() {\n this.#pinned = !this.#pinned;\n if (this.#pinned) {\n this.#show();\n this.#container.addEventListener(\"click\", this.#boundToggle);\n this.#container.addEventListener(\"keydown\", this.#boundKeyDown);\n } else {\n this.#hide();\n this.#container.removeEventListener(\"click\", this.#boundToggle);\n this.#container.removeEventListener(\"keydown\", this.#boundKeyDown);\n }\n }\n #show() {\n if (!this.#popup) {\n this.render();\n }\n if (!this.isVisible) {\n this.#setPosition();\n this.#container.hidden = false;\n this.#container.style.zIndex = parseInt(this.#container.style.zIndex) + 1000;\n } else if (this.#pinned) {\n this.#container.classList.add(\"focused\");\n }\n }\n #hide() {\n this.#container.classList.remove(\"focused\");\n if (this.#pinned || !this.isVisible) {\n return;\n }\n this.#container.hidden = true;\n this.#container.style.zIndex = parseInt(this.#container.style.zIndex) - 1000;\n }\n forceHide() {\n this.#wasVisible = this.isVisible;\n if (!this.#wasVisible) {\n return;\n }\n this.#container.hidden = true;\n }\n maybeShow() {\n if (!this.#wasVisible) {\n return;\n }\n if (!this.#popup) {\n this.#show();\n }\n this.#wasVisible = false;\n this.#container.hidden = false;\n }\n get isVisible() {\n return this.#container.hidden === false;\n }\n}\nclass FreeTextAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n this.textContent = parameters.data.textContent;\n this.textPosition = parameters.data.textPosition;\n this.annotationEditorType = AnnotationEditorType.FREETEXT;\n }\n render() {\n this.container.classList.add(\"freeTextAnnotation\");\n if (this.textContent) {\n const content = document.createElement(\"div\");\n content.classList.add(\"annotationTextContent\");\n content.setAttribute(\"role\", \"comment\");\n for (const line of this.textContent) {\n const lineSpan = document.createElement(\"span\");\n lineSpan.textContent = line;\n content.append(lineSpan);\n }\n this.container.append(content);\n }\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n this._editOnDoubleClick();\n return this.container;\n }\n get _isEditable() {\n return this.data.hasOwnCanvas;\n }\n}\nclass LineAnnotationElement extends AnnotationElement {\n #line = null;\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n }\n render() {\n this.container.classList.add(\"lineAnnotation\");\n const data = this.data;\n const {\n width,\n height\n } = getRectDims(data.rect);\n const svg = this.svgFactory.create(width, height, true);\n const line = this.#line = this.svgFactory.createElement(\"svg:line\");\n line.setAttribute(\"x1\", data.rect[2] - data.lineCoordinates[0]);\n line.setAttribute(\"y1\", data.rect[3] - data.lineCoordinates[1]);\n line.setAttribute(\"x2\", data.rect[2] - data.lineCoordinates[2]);\n line.setAttribute(\"y2\", data.rect[3] - data.lineCoordinates[3]);\n line.setAttribute(\"stroke-width\", data.borderStyle.width || 1);\n line.setAttribute(\"stroke\", \"transparent\");\n line.setAttribute(\"fill\", \"transparent\");\n svg.append(line);\n 
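// The SVG line is drawn with a transparent stroke and fill: it only defines the hit area returned by getElementsToTriggerPopup below.\n 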
this.container.append(svg);\n if (!data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n return this.container;\n }\n getElementsToTriggerPopup() {\n return this.#line;\n }\n addHighlightArea() {\n this.container.classList.add(\"highlightArea\");\n }\n}\nclass SquareAnnotationElement extends AnnotationElement {\n #square = null;\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n }\n render() {\n this.container.classList.add(\"squareAnnotation\");\n const data = this.data;\n const {\n width,\n height\n } = getRectDims(data.rect);\n const svg = this.svgFactory.create(width, height, true);\n const borderWidth = data.borderStyle.width;\n const square = this.#square = this.svgFactory.createElement(\"svg:rect\");\n square.setAttribute(\"x\", borderWidth / 2);\n square.setAttribute(\"y\", borderWidth / 2);\n square.setAttribute(\"width\", width - borderWidth);\n square.setAttribute(\"height\", height - borderWidth);\n square.setAttribute(\"stroke-width\", borderWidth || 1);\n square.setAttribute(\"stroke\", \"transparent\");\n square.setAttribute(\"fill\", \"transparent\");\n svg.append(square);\n this.container.append(svg);\n if (!data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n return this.container;\n }\n getElementsToTriggerPopup() {\n return this.#square;\n }\n addHighlightArea() {\n this.container.classList.add(\"highlightArea\");\n }\n}\nclass CircleAnnotationElement extends AnnotationElement {\n #circle = null;\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n }\n render() {\n this.container.classList.add(\"circleAnnotation\");\n const data = this.data;\n const {\n width,\n height\n } = getRectDims(data.rect);\n const svg = this.svgFactory.create(width, height, true);\n const borderWidth = data.borderStyle.width;\n const circle = this.#circle = this.svgFactory.createElement(\"svg:ellipse\");\n circle.setAttribute(\"cx\", width / 2);\n circle.setAttribute(\"cy\", height / 2);\n circle.setAttribute(\"rx\", width / 2 - borderWidth / 2);\n circle.setAttribute(\"ry\", height / 2 - borderWidth / 2);\n circle.setAttribute(\"stroke-width\", borderWidth || 1);\n circle.setAttribute(\"stroke\", \"transparent\");\n circle.setAttribute(\"fill\", \"transparent\");\n svg.append(circle);\n this.container.append(svg);\n if (!data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n return this.container;\n }\n getElementsToTriggerPopup() {\n return this.#circle;\n }\n addHighlightArea() {\n this.container.classList.add(\"highlightArea\");\n }\n}\nclass PolylineAnnotationElement extends AnnotationElement {\n #polyline = null;\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n this.containerClassName = \"polylineAnnotation\";\n this.svgElementName = \"svg:polyline\";\n }\n render() {\n this.container.classList.add(this.containerClassName);\n const {\n data: {\n rect,\n vertices,\n borderStyle,\n popupRef\n }\n } = this;\n if (!vertices) {\n return this.container;\n }\n const {\n width,\n height\n } = getRectDims(rect);\n const svg = this.svgFactory.create(width, height, true);\n let points = [];\n for (let i = 0, ii = vertices.length; i < ii; i += 2) {\n const x = vertices[i] - rect[0];\n const y = rect[3] - vertices[i + 1];\n points.push(`${x},${y}`);\n }\n points = points.join(\" \");\n const polyline = this.#polyline = this.svgFactory.createElement(this.svgElementName);\n polyline.setAttribute(\"points\", points);\n 
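// As with the line, square and circle annotations, the polyline stays transparent and only serves as the popup trigger region (getElementsToTriggerPopup returns it).\n 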
polyline.setAttribute(\"stroke-width\", borderStyle.width || 1);\n polyline.setAttribute(\"stroke\", \"transparent\");\n polyline.setAttribute(\"fill\", \"transparent\");\n svg.append(polyline);\n this.container.append(svg);\n if (!popupRef && this.hasPopupData) {\n this._createPopup();\n }\n return this.container;\n }\n getElementsToTriggerPopup() {\n return this.#polyline;\n }\n addHighlightArea() {\n this.container.classList.add(\"highlightArea\");\n }\n}\nclass PolygonAnnotationElement extends PolylineAnnotationElement {\n constructor(parameters) {\n super(parameters);\n this.containerClassName = \"polygonAnnotation\";\n this.svgElementName = \"svg:polygon\";\n }\n}\nclass CaretAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n }\n render() {\n this.container.classList.add(\"caretAnnotation\");\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n return this.container;\n }\n}\nclass InkAnnotationElement extends AnnotationElement {\n #polylines = [];\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n this.containerClassName = \"inkAnnotation\";\n this.svgElementName = \"svg:polyline\";\n this.annotationEditorType = AnnotationEditorType.INK;\n }\n render() {\n this.container.classList.add(this.containerClassName);\n const {\n data: {\n rect,\n inkLists,\n borderStyle,\n popupRef\n }\n } = this;\n const {\n width,\n height\n } = getRectDims(rect);\n const svg = this.svgFactory.create(width, height, true);\n for (const inkList of inkLists) {\n let points = [];\n for (let i = 0, ii = inkList.length; i < ii; i += 2) {\n const x = inkList[i] - rect[0];\n const y = rect[3] - inkList[i + 1];\n points.push(`${x},${y}`);\n }\n points = points.join(\" \");\n const polyline = this.svgFactory.createElement(this.svgElementName);\n this.#polylines.push(polyline);\n polyline.setAttribute(\"points\", points);\n polyline.setAttribute(\"stroke-width\", borderStyle.width || 1);\n polyline.setAttribute(\"stroke\", \"transparent\");\n polyline.setAttribute(\"fill\", \"transparent\");\n if (!popupRef && this.hasPopupData) {\n this._createPopup();\n }\n svg.append(polyline);\n }\n this.container.append(svg);\n return this.container;\n }\n getElementsToTriggerPopup() {\n return this.#polylines;\n }\n addHighlightArea() {\n this.container.classList.add(\"highlightArea\");\n }\n}\nclass HighlightAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true,\n createQuadrilaterals: true\n });\n }\n render() {\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n this.container.classList.add(\"highlightAnnotation\");\n return this.container;\n }\n}\nclass UnderlineAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true,\n createQuadrilaterals: true\n });\n }\n render() {\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n this.container.classList.add(\"underlineAnnotation\");\n return this.container;\n }\n}\nclass SquigglyAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true,\n createQuadrilaterals: true\n });\n }\n render() {\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n 
this.container.classList.add(\"squigglyAnnotation\");\n return this.container;\n }\n}\nclass StrikeOutAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true,\n createQuadrilaterals: true\n });\n }\n render() {\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n this.container.classList.add(\"strikeoutAnnotation\");\n return this.container;\n }\n}\nclass StampAnnotationElement extends AnnotationElement {\n constructor(parameters) {\n super(parameters, {\n isRenderable: true,\n ignoreBorder: true\n });\n }\n render() {\n this.container.classList.add(\"stampAnnotation\");\n if (!this.data.popupRef && this.hasPopupData) {\n this._createPopup();\n }\n return this.container;\n }\n}\nclass FileAttachmentAnnotationElement extends AnnotationElement {\n #trigger = null;\n constructor(parameters) {\n super(parameters, {\n isRenderable: true\n });\n const {\n file\n } = this.data;\n this.filename = file.filename;\n this.content = file.content;\n this.linkService.eventBus?.dispatch(\"fileattachmentannotation\", {\n source: this,\n ...file\n });\n }\n render() {\n this.container.classList.add(\"fileAttachmentAnnotation\");\n const {\n container,\n data\n } = this;\n let trigger;\n if (data.hasAppearance || data.fillAlpha === 0) {\n trigger = document.createElement(\"div\");\n } else {\n trigger = document.createElement(\"img\");\n trigger.src = `${this.imageResourcesPath}annotation-${/paperclip/i.test(data.name) ? \"paperclip\" : \"pushpin\"}.svg`;\n if (data.fillAlpha && data.fillAlpha < 1) {\n trigger.style = `filter: opacity(${Math.round(data.fillAlpha * 100)}%);`;\n }\n }\n trigger.addEventListener(\"dblclick\", this.#download.bind(this));\n this.#trigger = trigger;\n const {\n isMac\n } = util_FeatureTest.platform;\n container.addEventListener(\"keydown\", evt => {\n if (evt.key === \"Enter\" && (isMac ? 
evt.metaKey : evt.ctrlKey)) {\n this.#download();\n }\n });\n if (!data.popupRef && this.hasPopupData) {\n this._createPopup();\n } else {\n trigger.classList.add(\"popupTriggerArea\");\n }\n container.append(trigger);\n return container;\n }\n getElementsToTriggerPopup() {\n return this.#trigger;\n }\n addHighlightArea() {\n this.container.classList.add(\"highlightArea\");\n }\n #download() {\n this.downloadManager?.openOrDownloadData(this.content, this.filename);\n }\n}\nclass AnnotationLayer {\n #accessibilityManager = null;\n #annotationCanvasMap = null;\n #editableAnnotations = new Map();\n constructor({\n div,\n accessibilityManager,\n annotationCanvasMap,\n annotationEditorUIManager,\n page,\n viewport\n }) {\n this.div = div;\n this.#accessibilityManager = accessibilityManager;\n this.#annotationCanvasMap = annotationCanvasMap;\n this.page = page;\n this.viewport = viewport;\n this.zIndex = 0;\n this._annotationEditorUIManager = annotationEditorUIManager;\n }\n #appendElement(element, id) {\n const contentElement = element.firstChild || element;\n contentElement.id = `${AnnotationPrefix}${id}`;\n this.div.append(element);\n this.#accessibilityManager?.moveElementInDOM(this.div, element, contentElement, false);\n }\n async render(params) {\n const {\n annotations\n } = params;\n const layer = this.div;\n setLayerDimensions(layer, this.viewport);\n const popupToElements = new Map();\n const elementParams = {\n data: null,\n layer,\n linkService: params.linkService,\n downloadManager: params.downloadManager,\n imageResourcesPath: params.imageResourcesPath || \"\",\n renderForms: params.renderForms !== false,\n svgFactory: new DOMSVGFactory(),\n annotationStorage: params.annotationStorage || new AnnotationStorage(),\n enableScripting: params.enableScripting === true,\n hasJSActions: params.hasJSActions,\n fieldObjects: params.fieldObjects,\n parent: this,\n elements: null\n };\n for (const data of annotations) {\n if (data.noHTML) {\n continue;\n }\n const isPopupAnnotation = data.annotationType === AnnotationType.POPUP;\n if (!isPopupAnnotation) {\n const {\n width,\n height\n } = getRectDims(data.rect);\n if (width <= 0 || height <= 0) {\n continue;\n }\n } else {\n const elements = popupToElements.get(data.id);\n if (!elements) {\n continue;\n }\n elementParams.elements = elements;\n }\n elementParams.data = data;\n const element = AnnotationElementFactory.create(elementParams);\n if (!element.isRenderable) {\n continue;\n }\n if (!isPopupAnnotation && data.popupRef) {\n const elements = popupToElements.get(data.popupRef);\n if (!elements) {\n popupToElements.set(data.popupRef, [element]);\n } else {\n elements.push(element);\n }\n }\n const rendered = element.render();\n if (data.hidden) {\n rendered.style.visibility = \"hidden\";\n }\n this.#appendElement(rendered, data.id);\n if (element.annotationEditorType > 0) {\n this.#editableAnnotations.set(element.data.id, element);\n this._annotationEditorUIManager?.renderAnnotationElement(element);\n }\n }\n this.#setAnnotationCanvasMap();\n }\n update({\n viewport\n }) {\n const layer = this.div;\n this.viewport = viewport;\n setLayerDimensions(layer, {\n rotation: viewport.rotation\n });\n this.#setAnnotationCanvasMap();\n layer.hidden = false;\n }\n #setAnnotationCanvasMap() {\n if (!this.#annotationCanvasMap) {\n return;\n }\n const layer = this.div;\n for (const [id, canvas] of this.#annotationCanvasMap) {\n const element = layer.querySelector(`[data-annotation-id=\"${id}\"]`);\n if (!element) {\n continue;\n }\n canvas.className = 
\"annotationContent\";\n const {\n firstChild\n } = element;\n if (!firstChild) {\n element.append(canvas);\n } else if (firstChild.nodeName === \"CANVAS\") {\n firstChild.replaceWith(canvas);\n } else if (!firstChild.classList.contains(\"annotationContent\")) {\n firstChild.before(canvas);\n } else {\n firstChild.after(canvas);\n }\n }\n this.#annotationCanvasMap.clear();\n }\n getEditableAnnotations() {\n return Array.from(this.#editableAnnotations.values());\n }\n getEditableAnnotation(id) {\n return this.#editableAnnotations.get(id);\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/freetext.js\n\n\n\n\nconst EOL_PATTERN = /\\r\\n?|\\n/g;\nclass FreeTextEditor extends AnnotationEditor {\n #boundEditorDivBlur = this.editorDivBlur.bind(this);\n #boundEditorDivFocus = this.editorDivFocus.bind(this);\n #boundEditorDivInput = this.editorDivInput.bind(this);\n #boundEditorDivKeydown = this.editorDivKeydown.bind(this);\n #boundEditorDivPaste = this.editorDivPaste.bind(this);\n #color;\n #content = \"\";\n #editorDivId = `${this.id}-editor`;\n #fontSize;\n #initialData = null;\n static _freeTextDefaultContent = \"\";\n static _internalPadding = 0;\n static _defaultColor = null;\n static _defaultFontSize = 10;\n static get _keyboardManager() {\n const proto = FreeTextEditor.prototype;\n const arrowChecker = self => self.isEmpty();\n const small = AnnotationEditorUIManager.TRANSLATE_SMALL;\n const big = AnnotationEditorUIManager.TRANSLATE_BIG;\n return shadow(this, \"_keyboardManager\", new KeyboardManager([[[\"ctrl+s\", \"mac+meta+s\", \"ctrl+p\", \"mac+meta+p\"], proto.commitOrRemove, {\n bubbles: true\n }], [[\"ctrl+Enter\", \"mac+meta+Enter\", \"Escape\", \"mac+Escape\"], proto.commitOrRemove], [[\"ArrowLeft\", \"mac+ArrowLeft\"], proto._translateEmpty, {\n args: [-small, 0],\n checker: arrowChecker\n }], [[\"ctrl+ArrowLeft\", \"mac+shift+ArrowLeft\"], proto._translateEmpty, {\n args: [-big, 0],\n checker: arrowChecker\n }], [[\"ArrowRight\", \"mac+ArrowRight\"], proto._translateEmpty, {\n args: [small, 0],\n checker: arrowChecker\n }], [[\"ctrl+ArrowRight\", \"mac+shift+ArrowRight\"], proto._translateEmpty, {\n args: [big, 0],\n checker: arrowChecker\n }], [[\"ArrowUp\", \"mac+ArrowUp\"], proto._translateEmpty, {\n args: [0, -small],\n checker: arrowChecker\n }], [[\"ctrl+ArrowUp\", \"mac+shift+ArrowUp\"], proto._translateEmpty, {\n args: [0, -big],\n checker: arrowChecker\n }], [[\"ArrowDown\", \"mac+ArrowDown\"], proto._translateEmpty, {\n args: [0, small],\n checker: arrowChecker\n }], [[\"ctrl+ArrowDown\", \"mac+shift+ArrowDown\"], proto._translateEmpty, {\n args: [0, big],\n checker: arrowChecker\n }]]));\n }\n static _type = \"freetext\";\n static _editorType = AnnotationEditorType.FREETEXT;\n constructor(params) {\n super({\n ...params,\n name: \"freeTextEditor\"\n });\n this.#color = params.color || FreeTextEditor._defaultColor || AnnotationEditor._defaultLineColor;\n this.#fontSize = params.fontSize || FreeTextEditor._defaultFontSize;\n }\n static initialize(l10n, uiManager) {\n AnnotationEditor.initialize(l10n, uiManager, {\n strings: [\"pdfjs-free-text-default-content\"]\n });\n const style = getComputedStyle(document.documentElement);\n this._internalPadding = parseFloat(style.getPropertyValue(\"--freetext-padding\"));\n }\n static updateDefaultParams(type, value) {\n switch (type) {\n case AnnotationEditorParamsType.FREETEXT_SIZE:\n FreeTextEditor._defaultFontSize = value;\n break;\n case AnnotationEditorParamsType.FREETEXT_COLOR:\n FreeTextEditor._defaultColor = value;\n 
break;\n }\n }\n updateParams(type, value) {\n switch (type) {\n case AnnotationEditorParamsType.FREETEXT_SIZE:\n this.#updateFontSize(value);\n break;\n case AnnotationEditorParamsType.FREETEXT_COLOR:\n this.#updateColor(value);\n break;\n }\n }\n static get defaultPropertiesToUpdate() {\n return [[AnnotationEditorParamsType.FREETEXT_SIZE, FreeTextEditor._defaultFontSize], [AnnotationEditorParamsType.FREETEXT_COLOR, FreeTextEditor._defaultColor || AnnotationEditor._defaultLineColor]];\n }\n get propertiesToUpdate() {\n return [[AnnotationEditorParamsType.FREETEXT_SIZE, this.#fontSize], [AnnotationEditorParamsType.FREETEXT_COLOR, this.#color]];\n }\n #updateFontSize(fontSize) {\n const setFontsize = size => {\n this.editorDiv.style.fontSize = `calc(${size}px * var(--scale-factor))`;\n this.translate(0, -(size - this.#fontSize) * this.parentScale);\n this.#fontSize = size;\n this.#setEditorDimensions();\n };\n const savedFontsize = this.#fontSize;\n this.addCommands({\n cmd: setFontsize.bind(this, fontSize),\n undo: setFontsize.bind(this, savedFontsize),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.FREETEXT_SIZE,\n overwriteIfSameType: true,\n keepUndo: true\n });\n }\n #updateColor(color) {\n const setColor = col => {\n this.#color = this.editorDiv.style.color = col;\n };\n const savedColor = this.#color;\n this.addCommands({\n cmd: setColor.bind(this, color),\n undo: setColor.bind(this, savedColor),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.FREETEXT_COLOR,\n overwriteIfSameType: true,\n keepUndo: true\n });\n }\n _translateEmpty(x, y) {\n this._uiManager.translateSelectedEditors(x, y, true);\n }\n getInitialTranslation() {\n const scale = this.parentScale;\n return [-FreeTextEditor._internalPadding * scale, -(FreeTextEditor._internalPadding + this.#fontSize) * scale];\n }\n rebuild() {\n if (!this.parent) {\n return;\n }\n super.rebuild();\n if (this.div === null) {\n return;\n }\n if (!this.isAttachedToDOM) {\n this.parent.add(this);\n }\n }\n enableEditMode() {\n if (this.isInEditMode()) {\n return;\n }\n this.parent.setEditingState(false);\n this.parent.updateToolbar(AnnotationEditorType.FREETEXT);\n super.enableEditMode();\n this.overlayDiv.classList.remove(\"enabled\");\n this.editorDiv.contentEditable = true;\n this._isDraggable = false;\n this.div.removeAttribute(\"aria-activedescendant\");\n const signal = this._uiManager._signal;\n this.editorDiv.addEventListener(\"keydown\", this.#boundEditorDivKeydown, {\n signal\n });\n this.editorDiv.addEventListener(\"focus\", this.#boundEditorDivFocus, {\n signal\n });\n this.editorDiv.addEventListener(\"blur\", this.#boundEditorDivBlur, {\n signal\n });\n this.editorDiv.addEventListener(\"input\", this.#boundEditorDivInput, {\n signal\n });\n this.editorDiv.addEventListener(\"paste\", this.#boundEditorDivPaste, {\n signal\n });\n }\n disableEditMode() {\n if (!this.isInEditMode()) {\n return;\n }\n this.parent.setEditingState(true);\n super.disableEditMode();\n this.overlayDiv.classList.add(\"enabled\");\n this.editorDiv.contentEditable = false;\n this.div.setAttribute(\"aria-activedescendant\", this.#editorDivId);\n this._isDraggable = true;\n this.editorDiv.removeEventListener(\"keydown\", this.#boundEditorDivKeydown);\n this.editorDiv.removeEventListener(\"focus\", this.#boundEditorDivFocus);\n this.editorDiv.removeEventListener(\"blur\", this.#boundEditorDivBlur);\n 
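// keydown/focus/blur above and input/paste below: every listener registered in
// enableEditMode() is removed again when leaving edit mode.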
this.editorDiv.removeEventListener(\"input\", this.#boundEditorDivInput);\n this.editorDiv.removeEventListener(\"paste\", this.#boundEditorDivPaste);\n this.div.focus({\n preventScroll: true\n });\n this.isEditing = false;\n this.parent.div.classList.add(\"freetextEditing\");\n }\n focusin(event) {\n if (!this._focusEventsAllowed) {\n return;\n }\n super.focusin(event);\n if (event.target !== this.editorDiv) {\n this.editorDiv.focus();\n }\n }\n onceAdded() {\n if (this.width) {\n return;\n }\n this.enableEditMode();\n this.editorDiv.focus();\n if (this._initialOptions?.isCentered) {\n this.center();\n }\n this._initialOptions = null;\n }\n isEmpty() {\n return !this.editorDiv || this.editorDiv.innerText.trim() === \"\";\n }\n remove() {\n this.isEditing = false;\n if (this.parent) {\n this.parent.setEditingState(true);\n this.parent.div.classList.add(\"freetextEditing\");\n }\n super.remove();\n }\n #extractText() {\n const buffer = [];\n this.editorDiv.normalize();\n for (const child of this.editorDiv.childNodes) {\n buffer.push(FreeTextEditor.#getNodeContent(child));\n }\n return buffer.join(\"\\n\");\n }\n #setEditorDimensions() {\n const [parentWidth, parentHeight] = this.parentDimensions;\n let rect;\n if (this.isAttachedToDOM) {\n rect = this.div.getBoundingClientRect();\n } else {\n const {\n currentLayer,\n div\n } = this;\n const savedDisplay = div.style.display;\n const savedVisibility = div.classList.contains(\"hidden\");\n div.classList.remove(\"hidden\");\n div.style.display = \"hidden\";\n currentLayer.div.append(this.div);\n rect = div.getBoundingClientRect();\n div.remove();\n div.style.display = savedDisplay;\n div.classList.toggle(\"hidden\", savedVisibility);\n }\n if (this.rotation % 180 === this.parentRotation % 180) {\n this.width = rect.width / parentWidth;\n this.height = rect.height / parentHeight;\n } else {\n this.width = rect.height / parentWidth;\n this.height = rect.width / parentHeight;\n }\n this.fixAndSetPosition();\n }\n commit() {\n if (!this.isInEditMode()) {\n return;\n }\n super.commit();\n this.disableEditMode();\n const savedText = this.#content;\n const newText = this.#content = this.#extractText().trimEnd();\n if (savedText === newText) {\n return;\n }\n const setText = text => {\n this.#content = text;\n if (!text) {\n this.remove();\n return;\n }\n this.#setContent();\n this._uiManager.rebuild(this);\n this.#setEditorDimensions();\n };\n this.addCommands({\n cmd: () => {\n setText(newText);\n },\n undo: () => {\n setText(savedText);\n },\n mustExec: false\n });\n this.#setEditorDimensions();\n }\n shouldGetKeyboardEvents() {\n return this.isInEditMode();\n }\n enterInEditMode() {\n this.enableEditMode();\n this.editorDiv.focus();\n }\n dblclick(event) {\n this.enterInEditMode();\n }\n keydown(event) {\n if (event.target === this.div && event.key === \"Enter\") {\n this.enterInEditMode();\n event.preventDefault();\n }\n }\n editorDivKeydown(event) {\n FreeTextEditor._keyboardManager.exec(this, event);\n }\n editorDivFocus(event) {\n this.isEditing = true;\n }\n editorDivBlur(event) {\n this.isEditing = false;\n }\n editorDivInput(event) {\n this.parent.div.classList.toggle(\"freetextEditing\", this.isEmpty());\n }\n disableEditing() {\n this.editorDiv.setAttribute(\"role\", \"comment\");\n this.editorDiv.removeAttribute(\"aria-multiline\");\n }\n enableEditing() {\n this.editorDiv.setAttribute(\"role\", \"textbox\");\n this.editorDiv.setAttribute(\"aria-multiline\", true);\n }\n render() {\n if (this.div) {\n return this.div;\n }\n let baseX, 
baseY;\n if (this.width) {\n baseX = this.x;\n baseY = this.y;\n }\n super.render();\n this.editorDiv = document.createElement(\"div\");\n this.editorDiv.className = \"internal\";\n this.editorDiv.setAttribute(\"id\", this.#editorDivId);\n this.editorDiv.setAttribute(\"data-l10n-id\", \"pdfjs-free-text\");\n this.enableEditing();\n AnnotationEditor._l10nPromise.get(\"pdfjs-free-text-default-content\").then(msg => this.editorDiv?.setAttribute(\"default-content\", msg));\n this.editorDiv.contentEditable = true;\n const {\n style\n } = this.editorDiv;\n style.fontSize = `calc(${this.#fontSize}px * var(--scale-factor))`;\n style.color = this.#color;\n this.div.append(this.editorDiv);\n this.overlayDiv = document.createElement(\"div\");\n this.overlayDiv.classList.add(\"overlay\", \"enabled\");\n this.div.append(this.overlayDiv);\n bindEvents(this, this.div, [\"dblclick\", \"keydown\"]);\n if (this.width) {\n const [parentWidth, parentHeight] = this.parentDimensions;\n if (this.annotationElementId) {\n const {\n position\n } = this.#initialData;\n let [tx, ty] = this.getInitialTranslation();\n [tx, ty] = this.pageTranslationToScreen(tx, ty);\n const [pageWidth, pageHeight] = this.pageDimensions;\n const [pageX, pageY] = this.pageTranslation;\n let posX, posY;\n switch (this.rotation) {\n case 0:\n posX = baseX + (position[0] - pageX) / pageWidth;\n posY = baseY + this.height - (position[1] - pageY) / pageHeight;\n break;\n case 90:\n posX = baseX + (position[0] - pageX) / pageWidth;\n posY = baseY - (position[1] - pageY) / pageHeight;\n [tx, ty] = [ty, -tx];\n break;\n case 180:\n posX = baseX - this.width + (position[0] - pageX) / pageWidth;\n posY = baseY - (position[1] - pageY) / pageHeight;\n [tx, ty] = [-tx, -ty];\n break;\n case 270:\n posX = baseX + (position[0] - pageX - this.height * pageHeight) / pageWidth;\n posY = baseY + (position[1] - pageY - this.width * pageWidth) / pageHeight;\n [tx, ty] = [-ty, tx];\n break;\n }\n this.setAt(posX * parentWidth, posY * parentHeight, tx, ty);\n } else {\n this.setAt(baseX * parentWidth, baseY * parentHeight, this.width * parentWidth, this.height * parentHeight);\n }\n this.#setContent();\n this._isDraggable = true;\n this.editorDiv.contentEditable = false;\n } else {\n this._isDraggable = false;\n this.editorDiv.contentEditable = true;\n }\n return this.div;\n }\n static #getNodeContent(node) {\n return (node.nodeType === Node.TEXT_NODE ? 
node.nodeValue : node.innerText).replaceAll(EOL_PATTERN, \"\");\n }\n editorDivPaste(event) {\n const clipboardData = event.clipboardData || window.clipboardData;\n const {\n types\n } = clipboardData;\n if (types.length === 1 && types[0] === \"text/plain\") {\n return;\n }\n event.preventDefault();\n const paste = FreeTextEditor.#deserializeContent(clipboardData.getData(\"text\") || \"\").replaceAll(EOL_PATTERN, \"\\n\");\n if (!paste) {\n return;\n }\n const selection = window.getSelection();\n if (!selection.rangeCount) {\n return;\n }\n this.editorDiv.normalize();\n selection.deleteFromDocument();\n const range = selection.getRangeAt(0);\n if (!paste.includes(\"\\n\")) {\n range.insertNode(document.createTextNode(paste));\n this.editorDiv.normalize();\n selection.collapseToStart();\n return;\n }\n const {\n startContainer,\n startOffset\n } = range;\n const bufferBefore = [];\n const bufferAfter = [];\n if (startContainer.nodeType === Node.TEXT_NODE) {\n const parent = startContainer.parentElement;\n bufferAfter.push(startContainer.nodeValue.slice(startOffset).replaceAll(EOL_PATTERN, \"\"));\n if (parent !== this.editorDiv) {\n let buffer = bufferBefore;\n for (const child of this.editorDiv.childNodes) {\n if (child === parent) {\n buffer = bufferAfter;\n continue;\n }\n buffer.push(FreeTextEditor.#getNodeContent(child));\n }\n }\n bufferBefore.push(startContainer.nodeValue.slice(0, startOffset).replaceAll(EOL_PATTERN, \"\"));\n } else if (startContainer === this.editorDiv) {\n let buffer = bufferBefore;\n let i = 0;\n for (const child of this.editorDiv.childNodes) {\n if (i++ === startOffset) {\n buffer = bufferAfter;\n }\n buffer.push(FreeTextEditor.#getNodeContent(child));\n }\n }\n this.#content = `${bufferBefore.join(\"\\n\")}${paste}${bufferAfter.join(\"\\n\")}`;\n this.#setContent();\n const newRange = new Range();\n let beforeLength = bufferBefore.reduce((acc, line) => acc + line.length, 0);\n for (const {\n firstChild\n } of this.editorDiv.childNodes) {\n if (firstChild.nodeType === Node.TEXT_NODE) {\n const length = firstChild.nodeValue.length;\n if (beforeLength <= length) {\n newRange.setStart(firstChild, beforeLength);\n newRange.setEnd(firstChild, beforeLength);\n break;\n }\n beforeLength -= length;\n }\n }\n selection.removeAllRanges();\n selection.addRange(newRange);\n }\n #setContent() {\n this.editorDiv.replaceChildren();\n if (!this.#content) {\n return;\n }\n for (const line of this.#content.split(\"\\n\")) {\n const div = document.createElement(\"div\");\n div.append(line ? 
document.createTextNode(line) : document.createElement(\"br\"));\n this.editorDiv.append(div);\n }\n }\n #serializeContent() {\n return this.#content.replaceAll(\"\\xa0\", \" \");\n }\n static #deserializeContent(content) {\n return content.replaceAll(\" \", \"\\xa0\");\n }\n get contentDiv() {\n return this.editorDiv;\n }\n static deserialize(data, parent, uiManager) {\n let initialData = null;\n if (data instanceof FreeTextAnnotationElement) {\n const {\n data: {\n defaultAppearanceData: {\n fontSize,\n fontColor\n },\n rect,\n rotation,\n id\n },\n textContent,\n textPosition,\n parent: {\n page: {\n pageNumber\n }\n }\n } = data;\n if (!textContent || textContent.length === 0) {\n return null;\n }\n initialData = data = {\n annotationType: AnnotationEditorType.FREETEXT,\n color: Array.from(fontColor),\n fontSize,\n value: textContent.join(\"\\n\"),\n position: textPosition,\n pageIndex: pageNumber - 1,\n rect: rect.slice(0),\n rotation,\n id,\n deleted: false\n };\n }\n const editor = super.deserialize(data, parent, uiManager);\n editor.#fontSize = data.fontSize;\n editor.#color = Util.makeHexColor(...data.color);\n editor.#content = FreeTextEditor.#deserializeContent(data.value);\n editor.annotationElementId = data.id || null;\n editor.#initialData = initialData;\n return editor;\n }\n serialize(isForCopying = false) {\n if (this.isEmpty()) {\n return null;\n }\n if (this.deleted) {\n return {\n pageIndex: this.pageIndex,\n id: this.annotationElementId,\n deleted: true\n };\n }\n const padding = FreeTextEditor._internalPadding * this.parentScale;\n const rect = this.getRect(padding, padding);\n const color = AnnotationEditor._colorManager.convert(this.isAttachedToDOM ? getComputedStyle(this.editorDiv).color : this.#color);\n const serialized = {\n annotationType: AnnotationEditorType.FREETEXT,\n color,\n fontSize: this.#fontSize,\n value: this.#serializeContent(),\n pageIndex: this.pageIndex,\n rect,\n rotation: this.rotation,\n structTreeParentId: this._structTreeParentId\n };\n if (isForCopying) {\n return serialized;\n }\n if (this.annotationElementId && !this.#hasElementChanged(serialized)) {\n return null;\n }\n serialized.id = this.annotationElementId;\n return serialized;\n }\n #hasElementChanged(serialized) {\n const {\n value,\n fontSize,\n color,\n pageIndex\n } = this.#initialData;\n return this._hasBeenMoved || serialized.value !== value || serialized.fontSize !== fontSize || serialized.color.some((c, i) => c !== color[i]) || serialized.pageIndex !== pageIndex;\n }\n renderAnnotationElement(annotation) {\n const content = super.renderAnnotationElement(annotation);\n if (this.deleted) {\n return content;\n }\n const {\n style\n } = content;\n style.fontSize = `calc(${this.#fontSize}px * var(--scale-factor))`;\n style.color = this.#color;\n content.replaceChildren();\n for (const line of this.#content.split(\"\\n\")) {\n const div = document.createElement(\"div\");\n div.append(line ? 
document.createTextNode(line) : document.createElement(\"br\"));\n content.append(div);\n }\n const padding = FreeTextEditor._internalPadding * this.parentScale;\n annotation.updateEdited({\n rect: this.getRect(padding, padding),\n popupContent: this.#content\n });\n return content;\n }\n resetAnnotationElement(annotation) {\n super.resetAnnotationElement(annotation);\n annotation.resetEdited();\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/outliner.js\n\nclass Outliner {\n #box;\n #verticalEdges = [];\n #intervals = [];\n constructor(boxes, borderWidth = 0, innerMargin = 0, isLTR = true) {\n let minX = Infinity;\n let maxX = -Infinity;\n let minY = Infinity;\n let maxY = -Infinity;\n const NUMBER_OF_DIGITS = 4;\n const EPSILON = 10 ** -NUMBER_OF_DIGITS;\n for (const {\n x,\n y,\n width,\n height\n } of boxes) {\n const x1 = Math.floor((x - borderWidth) / EPSILON) * EPSILON;\n const x2 = Math.ceil((x + width + borderWidth) / EPSILON) * EPSILON;\n const y1 = Math.floor((y - borderWidth) / EPSILON) * EPSILON;\n const y2 = Math.ceil((y + height + borderWidth) / EPSILON) * EPSILON;\n const left = [x1, y1, y2, true];\n const right = [x2, y1, y2, false];\n this.#verticalEdges.push(left, right);\n minX = Math.min(minX, x1);\n maxX = Math.max(maxX, x2);\n minY = Math.min(minY, y1);\n maxY = Math.max(maxY, y2);\n }\n const bboxWidth = maxX - minX + 2 * innerMargin;\n const bboxHeight = maxY - minY + 2 * innerMargin;\n const shiftedMinX = minX - innerMargin;\n const shiftedMinY = minY - innerMargin;\n const lastEdge = this.#verticalEdges.at(isLTR ? -1 : -2);\n const lastPoint = [lastEdge[0], lastEdge[2]];\n for (const edge of this.#verticalEdges) {\n const [x, y1, y2] = edge;\n edge[0] = (x - shiftedMinX) / bboxWidth;\n edge[1] = (y1 - shiftedMinY) / bboxHeight;\n edge[2] = (y2 - shiftedMinY) / bboxHeight;\n }\n this.#box = {\n x: shiftedMinX,\n y: shiftedMinY,\n width: bboxWidth,\n height: bboxHeight,\n lastPoint\n };\n }\n getOutlines() {\n this.#verticalEdges.sort((a, b) => a[0] - b[0] || a[1] - b[1] || a[2] - b[2]);\n const outlineVerticalEdges = [];\n for (const edge of this.#verticalEdges) {\n if (edge[3]) {\n outlineVerticalEdges.push(...this.#breakEdge(edge));\n this.#insert(edge);\n } else {\n this.#remove(edge);\n outlineVerticalEdges.push(...this.#breakEdge(edge));\n }\n }\n return this.#getOutlines(outlineVerticalEdges);\n }\n #getOutlines(outlineVerticalEdges) {\n const edges = [];\n const allEdges = new Set();\n for (const edge of outlineVerticalEdges) {\n const [x, y1, y2] = edge;\n edges.push([x, y1, edge], [x, y2, edge]);\n }\n edges.sort((a, b) => a[1] - b[1] || a[0] - b[0]);\n for (let i = 0, ii = edges.length; i < ii; i += 2) {\n const edge1 = edges[i][2];\n const edge2 = edges[i + 1][2];\n edge1.push(edge2);\n edge2.push(edge1);\n allEdges.add(edge1);\n allEdges.add(edge2);\n }\n const outlines = [];\n let outline;\n while (allEdges.size > 0) {\n const edge = allEdges.values().next().value;\n let [x, y1, y2, edge1, edge2] = edge;\n allEdges.delete(edge);\n let lastPointX = x;\n let lastPointY = y1;\n outline = [x, y2];\n outlines.push(outline);\n while (true) {\n let e;\n if (allEdges.has(edge1)) {\n e = edge1;\n } else if (allEdges.has(edge2)) {\n e = edge2;\n } else {\n break;\n }\n allEdges.delete(e);\n [x, y1, y2, edge1, edge2] = e;\n if (lastPointX !== x) {\n outline.push(lastPointX, lastPointY, x, lastPointY === y1 ? y1 : y2);\n lastPointX = x;\n }\n lastPointY = lastPointY === y1 ? 
y2 : y1;\n }\n outline.push(lastPointX, lastPointY);\n }\n return new HighlightOutline(outlines, this.#box);\n }\n #binarySearch(y) {\n const array = this.#intervals;\n let start = 0;\n let end = array.length - 1;\n while (start <= end) {\n const middle = start + end >> 1;\n const y1 = array[middle][0];\n if (y1 === y) {\n return middle;\n }\n if (y1 < y) {\n start = middle + 1;\n } else {\n end = middle - 1;\n }\n }\n return end + 1;\n }\n #insert([, y1, y2]) {\n const index = this.#binarySearch(y1);\n this.#intervals.splice(index, 0, [y1, y2]);\n }\n #remove([, y1, y2]) {\n const index = this.#binarySearch(y1);\n for (let i = index; i < this.#intervals.length; i++) {\n const [start, end] = this.#intervals[i];\n if (start !== y1) {\n break;\n }\n if (start === y1 && end === y2) {\n this.#intervals.splice(i, 1);\n return;\n }\n }\n for (let i = index - 1; i >= 0; i--) {\n const [start, end] = this.#intervals[i];\n if (start !== y1) {\n break;\n }\n if (start === y1 && end === y2) {\n this.#intervals.splice(i, 1);\n return;\n }\n }\n }\n #breakEdge(edge) {\n const [x, y1, y2] = edge;\n const results = [[x, y1, y2]];\n const index = this.#binarySearch(y2);\n for (let i = 0; i < index; i++) {\n const [start, end] = this.#intervals[i];\n for (let j = 0, jj = results.length; j < jj; j++) {\n const [, y3, y4] = results[j];\n if (end <= y3 || y4 <= start) {\n continue;\n }\n if (y3 >= start) {\n if (y4 > end) {\n results[j][1] = end;\n } else {\n if (jj === 1) {\n return [];\n }\n results.splice(j, 1);\n j--;\n jj--;\n }\n continue;\n }\n results[j][2] = start;\n if (y4 > end) {\n results.push([x, end, y4]);\n }\n }\n }\n return results;\n }\n}\nclass Outline {\n toSVGPath() {\n throw new Error(\"Abstract method `toSVGPath` must be implemented.\");\n }\n get box() {\n throw new Error(\"Abstract getter `box` must be implemented.\");\n }\n serialize(_bbox, _rotation) {\n throw new Error(\"Abstract method `serialize` must be implemented.\");\n }\n get free() {\n return this instanceof FreeHighlightOutline;\n }\n}\nclass HighlightOutline extends Outline {\n #box;\n #outlines;\n constructor(outlines, box) {\n super();\n this.#outlines = outlines;\n this.#box = box;\n }\n toSVGPath() {\n const buffer = [];\n for (const polygon of this.#outlines) {\n let [prevX, prevY] = polygon;\n buffer.push(`M${prevX} ${prevY}`);\n for (let i = 2; i < polygon.length; i += 2) {\n const x = polygon[i];\n const y = polygon[i + 1];\n if (x === prevX) {\n buffer.push(`V${y}`);\n prevY = y;\n } else if (y === prevY) {\n buffer.push(`H${x}`);\n prevX = x;\n }\n }\n buffer.push(\"Z\");\n }\n return buffer.join(\" \");\n }\n serialize([blX, blY, trX, trY], _rotation) {\n const outlines = [];\n const width = trX - blX;\n const height = trY - blY;\n for (const outline of this.#outlines) {\n const points = new Array(outline.length);\n for (let i = 0; i < outline.length; i += 2) {\n points[i] = blX + outline[i] * width;\n points[i + 1] = trY - outline[i + 1] * height;\n }\n outlines.push(points);\n }\n return outlines;\n }\n get box() {\n return this.#box;\n }\n}\nclass FreeOutliner {\n #box;\n #bottom = [];\n #innerMargin;\n #isLTR;\n #top = [];\n #last = new Float64Array(18);\n #lastX;\n #lastY;\n #min;\n #min_dist;\n #scaleFactor;\n #thickness;\n #points = [];\n static #MIN_DIST = 8;\n static #MIN_DIFF = 2;\n static #MIN = FreeOutliner.#MIN_DIST + FreeOutliner.#MIN_DIFF;\n constructor({\n x,\n y\n }, box, scaleFactor, thickness, isLTR, innerMargin = 0) {\n this.#box = box;\n this.#thickness = thickness * scaleFactor;\n 
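// The stroke thickness above, and the MIN_DIST/MIN distance thresholds set below,
// are multiplied by scaleFactor so that the distance checks in add() are done in
// the same coordinate space as the incoming points.
//
// Illustrative sketch (not part of the library): based on the constructor and the
// add()/toSVGPath()/getOutlines() methods of this class, a free outline could be
// built from pointer samples roughly like this (box being [x, y, width, height]
// of the layer):
//
//   const outliner = new FreeOutliner({ x: x0, y: y0 }, box, scaleFactor, thickness, /* isLTR = */ true);
//   for (const { x, y } of samples) {
//     outliner.add({ x, y }); // returns false when the point is too close to the previous one
//   }
//   const svgPath = outliner.toSVGPath();    // path in box-relative coordinates
//   const outlines = outliner.getOutlines(); // FreeHighlightOutline instance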
this.#isLTR = isLTR;\n this.#last.set([NaN, NaN, NaN, NaN, x, y], 6);\n this.#innerMargin = innerMargin;\n this.#min_dist = FreeOutliner.#MIN_DIST * scaleFactor;\n this.#min = FreeOutliner.#MIN * scaleFactor;\n this.#scaleFactor = scaleFactor;\n this.#points.push(x, y);\n }\n get free() {\n return true;\n }\n isEmpty() {\n return isNaN(this.#last[8]);\n }\n #getLastCoords() {\n const lastTop = this.#last.subarray(4, 6);\n const lastBottom = this.#last.subarray(16, 18);\n const [x, y, width, height] = this.#box;\n return [(this.#lastX + (lastTop[0] - lastBottom[0]) / 2 - x) / width, (this.#lastY + (lastTop[1] - lastBottom[1]) / 2 - y) / height, (this.#lastX + (lastBottom[0] - lastTop[0]) / 2 - x) / width, (this.#lastY + (lastBottom[1] - lastTop[1]) / 2 - y) / height];\n }\n add({\n x,\n y\n }) {\n this.#lastX = x;\n this.#lastY = y;\n const [layerX, layerY, layerWidth, layerHeight] = this.#box;\n let [x1, y1, x2, y2] = this.#last.subarray(8, 12);\n const diffX = x - x2;\n const diffY = y - y2;\n const d = Math.hypot(diffX, diffY);\n if (d < this.#min) {\n return false;\n }\n const diffD = d - this.#min_dist;\n const K = diffD / d;\n const shiftX = K * diffX;\n const shiftY = K * diffY;\n let x0 = x1;\n let y0 = y1;\n x1 = x2;\n y1 = y2;\n x2 += shiftX;\n y2 += shiftY;\n this.#points?.push(x, y);\n const nX = -shiftY / diffD;\n const nY = shiftX / diffD;\n const thX = nX * this.#thickness;\n const thY = nY * this.#thickness;\n this.#last.set(this.#last.subarray(2, 8), 0);\n this.#last.set([x2 + thX, y2 + thY], 4);\n this.#last.set(this.#last.subarray(14, 18), 12);\n this.#last.set([x2 - thX, y2 - thY], 16);\n if (isNaN(this.#last[6])) {\n if (this.#top.length === 0) {\n this.#last.set([x1 + thX, y1 + thY], 2);\n this.#top.push(NaN, NaN, NaN, NaN, (x1 + thX - layerX) / layerWidth, (y1 + thY - layerY) / layerHeight);\n this.#last.set([x1 - thX, y1 - thY], 14);\n this.#bottom.push(NaN, NaN, NaN, NaN, (x1 - thX - layerX) / layerWidth, (y1 - thY - layerY) / layerHeight);\n }\n this.#last.set([x0, y0, x1, y1, x2, y2], 6);\n return !this.isEmpty();\n }\n this.#last.set([x0, y0, x1, y1, x2, y2], 6);\n const angle = Math.abs(Math.atan2(y0 - y1, x0 - x1) - Math.atan2(shiftY, shiftX));\n if (angle < Math.PI / 2) {\n [x1, y1, x2, y2] = this.#last.subarray(2, 6);\n this.#top.push(NaN, NaN, NaN, NaN, ((x1 + x2) / 2 - layerX) / layerWidth, ((y1 + y2) / 2 - layerY) / layerHeight);\n [x1, y1, x0, y0] = this.#last.subarray(14, 18);\n this.#bottom.push(NaN, NaN, NaN, NaN, ((x0 + x1) / 2 - layerX) / layerWidth, ((y0 + y1) / 2 - layerY) / layerHeight);\n return true;\n }\n [x0, y0, x1, y1, x2, y2] = this.#last.subarray(0, 6);\n this.#top.push(((x0 + 5 * x1) / 6 - layerX) / layerWidth, ((y0 + 5 * y1) / 6 - layerY) / layerHeight, ((5 * x1 + x2) / 6 - layerX) / layerWidth, ((5 * y1 + y2) / 6 - layerY) / layerHeight, ((x1 + x2) / 2 - layerX) / layerWidth, ((y1 + y2) / 2 - layerY) / layerHeight);\n [x2, y2, x1, y1, x0, y0] = this.#last.subarray(12, 18);\n this.#bottom.push(((x0 + 5 * x1) / 6 - layerX) / layerWidth, ((y0 + 5 * y1) / 6 - layerY) / layerHeight, ((5 * x1 + x2) / 6 - layerX) / layerWidth, ((5 * y1 + y2) / 6 - layerY) / layerHeight, ((x1 + x2) / 2 - layerX) / layerWidth, ((y1 + y2) / 2 - layerY) / layerHeight);\n return true;\n }\n toSVGPath() {\n if (this.isEmpty()) {\n return \"\";\n }\n const top = this.#top;\n const bottom = this.#bottom;\n const lastTop = this.#last.subarray(4, 6);\n const lastBottom = this.#last.subarray(16, 18);\n const [x, y, width, height] = this.#box;\n const [lastTopX, lastTopY, 
lastBottomX, lastBottomY] = this.#getLastCoords();\n if (isNaN(this.#last[6]) && !this.isEmpty()) {\n return `M${(this.#last[2] - x) / width} ${(this.#last[3] - y) / height} L${(this.#last[4] - x) / width} ${(this.#last[5] - y) / height} L${lastTopX} ${lastTopY} L${lastBottomX} ${lastBottomY} L${(this.#last[16] - x) / width} ${(this.#last[17] - y) / height} L${(this.#last[14] - x) / width} ${(this.#last[15] - y) / height} Z`;\n }\n const buffer = [];\n buffer.push(`M${top[4]} ${top[5]}`);\n for (let i = 6; i < top.length; i += 6) {\n if (isNaN(top[i])) {\n buffer.push(`L${top[i + 4]} ${top[i + 5]}`);\n } else {\n buffer.push(`C${top[i]} ${top[i + 1]} ${top[i + 2]} ${top[i + 3]} ${top[i + 4]} ${top[i + 5]}`);\n }\n }\n buffer.push(`L${(lastTop[0] - x) / width} ${(lastTop[1] - y) / height} L${lastTopX} ${lastTopY} L${lastBottomX} ${lastBottomY} L${(lastBottom[0] - x) / width} ${(lastBottom[1] - y) / height}`);\n for (let i = bottom.length - 6; i >= 6; i -= 6) {\n if (isNaN(bottom[i])) {\n buffer.push(`L${bottom[i + 4]} ${bottom[i + 5]}`);\n } else {\n buffer.push(`C${bottom[i]} ${bottom[i + 1]} ${bottom[i + 2]} ${bottom[i + 3]} ${bottom[i + 4]} ${bottom[i + 5]}`);\n }\n }\n buffer.push(`L${bottom[4]} ${bottom[5]} Z`);\n return buffer.join(\" \");\n }\n getOutlines() {\n const top = this.#top;\n const bottom = this.#bottom;\n const last = this.#last;\n const lastTop = last.subarray(4, 6);\n const lastBottom = last.subarray(16, 18);\n const [layerX, layerY, layerWidth, layerHeight] = this.#box;\n const points = new Float64Array((this.#points?.length ?? 0) + 2);\n for (let i = 0, ii = points.length - 2; i < ii; i += 2) {\n points[i] = (this.#points[i] - layerX) / layerWidth;\n points[i + 1] = (this.#points[i + 1] - layerY) / layerHeight;\n }\n points[points.length - 2] = (this.#lastX - layerX) / layerWidth;\n points[points.length - 1] = (this.#lastY - layerY) / layerHeight;\n const [lastTopX, lastTopY, lastBottomX, lastBottomY] = this.#getLastCoords();\n if (isNaN(last[6]) && !this.isEmpty()) {\n const outline = new Float64Array(36);\n outline.set([NaN, NaN, NaN, NaN, (last[2] - layerX) / layerWidth, (last[3] - layerY) / layerHeight, NaN, NaN, NaN, NaN, (last[4] - layerX) / layerWidth, (last[5] - layerY) / layerHeight, NaN, NaN, NaN, NaN, lastTopX, lastTopY, NaN, NaN, NaN, NaN, lastBottomX, lastBottomY, NaN, NaN, NaN, NaN, (last[16] - layerX) / layerWidth, (last[17] - layerY) / layerHeight, NaN, NaN, NaN, NaN, (last[14] - layerX) / layerWidth, (last[15] - layerY) / layerHeight], 0);\n return new FreeHighlightOutline(outline, points, this.#box, this.#scaleFactor, this.#innerMargin, this.#isLTR);\n }\n const outline = new Float64Array(this.#top.length + 24 + this.#bottom.length);\n let N = top.length;\n for (let i = 0; i < N; i += 2) {\n if (isNaN(top[i])) {\n outline[i] = outline[i + 1] = NaN;\n continue;\n }\n outline[i] = top[i];\n outline[i + 1] = top[i + 1];\n }\n outline.set([NaN, NaN, NaN, NaN, (lastTop[0] - layerX) / layerWidth, (lastTop[1] - layerY) / layerHeight, NaN, NaN, NaN, NaN, lastTopX, lastTopY, NaN, NaN, NaN, NaN, lastBottomX, lastBottomY, NaN, NaN, NaN, NaN, (lastBottom[0] - layerX) / layerWidth, (lastBottom[1] - layerY) / layerHeight], N);\n N += 24;\n for (let i = bottom.length - 6; i >= 6; i -= 6) {\n for (let j = 0; j < 6; j += 2) {\n if (isNaN(bottom[i + j])) {\n outline[N] = outline[N + 1] = NaN;\n N += 2;\n continue;\n }\n outline[N] = bottom[i + j];\n outline[N + 1] = bottom[i + j + 1];\n N += 2;\n }\n }\n outline.set([NaN, NaN, NaN, NaN, bottom[4], bottom[5]], N);\n 
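// The outline buffer is consumed in groups of six values [c1x, c1y, c2x, c2y, x, y]:
// when the first value of a group is NaN the segment is a straight line to (x, y),
// otherwise it is a cubic Bézier curve using the two control points (see
// FreeHighlightOutline.toSVGPath below).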
return new FreeHighlightOutline(outline, points, this.#box, this.#scaleFactor, this.#innerMargin, this.#isLTR);\n }\n}\nclass FreeHighlightOutline extends Outline {\n #box;\n #bbox = null;\n #innerMargin;\n #isLTR;\n #points;\n #scaleFactor;\n #outline;\n constructor(outline, points, box, scaleFactor, innerMargin, isLTR) {\n super();\n this.#outline = outline;\n this.#points = points;\n this.#box = box;\n this.#scaleFactor = scaleFactor;\n this.#innerMargin = innerMargin;\n this.#isLTR = isLTR;\n this.#computeMinMax(isLTR);\n const {\n x,\n y,\n width,\n height\n } = this.#bbox;\n for (let i = 0, ii = outline.length; i < ii; i += 2) {\n outline[i] = (outline[i] - x) / width;\n outline[i + 1] = (outline[i + 1] - y) / height;\n }\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n points[i] = (points[i] - x) / width;\n points[i + 1] = (points[i + 1] - y) / height;\n }\n }\n toSVGPath() {\n const buffer = [`M${this.#outline[4]} ${this.#outline[5]}`];\n for (let i = 6, ii = this.#outline.length; i < ii; i += 6) {\n if (isNaN(this.#outline[i])) {\n buffer.push(`L${this.#outline[i + 4]} ${this.#outline[i + 5]}`);\n continue;\n }\n buffer.push(`C${this.#outline[i]} ${this.#outline[i + 1]} ${this.#outline[i + 2]} ${this.#outline[i + 3]} ${this.#outline[i + 4]} ${this.#outline[i + 5]}`);\n }\n buffer.push(\"Z\");\n return buffer.join(\" \");\n }\n serialize([blX, blY, trX, trY], rotation) {\n const width = trX - blX;\n const height = trY - blY;\n let outline;\n let points;\n switch (rotation) {\n case 0:\n outline = this.#rescale(this.#outline, blX, trY, width, -height);\n points = this.#rescale(this.#points, blX, trY, width, -height);\n break;\n case 90:\n outline = this.#rescaleAndSwap(this.#outline, blX, blY, width, height);\n points = this.#rescaleAndSwap(this.#points, blX, blY, width, height);\n break;\n case 180:\n outline = this.#rescale(this.#outline, trX, blY, -width, height);\n points = this.#rescale(this.#points, trX, blY, -width, height);\n break;\n case 270:\n outline = this.#rescaleAndSwap(this.#outline, trX, trY, -width, -height);\n points = this.#rescaleAndSwap(this.#points, trX, trY, -width, -height);\n break;\n }\n return {\n outline: Array.from(outline),\n points: [Array.from(points)]\n };\n }\n #rescale(src, tx, ty, sx, sy) {\n const dest = new Float64Array(src.length);\n for (let i = 0, ii = src.length; i < ii; i += 2) {\n dest[i] = tx + src[i] * sx;\n dest[i + 1] = ty + src[i + 1] * sy;\n }\n return dest;\n }\n #rescaleAndSwap(src, tx, ty, sx, sy) {\n const dest = new Float64Array(src.length);\n for (let i = 0, ii = src.length; i < ii; i += 2) {\n dest[i] = tx + src[i + 1] * sx;\n dest[i + 1] = ty + src[i] * sy;\n }\n return dest;\n }\n #computeMinMax(isLTR) {\n const outline = this.#outline;\n let lastX = outline[4];\n let lastY = outline[5];\n let minX = lastX;\n let minY = lastY;\n let maxX = lastX;\n let maxY = lastY;\n let lastPointX = lastX;\n let lastPointY = lastY;\n const ltrCallback = isLTR ? 
Math.max : Math.min;\n for (let i = 6, ii = outline.length; i < ii; i += 6) {\n if (isNaN(outline[i])) {\n minX = Math.min(minX, outline[i + 4]);\n minY = Math.min(minY, outline[i + 5]);\n maxX = Math.max(maxX, outline[i + 4]);\n maxY = Math.max(maxY, outline[i + 5]);\n if (lastPointY < outline[i + 5]) {\n lastPointX = outline[i + 4];\n lastPointY = outline[i + 5];\n } else if (lastPointY === outline[i + 5]) {\n lastPointX = ltrCallback(lastPointX, outline[i + 4]);\n }\n } else {\n const bbox = Util.bezierBoundingBox(lastX, lastY, ...outline.slice(i, i + 6));\n minX = Math.min(minX, bbox[0]);\n minY = Math.min(minY, bbox[1]);\n maxX = Math.max(maxX, bbox[2]);\n maxY = Math.max(maxY, bbox[3]);\n if (lastPointY < bbox[3]) {\n lastPointX = bbox[2];\n lastPointY = bbox[3];\n } else if (lastPointY === bbox[3]) {\n lastPointX = ltrCallback(lastPointX, bbox[2]);\n }\n }\n lastX = outline[i + 4];\n lastY = outline[i + 5];\n }\n const x = minX - this.#innerMargin,\n y = minY - this.#innerMargin,\n width = maxX - minX + 2 * this.#innerMargin,\n height = maxY - minY + 2 * this.#innerMargin;\n this.#bbox = {\n x,\n y,\n width,\n height,\n lastPoint: [lastPointX, lastPointY]\n };\n }\n get box() {\n return this.#bbox;\n }\n getNewOutline(thickness, innerMargin) {\n const {\n x,\n y,\n width,\n height\n } = this.#bbox;\n const [layerX, layerY, layerWidth, layerHeight] = this.#box;\n const sx = width * layerWidth;\n const sy = height * layerHeight;\n const tx = x * layerWidth + layerX;\n const ty = y * layerHeight + layerY;\n const outliner = new FreeOutliner({\n x: this.#points[0] * sx + tx,\n y: this.#points[1] * sy + ty\n }, this.#box, this.#scaleFactor, thickness, this.#isLTR, innerMargin ?? this.#innerMargin);\n for (let i = 2; i < this.#points.length; i += 2) {\n outliner.add({\n x: this.#points[i] * sx + tx,\n y: this.#points[i + 1] * sy + ty\n });\n }\n return outliner.getOutlines();\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/color_picker.js\n\n\n\nclass ColorPicker {\n #boundKeyDown = this.#keyDown.bind(this);\n #boundPointerDown = this.#pointerDown.bind(this);\n #button = null;\n #buttonSwatch = null;\n #defaultColor;\n #dropdown = null;\n #dropdownWasFromKeyboard = false;\n #isMainColorPicker = false;\n #editor = null;\n #eventBus;\n #uiManager = null;\n #type;\n static get _keyboardManager() {\n return shadow(this, \"_keyboardManager\", new KeyboardManager([[[\"Escape\", \"mac+Escape\"], ColorPicker.prototype._hideDropdownFromKeyboard], [[\" \", \"mac+ \"], ColorPicker.prototype._colorSelectFromKeyboard], [[\"ArrowDown\", \"ArrowRight\", \"mac+ArrowDown\", \"mac+ArrowRight\"], ColorPicker.prototype._moveToNext], [[\"ArrowUp\", \"ArrowLeft\", \"mac+ArrowUp\", \"mac+ArrowLeft\"], ColorPicker.prototype._moveToPrevious], [[\"Home\", \"mac+Home\"], ColorPicker.prototype._moveToBeginning], [[\"End\", \"mac+End\"], ColorPicker.prototype._moveToEnd]]));\n }\n constructor({\n editor = null,\n uiManager = null\n }) {\n if (editor) {\n this.#isMainColorPicker = false;\n this.#type = AnnotationEditorParamsType.HIGHLIGHT_COLOR;\n this.#editor = editor;\n } else {\n this.#isMainColorPicker = true;\n this.#type = AnnotationEditorParamsType.HIGHLIGHT_DEFAULT_COLOR;\n }\n this.#uiManager = editor?._uiManager || uiManager;\n this.#eventBus = this.#uiManager._eventBus;\n this.#defaultColor = editor?.color || this.#uiManager?.highlightColors.values().next().value || \"#FFFF98\";\n }\n renderButton() {\n const button = this.#button = document.createElement(\"button\");\n button.className = 
\"colorPicker\";\n button.tabIndex = \"0\";\n button.setAttribute(\"data-l10n-id\", \"pdfjs-editor-colorpicker-button\");\n button.setAttribute(\"aria-haspopup\", true);\n const signal = this.#uiManager._signal;\n button.addEventListener(\"click\", this.#openDropdown.bind(this), {\n signal\n });\n button.addEventListener(\"keydown\", this.#boundKeyDown, {\n signal\n });\n const swatch = this.#buttonSwatch = document.createElement(\"span\");\n swatch.className = \"swatch\";\n swatch.setAttribute(\"aria-hidden\", true);\n swatch.style.backgroundColor = this.#defaultColor;\n button.append(swatch);\n return button;\n }\n renderMainDropdown() {\n const dropdown = this.#dropdown = this.#getDropdownRoot();\n dropdown.setAttribute(\"aria-orientation\", \"horizontal\");\n dropdown.setAttribute(\"aria-labelledby\", \"highlightColorPickerLabel\");\n return dropdown;\n }\n #getDropdownRoot() {\n const div = document.createElement(\"div\");\n const signal = this.#uiManager._signal;\n div.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n div.className = \"dropdown\";\n div.role = \"listbox\";\n div.setAttribute(\"aria-multiselectable\", false);\n div.setAttribute(\"aria-orientation\", \"vertical\");\n div.setAttribute(\"data-l10n-id\", \"pdfjs-editor-colorpicker-dropdown\");\n for (const [name, color] of this.#uiManager.highlightColors) {\n const button = document.createElement(\"button\");\n button.tabIndex = \"0\";\n button.role = \"option\";\n button.setAttribute(\"data-color\", color);\n button.title = name;\n button.setAttribute(\"data-l10n-id\", `pdfjs-editor-colorpicker-${name}`);\n const swatch = document.createElement(\"span\");\n button.append(swatch);\n swatch.className = \"swatch\";\n swatch.style.backgroundColor = color;\n button.setAttribute(\"aria-selected\", color === this.#defaultColor);\n button.addEventListener(\"click\", this.#colorSelect.bind(this, color), {\n signal\n });\n div.append(button);\n }\n div.addEventListener(\"keydown\", this.#boundKeyDown, {\n signal\n });\n return div;\n }\n #colorSelect(color, event) {\n event.stopPropagation();\n this.#eventBus.dispatch(\"switchannotationeditorparams\", {\n source: this,\n type: this.#type,\n value: color\n });\n }\n _colorSelectFromKeyboard(event) {\n if (event.target === this.#button) {\n this.#openDropdown(event);\n return;\n }\n const color = event.target.getAttribute(\"data-color\");\n if (!color) {\n return;\n }\n this.#colorSelect(color, event);\n }\n _moveToNext(event) {\n if (!this.#isDropdownVisible) {\n this.#openDropdown(event);\n return;\n }\n if (event.target === this.#button) {\n this.#dropdown.firstChild?.focus();\n return;\n }\n event.target.nextSibling?.focus();\n }\n _moveToPrevious(event) {\n if (event.target === this.#dropdown?.firstChild || event.target === this.#button) {\n if (this.#isDropdownVisible) {\n this._hideDropdownFromKeyboard();\n }\n return;\n }\n if (!this.#isDropdownVisible) {\n this.#openDropdown(event);\n }\n event.target.previousSibling?.focus();\n }\n _moveToBeginning(event) {\n if (!this.#isDropdownVisible) {\n this.#openDropdown(event);\n return;\n }\n this.#dropdown.firstChild?.focus();\n }\n _moveToEnd(event) {\n if (!this.#isDropdownVisible) {\n this.#openDropdown(event);\n return;\n }\n this.#dropdown.lastChild?.focus();\n }\n #keyDown(event) {\n ColorPicker._keyboardManager.exec(this, event);\n }\n #openDropdown(event) {\n if (this.#isDropdownVisible) {\n this.hideDropdown();\n return;\n }\n this.#dropdownWasFromKeyboard = event.detail === 0;\n 
window.addEventListener(\"pointerdown\", this.#boundPointerDown, {\n signal: this.#uiManager._signal\n });\n if (this.#dropdown) {\n this.#dropdown.classList.remove(\"hidden\");\n return;\n }\n const root = this.#dropdown = this.#getDropdownRoot();\n this.#button.append(root);\n }\n #pointerDown(event) {\n if (this.#dropdown?.contains(event.target)) {\n return;\n }\n this.hideDropdown();\n }\n hideDropdown() {\n this.#dropdown?.classList.add(\"hidden\");\n window.removeEventListener(\"pointerdown\", this.#boundPointerDown);\n }\n get #isDropdownVisible() {\n return this.#dropdown && !this.#dropdown.classList.contains(\"hidden\");\n }\n _hideDropdownFromKeyboard() {\n if (this.#isMainColorPicker) {\n return;\n }\n if (!this.#isDropdownVisible) {\n this.#editor?.unselect();\n return;\n }\n this.hideDropdown();\n this.#button.focus({\n preventScroll: true,\n focusVisible: this.#dropdownWasFromKeyboard\n });\n }\n updateColor(color) {\n if (this.#buttonSwatch) {\n this.#buttonSwatch.style.backgroundColor = color;\n }\n if (!this.#dropdown) {\n return;\n }\n const i = this.#uiManager.highlightColors.values();\n for (const child of this.#dropdown.children) {\n child.setAttribute(\"aria-selected\", i.next().value === color);\n }\n }\n destroy() {\n this.#button?.remove();\n this.#button = null;\n this.#buttonSwatch = null;\n this.#dropdown?.remove();\n this.#dropdown = null;\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/highlight.js\n\n\n\n\n\n\nclass HighlightEditor extends AnnotationEditor {\n #anchorNode = null;\n #anchorOffset = 0;\n #boxes;\n #clipPathId = null;\n #colorPicker = null;\n #focusOutlines = null;\n #focusNode = null;\n #focusOffset = 0;\n #highlightDiv = null;\n #highlightOutlines = null;\n #id = null;\n #isFreeHighlight = false;\n #boundKeydown = this.#keydown.bind(this);\n #lastPoint = null;\n #opacity;\n #outlineId = null;\n #text = \"\";\n #thickness;\n #methodOfCreation = \"\";\n static _defaultColor = null;\n static _defaultOpacity = 1;\n static _defaultThickness = 12;\n static _l10nPromise;\n static _type = \"highlight\";\n static _editorType = AnnotationEditorType.HIGHLIGHT;\n static _freeHighlightId = -1;\n static _freeHighlight = null;\n static _freeHighlightClipId = \"\";\n static get _keyboardManager() {\n const proto = HighlightEditor.prototype;\n return shadow(this, \"_keyboardManager\", new KeyboardManager([[[\"ArrowLeft\", \"mac+ArrowLeft\"], proto._moveCaret, {\n args: [0]\n }], [[\"ArrowRight\", \"mac+ArrowRight\"], proto._moveCaret, {\n args: [1]\n }], [[\"ArrowUp\", \"mac+ArrowUp\"], proto._moveCaret, {\n args: [2]\n }], [[\"ArrowDown\", \"mac+ArrowDown\"], proto._moveCaret, {\n args: [3]\n }]]));\n }\n constructor(params) {\n super({\n ...params,\n name: \"highlightEditor\"\n });\n this.color = params.color || HighlightEditor._defaultColor;\n this.#thickness = params.thickness || HighlightEditor._defaultThickness;\n this.#opacity = params.opacity || HighlightEditor._defaultOpacity;\n this.#boxes = params.boxes || null;\n this.#methodOfCreation = params.methodOfCreation || \"\";\n this.#text = params.text || \"\";\n this._isDraggable = false;\n if (params.highlightId > -1) {\n this.#isFreeHighlight = true;\n this.#createFreeOutlines(params);\n this.#addToDrawLayer();\n } else {\n this.#anchorNode = params.anchorNode;\n this.#anchorOffset = params.anchorOffset;\n this.#focusNode = params.focusNode;\n this.#focusOffset = params.focusOffset;\n this.#createOutlines();\n this.#addToDrawLayer();\n this.rotate(this.rotation);\n }\n }\n get 
telemetryInitialData() {\n return {\n action: \"added\",\n type: this.#isFreeHighlight ? \"free_highlight\" : \"highlight\",\n color: this._uiManager.highlightColorNames.get(this.color),\n thickness: this.#thickness,\n methodOfCreation: this.#methodOfCreation\n };\n }\n get telemetryFinalData() {\n return {\n type: \"highlight\",\n color: this._uiManager.highlightColorNames.get(this.color)\n };\n }\n static computeTelemetryFinalData(data) {\n return {\n numberOfColors: data.get(\"color\").size\n };\n }\n #createOutlines() {\n const outliner = new Outliner(this.#boxes, 0.001);\n this.#highlightOutlines = outliner.getOutlines();\n ({\n x: this.x,\n y: this.y,\n width: this.width,\n height: this.height\n } = this.#highlightOutlines.box);\n const outlinerForOutline = new Outliner(this.#boxes, 0.0025, 0.001, this._uiManager.direction === \"ltr\");\n this.#focusOutlines = outlinerForOutline.getOutlines();\n const {\n lastPoint\n } = this.#focusOutlines.box;\n this.#lastPoint = [(lastPoint[0] - this.x) / this.width, (lastPoint[1] - this.y) / this.height];\n }\n #createFreeOutlines({\n highlightOutlines,\n highlightId,\n clipPathId\n }) {\n this.#highlightOutlines = highlightOutlines;\n const extraThickness = 1.5;\n this.#focusOutlines = highlightOutlines.getNewOutline(this.#thickness / 2 + extraThickness, 0.0025);\n if (highlightId >= 0) {\n this.#id = highlightId;\n this.#clipPathId = clipPathId;\n this.parent.drawLayer.finalizeLine(highlightId, highlightOutlines);\n this.#outlineId = this.parent.drawLayer.highlightOutline(this.#focusOutlines);\n } else if (this.parent) {\n const angle = this.parent.viewport.rotation;\n this.parent.drawLayer.updateLine(this.#id, highlightOutlines);\n this.parent.drawLayer.updateBox(this.#id, HighlightEditor.#rotateBbox(this.#highlightOutlines.box, (angle - this.rotation + 360) % 360));\n this.parent.drawLayer.updateLine(this.#outlineId, this.#focusOutlines);\n this.parent.drawLayer.updateBox(this.#outlineId, HighlightEditor.#rotateBbox(this.#focusOutlines.box, angle));\n }\n const {\n x,\n y,\n width,\n height\n } = highlightOutlines.box;\n switch (this.rotation) {\n case 0:\n this.x = x;\n this.y = y;\n this.width = width;\n this.height = height;\n break;\n case 90:\n {\n const [pageWidth, pageHeight] = this.parentDimensions;\n this.x = y;\n this.y = 1 - x;\n this.width = width * pageHeight / pageWidth;\n this.height = height * pageWidth / pageHeight;\n break;\n }\n case 180:\n this.x = 1 - x;\n this.y = 1 - y;\n this.width = width;\n this.height = height;\n break;\n case 270:\n {\n const [pageWidth, pageHeight] = this.parentDimensions;\n this.x = 1 - y;\n this.y = x;\n this.width = width * pageHeight / pageWidth;\n this.height = height * pageWidth / pageHeight;\n break;\n }\n }\n const {\n lastPoint\n } = this.#focusOutlines.box;\n this.#lastPoint = [(lastPoint[0] - x) / width, (lastPoint[1] - y) / height];\n }\n static initialize(l10n, uiManager) {\n AnnotationEditor.initialize(l10n, uiManager);\n HighlightEditor._defaultColor ||= uiManager.highlightColors?.values().next().value || \"#fff066\";\n }\n static updateDefaultParams(type, value) {\n switch (type) {\n case AnnotationEditorParamsType.HIGHLIGHT_DEFAULT_COLOR:\n HighlightEditor._defaultColor = value;\n break;\n case AnnotationEditorParamsType.HIGHLIGHT_THICKNESS:\n HighlightEditor._defaultThickness = value;\n break;\n }\n }\n translateInPage(x, y) {}\n get toolbarPosition() {\n return this.#lastPoint;\n }\n updateParams(type, value) {\n switch (type) {\n case 
AnnotationEditorParamsType.HIGHLIGHT_COLOR:\n this.#updateColor(value);\n break;\n case AnnotationEditorParamsType.HIGHLIGHT_THICKNESS:\n this.#updateThickness(value);\n break;\n }\n }\n static get defaultPropertiesToUpdate() {\n return [[AnnotationEditorParamsType.HIGHLIGHT_DEFAULT_COLOR, HighlightEditor._defaultColor], [AnnotationEditorParamsType.HIGHLIGHT_THICKNESS, HighlightEditor._defaultThickness]];\n }\n get propertiesToUpdate() {\n return [[AnnotationEditorParamsType.HIGHLIGHT_COLOR, this.color || HighlightEditor._defaultColor], [AnnotationEditorParamsType.HIGHLIGHT_THICKNESS, this.#thickness || HighlightEditor._defaultThickness], [AnnotationEditorParamsType.HIGHLIGHT_FREE, this.#isFreeHighlight]];\n }\n #updateColor(color) {\n const setColor = col => {\n this.color = col;\n this.parent?.drawLayer.changeColor(this.#id, col);\n this.#colorPicker?.updateColor(col);\n };\n const savedColor = this.color;\n this.addCommands({\n cmd: setColor.bind(this, color),\n undo: setColor.bind(this, savedColor),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.HIGHLIGHT_COLOR,\n overwriteIfSameType: true,\n keepUndo: true\n });\n this._reportTelemetry({\n action: \"color_changed\",\n color: this._uiManager.highlightColorNames.get(color)\n }, true);\n }\n #updateThickness(thickness) {\n const savedThickness = this.#thickness;\n const setThickness = th => {\n this.#thickness = th;\n this.#changeThickness(th);\n };\n this.addCommands({\n cmd: setThickness.bind(this, thickness),\n undo: setThickness.bind(this, savedThickness),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.INK_THICKNESS,\n overwriteIfSameType: true,\n keepUndo: true\n });\n this._reportTelemetry({\n action: \"thickness_changed\",\n thickness\n }, true);\n }\n async addEditToolbar() {\n const toolbar = await super.addEditToolbar();\n if (!toolbar) {\n return null;\n }\n if (this._uiManager.highlightColors) {\n this.#colorPicker = new ColorPicker({\n editor: this\n });\n toolbar.addColorPicker(this.#colorPicker);\n }\n return toolbar;\n }\n disableEditing() {\n super.disableEditing();\n this.div.classList.toggle(\"disabled\", true);\n }\n enableEditing() {\n super.enableEditing();\n this.div.classList.toggle(\"disabled\", false);\n }\n fixAndSetPosition() {\n return super.fixAndSetPosition(this.#getRotation());\n }\n getBaseTranslation() {\n return [0, 0];\n }\n getRect(tx, ty) {\n return super.getRect(tx, ty, this.#getRotation());\n }\n onceAdded() {\n this.parent.addUndoableEditor(this);\n this.div.focus();\n }\n remove() {\n this.#cleanDrawLayer();\n this._reportTelemetry({\n action: \"deleted\"\n });\n super.remove();\n }\n rebuild() {\n if (!this.parent) {\n return;\n }\n super.rebuild();\n if (this.div === null) {\n return;\n }\n this.#addToDrawLayer();\n if (!this.isAttachedToDOM) {\n this.parent.add(this);\n }\n }\n setParent(parent) {\n let mustBeSelected = false;\n if (this.parent && !parent) {\n this.#cleanDrawLayer();\n } else if (parent) {\n this.#addToDrawLayer(parent);\n mustBeSelected = !this.parent && this.div?.classList.contains(\"selectedEditor\");\n }\n super.setParent(parent);\n this.show(this._isVisible);\n if (mustBeSelected) {\n this.select();\n }\n }\n #changeThickness(thickness) {\n if (!this.#isFreeHighlight) {\n return;\n }\n this.#createFreeOutlines({\n highlightOutlines: this.#highlightOutlines.getNewOutline(thickness / 2)\n });\n this.fixAndSetPosition();\n const 
[parentWidth, parentHeight] = this.parentDimensions;\n this.setDims(this.width * parentWidth, this.height * parentHeight);\n }\n #cleanDrawLayer() {\n if (this.#id === null || !this.parent) {\n return;\n }\n this.parent.drawLayer.remove(this.#id);\n this.#id = null;\n this.parent.drawLayer.remove(this.#outlineId);\n this.#outlineId = null;\n }\n #addToDrawLayer(parent = this.parent) {\n if (this.#id !== null) {\n return;\n }\n ({\n id: this.#id,\n clipPathId: this.#clipPathId\n } = parent.drawLayer.highlight(this.#highlightOutlines, this.color, this.#opacity));\n this.#outlineId = parent.drawLayer.highlightOutline(this.#focusOutlines);\n if (this.#highlightDiv) {\n this.#highlightDiv.style.clipPath = this.#clipPathId;\n }\n }\n static #rotateBbox({\n x,\n y,\n width,\n height\n }, angle) {\n switch (angle) {\n case 90:\n return {\n x: 1 - y - height,\n y: x,\n width: height,\n height: width\n };\n case 180:\n return {\n x: 1 - x - width,\n y: 1 - y - height,\n width,\n height\n };\n case 270:\n return {\n x: y,\n y: 1 - x - width,\n width: height,\n height: width\n };\n }\n return {\n x,\n y,\n width,\n height\n };\n }\n rotate(angle) {\n const {\n drawLayer\n } = this.parent;\n let box;\n if (this.#isFreeHighlight) {\n angle = (angle - this.rotation + 360) % 360;\n box = HighlightEditor.#rotateBbox(this.#highlightOutlines.box, angle);\n } else {\n box = HighlightEditor.#rotateBbox(this, angle);\n }\n drawLayer.rotate(this.#id, angle);\n drawLayer.rotate(this.#outlineId, angle);\n drawLayer.updateBox(this.#id, box);\n drawLayer.updateBox(this.#outlineId, HighlightEditor.#rotateBbox(this.#focusOutlines.box, angle));\n }\n render() {\n if (this.div) {\n return this.div;\n }\n const div = super.render();\n if (this.#text) {\n div.setAttribute(\"aria-label\", this.#text);\n div.setAttribute(\"role\", \"mark\");\n }\n if (this.#isFreeHighlight) {\n div.classList.add(\"free\");\n } else {\n this.div.addEventListener(\"keydown\", this.#boundKeydown, {\n signal: this._uiManager._signal\n });\n }\n const highlightDiv = this.#highlightDiv = document.createElement(\"div\");\n div.append(highlightDiv);\n highlightDiv.setAttribute(\"aria-hidden\", \"true\");\n highlightDiv.className = \"internal\";\n highlightDiv.style.clipPath = this.#clipPathId;\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.setDims(this.width * parentWidth, this.height * parentHeight);\n bindEvents(this, this.#highlightDiv, [\"pointerover\", \"pointerleave\"]);\n this.enableEditing();\n return div;\n }\n pointerover() {\n this.parent.drawLayer.addClass(this.#outlineId, \"hovered\");\n }\n pointerleave() {\n this.parent.drawLayer.removeClass(this.#outlineId, \"hovered\");\n }\n #keydown(event) {\n HighlightEditor._keyboardManager.exec(this, event);\n }\n _moveCaret(direction) {\n this.parent.unselect(this);\n switch (direction) {\n case 0:\n case 2:\n this.#setCaret(true);\n break;\n case 1:\n case 3:\n this.#setCaret(false);\n break;\n }\n }\n #setCaret(start) {\n if (!this.#anchorNode) {\n return;\n }\n const selection = window.getSelection();\n if (start) {\n selection.setPosition(this.#anchorNode, this.#anchorOffset);\n } else {\n selection.setPosition(this.#focusNode, this.#focusOffset);\n }\n }\n select() {\n super.select();\n if (!this.#outlineId) {\n return;\n }\n this.parent?.drawLayer.removeClass(this.#outlineId, \"hovered\");\n this.parent?.drawLayer.addClass(this.#outlineId, \"selected\");\n }\n unselect() {\n super.unselect();\n if (!this.#outlineId) {\n return;\n }\n 
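// Deselecting removes the selected style from the SVG outline just below; for text-based\n // (non-free) highlights the caret is also moved to the focus position so keyboard\n // navigation resumes from the end of the highlighted range.\n 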
this.parent?.drawLayer.removeClass(this.#outlineId, \"selected\");\n if (!this.#isFreeHighlight) {\n this.#setCaret(false);\n }\n }\n get _mustFixPosition() {\n return !this.#isFreeHighlight;\n }\n show(visible = this._isVisible) {\n super.show(visible);\n if (this.parent) {\n this.parent.drawLayer.show(this.#id, visible);\n this.parent.drawLayer.show(this.#outlineId, visible);\n }\n }\n #getRotation() {\n return this.#isFreeHighlight ? this.rotation : 0;\n }\n #serializeBoxes() {\n if (this.#isFreeHighlight) {\n return null;\n }\n const [pageWidth, pageHeight] = this.pageDimensions;\n const boxes = this.#boxes;\n const quadPoints = new Float32Array(boxes.length * 8);\n let i = 0;\n for (const {\n x,\n y,\n width,\n height\n } of boxes) {\n const sx = x * pageWidth;\n const sy = (1 - y - height) * pageHeight;\n quadPoints[i] = quadPoints[i + 4] = sx;\n quadPoints[i + 1] = quadPoints[i + 3] = sy;\n quadPoints[i + 2] = quadPoints[i + 6] = sx + width * pageWidth;\n quadPoints[i + 5] = quadPoints[i + 7] = sy + height * pageHeight;\n i += 8;\n }\n return quadPoints;\n }\n #serializeOutlines(rect) {\n return this.#highlightOutlines.serialize(rect, this.#getRotation());\n }\n static startHighlighting(parent, isLTR, {\n target: textLayer,\n x,\n y\n }) {\n const {\n x: layerX,\n y: layerY,\n width: parentWidth,\n height: parentHeight\n } = textLayer.getBoundingClientRect();\n const pointerMove = e => {\n this.#highlightMove(parent, e);\n };\n const signal = parent._signal;\n const pointerDownOptions = {\n capture: true,\n passive: false,\n signal\n };\n const pointerDown = e => {\n e.preventDefault();\n e.stopPropagation();\n };\n const pointerUpCallback = e => {\n textLayer.removeEventListener(\"pointermove\", pointerMove);\n window.removeEventListener(\"blur\", pointerUpCallback);\n window.removeEventListener(\"pointerup\", pointerUpCallback);\n window.removeEventListener(\"pointerdown\", pointerDown, pointerDownOptions);\n window.removeEventListener(\"contextmenu\", noContextMenu);\n this.#endHighlight(parent, e);\n };\n window.addEventListener(\"blur\", pointerUpCallback, {\n signal\n });\n window.addEventListener(\"pointerup\", pointerUpCallback, {\n signal\n });\n window.addEventListener(\"pointerdown\", pointerDown, pointerDownOptions);\n window.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n textLayer.addEventListener(\"pointermove\", pointerMove, {\n signal\n });\n this._freeHighlight = new FreeOutliner({\n x,\n y\n }, [layerX, layerY, parentWidth, parentHeight], parent.scale, this._defaultThickness / 2, isLTR, 0.001);\n ({\n id: this._freeHighlightId,\n clipPathId: this._freeHighlightClipId\n } = parent.drawLayer.highlight(this._freeHighlight, this._defaultColor, this._defaultOpacity, true));\n }\n static #highlightMove(parent, event) {\n if (this._freeHighlight.add(event)) {\n parent.drawLayer.updatePath(this._freeHighlightId, this._freeHighlight);\n }\n }\n static #endHighlight(parent, event) {\n if (!this._freeHighlight.isEmpty()) {\n parent.createAndAddNewEditor(event, false, {\n highlightId: this._freeHighlightId,\n highlightOutlines: this._freeHighlight.getOutlines(),\n clipPathId: this._freeHighlightClipId,\n methodOfCreation: \"main_toolbar\"\n });\n } else {\n parent.drawLayer.removeFreeHighlight(this._freeHighlightId);\n }\n this._freeHighlightId = -1;\n this._freeHighlight = null;\n this._freeHighlightClipId = \"\";\n }\n static deserialize(data, parent, uiManager) {\n const editor = super.deserialize(data, parent, uiManager);\n const {\n rect: [blX, blY, 
trX, trY],\n color,\n quadPoints\n } = data;\n editor.color = Util.makeHexColor(...color);\n editor.#opacity = data.opacity;\n const [pageWidth, pageHeight] = editor.pageDimensions;\n editor.width = (trX - blX) / pageWidth;\n editor.height = (trY - blY) / pageHeight;\n const boxes = editor.#boxes = [];\n for (let i = 0; i < quadPoints.length; i += 8) {\n boxes.push({\n x: (quadPoints[4] - trX) / pageWidth,\n y: (trY - (1 - quadPoints[i + 5])) / pageHeight,\n width: (quadPoints[i + 2] - quadPoints[i]) / pageWidth,\n height: (quadPoints[i + 5] - quadPoints[i + 1]) / pageHeight\n });\n }\n editor.#createOutlines();\n return editor;\n }\n serialize(isForCopying = false) {\n if (this.isEmpty() || isForCopying) {\n return null;\n }\n const rect = this.getRect(0, 0);\n const color = AnnotationEditor._colorManager.convert(this.color);\n return {\n annotationType: AnnotationEditorType.HIGHLIGHT,\n color,\n opacity: this.#opacity,\n thickness: this.#thickness,\n quadPoints: this.#serializeBoxes(),\n outlines: this.#serializeOutlines(rect),\n pageIndex: this.pageIndex,\n rect,\n rotation: this.#getRotation(),\n structTreeParentId: this._structTreeParentId\n };\n }\n static canCreateNewEmptyEditor() {\n return false;\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/ink.js\n\n\n\n\n\nclass InkEditor extends AnnotationEditor {\n #baseHeight = 0;\n #baseWidth = 0;\n #boundCanvasPointermove = this.canvasPointermove.bind(this);\n #boundCanvasPointerleave = this.canvasPointerleave.bind(this);\n #boundCanvasPointerup = this.canvasPointerup.bind(this);\n #boundCanvasPointerdown = this.canvasPointerdown.bind(this);\n #canvasContextMenuTimeoutId = null;\n #currentPath2D = new Path2D();\n #disableEditing = false;\n #hasSomethingToDraw = false;\n #isCanvasInitialized = false;\n #observer = null;\n #realWidth = 0;\n #realHeight = 0;\n #requestFrameCallback = null;\n static _defaultColor = null;\n static _defaultOpacity = 1;\n static _defaultThickness = 1;\n static _type = \"ink\";\n static _editorType = AnnotationEditorType.INK;\n constructor(params) {\n super({\n ...params,\n name: \"inkEditor\"\n });\n this.color = params.color || null;\n this.thickness = params.thickness || null;\n this.opacity = params.opacity || null;\n this.paths = [];\n this.bezierPath2D = [];\n this.allRawPaths = [];\n this.currentPath = [];\n this.scaleFactor = 1;\n this.translationX = this.translationY = 0;\n this.x = 0;\n this.y = 0;\n this._willKeepAspectRatio = true;\n }\n static initialize(l10n, uiManager) {\n AnnotationEditor.initialize(l10n, uiManager);\n }\n static updateDefaultParams(type, value) {\n switch (type) {\n case AnnotationEditorParamsType.INK_THICKNESS:\n InkEditor._defaultThickness = value;\n break;\n case AnnotationEditorParamsType.INK_COLOR:\n InkEditor._defaultColor = value;\n break;\n case AnnotationEditorParamsType.INK_OPACITY:\n InkEditor._defaultOpacity = value / 100;\n break;\n }\n }\n updateParams(type, value) {\n switch (type) {\n case AnnotationEditorParamsType.INK_THICKNESS:\n this.#updateThickness(value);\n break;\n case AnnotationEditorParamsType.INK_COLOR:\n this.#updateColor(value);\n break;\n case AnnotationEditorParamsType.INK_OPACITY:\n this.#updateOpacity(value);\n break;\n }\n }\n static get defaultPropertiesToUpdate() {\n return [[AnnotationEditorParamsType.INK_THICKNESS, InkEditor._defaultThickness], [AnnotationEditorParamsType.INK_COLOR, InkEditor._defaultColor || AnnotationEditor._defaultLineColor], [AnnotationEditorParamsType.INK_OPACITY, Math.round(InkEditor._defaultOpacity * 
100)]];\n }\n get propertiesToUpdate() {\n return [[AnnotationEditorParamsType.INK_THICKNESS, this.thickness || InkEditor._defaultThickness], [AnnotationEditorParamsType.INK_COLOR, this.color || InkEditor._defaultColor || AnnotationEditor._defaultLineColor], [AnnotationEditorParamsType.INK_OPACITY, Math.round(100 * (this.opacity ?? InkEditor._defaultOpacity))]];\n }\n #updateThickness(thickness) {\n const setThickness = th => {\n this.thickness = th;\n this.#fitToContent();\n };\n const savedThickness = this.thickness;\n this.addCommands({\n cmd: setThickness.bind(this, thickness),\n undo: setThickness.bind(this, savedThickness),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.INK_THICKNESS,\n overwriteIfSameType: true,\n keepUndo: true\n });\n }\n #updateColor(color) {\n const setColor = col => {\n this.color = col;\n this.#redraw();\n };\n const savedColor = this.color;\n this.addCommands({\n cmd: setColor.bind(this, color),\n undo: setColor.bind(this, savedColor),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.INK_COLOR,\n overwriteIfSameType: true,\n keepUndo: true\n });\n }\n #updateOpacity(opacity) {\n const setOpacity = op => {\n this.opacity = op;\n this.#redraw();\n };\n opacity /= 100;\n const savedOpacity = this.opacity;\n this.addCommands({\n cmd: setOpacity.bind(this, opacity),\n undo: setOpacity.bind(this, savedOpacity),\n post: this._uiManager.updateUI.bind(this._uiManager, this),\n mustExec: true,\n type: AnnotationEditorParamsType.INK_OPACITY,\n overwriteIfSameType: true,\n keepUndo: true\n });\n }\n rebuild() {\n if (!this.parent) {\n return;\n }\n super.rebuild();\n if (this.div === null) {\n return;\n }\n if (!this.canvas) {\n this.#createCanvas();\n this.#createObserver();\n }\n if (!this.isAttachedToDOM) {\n this.parent.add(this);\n this.#setCanvasDims();\n }\n this.#fitToContent();\n }\n remove() {\n if (this.canvas === null) {\n return;\n }\n if (!this.isEmpty()) {\n this.commit();\n }\n this.canvas.width = this.canvas.height = 0;\n this.canvas.remove();\n this.canvas = null;\n if (this.#canvasContextMenuTimeoutId) {\n clearTimeout(this.#canvasContextMenuTimeoutId);\n this.#canvasContextMenuTimeoutId = null;\n }\n this.#observer?.disconnect();\n this.#observer = null;\n super.remove();\n }\n setParent(parent) {\n if (!this.parent && parent) {\n this._uiManager.removeShouldRescale(this);\n } else if (this.parent && parent === null) {\n this._uiManager.addShouldRescale(this);\n }\n super.setParent(parent);\n }\n onScaleChanging() {\n const [parentWidth, parentHeight] = this.parentDimensions;\n const width = this.width * parentWidth;\n const height = this.height * parentHeight;\n this.setDimensions(width, height);\n }\n enableEditMode() {\n if (this.#disableEditing || this.canvas === null) {\n return;\n }\n super.enableEditMode();\n this._isDraggable = false;\n this.canvas.addEventListener(\"pointerdown\", this.#boundCanvasPointerdown, {\n signal: this._uiManager._signal\n });\n }\n disableEditMode() {\n if (!this.isInEditMode() || this.canvas === null) {\n return;\n }\n super.disableEditMode();\n this._isDraggable = !this.isEmpty();\n this.div.classList.remove(\"editing\");\n this.canvas.removeEventListener(\"pointerdown\", this.#boundCanvasPointerdown);\n }\n onceAdded() {\n this._isDraggable = !this.isEmpty();\n }\n isEmpty() {\n return this.paths.length === 0 || this.paths.length === 1 && this.paths[0].length === 0;\n }\n 
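// Drawing pipeline, for orientation: canvasPointerdown calls #startDrawing, which registers\n // the move/up listeners and a requestAnimationFrame loop (#drawPoints); each pointermove\n // feeds #draw, and the stroke ends in #stopDrawing, where the raw points are fitted with\n // cubic Beziers (#generateBezierPoints) and pushed as an undoable command. A minimal usage\n // sketch, assuming the AnnotationEditorUIManager updateMode/updateParams API and an\n // instance named uiManager (the variable name is illustrative):\n //   uiManager.updateMode(AnnotationEditorType.INK);\n //   uiManager.updateParams(AnnotationEditorParamsType.INK_THICKNESS, 3);\n //   uiManager.updateParams(AnnotationEditorParamsType.INK_COLOR, '#ff0000');\n 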
#getInitialBBox() {\n const {\n parentRotation,\n parentDimensions: [width, height]\n } = this;\n switch (parentRotation) {\n case 90:\n return [0, height, height, width];\n case 180:\n return [width, height, width, height];\n case 270:\n return [width, 0, height, width];\n default:\n return [0, 0, width, height];\n }\n }\n #setStroke() {\n const {\n ctx,\n color,\n opacity,\n thickness,\n parentScale,\n scaleFactor\n } = this;\n ctx.lineWidth = thickness * parentScale / scaleFactor;\n ctx.lineCap = \"round\";\n ctx.lineJoin = \"round\";\n ctx.miterLimit = 10;\n ctx.strokeStyle = `${color}${opacityToHex(opacity)}`;\n }\n #startDrawing(x, y) {\n const signal = this._uiManager._signal;\n this.canvas.addEventListener(\"contextmenu\", noContextMenu, {\n signal\n });\n this.canvas.addEventListener(\"pointerleave\", this.#boundCanvasPointerleave, {\n signal\n });\n this.canvas.addEventListener(\"pointermove\", this.#boundCanvasPointermove, {\n signal\n });\n this.canvas.addEventListener(\"pointerup\", this.#boundCanvasPointerup, {\n signal\n });\n this.canvas.removeEventListener(\"pointerdown\", this.#boundCanvasPointerdown);\n this.isEditing = true;\n if (!this.#isCanvasInitialized) {\n this.#isCanvasInitialized = true;\n this.#setCanvasDims();\n this.thickness ||= InkEditor._defaultThickness;\n this.color ||= InkEditor._defaultColor || AnnotationEditor._defaultLineColor;\n this.opacity ??= InkEditor._defaultOpacity;\n }\n this.currentPath.push([x, y]);\n this.#hasSomethingToDraw = false;\n this.#setStroke();\n this.#requestFrameCallback = () => {\n this.#drawPoints();\n if (this.#requestFrameCallback) {\n window.requestAnimationFrame(this.#requestFrameCallback);\n }\n };\n window.requestAnimationFrame(this.#requestFrameCallback);\n }\n #draw(x, y) {\n const [lastX, lastY] = this.currentPath.at(-1);\n if (this.currentPath.length > 1 && x === lastX && y === lastY) {\n return;\n }\n const currentPath = this.currentPath;\n let path2D = this.#currentPath2D;\n currentPath.push([x, y]);\n this.#hasSomethingToDraw = true;\n if (currentPath.length <= 2) {\n path2D.moveTo(...currentPath[0]);\n path2D.lineTo(x, y);\n return;\n }\n if (currentPath.length === 3) {\n this.#currentPath2D = path2D = new Path2D();\n path2D.moveTo(...currentPath[0]);\n }\n this.#makeBezierCurve(path2D, ...currentPath.at(-3), ...currentPath.at(-2), x, y);\n }\n #endPath() {\n if (this.currentPath.length === 0) {\n return;\n }\n const lastPoint = this.currentPath.at(-1);\n this.#currentPath2D.lineTo(...lastPoint);\n }\n #stopDrawing(x, y) {\n this.#requestFrameCallback = null;\n x = Math.min(Math.max(x, 0), this.canvas.width);\n y = Math.min(Math.max(y, 0), this.canvas.height);\n this.#draw(x, y);\n this.#endPath();\n let bezier;\n if (this.currentPath.length !== 1) {\n bezier = this.#generateBezierPoints();\n } else {\n const xy = [x, y];\n bezier = [[xy, xy.slice(), xy.slice(), xy]];\n }\n const path2D = this.#currentPath2D;\n const currentPath = this.currentPath;\n this.currentPath = [];\n this.#currentPath2D = new Path2D();\n const cmd = () => {\n this.allRawPaths.push(currentPath);\n this.paths.push(bezier);\n this.bezierPath2D.push(path2D);\n this._uiManager.rebuild(this);\n };\n const undo = () => {\n this.allRawPaths.pop();\n this.paths.pop();\n this.bezierPath2D.pop();\n if (this.paths.length === 0) {\n this.remove();\n } else {\n if (!this.canvas) {\n this.#createCanvas();\n this.#createObserver();\n }\n this.#fitToContent();\n }\n };\n this.addCommands({\n cmd,\n undo,\n mustExec: true\n });\n }\n #drawPoints() {\n if 
(!this.#hasSomethingToDraw) {\n return;\n }\n this.#hasSomethingToDraw = false;\n const thickness = Math.ceil(this.thickness * this.parentScale);\n const lastPoints = this.currentPath.slice(-3);\n const x = lastPoints.map(xy => xy[0]);\n const y = lastPoints.map(xy => xy[1]);\n const xMin = Math.min(...x) - thickness;\n const xMax = Math.max(...x) + thickness;\n const yMin = Math.min(...y) - thickness;\n const yMax = Math.max(...y) + thickness;\n const {\n ctx\n } = this;\n ctx.save();\n ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);\n for (const path of this.bezierPath2D) {\n ctx.stroke(path);\n }\n ctx.stroke(this.#currentPath2D);\n ctx.restore();\n }\n #makeBezierCurve(path2D, x0, y0, x1, y1, x2, y2) {\n const prevX = (x0 + x1) / 2;\n const prevY = (y0 + y1) / 2;\n const x3 = (x1 + x2) / 2;\n const y3 = (y1 + y2) / 2;\n path2D.bezierCurveTo(prevX + 2 * (x1 - prevX) / 3, prevY + 2 * (y1 - prevY) / 3, x3 + 2 * (x1 - x3) / 3, y3 + 2 * (y1 - y3) / 3, x3, y3);\n }\n #generateBezierPoints() {\n const path = this.currentPath;\n if (path.length <= 2) {\n return [[path[0], path[0], path.at(-1), path.at(-1)]];\n }\n const bezierPoints = [];\n let i;\n let [x0, y0] = path[0];\n for (i = 1; i < path.length - 2; i++) {\n const [x1, y1] = path[i];\n const [x2, y2] = path[i + 1];\n const x3 = (x1 + x2) / 2;\n const y3 = (y1 + y2) / 2;\n const control1 = [x0 + 2 * (x1 - x0) / 3, y0 + 2 * (y1 - y0) / 3];\n const control2 = [x3 + 2 * (x1 - x3) / 3, y3 + 2 * (y1 - y3) / 3];\n bezierPoints.push([[x0, y0], control1, control2, [x3, y3]]);\n [x0, y0] = [x3, y3];\n }\n const [x1, y1] = path[i];\n const [x2, y2] = path[i + 1];\n const control1 = [x0 + 2 * (x1 - x0) / 3, y0 + 2 * (y1 - y0) / 3];\n const control2 = [x2 + 2 * (x1 - x2) / 3, y2 + 2 * (y1 - y2) / 3];\n bezierPoints.push([[x0, y0], control1, control2, [x2, y2]]);\n return bezierPoints;\n }\n #redraw() {\n if (this.isEmpty()) {\n this.#updateTransform();\n return;\n }\n this.#setStroke();\n const {\n canvas,\n ctx\n } = this;\n ctx.setTransform(1, 0, 0, 1, 0, 0);\n ctx.clearRect(0, 0, canvas.width, canvas.height);\n this.#updateTransform();\n for (const path of this.bezierPath2D) {\n ctx.stroke(path);\n }\n }\n commit() {\n if (this.#disableEditing) {\n return;\n }\n super.commit();\n this.isEditing = false;\n this.disableEditMode();\n this.setInForeground();\n this.#disableEditing = true;\n this.div.classList.add(\"disabled\");\n this.#fitToContent(true);\n this.select();\n this.parent.addInkEditorIfNeeded(true);\n this.moveInDOM();\n this.div.focus({\n preventScroll: true\n });\n }\n focusin(event) {\n if (!this._focusEventsAllowed) {\n return;\n }\n super.focusin(event);\n this.enableEditMode();\n }\n canvasPointerdown(event) {\n if (event.button !== 0 || !this.isInEditMode() || this.#disableEditing) {\n return;\n }\n this.setInForeground();\n event.preventDefault();\n if (!this.div.contains(document.activeElement)) {\n this.div.focus({\n preventScroll: true\n });\n }\n this.#startDrawing(event.offsetX, event.offsetY);\n }\n canvasPointermove(event) {\n event.preventDefault();\n this.#draw(event.offsetX, event.offsetY);\n }\n canvasPointerup(event) {\n event.preventDefault();\n this.#endDrawing(event);\n }\n canvasPointerleave(event) {\n this.#endDrawing(event);\n }\n #endDrawing(event) {\n this.canvas.removeEventListener(\"pointerleave\", this.#boundCanvasPointerleave);\n this.canvas.removeEventListener(\"pointermove\", this.#boundCanvasPointermove);\n this.canvas.removeEventListener(\"pointerup\", this.#boundCanvasPointerup);\n 
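// The pointerdown listener is re-registered below so a new stroke can start right away,\n // and the contextmenu suppression added in #startDrawing is lifted after a short timeout\n // so the browser menu is no longer blocked once drawing has ended.\n 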
this.canvas.addEventListener(\"pointerdown\", this.#boundCanvasPointerdown, {\n signal: this._uiManager._signal\n });\n if (this.#canvasContextMenuTimeoutId) {\n clearTimeout(this.#canvasContextMenuTimeoutId);\n }\n this.#canvasContextMenuTimeoutId = setTimeout(() => {\n this.#canvasContextMenuTimeoutId = null;\n this.canvas.removeEventListener(\"contextmenu\", noContextMenu);\n }, 10);\n this.#stopDrawing(event.offsetX, event.offsetY);\n this.addToAnnotationStorage();\n this.setInBackground();\n }\n #createCanvas() {\n this.canvas = document.createElement(\"canvas\");\n this.canvas.width = this.canvas.height = 0;\n this.canvas.className = \"inkEditorCanvas\";\n this.canvas.setAttribute(\"data-l10n-id\", \"pdfjs-ink-canvas\");\n this.div.append(this.canvas);\n this.ctx = this.canvas.getContext(\"2d\");\n }\n #createObserver() {\n this.#observer = new ResizeObserver(entries => {\n const rect = entries[0].contentRect;\n if (rect.width && rect.height) {\n this.setDimensions(rect.width, rect.height);\n }\n });\n this.#observer.observe(this.div);\n this._uiManager._signal.addEventListener(\"abort\", () => {\n this.#observer?.disconnect();\n this.#observer = null;\n }, {\n once: true\n });\n }\n get isResizable() {\n return !this.isEmpty() && this.#disableEditing;\n }\n render() {\n if (this.div) {\n return this.div;\n }\n let baseX, baseY;\n if (this.width) {\n baseX = this.x;\n baseY = this.y;\n }\n super.render();\n this.div.setAttribute(\"data-l10n-id\", \"pdfjs-ink\");\n const [x, y, w, h] = this.#getInitialBBox();\n this.setAt(x, y, 0, 0);\n this.setDims(w, h);\n this.#createCanvas();\n if (this.width) {\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.setAspectRatio(this.width * parentWidth, this.height * parentHeight);\n this.setAt(baseX * parentWidth, baseY * parentHeight, this.width * parentWidth, this.height * parentHeight);\n this.#isCanvasInitialized = true;\n this.#setCanvasDims();\n this.setDims(this.width * parentWidth, this.height * parentHeight);\n this.#redraw();\n this.div.classList.add(\"disabled\");\n } else {\n this.div.classList.add(\"editing\");\n this.enableEditMode();\n }\n this.#createObserver();\n return this.div;\n }\n #setCanvasDims() {\n if (!this.#isCanvasInitialized) {\n return;\n }\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.canvas.width = Math.ceil(this.width * parentWidth);\n this.canvas.height = Math.ceil(this.height * parentHeight);\n this.#updateTransform();\n }\n setDimensions(width, height) {\n const roundedWidth = Math.round(width);\n const roundedHeight = Math.round(height);\n if (this.#realWidth === roundedWidth && this.#realHeight === roundedHeight) {\n return;\n }\n this.#realWidth = roundedWidth;\n this.#realHeight = roundedHeight;\n this.canvas.style.visibility = \"hidden\";\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.width = width / parentWidth;\n this.height = height / parentHeight;\n this.fixAndSetPosition();\n if (this.#disableEditing) {\n this.#setScaleFactor(width, height);\n }\n this.#setCanvasDims();\n this.#redraw();\n this.canvas.style.visibility = \"visible\";\n this.fixDims();\n }\n #setScaleFactor(width, height) {\n const padding = this.#getPadding();\n const scaleFactorW = (width - padding) / this.#baseWidth;\n const scaleFactorH = (height - padding) / this.#baseHeight;\n this.scaleFactor = Math.min(scaleFactorW, scaleFactorH);\n }\n #updateTransform() {\n const padding = this.#getPadding() / 2;\n this.ctx.setTransform(this.scaleFactor, 0, 0, this.scaleFactor, 
this.translationX * this.scaleFactor + padding, this.translationY * this.scaleFactor + padding);\n }\n static #buildPath2D(bezier) {\n const path2D = new Path2D();\n for (let i = 0, ii = bezier.length; i < ii; i++) {\n const [first, control1, control2, second] = bezier[i];\n if (i === 0) {\n path2D.moveTo(...first);\n }\n path2D.bezierCurveTo(control1[0], control1[1], control2[0], control2[1], second[0], second[1]);\n }\n return path2D;\n }\n static #toPDFCoordinates(points, rect, rotation) {\n const [blX, blY, trX, trY] = rect;\n switch (rotation) {\n case 0:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n points[i] += blX;\n points[i + 1] = trY - points[i + 1];\n }\n break;\n case 90:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n const x = points[i];\n points[i] = points[i + 1] + blX;\n points[i + 1] = x + blY;\n }\n break;\n case 180:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n points[i] = trX - points[i];\n points[i + 1] += blY;\n }\n break;\n case 270:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n const x = points[i];\n points[i] = trX - points[i + 1];\n points[i + 1] = trY - x;\n }\n break;\n default:\n throw new Error(\"Invalid rotation\");\n }\n return points;\n }\n static #fromPDFCoordinates(points, rect, rotation) {\n const [blX, blY, trX, trY] = rect;\n switch (rotation) {\n case 0:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n points[i] -= blX;\n points[i + 1] = trY - points[i + 1];\n }\n break;\n case 90:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n const x = points[i];\n points[i] = points[i + 1] - blY;\n points[i + 1] = x - blX;\n }\n break;\n case 180:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n points[i] = trX - points[i];\n points[i + 1] -= blY;\n }\n break;\n case 270:\n for (let i = 0, ii = points.length; i < ii; i += 2) {\n const x = points[i];\n points[i] = trY - points[i + 1];\n points[i + 1] = trX - x;\n }\n break;\n default:\n throw new Error(\"Invalid rotation\");\n }\n return points;\n }\n #serializePaths(s, tx, ty, rect) {\n const paths = [];\n const padding = this.thickness / 2;\n const shiftX = s * tx + padding;\n const shiftY = s * ty + padding;\n for (const bezier of this.paths) {\n const buffer = [];\n const points = [];\n for (let j = 0, jj = bezier.length; j < jj; j++) {\n const [first, control1, control2, second] = bezier[j];\n if (first[0] === second[0] && first[1] === second[1] && jj === 1) {\n const p0 = s * first[0] + shiftX;\n const p1 = s * first[1] + shiftY;\n buffer.push(p0, p1);\n points.push(p0, p1);\n break;\n }\n const p10 = s * first[0] + shiftX;\n const p11 = s * first[1] + shiftY;\n const p20 = s * control1[0] + shiftX;\n const p21 = s * control1[1] + shiftY;\n const p30 = s * control2[0] + shiftX;\n const p31 = s * control2[1] + shiftY;\n const p40 = s * second[0] + shiftX;\n const p41 = s * second[1] + shiftY;\n if (j === 0) {\n buffer.push(p10, p11);\n points.push(p10, p11);\n }\n buffer.push(p20, p21, p30, p31, p40, p41);\n points.push(p20, p21);\n if (j === jj - 1) {\n points.push(p40, p41);\n }\n }\n paths.push({\n bezier: InkEditor.#toPDFCoordinates(buffer, rect, this.rotation),\n points: InkEditor.#toPDFCoordinates(points, rect, this.rotation)\n });\n }\n return paths;\n }\n #getBbox() {\n let xMin = Infinity;\n let xMax = -Infinity;\n let yMin = Infinity;\n let yMax = -Infinity;\n for (const path of this.paths) {\n for (const [first, control1, control2, second] of path) {\n const bbox = Util.bezierBoundingBox(...first, ...control1, ...control2, 
...second);\n xMin = Math.min(xMin, bbox[0]);\n yMin = Math.min(yMin, bbox[1]);\n xMax = Math.max(xMax, bbox[2]);\n yMax = Math.max(yMax, bbox[3]);\n }\n }\n return [xMin, yMin, xMax, yMax];\n }\n #getPadding() {\n return this.#disableEditing ? Math.ceil(this.thickness * this.parentScale) : 0;\n }\n #fitToContent(firstTime = false) {\n if (this.isEmpty()) {\n return;\n }\n if (!this.#disableEditing) {\n this.#redraw();\n return;\n }\n const bbox = this.#getBbox();\n const padding = this.#getPadding();\n this.#baseWidth = Math.max(AnnotationEditor.MIN_SIZE, bbox[2] - bbox[0]);\n this.#baseHeight = Math.max(AnnotationEditor.MIN_SIZE, bbox[3] - bbox[1]);\n const width = Math.ceil(padding + this.#baseWidth * this.scaleFactor);\n const height = Math.ceil(padding + this.#baseHeight * this.scaleFactor);\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.width = width / parentWidth;\n this.height = height / parentHeight;\n this.setAspectRatio(width, height);\n const prevTranslationX = this.translationX;\n const prevTranslationY = this.translationY;\n this.translationX = -bbox[0];\n this.translationY = -bbox[1];\n this.#setCanvasDims();\n this.#redraw();\n this.#realWidth = width;\n this.#realHeight = height;\n this.setDims(width, height);\n const unscaledPadding = firstTime ? padding / this.scaleFactor / 2 : 0;\n this.translate(prevTranslationX - this.translationX - unscaledPadding, prevTranslationY - this.translationY - unscaledPadding);\n }\n static deserialize(data, parent, uiManager) {\n if (data instanceof InkAnnotationElement) {\n return null;\n }\n const editor = super.deserialize(data, parent, uiManager);\n editor.thickness = data.thickness;\n editor.color = Util.makeHexColor(...data.color);\n editor.opacity = data.opacity;\n const [pageWidth, pageHeight] = editor.pageDimensions;\n const width = editor.width * pageWidth;\n const height = editor.height * pageHeight;\n const scaleFactor = editor.parentScale;\n const padding = data.thickness / 2;\n editor.#disableEditing = true;\n editor.#realWidth = Math.round(width);\n editor.#realHeight = Math.round(height);\n const {\n paths,\n rect,\n rotation\n } = data;\n for (let {\n bezier\n } of paths) {\n bezier = InkEditor.#fromPDFCoordinates(bezier, rect, rotation);\n const path = [];\n editor.paths.push(path);\n let p0 = scaleFactor * (bezier[0] - padding);\n let p1 = scaleFactor * (bezier[1] - padding);\n for (let i = 2, ii = bezier.length; i < ii; i += 6) {\n const p10 = scaleFactor * (bezier[i] - padding);\n const p11 = scaleFactor * (bezier[i + 1] - padding);\n const p20 = scaleFactor * (bezier[i + 2] - padding);\n const p21 = scaleFactor * (bezier[i + 3] - padding);\n const p30 = scaleFactor * (bezier[i + 4] - padding);\n const p31 = scaleFactor * (bezier[i + 5] - padding);\n path.push([[p0, p1], [p10, p11], [p20, p21], [p30, p31]]);\n p0 = p30;\n p1 = p31;\n }\n const path2D = this.#buildPath2D(path);\n editor.bezierPath2D.push(path2D);\n }\n const bbox = editor.#getBbox();\n editor.#baseWidth = Math.max(AnnotationEditor.MIN_SIZE, bbox[2] - bbox[0]);\n editor.#baseHeight = Math.max(AnnotationEditor.MIN_SIZE, bbox[3] - bbox[1]);\n editor.#setScaleFactor(width, height);\n return editor;\n }\n serialize() {\n if (this.isEmpty()) {\n return null;\n }\n const rect = this.getRect(0, 0);\n const color = AnnotationEditor._colorManager.convert(this.ctx.strokeStyle);\n return {\n annotationType: AnnotationEditorType.INK,\n color,\n thickness: this.thickness,\n opacity: this.opacity,\n paths: this.#serializePaths(this.scaleFactor / 
this.parentScale, this.translationX, this.translationY, rect),\n pageIndex: this.pageIndex,\n rect,\n rotation: this.rotation,\n structTreeParentId: this._structTreeParentId\n };\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/stamp.js\n\n\n\n\nclass StampEditor extends AnnotationEditor {\n #bitmap = null;\n #bitmapId = null;\n #bitmapPromise = null;\n #bitmapUrl = null;\n #bitmapFile = null;\n #bitmapFileName = \"\";\n #canvas = null;\n #observer = null;\n #resizeTimeoutId = null;\n #isSvg = false;\n #hasBeenAddedInUndoStack = false;\n static _type = \"stamp\";\n static _editorType = AnnotationEditorType.STAMP;\n constructor(params) {\n super({\n ...params,\n name: \"stampEditor\"\n });\n this.#bitmapUrl = params.bitmapUrl;\n this.#bitmapFile = params.bitmapFile;\n }\n static initialize(l10n, uiManager) {\n AnnotationEditor.initialize(l10n, uiManager);\n }\n static get supportedTypes() {\n const types = [\"apng\", \"avif\", \"bmp\", \"gif\", \"jpeg\", \"png\", \"svg+xml\", \"webp\", \"x-icon\"];\n return shadow(this, \"supportedTypes\", types.map(type => `image/${type}`));\n }\n static get supportedTypesStr() {\n return shadow(this, \"supportedTypesStr\", this.supportedTypes.join(\",\"));\n }\n static isHandlingMimeForPasting(mime) {\n return this.supportedTypes.includes(mime);\n }\n static paste(item, parent) {\n parent.pasteEditor(AnnotationEditorType.STAMP, {\n bitmapFile: item.getAsFile()\n });\n }\n #getBitmapFetched(data, fromId = false) {\n if (!data) {\n this.remove();\n return;\n }\n this.#bitmap = data.bitmap;\n if (!fromId) {\n this.#bitmapId = data.id;\n this.#isSvg = data.isSvg;\n }\n if (data.file) {\n this.#bitmapFileName = data.file.name;\n }\n this.#createCanvas();\n }\n #getBitmapDone() {\n this.#bitmapPromise = null;\n this._uiManager.enableWaiting(false);\n if (this.#canvas) {\n this.div.focus();\n }\n }\n #getBitmap() {\n if (this.#bitmapId) {\n this._uiManager.enableWaiting(true);\n this._uiManager.imageManager.getFromId(this.#bitmapId).then(data => this.#getBitmapFetched(data, true)).finally(() => this.#getBitmapDone());\n return;\n }\n if (this.#bitmapUrl) {\n const url = this.#bitmapUrl;\n this.#bitmapUrl = null;\n this._uiManager.enableWaiting(true);\n this.#bitmapPromise = this._uiManager.imageManager.getFromUrl(url).then(data => this.#getBitmapFetched(data)).finally(() => this.#getBitmapDone());\n return;\n }\n if (this.#bitmapFile) {\n const file = this.#bitmapFile;\n this.#bitmapFile = null;\n this._uiManager.enableWaiting(true);\n this.#bitmapPromise = this._uiManager.imageManager.getFromFile(file).then(data => this.#getBitmapFetched(data)).finally(() => this.#getBitmapDone());\n return;\n }\n const input = document.createElement(\"input\");\n input.type = \"file\";\n input.accept = StampEditor.supportedTypesStr;\n const signal = this._uiManager._signal;\n this.#bitmapPromise = new Promise(resolve => {\n input.addEventListener(\"change\", async () => {\n if (!input.files || input.files.length === 0) {\n this.remove();\n } else {\n this._uiManager.enableWaiting(true);\n const data = await this._uiManager.imageManager.getFromFile(input.files[0]);\n this.#getBitmapFetched(data);\n }\n resolve();\n }, {\n signal\n });\n input.addEventListener(\"cancel\", () => {\n this.remove();\n resolve();\n }, {\n signal\n });\n }).finally(() => this.#getBitmapDone());\n input.click();\n }\n remove() {\n if (this.#bitmapId) {\n this.#bitmap = null;\n this._uiManager.imageManager.deleteId(this.#bitmapId);\n this.#canvas?.remove();\n this.#canvas = null;\n 
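// Dropping the ResizeObserver and any pending resize timeout prevents callbacks from firing\n // against the canvas that has just been removed.\n 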
this.#observer?.disconnect();\n this.#observer = null;\n if (this.#resizeTimeoutId) {\n clearTimeout(this.#resizeTimeoutId);\n this.#resizeTimeoutId = null;\n }\n }\n super.remove();\n }\n rebuild() {\n if (!this.parent) {\n if (this.#bitmapId) {\n this.#getBitmap();\n }\n return;\n }\n super.rebuild();\n if (this.div === null) {\n return;\n }\n if (this.#bitmapId && this.#canvas === null) {\n this.#getBitmap();\n }\n if (!this.isAttachedToDOM) {\n this.parent.add(this);\n }\n }\n onceAdded() {\n this._isDraggable = true;\n this.div.focus();\n }\n isEmpty() {\n return !(this.#bitmapPromise || this.#bitmap || this.#bitmapUrl || this.#bitmapFile || this.#bitmapId);\n }\n get isResizable() {\n return true;\n }\n render() {\n if (this.div) {\n return this.div;\n }\n let baseX, baseY;\n if (this.width) {\n baseX = this.x;\n baseY = this.y;\n }\n super.render();\n this.div.hidden = true;\n this.addAltTextButton();\n if (this.#bitmap) {\n this.#createCanvas();\n } else {\n this.#getBitmap();\n }\n if (this.width) {\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.setAt(baseX * parentWidth, baseY * parentHeight, this.width * parentWidth, this.height * parentHeight);\n }\n return this.div;\n }\n #createCanvas() {\n const {\n div\n } = this;\n let {\n width,\n height\n } = this.#bitmap;\n const [pageWidth, pageHeight] = this.pageDimensions;\n const MAX_RATIO = 0.75;\n if (this.width) {\n width = this.width * pageWidth;\n height = this.height * pageHeight;\n } else if (width > MAX_RATIO * pageWidth || height > MAX_RATIO * pageHeight) {\n const factor = Math.min(MAX_RATIO * pageWidth / width, MAX_RATIO * pageHeight / height);\n width *= factor;\n height *= factor;\n }\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.setDims(width * parentWidth / pageWidth, height * parentHeight / pageHeight);\n this._uiManager.enableWaiting(false);\n const canvas = this.#canvas = document.createElement(\"canvas\");\n div.append(canvas);\n div.hidden = false;\n this.#drawBitmap(width, height);\n this.#createObserver();\n if (!this.#hasBeenAddedInUndoStack) {\n this.parent.addUndoableEditor(this);\n this.#hasBeenAddedInUndoStack = true;\n }\n this._reportTelemetry({\n action: \"inserted_image\"\n });\n if (this.#bitmapFileName) {\n canvas.setAttribute(\"aria-label\", this.#bitmapFileName);\n }\n }\n #setDimensions(width, height) {\n const [parentWidth, parentHeight] = this.parentDimensions;\n this.width = width / parentWidth;\n this.height = height / parentHeight;\n this.setDims(width, height);\n if (this._initialOptions?.isCentered) {\n this.center();\n } else {\n this.fixAndSetPosition();\n }\n this._initialOptions = null;\n if (this.#resizeTimeoutId !== null) {\n clearTimeout(this.#resizeTimeoutId);\n }\n const TIME_TO_WAIT = 200;\n this.#resizeTimeoutId = setTimeout(() => {\n this.#resizeTimeoutId = null;\n this.#drawBitmap(width, height);\n }, TIME_TO_WAIT);\n }\n #scaleBitmap(width, height) {\n const {\n width: bitmapWidth,\n height: bitmapHeight\n } = this.#bitmap;\n let newWidth = bitmapWidth;\n let newHeight = bitmapHeight;\n let bitmap = this.#bitmap;\n while (newWidth > 2 * width || newHeight > 2 * height) {\n const prevWidth = newWidth;\n const prevHeight = newHeight;\n if (newWidth > 2 * width) {\n newWidth = newWidth >= 16384 ? Math.floor(newWidth / 2) - 1 : Math.ceil(newWidth / 2);\n }\n if (newHeight > 2 * height) {\n newHeight = newHeight >= 16384 ? 
Math.floor(newHeight / 2) - 1 : Math.ceil(newHeight / 2);\n }\n const offscreen = new OffscreenCanvas(newWidth, newHeight);\n const ctx = offscreen.getContext(\"2d\");\n ctx.drawImage(bitmap, 0, 0, prevWidth, prevHeight, 0, 0, newWidth, newHeight);\n bitmap = offscreen.transferToImageBitmap();\n }\n return bitmap;\n }\n #drawBitmap(width, height) {\n width = Math.ceil(width);\n height = Math.ceil(height);\n const canvas = this.#canvas;\n if (!canvas || canvas.width === width && canvas.height === height) {\n return;\n }\n canvas.width = width;\n canvas.height = height;\n const bitmap = this.#isSvg ? this.#bitmap : this.#scaleBitmap(width, height);\n if (this._uiManager.hasMLManager && !this.hasAltText()) {\n const offscreen = new OffscreenCanvas(width, height);\n const ctx = offscreen.getContext(\"2d\");\n ctx.drawImage(bitmap, 0, 0, bitmap.width, bitmap.height, 0, 0, width, height);\n this._uiManager.mlGuess({\n service: \"image-to-text\",\n request: {\n data: ctx.getImageData(0, 0, width, height).data,\n width,\n height,\n channels: 4\n }\n }).then(response => {\n const altText = response?.output || \"\";\n if (this.parent && altText && !this.hasAltText()) {\n this.altTextData = {\n altText,\n decorative: false\n };\n }\n });\n }\n const ctx = canvas.getContext(\"2d\");\n ctx.filter = this._uiManager.hcmFilter;\n ctx.drawImage(bitmap, 0, 0, bitmap.width, bitmap.height, 0, 0, width, height);\n }\n getImageForAltText() {\n return this.#canvas;\n }\n #serializeBitmap(toUrl) {\n if (toUrl) {\n if (this.#isSvg) {\n const url = this._uiManager.imageManager.getSvgUrl(this.#bitmapId);\n if (url) {\n return url;\n }\n }\n const canvas = document.createElement(\"canvas\");\n ({\n width: canvas.width,\n height: canvas.height\n } = this.#bitmap);\n const ctx = canvas.getContext(\"2d\");\n ctx.drawImage(this.#bitmap, 0, 0);\n return canvas.toDataURL();\n }\n if (this.#isSvg) {\n const [pageWidth, pageHeight] = this.pageDimensions;\n const width = Math.round(this.width * pageWidth * PixelsPerInch.PDF_TO_CSS_UNITS);\n const height = Math.round(this.height * pageHeight * PixelsPerInch.PDF_TO_CSS_UNITS);\n const offscreen = new OffscreenCanvas(width, height);\n const ctx = offscreen.getContext(\"2d\");\n ctx.drawImage(this.#bitmap, 0, 0, this.#bitmap.width, this.#bitmap.height, 0, 0, width, height);\n return offscreen.transferToImageBitmap();\n }\n return structuredClone(this.#bitmap);\n }\n #createObserver() {\n if (!this._uiManager._signal) {\n return;\n }\n this.#observer = new ResizeObserver(entries => {\n const rect = entries[0].contentRect;\n if (rect.width && rect.height) {\n this.#setDimensions(rect.width, rect.height);\n }\n });\n this.#observer.observe(this.div);\n this._uiManager._signal.addEventListener(\"abort\", () => {\n this.#observer?.disconnect();\n this.#observer = null;\n }, {\n once: true\n });\n }\n static deserialize(data, parent, uiManager) {\n if (data instanceof StampAnnotationElement) {\n return null;\n }\n const editor = super.deserialize(data, parent, uiManager);\n const {\n rect,\n bitmapUrl,\n bitmapId,\n isSvg,\n accessibilityData\n } = data;\n if (bitmapId && uiManager.imageManager.isValidId(bitmapId)) {\n editor.#bitmapId = bitmapId;\n } else {\n editor.#bitmapUrl = bitmapUrl;\n }\n editor.#isSvg = isSvg;\n const [parentWidth, parentHeight] = editor.pageDimensions;\n editor.width = (rect[2] - rect[0]) / parentWidth;\n editor.height = (rect[3] - rect[1]) / parentHeight;\n if (accessibilityData) {\n editor.altTextData = accessibilityData;\n }\n return editor;\n }\n 
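// Serialization note: when a shared context object is passed to serialize(), identical\n // bitmaps are emitted once, keyed by #bitmapId in context.stamps (for SVG stamps the\n // largest rendered area wins). A minimal sketch, assuming two editors that share the same\n // bitmap id (variable names are illustrative; context.stamps is created lazily):\n //   const context = {};\n //   const first = editorA.serialize(false, context); // carries the bitmap data\n //   const second = editorB.serialize(false, context); // same bitmapId, bitmap not repeated\n 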
serialize(isForCopying = false, context = null) {\n if (this.isEmpty()) {\n return null;\n }\n const serialized = {\n annotationType: AnnotationEditorType.STAMP,\n bitmapId: this.#bitmapId,\n pageIndex: this.pageIndex,\n rect: this.getRect(0, 0),\n rotation: this.rotation,\n isSvg: this.#isSvg,\n structTreeParentId: this._structTreeParentId\n };\n if (isForCopying) {\n serialized.bitmapUrl = this.#serializeBitmap(true);\n serialized.accessibilityData = this.altTextData;\n return serialized;\n }\n const {\n decorative,\n altText\n } = this.altTextData;\n if (!decorative && altText) {\n serialized.accessibilityData = {\n type: \"Figure\",\n alt: altText\n };\n }\n if (context === null) {\n return serialized;\n }\n context.stamps ||= new Map();\n const area = this.#isSvg ? (serialized.rect[2] - serialized.rect[0]) * (serialized.rect[3] - serialized.rect[1]) : null;\n if (!context.stamps.has(this.#bitmapId)) {\n context.stamps.set(this.#bitmapId, {\n area,\n serialized\n });\n serialized.bitmap = this.#serializeBitmap(false);\n } else if (this.#isSvg) {\n const prevData = context.stamps.get(this.#bitmapId);\n if (area > prevData.area) {\n prevData.area = area;\n prevData.serialized.bitmap.close();\n prevData.serialized.bitmap = this.#serializeBitmap(false);\n }\n }\n return serialized;\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/editor/annotation_editor_layer.js\n\n\n\n\n\n\n\nclass AnnotationEditorLayer {\n #accessibilityManager;\n #allowClick = false;\n #annotationLayer = null;\n #boundPointerup = null;\n #boundPointerdown = null;\n #boundTextLayerPointerDown = null;\n #editorFocusTimeoutId = null;\n #editors = new Map();\n #hadPointerDown = false;\n #isCleaningUp = false;\n #isDisabling = false;\n #textLayer = null;\n #uiManager;\n static _initialized = false;\n static #editorTypes = new Map([FreeTextEditor, InkEditor, StampEditor, HighlightEditor].map(type => [type._editorType, type]));\n constructor({\n uiManager,\n pageIndex,\n div,\n accessibilityManager,\n annotationLayer,\n drawLayer,\n textLayer,\n viewport,\n l10n\n }) {\n const editorTypes = [...AnnotationEditorLayer.#editorTypes.values()];\n if (!AnnotationEditorLayer._initialized) {\n AnnotationEditorLayer._initialized = true;\n for (const editorType of editorTypes) {\n editorType.initialize(l10n, uiManager);\n }\n }\n uiManager.registerEditorTypes(editorTypes);\n this.#uiManager = uiManager;\n this.pageIndex = pageIndex;\n this.div = div;\n this.#accessibilityManager = accessibilityManager;\n this.#annotationLayer = annotationLayer;\n this.viewport = viewport;\n this.#textLayer = textLayer;\n this.drawLayer = drawLayer;\n this.#uiManager.addLayer(this);\n }\n get isEmpty() {\n return this.#editors.size === 0;\n }\n get isInvisible() {\n return this.isEmpty && this.#uiManager.getMode() === AnnotationEditorType.NONE;\n }\n updateToolbar(mode) {\n this.#uiManager.updateToolbar(mode);\n }\n updateMode(mode = this.#uiManager.getMode()) {\n this.#cleanup();\n switch (mode) {\n case AnnotationEditorType.NONE:\n this.disableTextSelection();\n this.togglePointerEvents(false);\n this.toggleAnnotationLayerPointerEvents(true);\n this.disableClick();\n return;\n case AnnotationEditorType.INK:\n this.addInkEditorIfNeeded(false);\n this.disableTextSelection();\n this.togglePointerEvents(true);\n this.disableClick();\n break;\n case AnnotationEditorType.HIGHLIGHT:\n this.enableTextSelection();\n this.togglePointerEvents(false);\n this.disableClick();\n break;\n default:\n this.disableTextSelection();\n this.togglePointerEvents(true);\n 
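// Default branch (e.g. FreeText and Stamp modes): text selection stays disabled, the layer\n // keeps its pointer events, and click-to-create is enabled just below.\n 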
this.enableClick();\n }\n this.toggleAnnotationLayerPointerEvents(false);\n const {\n classList\n } = this.div;\n for (const editorType of AnnotationEditorLayer.#editorTypes.values()) {\n classList.toggle(`${editorType._type}Editing`, mode === editorType._editorType);\n }\n this.div.hidden = false;\n }\n hasTextLayer(textLayer) {\n return textLayer === this.#textLayer?.div;\n }\n addInkEditorIfNeeded(isCommitting) {\n if (this.#uiManager.getMode() !== AnnotationEditorType.INK) {\n return;\n }\n if (!isCommitting) {\n for (const editor of this.#editors.values()) {\n if (editor.isEmpty()) {\n editor.setInBackground();\n return;\n }\n }\n }\n const editor = this.createAndAddNewEditor({\n offsetX: 0,\n offsetY: 0\n }, false);\n editor.setInBackground();\n }\n setEditingState(isEditing) {\n this.#uiManager.setEditingState(isEditing);\n }\n addCommands(params) {\n this.#uiManager.addCommands(params);\n }\n togglePointerEvents(enabled = false) {\n this.div.classList.toggle(\"disabled\", !enabled);\n }\n toggleAnnotationLayerPointerEvents(enabled = false) {\n this.#annotationLayer?.div.classList.toggle(\"disabled\", !enabled);\n }\n enable() {\n this.div.tabIndex = 0;\n this.togglePointerEvents(true);\n const annotationElementIds = new Set();\n for (const editor of this.#editors.values()) {\n editor.enableEditing();\n editor.show(true);\n if (editor.annotationElementId) {\n this.#uiManager.removeChangedExistingAnnotation(editor);\n annotationElementIds.add(editor.annotationElementId);\n }\n }\n if (!this.#annotationLayer) {\n return;\n }\n const editables = this.#annotationLayer.getEditableAnnotations();\n for (const editable of editables) {\n editable.hide();\n if (this.#uiManager.isDeletedAnnotationElement(editable.data.id)) {\n continue;\n }\n if (annotationElementIds.has(editable.data.id)) {\n continue;\n }\n const editor = this.deserialize(editable);\n if (!editor) {\n continue;\n }\n this.addOrRebuild(editor);\n editor.enableEditing();\n }\n }\n disable() {\n this.#isDisabling = true;\n this.div.tabIndex = -1;\n this.togglePointerEvents(false);\n const changedAnnotations = new Map();\n const resetAnnotations = new Map();\n for (const editor of this.#editors.values()) {\n editor.disableEditing();\n if (!editor.annotationElementId) {\n continue;\n }\n if (editor.serialize() !== null) {\n changedAnnotations.set(editor.annotationElementId, editor);\n continue;\n } else {\n resetAnnotations.set(editor.annotationElementId, editor);\n }\n this.getEditableAnnotation(editor.annotationElementId)?.show();\n editor.remove();\n }\n if (this.#annotationLayer) {\n const editables = this.#annotationLayer.getEditableAnnotations();\n for (const editable of editables) {\n const {\n id\n } = editable.data;\n if (this.#uiManager.isDeletedAnnotationElement(id)) {\n continue;\n }\n let editor = resetAnnotations.get(id);\n if (editor) {\n editor.resetAnnotationElement(editable);\n editor.show(false);\n editable.show();\n continue;\n }\n editor = changedAnnotations.get(id);\n if (editor) {\n this.#uiManager.addChangedExistingAnnotation(editor);\n editor.renderAnnotationElement(editable);\n editor.show(false);\n }\n editable.show();\n }\n }\n this.#cleanup();\n if (this.isEmpty) {\n this.div.hidden = true;\n }\n const {\n classList\n } = this.div;\n for (const editorType of AnnotationEditorLayer.#editorTypes.values()) {\n classList.remove(`${editorType._type}Editing`);\n }\n this.disableTextSelection();\n this.toggleAnnotationLayerPointerEvents(true);\n this.#isDisabling = false;\n }\n getEditableAnnotation(id) {\n 
return this.#annotationLayer?.getEditableAnnotation(id) || null;\n }\n setActiveEditor(editor) {\n const currentActive = this.#uiManager.getActive();\n if (currentActive === editor) {\n return;\n }\n this.#uiManager.setActiveEditor(editor);\n }\n enableTextSelection() {\n this.div.tabIndex = -1;\n if (this.#textLayer?.div && !this.#boundTextLayerPointerDown) {\n this.#boundTextLayerPointerDown = this.#textLayerPointerDown.bind(this);\n this.#textLayer.div.addEventListener(\"pointerdown\", this.#boundTextLayerPointerDown, {\n signal: this.#uiManager._signal\n });\n this.#textLayer.div.classList.add(\"highlighting\");\n }\n }\n disableTextSelection() {\n this.div.tabIndex = 0;\n if (this.#textLayer?.div && this.#boundTextLayerPointerDown) {\n this.#textLayer.div.removeEventListener(\"pointerdown\", this.#boundTextLayerPointerDown);\n this.#boundTextLayerPointerDown = null;\n this.#textLayer.div.classList.remove(\"highlighting\");\n }\n }\n #textLayerPointerDown(event) {\n this.#uiManager.unselectAll();\n if (event.target === this.#textLayer.div) {\n const {\n isMac\n } = util_FeatureTest.platform;\n if (event.button !== 0 || event.ctrlKey && isMac) {\n return;\n }\n this.#uiManager.showAllEditors(\"highlight\", true, true);\n this.#textLayer.div.classList.add(\"free\");\n HighlightEditor.startHighlighting(this, this.#uiManager.direction === \"ltr\", event);\n this.#textLayer.div.addEventListener(\"pointerup\", () => {\n this.#textLayer.div.classList.remove(\"free\");\n }, {\n once: true,\n signal: this.#uiManager._signal\n });\n event.preventDefault();\n }\n }\n enableClick() {\n if (this.#boundPointerdown) {\n return;\n }\n const signal = this.#uiManager._signal;\n this.#boundPointerdown = this.pointerdown.bind(this);\n this.#boundPointerup = this.pointerup.bind(this);\n this.div.addEventListener(\"pointerdown\", this.#boundPointerdown, {\n signal\n });\n this.div.addEventListener(\"pointerup\", this.#boundPointerup, {\n signal\n });\n }\n disableClick() {\n if (!this.#boundPointerdown) {\n return;\n }\n this.div.removeEventListener(\"pointerdown\", this.#boundPointerdown);\n this.div.removeEventListener(\"pointerup\", this.#boundPointerup);\n this.#boundPointerdown = null;\n this.#boundPointerup = null;\n }\n attach(editor) {\n this.#editors.set(editor.id, editor);\n const {\n annotationElementId\n } = editor;\n if (annotationElementId && this.#uiManager.isDeletedAnnotationElement(annotationElementId)) {\n this.#uiManager.removeDeletedAnnotationElement(editor);\n }\n }\n detach(editor) {\n this.#editors.delete(editor.id);\n this.#accessibilityManager?.removePointerInTextLayer(editor.contentDiv);\n if (!this.#isDisabling && editor.annotationElementId) {\n this.#uiManager.addDeletedAnnotationElement(editor);\n }\n }\n remove(editor) {\n this.detach(editor);\n this.#uiManager.removeEditor(editor);\n editor.div.remove();\n editor.isAttachedToDOM = false;\n if (!this.#isCleaningUp) {\n this.addInkEditorIfNeeded(false);\n }\n }\n changeParent(editor) {\n if (editor.parent === this) {\n return;\n }\n if (editor.parent && editor.annotationElementId) {\n this.#uiManager.addDeletedAnnotationElement(editor.annotationElementId);\n AnnotationEditor.deleteAnnotationElement(editor);\n editor.annotationElementId = null;\n }\n this.attach(editor);\n editor.parent?.detach(editor);\n editor.setParent(this);\n if (editor.div && editor.isAttachedToDOM) {\n editor.div.remove();\n this.div.append(editor.div);\n }\n }\n add(editor) {\n if (editor.parent === this && editor.isAttachedToDOM) {\n return;\n }\n 
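// Adding an editor re-parents it if needed, renders and appends its DOM node on first\n // attach, fixes its position, then records it in the annotation storage and reports its\n // initial telemetry payload.\n 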
this.changeParent(editor);\n this.#uiManager.addEditor(editor);\n this.attach(editor);\n if (!editor.isAttachedToDOM) {\n const div = editor.render();\n this.div.append(div);\n editor.isAttachedToDOM = true;\n }\n editor.fixAndSetPosition();\n editor.onceAdded();\n this.#uiManager.addToAnnotationStorage(editor);\n editor._reportTelemetry(editor.telemetryInitialData);\n }\n moveEditorInDOM(editor) {\n if (!editor.isAttachedToDOM) {\n return;\n }\n const {\n activeElement\n } = document;\n if (editor.div.contains(activeElement) && !this.#editorFocusTimeoutId) {\n editor._focusEventsAllowed = false;\n this.#editorFocusTimeoutId = setTimeout(() => {\n this.#editorFocusTimeoutId = null;\n if (!editor.div.contains(document.activeElement)) {\n editor.div.addEventListener(\"focusin\", () => {\n editor._focusEventsAllowed = true;\n }, {\n once: true,\n signal: this.#uiManager._signal\n });\n activeElement.focus();\n } else {\n editor._focusEventsAllowed = true;\n }\n }, 0);\n }\n editor._structTreeParentId = this.#accessibilityManager?.moveElementInDOM(this.div, editor.div, editor.contentDiv, true);\n }\n addOrRebuild(editor) {\n if (editor.needsToBeRebuilt()) {\n editor.parent ||= this;\n editor.rebuild();\n editor.show();\n } else {\n this.add(editor);\n }\n }\n addUndoableEditor(editor) {\n const cmd = () => editor._uiManager.rebuild(editor);\n const undo = () => {\n editor.remove();\n };\n this.addCommands({\n cmd,\n undo,\n mustExec: false\n });\n }\n getNextId() {\n return this.#uiManager.getId();\n }\n get #currentEditorType() {\n return AnnotationEditorLayer.#editorTypes.get(this.#uiManager.getMode());\n }\n get _signal() {\n return this.#uiManager._signal;\n }\n #createNewEditor(params) {\n const editorType = this.#currentEditorType;\n return editorType ? new editorType.prototype.constructor(params) : null;\n }\n canCreateNewEmptyEditor() {\n return this.#currentEditorType?.canCreateNewEmptyEditor();\n }\n pasteEditor(mode, params) {\n this.#uiManager.updateToolbar(mode);\n this.#uiManager.updateMode(mode);\n const {\n offsetX,\n offsetY\n } = this.#getCenterPoint();\n const id = this.getNextId();\n const editor = this.#createNewEditor({\n parent: this,\n id,\n x: offsetX,\n y: offsetY,\n uiManager: this.#uiManager,\n isCentered: true,\n ...params\n });\n if (editor) {\n this.add(editor);\n }\n }\n deserialize(data) {\n return AnnotationEditorLayer.#editorTypes.get(data.annotationType ?? data.annotationEditorType)?.deserialize(data, this, this.#uiManager) || null;\n }\n createAndAddNewEditor(event, isCentered, data = {}) {\n const id = this.getNextId();\n const editor = this.#createNewEditor({\n parent: this,\n id,\n x: event.offsetX,\n y: event.offsetY,\n uiManager: this.#uiManager,\n isCentered,\n ...data\n });\n if (editor) {\n this.add(editor);\n }\n return editor;\n }\n #getCenterPoint() {\n const {\n x,\n y,\n width,\n height\n } = this.div.getBoundingClientRect();\n const tlX = Math.max(0, x);\n const tlY = Math.max(0, y);\n const brX = Math.min(window.innerWidth, x + width);\n const brY = Math.min(window.innerHeight, y + height);\n const centerX = (tlX + brX) / 2 - x;\n const centerY = (tlY + brY) / 2 - y;\n const [offsetX, offsetY] = this.viewport.rotation % 180 === 0 ? 
[centerX, centerY] : [centerY, centerX];\n return {\n offsetX,\n offsetY\n };\n }\n addNewEditor() {\n this.createAndAddNewEditor(this.#getCenterPoint(), true);\n }\n setSelected(editor) {\n this.#uiManager.setSelected(editor);\n }\n toggleSelected(editor) {\n this.#uiManager.toggleSelected(editor);\n }\n isSelected(editor) {\n return this.#uiManager.isSelected(editor);\n }\n unselect(editor) {\n this.#uiManager.unselect(editor);\n }\n pointerup(event) {\n const {\n isMac\n } = util_FeatureTest.platform;\n if (event.button !== 0 || event.ctrlKey && isMac) {\n return;\n }\n if (event.target !== this.div) {\n return;\n }\n if (!this.#hadPointerDown) {\n return;\n }\n this.#hadPointerDown = false;\n if (!this.#allowClick) {\n this.#allowClick = true;\n return;\n }\n if (this.#uiManager.getMode() === AnnotationEditorType.STAMP) {\n this.#uiManager.unselectAll();\n return;\n }\n this.createAndAddNewEditor(event, false);\n }\n pointerdown(event) {\n if (this.#uiManager.getMode() === AnnotationEditorType.HIGHLIGHT) {\n this.enableTextSelection();\n }\n if (this.#hadPointerDown) {\n this.#hadPointerDown = false;\n return;\n }\n const {\n isMac\n } = util_FeatureTest.platform;\n if (event.button !== 0 || event.ctrlKey && isMac) {\n return;\n }\n if (event.target !== this.div) {\n return;\n }\n this.#hadPointerDown = true;\n const editor = this.#uiManager.getActive();\n this.#allowClick = !editor || editor.isEmpty();\n }\n findNewParent(editor, x, y) {\n const layer = this.#uiManager.findParent(x, y);\n if (layer === null || layer === this) {\n return false;\n }\n layer.changeParent(editor);\n return true;\n }\n destroy() {\n if (this.#uiManager.getActive()?.parent === this) {\n this.#uiManager.commitOrRemove();\n this.#uiManager.setActiveEditor(null);\n }\n if (this.#editorFocusTimeoutId) {\n clearTimeout(this.#editorFocusTimeoutId);\n this.#editorFocusTimeoutId = null;\n }\n for (const editor of this.#editors.values()) {\n this.#accessibilityManager?.removePointerInTextLayer(editor.contentDiv);\n editor.setParent(null);\n editor.isAttachedToDOM = false;\n editor.div.remove();\n }\n this.div = null;\n this.#editors.clear();\n this.#uiManager.removeLayer(this);\n }\n #cleanup() {\n this.#isCleaningUp = true;\n for (const editor of this.#editors.values()) {\n if (editor.isEmpty()) {\n editor.remove();\n }\n }\n this.#isCleaningUp = false;\n }\n render({\n viewport\n }) {\n this.viewport = viewport;\n setLayerDimensions(this.div, viewport);\n for (const editor of this.#uiManager.getEditors(this.pageIndex)) {\n this.add(editor);\n editor.rebuild();\n }\n this.updateMode();\n }\n update({\n viewport\n }) {\n this.#uiManager.commitOrRemove();\n this.#cleanup();\n const oldRotation = this.viewport.rotation;\n const rotation = viewport.rotation;\n this.viewport = viewport;\n setLayerDimensions(this.div, {\n rotation\n });\n if (oldRotation !== rotation) {\n for (const editor of this.#editors.values()) {\n editor.rotate(rotation);\n }\n }\n this.addInkEditorIfNeeded(false);\n }\n get pageDimensions() {\n const {\n pageWidth,\n pageHeight\n } = this.viewport.rawDims;\n return [pageWidth, pageHeight];\n }\n get scale() {\n return this.#uiManager.viewParameters.realScale;\n }\n}\n\n;// CONCATENATED MODULE: ./src/display/draw_layer.js\n\n\nclass DrawLayer {\n #parent = null;\n #id = 0;\n #mapping = new Map();\n #toUpdate = new Map();\n constructor({\n pageIndex\n }) {\n this.pageIndex = pageIndex;\n }\n setParent(parent) {\n if (!this.#parent) {\n this.#parent = parent;\n return;\n }\n if (this.#parent !== 
parent) {\n if (this.#mapping.size > 0) {\n for (const root of this.#mapping.values()) {\n root.remove();\n parent.append(root);\n }\n }\n this.#parent = parent;\n }\n }\n static get _svgFactory() {\n return shadow(this, \"_svgFactory\", new DOMSVGFactory());\n }\n static #setBox(element, {\n x = 0,\n y = 0,\n width = 1,\n height = 1\n } = {}) {\n const {\n style\n } = element;\n style.top = `${100 * y}%`;\n style.left = `${100 * x}%`;\n style.width = `${100 * width}%`;\n style.height = `${100 * height}%`;\n }\n #createSVG(box) {\n const svg = DrawLayer._svgFactory.create(1, 1, true);\n this.#parent.append(svg);\n svg.setAttribute(\"aria-hidden\", true);\n DrawLayer.#setBox(svg, box);\n return svg;\n }\n #createClipPath(defs, pathId) {\n const clipPath = DrawLayer._svgFactory.createElement(\"clipPath\");\n defs.append(clipPath);\n const clipPathId = `clip_${pathId}`;\n clipPath.setAttribute(\"id\", clipPathId);\n clipPath.setAttribute(\"clipPathUnits\", \"objectBoundingBox\");\n const clipPathUse = DrawLayer._svgFactory.createElement(\"use\");\n clipPath.append(clipPathUse);\n clipPathUse.setAttribute(\"href\", `#${pathId}`);\n clipPathUse.classList.add(\"clip\");\n return clipPathId;\n }\n highlight(outlines, color, opacity, isPathUpdatable = false) {\n const id = this.#id++;\n const root = this.#createSVG(outlines.box);\n root.classList.add(\"highlight\");\n if (outlines.free) {\n root.classList.add(\"free\");\n }\n const defs = DrawLayer._svgFactory.createElement(\"defs\");\n root.append(defs);\n const path = DrawLayer._svgFactory.createElement(\"path\");\n defs.append(path);\n const pathId = `path_p${this.pageIndex}_${id}`;\n path.setAttribute(\"id\", pathId);\n path.setAttribute(\"d\", outlines.toSVGPath());\n if (isPathUpdatable) {\n this.#toUpdate.set(id, path);\n }\n const clipPathId = this.#createClipPath(defs, pathId);\n const use = DrawLayer._svgFactory.createElement(\"use\");\n root.append(use);\n root.setAttribute(\"fill\", color);\n root.setAttribute(\"fill-opacity\", opacity);\n use.setAttribute(\"href\", `#${pathId}`);\n this.#mapping.set(id, root);\n return {\n id,\n clipPathId: `url(#${clipPathId})`\n };\n }\n highlightOutline(outlines) {\n const id = this.#id++;\n const root = this.#createSVG(outlines.box);\n root.classList.add(\"highlightOutline\");\n const defs = DrawLayer._svgFactory.createElement(\"defs\");\n root.append(defs);\n const path = DrawLayer._svgFactory.createElement(\"path\");\n defs.append(path);\n const pathId = `path_p${this.pageIndex}_${id}`;\n path.setAttribute(\"id\", pathId);\n path.setAttribute(\"d\", outlines.toSVGPath());\n path.setAttribute(\"vector-effect\", \"non-scaling-stroke\");\n let maskId;\n if (outlines.free) {\n root.classList.add(\"free\");\n const mask = DrawLayer._svgFactory.createElement(\"mask\");\n defs.append(mask);\n maskId = `mask_p${this.pageIndex}_${id}`;\n mask.setAttribute(\"id\", maskId);\n mask.setAttribute(\"maskUnits\", \"objectBoundingBox\");\n const rect = DrawLayer._svgFactory.createElement(\"rect\");\n mask.append(rect);\n rect.setAttribute(\"width\", \"1\");\n rect.setAttribute(\"height\", \"1\");\n rect.setAttribute(\"fill\", \"white\");\n const use = DrawLayer._svgFactory.createElement(\"use\");\n mask.append(use);\n use.setAttribute(\"href\", `#${pathId}`);\n use.setAttribute(\"stroke\", \"none\");\n use.setAttribute(\"fill\", \"black\");\n use.setAttribute(\"fill-rule\", \"nonzero\");\n use.classList.add(\"mask\");\n }\n const use1 = DrawLayer._svgFactory.createElement(\"use\");\n root.append(use1);\n 
use1.setAttribute(\"href\", `#${pathId}`);\n if (maskId) {\n use1.setAttribute(\"mask\", `url(#${maskId})`);\n }\n const use2 = use1.cloneNode();\n root.append(use2);\n use1.classList.add(\"mainOutline\");\n use2.classList.add(\"secondaryOutline\");\n this.#mapping.set(id, root);\n return id;\n }\n finalizeLine(id, line) {\n const path = this.#toUpdate.get(id);\n this.#toUpdate.delete(id);\n this.updateBox(id, line.box);\n path.setAttribute(\"d\", line.toSVGPath());\n }\n updateLine(id, line) {\n const root = this.#mapping.get(id);\n const defs = root.firstChild;\n const path = defs.firstChild;\n path.setAttribute(\"d\", line.toSVGPath());\n }\n removeFreeHighlight(id) {\n this.remove(id);\n this.#toUpdate.delete(id);\n }\n updatePath(id, line) {\n this.#toUpdate.get(id).setAttribute(\"d\", line.toSVGPath());\n }\n updateBox(id, box) {\n DrawLayer.#setBox(this.#mapping.get(id), box);\n }\n show(id, visible) {\n this.#mapping.get(id).classList.toggle(\"hidden\", !visible);\n }\n rotate(id, angle) {\n this.#mapping.get(id).setAttribute(\"data-main-rotation\", angle);\n }\n changeColor(id, color) {\n this.#mapping.get(id).setAttribute(\"fill\", color);\n }\n changeOpacity(id, opacity) {\n this.#mapping.get(id).setAttribute(\"fill-opacity\", opacity);\n }\n addClass(id, className) {\n this.#mapping.get(id).classList.add(className);\n }\n removeClass(id, className) {\n this.#mapping.get(id).classList.remove(className);\n }\n remove(id) {\n if (this.#parent === null) {\n return;\n }\n this.#mapping.get(id).remove();\n this.#mapping.delete(id);\n }\n destroy() {\n this.#parent = null;\n for (const root of this.#mapping.values()) {\n root.remove();\n }\n this.#mapping.clear();\n }\n}\n\n;// CONCATENATED MODULE: ./src/pdf.js\n\n\n\n\n\n\n\n\n\n\n\n\nconst pdfjsVersion = \"4.4.168\";\nconst pdfjsBuild = \"19fbc8998\";\n\nvar __webpack_exports__AbortException = __webpack_exports__.AbortException;\nvar __webpack_exports__AnnotationEditorLayer = __webpack_exports__.AnnotationEditorLayer;\nvar __webpack_exports__AnnotationEditorParamsType = __webpack_exports__.AnnotationEditorParamsType;\nvar __webpack_exports__AnnotationEditorType = __webpack_exports__.AnnotationEditorType;\nvar __webpack_exports__AnnotationEditorUIManager = __webpack_exports__.AnnotationEditorUIManager;\nvar __webpack_exports__AnnotationLayer = __webpack_exports__.AnnotationLayer;\nvar __webpack_exports__AnnotationMode = __webpack_exports__.AnnotationMode;\nvar __webpack_exports__CMapCompressionType = __webpack_exports__.CMapCompressionType;\nvar __webpack_exports__ColorPicker = __webpack_exports__.ColorPicker;\nvar __webpack_exports__DOMSVGFactory = __webpack_exports__.DOMSVGFactory;\nvar __webpack_exports__DrawLayer = __webpack_exports__.DrawLayer;\nvar __webpack_exports__FeatureTest = __webpack_exports__.FeatureTest;\nvar __webpack_exports__GlobalWorkerOptions = __webpack_exports__.GlobalWorkerOptions;\nvar __webpack_exports__ImageKind = __webpack_exports__.ImageKind;\nvar __webpack_exports__InvalidPDFException = __webpack_exports__.InvalidPDFException;\nvar __webpack_exports__MissingPDFException = __webpack_exports__.MissingPDFException;\nvar __webpack_exports__OPS = __webpack_exports__.OPS;\nvar __webpack_exports__Outliner = __webpack_exports__.Outliner;\nvar __webpack_exports__PDFDataRangeTransport = __webpack_exports__.PDFDataRangeTransport;\nvar __webpack_exports__PDFDateString = __webpack_exports__.PDFDateString;\nvar __webpack_exports__PDFWorker = __webpack_exports__.PDFWorker;\nvar __webpack_exports__PasswordResponses = 
__webpack_exports__.PasswordResponses;\nvar __webpack_exports__PermissionFlag = __webpack_exports__.PermissionFlag;\nvar __webpack_exports__PixelsPerInch = __webpack_exports__.PixelsPerInch;\nvar __webpack_exports__RenderingCancelledException = __webpack_exports__.RenderingCancelledException;\nvar __webpack_exports__TextLayer = __webpack_exports__.TextLayer;\nvar __webpack_exports__UnexpectedResponseException = __webpack_exports__.UnexpectedResponseException;\nvar __webpack_exports__Util = __webpack_exports__.Util;\nvar __webpack_exports__VerbosityLevel = __webpack_exports__.VerbosityLevel;\nvar __webpack_exports__XfaLayer = __webpack_exports__.XfaLayer;\nvar __webpack_exports__build = __webpack_exports__.build;\nvar __webpack_exports__createValidAbsoluteUrl = __webpack_exports__.createValidAbsoluteUrl;\nvar __webpack_exports__fetchData = __webpack_exports__.fetchData;\nvar __webpack_exports__getDocument = __webpack_exports__.getDocument;\nvar __webpack_exports__getFilenameFromUrl = __webpack_exports__.getFilenameFromUrl;\nvar __webpack_exports__getPdfFilenameFromUrl = __webpack_exports__.getPdfFilenameFromUrl;\nvar __webpack_exports__getXfaPageViewport = __webpack_exports__.getXfaPageViewport;\nvar __webpack_exports__isDataScheme = __webpack_exports__.isDataScheme;\nvar __webpack_exports__isPdfFile = __webpack_exports__.isPdfFile;\nvar __webpack_exports__noContextMenu = __webpack_exports__.noContextMenu;\nvar __webpack_exports__normalizeUnicode = __webpack_exports__.normalizeUnicode;\nvar __webpack_exports__renderTextLayer = __webpack_exports__.renderTextLayer;\nvar __webpack_exports__setLayerDimensions = __webpack_exports__.setLayerDimensions;\nvar __webpack_exports__shadow = __webpack_exports__.shadow;\nvar __webpack_exports__updateTextLayer = __webpack_exports__.updateTextLayer;\nvar __webpack_exports__version = __webpack_exports__.version;\nexport { __webpack_exports__AbortException as AbortException, __webpack_exports__AnnotationEditorLayer as AnnotationEditorLayer, __webpack_exports__AnnotationEditorParamsType as AnnotationEditorParamsType, __webpack_exports__AnnotationEditorType as AnnotationEditorType, __webpack_exports__AnnotationEditorUIManager as AnnotationEditorUIManager, __webpack_exports__AnnotationLayer as AnnotationLayer, __webpack_exports__AnnotationMode as AnnotationMode, __webpack_exports__CMapCompressionType as CMapCompressionType, __webpack_exports__ColorPicker as ColorPicker, __webpack_exports__DOMSVGFactory as DOMSVGFactory, __webpack_exports__DrawLayer as DrawLayer, __webpack_exports__FeatureTest as FeatureTest, __webpack_exports__GlobalWorkerOptions as GlobalWorkerOptions, __webpack_exports__ImageKind as ImageKind, __webpack_exports__InvalidPDFException as InvalidPDFException, __webpack_exports__MissingPDFException as MissingPDFException, __webpack_exports__OPS as OPS, __webpack_exports__Outliner as Outliner, __webpack_exports__PDFDataRangeTransport as PDFDataRangeTransport, __webpack_exports__PDFDateString as PDFDateString, __webpack_exports__PDFWorker as PDFWorker, __webpack_exports__PasswordResponses as PasswordResponses, __webpack_exports__PermissionFlag as PermissionFlag, __webpack_exports__PixelsPerInch as PixelsPerInch, __webpack_exports__RenderingCancelledException as RenderingCancelledException, __webpack_exports__TextLayer as TextLayer, __webpack_exports__UnexpectedResponseException as UnexpectedResponseException, __webpack_exports__Util as Util, __webpack_exports__VerbosityLevel as VerbosityLevel, __webpack_exports__XfaLayer as XfaLayer, 
__webpack_exports__build as build, __webpack_exports__createValidAbsoluteUrl as createValidAbsoluteUrl, __webpack_exports__fetchData as fetchData, __webpack_exports__getDocument as getDocument, __webpack_exports__getFilenameFromUrl as getFilenameFromUrl, __webpack_exports__getPdfFilenameFromUrl as getPdfFilenameFromUrl, __webpack_exports__getXfaPageViewport as getXfaPageViewport, __webpack_exports__isDataScheme as isDataScheme, __webpack_exports__isPdfFile as isPdfFile, __webpack_exports__noContextMenu as noContextMenu, __webpack_exports__normalizeUnicode as normalizeUnicode, __webpack_exports__renderTextLayer as renderTextLayer, __webpack_exports__setLayerDimensions as setLayerDimensions, __webpack_exports__shadow as shadow, __webpack_exports__updateTextLayer as updateTextLayer, __webpack_exports__version as version };\n\n","import { HttpClient } from '@angular/common/http';\r\nimport { Injectable } from '@angular/core';\r\nimport { Observable, catchError, of, throwError } from 'rxjs';\r\nimport { environment } from '../../environments/environment';\r\n\r\n@Injectable({\r\n providedIn: 'root'\r\n})\r\nexport class MonthPerformanceService {\r\n\r\n // NOTE: the <any> type arguments below are assumed restorations; the generic parameters on Observable were stripped in this dump.\r\n constructor(private http: HttpClient) { }\r\n\r\n getMonthPerformance(): Observable<any> {\r\n \r\n let url=`${environment.api.uri}v1.0/performance/month?kpidate=2024-01`;\r\n return this.http.get(url);\r\n }\r\n\r\n getMonthPerformanceByApexIdAndMonth(apexId:string, month:string): Observable<any> {\r\n let url=`${environment.api.uri}v1.0/performance/month?apexid=${apexId}&kpidate=${month}`;\r\n return this.http.get(url);\r\n }\r\n\r\n // v1.0/performance?apexid=zhangw13&businesstype=PG&country=CN&kpidate=2024-02-01&kpidate=2024-02-01\r\n getMonthPerformanceByApexIdStartEndMonthCountryBusinessType(apexId:string, startMonth:string, endMonth:string, driverCountry:string, driverBusinessType:string): Observable<any> {\r\n let url=`${environment.api.uri}v1.0/performance?apexid=${apexId}&kpidate=${startMonth}&kpidate=${endMonth}&country=${driverCountry}&businesstype=${driverBusinessType}`;\r\n return this.http.get(url);\r\n }\r\n\r\n getMonthPerformanceByApexIdMonthCountryBusinessType(apexId:string, month:string, driverCountry:string, driverBusinessType:string): Observable<any> {\r\n let url=`${environment.api.uri}v1.0/performance?apexid=${apexId}&kpidate=${month}&country=${driverCountry}&businesstype=${driverBusinessType}`;\r\n return this.http.get(url);\r\n }\r\n\r\n // today is 20240322\r\n // v1.0/performance?apexid=zhangw13&businesstype=PG&country=CN&kpidate=2023-10-01&kpidate=2024-02-29\r\n getYearPerformanceByApexIdMonthCountryBusinessType(apexId:string, startMonth:string, endMonth:string, driverCountry:string, driverBusinessType:string): Observable<any> {\r\n let url=`${environment.api.uri}v1.0/performance?apexid=${apexId}&kpidate=${startMonth}&kpidate=${endMonth}&country=${driverCountry}&businesstype=${driverBusinessType}`;\r\n return this.http.get(url);\r\n }\r\n\r\n // v1.0/kpi/others/3/attachment\r\n getOthersKpiAttachment(kpiOthersId:number): Observable<any> {\r\n let url=`${environment.api.uri}v1.0/kpi/others/${kpiOthersId}/attachment`;\r\n \r\n return this.http.get(url, { responseType: 'text' as 'json' });\r\n }\r\n\r\n getOthersKpiAttachmentV2(kpiOthersId:number): Observable<any> {\r\n let url=`${environment.api.uri}v1.0/kpi/others/${kpiOthersId}/attachment`;\r\n \r\n return this.http.get(url, { responseType: 'blob' });\r\n }\r\n\r\n \r\n}\r\n","import { Component, ElementRef, Inject, OnInit, ViewChild } from '@angular/core';\r\nimport {\r\n MatDialog,\r\n 
MAT_DIALOG_DATA,\r\n MatDialogTitle,\r\n MatDialogContent, \r\n MatDialogModule,\r\n} from '@angular/material/dialog';\r\nimport {MatButtonModule} from '@angular/material/button';\r\nimport { CommonModule } from '@angular/common';\r\nimport { DomSanitizer, SafeResourceUrl } from '@angular/platform-browser';\r\nimport { MonthPerformanceService } from '../service/month-performance.service';\r\nimport { Observable, fromEvent, map } from 'rxjs';\r\nimport {MatProgressSpinnerModule} from '@angular/material/progress-spinner';\r\nimport {ThemePalette} from '@angular/material/core';\r\nimport { MatIconModule } from '@angular/material/icon';\r\nimport { RouterLink } from '@angular/router';\r\nimport { MatToolbarModule } from '@angular/material/toolbar';\r\nimport { TranslateModule } from '@ngx-translate/core';\r\nimport { TranslateService } from '@ngx-translate/core';\r\nimport * as pdfjs from 'pdfjs-dist';\r\nconst pdfWorker = \"../../assets/pdf.worker.mjs\";\r\n@Component({\r\n selector: 'app-attachment-dialog',\r\n standalone: true,\r\n imports: [\r\n CommonModule,\r\n MatDialogTitle, \r\n MatDialogContent,\r\n MatDialogModule, \r\n MatButtonModule,\r\n MatProgressSpinnerModule,\r\n MatIconModule, \r\n MatToolbarModule,\r\n TranslateModule,\r\n ],\r\n templateUrl: './attachment-dialog.component.html',\r\n styleUrl: './attachment-dialog.component.css'\r\n})\r\nexport class AttachmentDialogComponent {\r\n color : ThemePalette = 'accent';\r\n constructor(@Inject(MAT_DIALOG_DATA) \r\n public data: any,\r\n private sanitizer: DomSanitizer,\r\n private monthPerformanceService: MonthPerformanceService,\r\n translate: TranslateService, \r\n ) {\r\n let browserLangToUse = '';\r\n let browserLang = translate.getBrowserCultureLang();\r\n browserLangToUse = browserLang === undefined ? 'en' : browserLang;\r\n console.log(browserLang); \r\n translate.setDefaultLang('en'); \r\n translate.use(browserLangToUse); \r\n }\r\n wordFormat = 'doc';\r\n wordUrl = 'https://view.officeapps.live.com/op/embed.aspx?src=';\r\n showWord = false;\r\n trustedWordUrl! : SafeResourceUrl;\r\n showImg = false;\r\n imgBase64String = '';\r\n trustedImgUrl! : SafeResourceUrl;\r\n showPdf = false;\r\n pdfBase64String = '';\r\n trustedPdfUrl! 
: SafeResourceUrl;\r\n isLoading = false;\r\n // NOTE: the <string> type argument is an assumed restoration (the pipe maps to `reader.result as string`); the generic parameter was stripped in this dump.\r\n toBase64(blob: Blob): Observable<string> {\r\n const reader = new FileReader();\r\n reader.readAsDataURL(blob);\r\n return fromEvent(reader, 'load')\r\n .pipe(map(() => (reader.result as string)))\r\n }\r\n\r\n ngOnInit(): void {\r\n this.isLoading = true;\r\n this.monthPerformanceService.getOthersKpiAttachmentV2(this.data.kpi?.kpiOthers_Id).subscribe(responseOthersKpiAttachment => {\r\n let index = this.data.kpi.attachment.lastIndexOf('.');\r\n let format = this.data.kpi.attachment.substring(index + 1);\r\n let formatLowerCase = format.toLowerCase(); \r\n if(formatLowerCase.includes('pdf')){\r\n this.showPdfDocV3(responseOthersKpiAttachment);\r\n // this.showPdfDocV5(this.data.kpi.getAttachmentS3Url);\r\n this.showPdf = true;\r\n }\r\n if(formatLowerCase.includes('jpg') ){\r\n this.showJpg(responseOthersKpiAttachment);\r\n // this.showJpgV2(this.data.kpi.getAttachmentS3Url);\r\n this.showImg = true;\r\n }\r\n if(formatLowerCase.includes('png') ){ \r\n this.showJpg(responseOthersKpiAttachment);\r\n // this.showJpgV2(this.data.kpi.getAttachmentS3Url); \r\n this.showImg = true;\r\n }\r\n this.isLoading = false;\r\n });\r\n }\r\n\r\n\r\n pdfURL() {\r\n return this.sanitizer.bypassSecurityTrustResourceUrl(this.pdfBase64String);\r\n }\r\n\r\n pdfURL2() {\r\n return this.sanitizer.bypassSecurityTrustResourceUrl(this.pdfUrl);\r\n }\r\n\r\n jpgUrl!:any;\r\n jpgSafeUrl(){\r\n return this.sanitizer.bypassSecurityTrustResourceUrl(this.jpgUrl);\r\n }\r\n\r\n showJpg(binaryData:Blob){\r\n var objectURL = URL.createObjectURL(binaryData);\r\n this.jpgUrl = objectURL;\r\n }\r\n showJpgV2(urlString:string){ \r\n this.jpgUrl = urlString;\r\n }\r\n\r\n showPDF(base64Data:string) {\r\n var binaryData = atob(base64Data);\r\n // Create a Uint8Array from the binary data\r\n var uint8Array = new Uint8Array(binaryData.length);\r\n for (var i = 0; i < binaryData.length; i++) {\r\n uint8Array[i] = binaryData.charCodeAt(i);\r\n }\r\n\r\n // Create a Blob from the Uint8Array\r\n var blob = new Blob([uint8Array], { type: 'application/pdf' });\r\n\r\n // Create an object URL for the blob\r\n var objectURL = URL.createObjectURL(blob);\r\n this.pdfUrl = objectURL;\r\n\r\n // Display the PDF using or