From bfb5dda5cde059c733ecafa6e77d61836fa1ba98 Mon Sep 17 00:00:00 2001
From: ci-bot <ci-bot@example.com>
Date: Fri, 23 Aug 2024 12:58:23 +0000
Subject: [PATCH] Deployed 6cf6c02 to v10.0.X with MkDocs 1.6.0 and mike 2.1.3

---
 v10.0.X/404.html                              |   4 +--
 v10.0.X/CernVM/index.html                     |   4 +--
 .../assets/javascripts/bundle.af256bd8.min.js |  29 ++++++++++++++++++
 ....min.js.map => bundle.af256bd8.min.js.map} |   8 ++---
 .../assets/javascripts/bundle.fe8b6f2b.min.js |  29 ------------------
 v10.0.X/index.html                            |   4 +--
 .../model_building_exercise/index.html        |   4 +--
 v10.0.X/part2/bin-wise-stats/index.html       |   4 +--
 v10.0.X/part2/bsm-higgs-models/index.html     |   4 +--
 v10.0.X/part2/higgscouplings/index.html       |   4 +--
 v10.0.X/part2/physicsmodels/index.html        |   4 +--
 v10.0.X/part2/settinguptheanalysis/index.html |   4 +--
 v10.0.X/part3/commonstatsmethods/index.html   |   8 +++--
 v10.0.X/part3/debugging/index.html            |   4 +--
 v10.0.X/part3/nonstandard/index.html          |   4 +--
 v10.0.X/part3/regularisation/index.html       |   4 +--
 v10.0.X/part3/runningthetool/index.html       |   4 +--
 v10.0.X/part3/simplifiedlikelihood/index.html |   4 +--
 v10.0.X/part3/validation/index.html           |   4 +--
 v10.0.X/part4/usefullinks/index.html          |   4 +--
 v10.0.X/part5/longexercise/index.html         |   4 +--
 v10.0.X/part5/longexerciseanswers/index.html  |   4 +--
 v10.0.X/part5/roofit/index.html               |   4 +--
 v10.0.X/releaseNotes/index.html               |   4 +--
 v10.0.X/search/search_index.json              |   2 +-
 v10.0.X/sitemap.xml.gz                        | Bin 127 -> 127 bytes
 .../parametric_exercise/index.html            |   4 +--
 .../unfolding_exercise/index.html             |   4 +--
 .../stat_routines/index.html                  |   4 +--
 .../fitting_concepts/index.html               |   4 +--
 .../what_combine_does/introduction/index.html |   4 +--
 .../model_and_likelihood/index.html           |   4 +--
 .../statistical_tests/index.html              |   4 +--
 33 files changed, 93 insertions(+), 91 deletions(-)
 create mode 100644 v10.0.X/assets/javascripts/bundle.af256bd8.min.js
 rename v10.0.X/assets/javascripts/{bundle.fe8b6f2b.min.js.map => bundle.af256bd8.min.js.map} (66%)
 delete mode 100644 v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js
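
Note: a deployment commit of this shape is what mike's "deploy" command produces; it rebuilds the site with MkDocs and commits the output to the publish branch with the message template seen in the subject above. The sketch below is illustrative only, not taken from this repository's CI: it assumes mike 2.1.3, mkdocs 1.6.0 and mkdocs-material 9.5.33 are installed and that it runs from the documentation project root. (Text in this region, between the stat summary and the first "diff --git" line, is ignored when the patch is applied.)

    #!/usr/bin/env python3
    """Illustrative sketch: publish the built docs as v10.0.X via mike."""
    import subprocess

    VERSION = "v10.0.X"  # version label used throughout this patch

    # "mike deploy" runs the MkDocs build and commits the generated site to
    # the publish branch; --push uploads the resulting commit.
    subprocess.run(["mike", "deploy", "--push", VERSION], check=True)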

diff --git a/v10.0.X/404.html b/v10.0.X/404.html
index d3038f5e9d6..b6a238046d0 100644
--- a/v10.0.X/404.html
+++ b/v10.0.X/404.html
@@ -12,7 +12,7 @@
       
       
       <link rel="icon" href="/logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1287,7 +1287,7 @@ <h1>404 - Not found</h1>
     <script id="__config" type="application/json">{"base": "/", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "/assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="/assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="/assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="/javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/CernVM/index.html b/v10.0.X/CernVM/index.html
index d520b804ac5..5bea3d49066 100644
--- a/v10.0.X/CernVM/index.html
+++ b/v10.0.X/CernVM/index.html
@@ -12,7 +12,7 @@
       
       
       <link rel="icon" href="../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1348,7 +1348,7 @@ <h1 id="available-machines-for-standalone-combine">Available machines for standa
     <script id="__config" type="application/json">{"base": "..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/assets/javascripts/bundle.af256bd8.min.js b/v10.0.X/assets/javascripts/bundle.af256bd8.min.js
new file mode 100644
index 00000000000..27355d2bb81
--- /dev/null
+++ b/v10.0.X/assets/javascripts/bundle.af256bd8.min.js
@@ -0,0 +1,29 @@
+"use strict";(()=>{var ji=Object.create;var gr=Object.defineProperty;var Wi=Object.getOwnPropertyDescriptor;var Ui=Object.getOwnPropertyNames,Vt=Object.getOwnPropertySymbols,Di=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,io=Object.prototype.propertyIsEnumerable;var no=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,$=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&no(e,r,t[r]);if(Vt)for(var r of Vt(t))io.call(t,r)&&no(e,r,t[r]);return e};var ao=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Vt)for(var o of Vt(e))t.indexOf(o)<0&&io.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Vi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Ui(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Wi(t,n))||o.enumerable});return e};var Lt=(e,t,r)=>(r=e!=null?ji(Di(e)):{},Vi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var so=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var po=yr((Er,co)=>{(function(e,t){typeof Er=="object"&&typeof co!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var ft=H.type,qe=H.tagName;return!!(qe==="INPUT"&&a[ft]&&!H.readOnly||qe==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function 
J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var qr=yr((lx,Sn)=>{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Ha=/["'&<>]/;Sn.exports=ka;function ka(e){var t=""+e,r=Ha.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i<t.length;i++){switch(t.charCodeAt(i)){case 34:o="&quot;";break;case 38:o="&amp;";break;case 39:o="&#39;";break;case 60:o="&lt;";break;case 62:o="&gt;";break;default:continue}a!==i&&(n+=t.substring(a,i)),a=i+1,n+=o}return a!==i?n+t.substring(a,i):n}});var Br=yr((It,Yr)=>{/*!
+ * clipboard.js v2.0.11
+ * https://clipboardjs.com/
+ *
+ * Licensed MIT © Zeno Rocha
+ */(function(t,r){typeof It=="object"&&typeof Yr=="object"?Yr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof It=="object"?It.ClipboardJS=r():t.ClipboardJS=r()})(It,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Fi}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var ft=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,$e=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if($e)return J($e,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},qe=ft;function je(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?je=function(M){return typeof M}:je=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},je(V)}function Ai(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function oo(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ci(V,_,M){return _&&oo(V.prototype,_),M&&oo(V,M),V}function Hi(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function ki(V){var _=Ri();return function(){var j=Ut(V),D;if(_){var Y=Ut(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return $i(this,D)}}function $i(V,_){return _&&(je(_)==="object"||typeof _=="function")?_:Pi(V)}function Pi(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Ri(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Ut(V){return Ut=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Ut(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var Ii=function(V){Hi(M,V);var _=ki(M);function M(j,D){var Y;return Ai(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ci(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=je(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function($e){return Y.onClick($e)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,$e=this.action(Y)||"copy",Dt=qe({action:$e,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Dt?"success":"error",{action:$e,text:Dt,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof 
D=="string"?[D]:D,$e=!!document.queryCommandSupported;return Y.forEach(function(Dt){$e=$e&&!!document.queryCommandSupported(Dt)}),$e}}]),M}(s()),Fi=Ii},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p<c;p++)s[p].fn.apply(s[p].ctx,a);return this},off:function(i,a){var s=this.e||(this.e={}),p=s[i],c=[];if(p&&a)for(var l=0,f=p.length;l<f;l++)p[l].fn!==a&&p[l].fn._!==a&&c.push(p[l]);return c.length?s[i]=c:delete s[i],this}},o.exports=n,o.exports.TinyEmitter=n}},t={};function r(o){if(t[o])return t[o].exports;var n=t[o]={exports:{}};return e[o](n,n.exports,r),n.exports}return function(){r.n=function(o){var 
n=o&&o.__esModule?function(){return o.default}:function(){return o};return r.d(n,{a:n}),n}}(),function(){r.d=function(o,n){for(var i in n)r.o(n,i)&&!r.o(o,i)&&Object.defineProperty(o,i,{enumerable:!0,get:n[i]})}}(),function(){r.o=function(o,n){return Object.prototype.hasOwnProperty.call(o,n)}}(),r(686)}().default})});var o0=Lt(po());/*! *****************************************************************************
+Copyright (c) Microsoft Corporation.
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+***************************************************************************** */var wr=function(e,t){return wr=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(r,o){r.__proto__=o}||function(r,o){for(var n in o)Object.prototype.hasOwnProperty.call(o,n)&&(r[n]=o[n])},wr(e,t)};function re(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");wr(e,t);function r(){this.constructor=e}e.prototype=t===null?Object.create(t):(r.prototype=t.prototype,new r)}function lo(e,t,r,o){function n(i){return i instanceof r?i:new r(function(a){a(i)})}return new(r||(r=Promise))(function(i,a){function s(l){try{c(o.next(l))}catch(f){a(f)}}function p(l){try{c(o.throw(l))}catch(f){a(f)}}function c(l){l.done?i(l.value):n(l.value).then(s,p)}c((o=o.apply(e,t||[])).next())})}function Nt(e,t){var r={label:0,sent:function(){if(i[0]&1)throw i[1];return i[1]},trys:[],ops:[]},o,n,i,a;return a={next:s(0),throw:s(1),return:s(2)},typeof Symbol=="function"&&(a[Symbol.iterator]=function(){return this}),a;function s(c){return function(l){return p([c,l])}}function p(c){if(o)throw new TypeError("Generator is already executing.");for(;r;)try{if(o=1,n&&(i=c[0]&2?n.return:c[0]?n.throw||((i=n.return)&&i.call(n),0):n.next)&&!(i=i.call(n,c[1])).done)return i;switch(n=0,i&&(c=[c[0]&2,i.value]),c[0]){case 0:case 1:i=c;break;case 4:return r.label++,{value:c[1],done:!1};case 5:r.label++,n=c[1],c=[0];continue;case 7:c=r.ops.pop(),r.trys.pop();continue;default:if(i=r.trys,!(i=i.length>0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]<i[3])){r.label=c[1];break}if(c[0]===6&&r.label<i[1]){r.label=i[1],i=c;break}if(i&&r.label<i[2]){r.label=i[2],r.ops.push(c);break}i[2]&&r.ops.pop(),r.trys.pop();continue}c=t.call(e,r)}catch(l){c=[6,l],n=0}finally{o=i=0}if(c[0]&5)throw c[1];return{value:c[0]?c[1]:void 0,done:!0}}}function he(e){var t=typeof Symbol=="function"&&Symbol.iterator,r=t&&e[t],o=0;if(r)return r.call(e);if(e&&typeof e.length=="number")return{next:function(){return e&&o>=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o<n;o++)(i||!(o in t))&&(i||(i=Array.prototype.slice.call(t,0,o)),i[o]=t[o]);return e.concat(i||Array.prototype.slice.call(t))}function nt(e){return this instanceof nt?(this.v=e,this):new nt(e)}function mo(e,t,r){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var o=r.apply(e,t||[]),n,i=[];return n={},a("next"),a("throw"),a("return"),n[Symbol.asyncIterator]=function(){return this},n;function a(u){o[u]&&(n[u]=function(h){return new Promise(function(w,A){i.push([u,h,w,A])>1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function fo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof 
he=="function"?he(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ut(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ut(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription:
+`+r.map(function(o,n){return n+1+") "+o.toString()}).join(`
+  `):"",this.name="UnsubscriptionError",this.errors=r}});function Qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var We=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=he(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=he(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{uo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)uo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=We.EMPTY;function qt(e){return e instanceof We||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function uo(e){k(e)?e():e.unsubscribe()}var Pe={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var dt={setTimeout:function(e,t){for(var r=[],o=2;o<arguments.length;o++)r[o-2]=arguments[o];var n=dt.delegate;return n!=null&&n.setTimeout?n.setTimeout.apply(n,q([e,t],N(r))):setTimeout.apply(void 0,q([e,t],N(r)))},clearTimeout:function(e){var t=dt.delegate;return((t==null?void 0:t.clearTimeout)||clearTimeout)(e)},delegate:void 0};function Qt(e){dt.setTimeout(function(){var t=Pe.onUnhandledError;if(t)t(e);else throw e})}function be(){}var ho=function(){return Sr("C",void 0,void 0)}();function bo(e){return Sr("E",void 0,e)}function vo(e){return Sr("N",e,void 0)}function Sr(e,t,r){return{kind:e,value:t,error:r}}var it=null;function ht(e){if(Pe.useDeprecatedSynchronousErrorHandling){var t=!it;if(t&&(it={errorThrown:!1,error:null}),e(),t){var r=it,o=r.errorThrown,n=r.error;if(it=null,o)throw n}}else e()}function go(e){Pe.useDeprecatedSynchronousErrorHandling&&it&&(it.errorThrown=!0,it.error=e)}var _t=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o.isStopped=!1,r?(o.destination=r,qt(r)&&r.add(o)):o.destination=Qi,o}return t.create=function(r,o,n){return new 
at(r,o,n)},t.prototype.next=function(r){this.isStopped?Mr(vo(r),this):this._next(r)},t.prototype.error=function(r){this.isStopped?Mr(bo(r),this):(this.isStopped=!0,this._error(r))},t.prototype.complete=function(){this.isStopped?Mr(ho,this):(this.isStopped=!0,this._complete())},t.prototype.unsubscribe=function(){this.closed||(this.isStopped=!0,e.prototype.unsubscribe.call(this),this.destination=null)},t.prototype._next=function(r){this.destination.next(r)},t.prototype._error=function(r){try{this.destination.error(r)}finally{this.unsubscribe()}},t.prototype._complete=function(){try{this.destination.complete()}finally{this.unsubscribe()}},t}(We);var Ni=Function.prototype.bind;function Or(e,t){return Ni.call(e,t)}var zi=function(){function e(t){this.partialObserver=t}return e.prototype.next=function(t){var r=this.partialObserver;if(r.next)try{r.next(t)}catch(o){Kt(o)}},e.prototype.error=function(t){var r=this.partialObserver;if(r.error)try{r.error(t)}catch(o){Kt(o)}else Kt(t)},e.prototype.complete=function(){var t=this.partialObserver;if(t.complete)try{t.complete()}catch(r){Kt(r)}},e}(),at=function(e){re(t,e);function t(r,o,n){var i=e.call(this)||this,a;if(k(r)||!r)a={next:r!=null?r:void 0,error:o!=null?o:void 0,complete:n!=null?n:void 0};else{var s;i&&Pe.useDeprecatedNextContext?(s=Object.create(r),s.unsubscribe=function(){return i.unsubscribe()},a={next:r.next&&Or(r.next,s),error:r.error&&Or(r.error,s),complete:r.complete&&Or(r.complete,s)}):a=r}return i.destination=new zi(a),i}return t}(_t);function Kt(e){Pe.useDeprecatedSynchronousErrorHandling?go(e):Qt(e)}function qi(e){throw e}function Mr(e,t){var r=Pe.onStoppedNotification;r&&dt.setTimeout(function(){return r(e,t)})}var Qi={closed:!0,next:be,error:qi,complete:be};var bt=function(){return typeof Symbol=="function"&&Symbol.observable||"@@observable"}();function le(e){return e}function xo(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Lr(e)}function Lr(e){return e.length===0?le:e.length===1?e[0]:function(r){return e.reduce(function(o,n){return n(o)},r)}}var F=function(){function e(t){t&&(this._subscribe=t)}return e.prototype.lift=function(t){var r=new e;return r.source=this,r.operator=t,r},e.prototype.subscribe=function(t,r,o){var n=this,i=Yi(t)?t:new at(t,r,o);return ht(function(){var a=n,s=a.operator,p=a.source;i.add(s?s.call(i,p):p?n._subscribe(i):n._trySubscribe(i))}),i},e.prototype._trySubscribe=function(t){try{return this._subscribe(t)}catch(r){t.error(r)}},e.prototype.forEach=function(t,r){var o=this;return r=yo(r),new r(function(n,i){var a=new at({next:function(s){try{t(s)}catch(p){i(p),a.unsubscribe()}},error:i,complete:n});o.subscribe(a)})},e.prototype._subscribe=function(t){var r;return(r=this.source)===null||r===void 0?void 0:r.subscribe(t)},e.prototype[bt]=function(){return this},e.prototype.pipe=function(){for(var t=[],r=0;r<arguments.length;r++)t[r]=arguments[r];return Lr(t)(this)},e.prototype.toPromise=function(t){var r=this;return t=yo(t),new t(function(o,n){var i;r.subscribe(function(a){return i=a},function(a){return n(a)},function(){return o(i)})})},e.create=function(t){return new e(t)},e}();function yo(e){var t;return(t=e!=null?e:Pe.Promise)!==null&&t!==void 0?t:Promise}function Ki(e){return e&&k(e.next)&&k(e.error)&&k(e.complete)}function Yi(e){return e&&e instanceof _t||Ki(e)&&qt(e)}function Bi(e){return k(e==null?void 0:e.lift)}function y(e){return function(t){if(Bi(t))return t.lift(function(r){try{return e(r,this)}catch(o){this.error(o)}});throw new TypeError("Unable to lift unknown Observable 
type")}}function T(e,t,r,o,n){return new Gi(e,t,r,o,n)}var Gi=function(e){re(t,e);function t(r,o,n,i,a,s){var p=e.call(this,r)||this;return p.onFinalize=a,p.shouldUnsubscribe=s,p._next=o?function(c){try{o(c)}catch(l){r.error(l)}}:e.prototype._next,p._error=i?function(c){try{i(c)}catch(l){r.error(l)}finally{this.unsubscribe()}}:e.prototype._error,p._complete=n?function(){try{n()}catch(c){r.error(c)}finally{this.unsubscribe()}}:e.prototype._complete,p}return t.prototype.unsubscribe=function(){var r;if(!this.shouldUnsubscribe||this.shouldUnsubscribe()){var o=this.closed;e.prototype.unsubscribe.call(this),!o&&((r=this.onFinalize)===null||r===void 0||r.call(this))}},t}(_t);var vt={schedule:function(e){var t=requestAnimationFrame,r=cancelAnimationFrame,o=vt.delegate;o&&(t=o.requestAnimationFrame,r=o.cancelAnimationFrame);var n=t(function(i){r=void 0,e(i)});return new We(function(){return r==null?void 0:r(n)})},requestAnimationFrame:function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=vt.delegate;return((r==null?void 0:r.requestAnimationFrame)||requestAnimationFrame).apply(void 0,q([],N(e)))},cancelAnimationFrame:function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=vt.delegate;return((r==null?void 0:r.cancelAnimationFrame)||cancelAnimationFrame).apply(void 0,q([],N(e)))},delegate:void 0};var Eo=ut(function(e){return function(){e(this),this.name="ObjectUnsubscribedError",this.message="object unsubscribed"}});var g=function(e){re(t,e);function t(){var r=e.call(this)||this;return r.closed=!1,r.currentObservers=null,r.observers=[],r.isStopped=!1,r.hasError=!1,r.thrownError=null,r}return t.prototype.lift=function(r){var o=new wo(this,this);return o.operator=r,o},t.prototype._throwIfClosed=function(){if(this.closed)throw new Eo},t.prototype.next=function(r){var o=this;ht(function(){var n,i;if(o._throwIfClosed(),!o.isStopped){o.currentObservers||(o.currentObservers=Array.from(o.observers));try{for(var a=he(o.currentObservers),s=a.next();!s.done;s=a.next()){var p=s.value;p.next(r)}}catch(c){n={error:c}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(n)throw n.error}}}})},t.prototype.error=function(r){var o=this;ht(function(){if(o._throwIfClosed(),!o.isStopped){o.hasError=o.isStopped=!0,o.thrownError=r;for(var n=o.observers;n.length;)n.shift().error(r)}})},t.prototype.complete=function(){var r=this;ht(function(){if(r._throwIfClosed(),!r.isStopped){r.isStopped=!0;for(var o=r.observers;o.length;)o.shift().complete()}})},t.prototype.unsubscribe=function(){this.isStopped=this.closed=!0,this.observers=this.currentObservers=null},Object.defineProperty(t.prototype,"observed",{get:function(){var r;return((r=this.observers)===null||r===void 0?void 0:r.length)>0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new We(function(){o.currentObservers=null,Qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new wo(r,o)},t}(F);var wo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return 
n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var At={now:function(){return(At.delegate||Date).now()},delegate:void 0};var Ct=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=At);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p<s.length&&!r.closed;p+=i?1:2)r.next(s[p]);return this._checkFinalizedStatuses(r),o},t.prototype._trimBuffer=function(){var r=this,o=r._bufferSize,n=r._timestampProvider,i=r._buffer,a=r._infiniteTimeWindow,s=(a?1:2)*o;if(o<1/0&&s<i.length&&i.splice(0,i.length-s),!a){for(var p=n.now(),c=0,l=1;l<i.length&&i[l]<=p;l+=2)c=l;c&&i.splice(0,c+1)}},t}(g);var To=function(e){re(t,e);function t(r,o){return e.call(this)||this}return t.prototype.schedule=function(r,o){return o===void 0&&(o=0),this},t}(We);var Ht={setInterval:function(e,t){for(var r=[],o=2;o<arguments.length;o++)r[o-2]=arguments[o];var n=Ht.delegate;return n!=null&&n.setInterval?n.setInterval.apply(n,q([e,t],N(r))):setInterval.apply(void 0,q([e,t],N(r)))},clearInterval:function(e){var t=Ht.delegate;return((t==null?void 0:t.clearInterval)||clearInterval)(e)},delegate:void 0};var gt=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n.pending=!1,n}return t.prototype.schedule=function(r,o){var n;if(o===void 0&&(o=0),this.closed)return this;this.state=r;var i=this.id,a=this.scheduler;return i!=null&&(this.id=this.recycleAsyncId(a,i,o)),this.pending=!0,this.delay=o,this.id=(n=this.id)!==null&&n!==void 0?n:this.requestAsyncId(a,this.id,o),this},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),Ht.setInterval(r.flush.bind(r,this),n)},t.prototype.recycleAsyncId=function(r,o,n){if(n===void 0&&(n=0),n!=null&&this.delay===n&&this.pending===!1)return o;o!=null&&Ht.clearInterval(o)},t.prototype.execute=function(r,o){if(this.closed)return new Error("executing a cancelled action");this.pending=!1;var n=this._execute(r,o);if(n)return 
n;this.pending===!1&&this.id!=null&&(this.id=this.recycleAsyncId(this.scheduler,this.id,null))},t.prototype._execute=function(r,o){var n=!1,i;try{this.work(r)}catch(a){n=!0,i=a||new Error("Scheduled action threw falsy error")}if(n)return this.unsubscribe(),i},t.prototype.unsubscribe=function(){if(!this.closed){var r=this,o=r.id,n=r.scheduler,i=n.actions;this.work=this.state=this.scheduler=null,this.pending=!1,Qe(i,this),o!=null&&(this.id=this.recycleAsyncId(n,o,null)),this.delay=null,e.prototype.unsubscribe.call(this)}},t}(To);var Ar=function(){function e(t,r){r===void 0&&(r=e.now),this.schedulerActionCtor=t,this.now=r}return e.prototype.schedule=function(t,r,o){return r===void 0&&(r=0),new this.schedulerActionCtor(this,t).schedule(o,r)},e.now=At.now,e}();var xt=function(e){re(t,e);function t(r,o){o===void 0&&(o=Ar.now);var n=e.call(this,r,o)||this;return n.actions=[],n._active=!1,n}return t.prototype.flush=function(r){var o=this.actions;if(this._active){o.push(r);return}var n;this._active=!0;do if(n=r.execute(r.state,r.delay))break;while(r=o.shift());if(this._active=!1,n){for(;r=o.shift();)r.unsubscribe();throw n}},t}(Ar);var se=new xt(gt),Cr=se;var So=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.schedule=function(r,o){return o===void 0&&(o=0),o>0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(gt);var Oo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(xt);var Hr=new Oo(So);var Mo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=vt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(vt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(gt);var Lo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(xt);var me=new Lo(Mo);var S=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var yt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[bt])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ji(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Ji();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return mo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Xi(e);if(yt(e))return Zi(e);if(Gt(e))return ea(e);if(Xt(e))return _o(e);if(tr(e))return ta(e);if(or(e))return ra(e)}throw Zt(e)}function Xi(e){return new F(function(t){var r=e[bt]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Zi(e){return new F(function(t){for(var r=0;r<e.length&&!t.closed;r++)t.next(e[r]);t.complete()})}function ea(e){return new F(function(t){e.then(function(r){t.closed||(t.next(r),t.complete())},function(r){return t.error(r)}).then(null,Qt)})}function ta(e){return new F(function(t){var r,o;try{for(var n=he(e),i=n.next();!i.done;i=n.next()){var a=i.value;if(t.next(a),t.closed)return}}catch(s){r={error:s}}finally{try{i&&!i.done&&(o=n.return)&&o.call(n)}finally{if(r)throw r.error}}t.complete()})}function _o(e){return new F(function(t){oa(e,t).catch(function(r){return t.error(r)})})}function ra(e){return _o(rr(e))}function oa(e,t){var r,o,n,i;return lo(this,void 0,void 0,function(){var a,s;return Nt(this,function(p){switch(p.label){case 0:p.trys.push([0,5,6,11]),r=fo(e),p.label=1;case 1:return[4,r.next()];case 2:if(o=p.sent(),!!o.done)return[3,4];if(a=o.value,t.next(a),t.closed)return[2];p.label=3;case 3:return[3,1];case 4:return[3,11];case 5:return s=p.sent(),n={error:s},[3,11];case 6:return p.trys.push([6,,9,10]),o&&!o.done&&(i=r.return)?[4,i.call(r)]:[3,8];case 7:p.sent(),p.label=8;case 8:return[3,10];case 9:if(n)throw n.error;return[7];case 10:return[7];case 11:return t.complete(),[2]}})})}function we(e,t,r,o,n){o===void 0&&(o=0),n===void 0&&(n=!1);var i=t.schedule(function(){r(),n?e.add(this.schedule(null,o)):this.unsubscribe()},o);if(e.add(i),!n)return i}function ve(e,t){return t===void 0&&(t=0),y(function(r,o){r.subscribe(T(o,function(n){return we(o,e,function(){return o.next(n)},t)},function(){return we(o,e,function(){return o.complete()},t)},function(n){return we(o,e,function(){return o.error(n)},t)}))})}function Ke(e,t){return t===void 0&&(t=0),y(function(r,o){o.add(e.schedule(function(){return r.subscribe(o)},t))})}function Ao(e,t){return W(e).pipe(Ke(t),ve(t))}function Co(e,t){return W(e).pipe(Ke(t),ve(t))}function Ho(e,t){return new F(function(r){var o=0;return t.schedule(function(){o===e.length?r.complete():(r.next(e[o++]),r.closed||this.schedule())})})}function ko(e,t){return new F(function(r){var o;return we(r,t,function(){o=e[er](),we(r,t,function(){var n,i,a;try{n=o.next(),i=n.value,a=n.done}catch(s){r.error(s);return}a?r.complete():r.next(i)},0,!0)}),function(){return k(o==null?void 0:o.return)&&o.return()}})}function nr(e,t){if(!e)throw new Error("Iterable cannot be null");return new F(function(r){we(r,t,function(){var 
o=e[Symbol.asyncIterator]();we(r,t,function(){o.next().then(function(n){n.done?r.complete():r.next(n.value)})},0,!0)})})}function $o(e,t){return nr(rr(e),t)}function Po(e,t){if(e!=null){if(Jt(e))return Ao(e,t);if(yt(e))return Ho(e,t);if(Gt(e))return Co(e,t);if(Xt(e))return nr(e,t);if(tr(e))return ko(e,t);if(or(e))return $o(e,t)}throw Zt(e)}function ue(e,t){return t?Po(e,t):W(e)}function I(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e);return ue(e,r)}function $r(e,t){var r=k(e)?e:function(){return e},o=function(n){return n.error(r())};return new F(t?function(n){return t.schedule(o,0,n)}:o)}var ir=ut(function(e){return function(){e(this),this.name="EmptyError",this.message="no elements in sequence"}});function Ro(e){return e instanceof Date&&!isNaN(e)}function m(e,t){return y(function(r,o){var n=0;r.subscribe(T(o,function(i){o.next(e.call(t,i,n++))}))})}var na=Array.isArray;function ia(e,t){return na(t)?e.apply(void 0,q([],N(t))):e(t)}function Ze(e){return m(function(t){return ia(e,t)})}var aa=Array.isArray,sa=Object.getPrototypeOf,ca=Object.prototype,pa=Object.keys;function Io(e){if(e.length===1){var t=e[0];if(aa(t))return{args:t,keys:null};if(la(t)){var r=pa(t);return{args:r.map(function(o){return t[o]}),keys:r}}}return{args:e,keys:null}}function la(e){return e&&typeof e=="object"&&sa(e)===ca}function Fo(e,t){return e.reduce(function(r,o,n){return r[o]=t[n],r},{})}function z(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e),o=Xe(e),n=Io(e),i=n.args,a=n.keys;if(i.length===0)return ue([],r);var s=new F(Pr(i,r,a?function(p){return Fo(a,p)}:le));return o?s.pipe(Ze(o)):s}function Pr(e,t,r){return r===void 0&&(r=le),function(o){jo(t,function(){for(var n=e.length,i=new Array(n),a=n,s=n,p=function(l){jo(t,function(){var f=ue(e[l],t),u=!1;f.subscribe(T(o,function(h){i[l]=h,u||(u=!0,s--),s||o.next(r(i.slice()))},function(){--a||o.complete()}))},o)},c=0;c<n;c++)p(c)},o)}}function jo(e,t,r){e?we(r,e,t):t()}function Wo(e,t,r,o,n,i,a,s){var p=[],c=0,l=0,f=!1,u=function(){f&&!p.length&&!c&&t.complete()},h=function(A){return c<o?w(A):p.push(A)},w=function(A){i&&t.next(A),c++;var te=!1;W(r(A,l++)).subscribe(T(t,function(ie){n==null||n(ie),i?h(ie):t.next(ie)},function(){te=!0},void 0,function(){if(te)try{c--;for(var ie=function(){var J=p.shift();a?we(t,a,function(){return w(J)}):w(J)};p.length&&c<o;)ie();u()}catch(J){t.error(J)}}))};return e.subscribe(T(t,h,function(){f=!0,u()})),function(){s==null||s()}}function oe(e,t,r){return r===void 0&&(r=1/0),k(t)?oe(function(o,n){return m(function(i,a){return t(o,i,n,a)})(W(e(o,n)))},r):(typeof t=="number"&&(r=t),y(function(o,n){return Wo(o,n,e,r)}))}function Et(e){return e===void 0&&(e=1/0),oe(le,e)}function Uo(){return Et(1)}function Ue(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Uo()(ue(e,He(e)))}function C(e){return new F(function(t){W(e()).subscribe(t)})}var ma=["addListener","removeListener"],fa=["addEventListener","removeEventListener"],ua=["on","off"];function d(e,t,r,o){if(k(r)&&(o=r,r=void 0),o)return d(e,t,r).pipe(Ze(o));var n=N(ba(e)?fa.map(function(s){return function(p){return e[s](t,p,r)}}):da(e)?ma.map(Do(e,t)):ha(e)?ua.map(Do(e,t)):[],2),i=n[0],a=n[1];if(!i&&yt(e))return oe(function(s){return d(s,t,r)})(W(e));if(!i)throw new TypeError("Invalid event target");return new F(function(s){var p=function(){for(var c=[],l=0;l<arguments.length;l++)c[l]=arguments[l];return s.next(1<c.length?c:c[0])};return i(p),function(){return a(p)}})}function Do(e,t){return function(r){return 
function(o){return e[r](t,o)}}}function da(e){return k(e.addListener)&&k(e.removeListener)}function ha(e){return k(e.on)&&k(e.off)}function ba(e){return k(e.addEventListener)&&k(e.removeEventListener)}function ar(e,t,r){return r?ar(e,t).pipe(Ze(r)):new F(function(o){var n=function(){for(var a=[],s=0;s<arguments.length;s++)a[s]=arguments[s];return o.next(a.length===1?a[0]:a)},i=e(n);return k(t)?function(){return t(n,i)}:void 0})}function Me(e,t,r){e===void 0&&(e=0),r===void 0&&(r=Cr);var o=-1;return t!=null&&(Yt(t)?r=t:o=t),new F(function(n){var i=Ro(e)?+e-r.now():e;i<0&&(i=0);var a=0;return r.schedule(function(){n.closed||(n.next(a++),0<=o?this.schedule(void 0,o):n.complete())},i)})}function O(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e),o=Bt(e,1/0),n=e;return n.length?n.length===1?W(n[0]):Et(o)(ue(n,r)):S}var Ye=new F(be);var va=Array.isArray;function wt(e){return e.length===1&&va(e[0])?e[0]:e}function b(e,t){return y(function(r,o){var n=0;r.subscribe(T(o,function(i){return e.call(t,i,n++)&&o.next(i)}))})}function st(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=Xe(e),o=wt(e);return o.length?new F(function(n){var i=o.map(function(){return[]}),a=o.map(function(){return!1});n.add(function(){i=a=null});for(var s=function(c){W(o[c]).subscribe(T(n,function(l){if(i[c].push(l),i.every(function(u){return u.length})){var f=i.map(function(u){return u.shift()});n.next(r?r.apply(void 0,q([],N(f))):f),i.some(function(u,h){return!u.length&&a[h]})&&n.complete()}},function(){a[c]=!0,!i[c].length&&n.complete()}))},p=0;!n.closed&&p<o.length;p++)s(p);return function(){i=a=null}}):S}function Vo(e){return y(function(t,r){var o=!1,n=null,i=null,a=!1,s=function(){if(i==null||i.unsubscribe(),i=null,o){o=!1;var c=n;n=null,r.next(c)}a&&r.complete()},p=function(){i=null,a&&r.complete()};t.subscribe(T(r,function(c){o=!0,n=c,i||W(e(c)).subscribe(i=T(r,s,p))},function(){a=!0,(!o||!i||i.closed)&&r.complete()}))})}function Le(e,t){return t===void 0&&(t=se),Vo(function(){return Me(e,t)})}function Be(e,t){return t===void 0&&(t=null),t=t!=null?t:e,y(function(r,o){var n=[],i=0;r.subscribe(T(o,function(a){var s,p,c,l,f=null;i++%t===0&&n.push([]);try{for(var u=he(n),h=u.next();!h.done;h=u.next()){var w=h.value;w.push(a),e<=w.length&&(f=f!=null?f:[],f.push(w))}}catch(ie){s={error:ie}}finally{try{h&&!h.done&&(p=u.return)&&p.call(u)}finally{if(s)throw s.error}}if(f)try{for(var A=he(f),te=A.next();!te.done;te=A.next()){var w=te.value;Qe(n,w),o.next(w)}}catch(ie){c={error:ie}}finally{try{te&&!te.done&&(l=A.return)&&l.call(A)}finally{if(c)throw c.error}}},function(){var a,s;try{for(var p=he(n),c=p.next();!c.done;c=p.next()){var l=c.value;o.next(l)}}catch(f){a={error:f}}finally{try{c&&!c.done&&(s=p.return)&&s.call(p)}finally{if(a)throw a.error}}o.complete()},void 0,function(){n=null}))})}function de(e){return y(function(t,r){var o=null,n=!1,i;o=t.subscribe(T(r,void 0,void 0,function(a){i=W(e(a,de(e)(t))),o?(o.unsubscribe(),o=null,i.subscribe(r)):n=!0})),n&&(o.unsubscribe(),o=null,i.subscribe(r))})}function No(e,t,r,o,n){return function(i,a){var s=r,p=t,c=0;i.subscribe(T(a,function(l){var f=c++;p=s?e(p,l,f):(s=!0,l),o&&a.next(p)},n&&function(){s&&a.next(p),a.complete()}))}}function Rr(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=Xe(e);return r?xo(Rr.apply(void 0,q([],N(e))),Ze(r)):y(function(o,n){Pr(q([o],N(wt(e))))(n)})}function ke(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Rr.apply(void 0,q([],N(e)))}function kt(e){return y(function(t,r){var 
o=!1,n=null,i=null,a=function(){if(i==null||i.unsubscribe(),i=null,o){o=!1;var s=n;n=null,r.next(s)}};t.subscribe(T(r,function(s){i==null||i.unsubscribe(),o=!0,n=s,i=T(r,a,be),W(e(s)).subscribe(i)},function(){a(),r.complete()},void 0,function(){n=i=null}))})}function _e(e,t){return t===void 0&&(t=se),y(function(r,o){var n=null,i=null,a=null,s=function(){if(n){n.unsubscribe(),n=null;var c=i;i=null,o.next(c)}};function p(){var c=a+e,l=t.now();if(l<c){n=this.schedule(void 0,c-l),o.add(n);return}s()}r.subscribe(T(o,function(c){i=c,a=t.now(),n||(n=t.schedule(p,e),o.add(n))},function(){s(),o.complete()},void 0,function(){i=n=null}))})}function De(e){return y(function(t,r){var o=!1;t.subscribe(T(r,function(n){o=!0,r.next(n)},function(){o||r.next(e),r.complete()}))})}function Te(e){return e<=0?function(){return S}:y(function(t,r){var o=0;t.subscribe(T(r,function(n){++o<=e&&(r.next(n),e<=o&&r.complete())}))})}function X(){return y(function(e,t){e.subscribe(T(t,be))})}function zo(e){return m(function(){return e})}function Ir(e,t){return t?function(r){return Ue(t.pipe(Te(1),X()),r.pipe(Ir(e)))}:oe(function(r,o){return W(e(r,o)).pipe(Te(1),zo(r))})}function Ge(e,t){t===void 0&&(t=se);var r=Me(e,t);return Ir(function(){return r})}function K(e,t){return t===void 0&&(t=le),e=e!=null?e:ga,y(function(r,o){var n,i=!0;r.subscribe(T(o,function(a){var s=t(a);(i||!e(n,s))&&(i=!1,n=s,o.next(a))}))})}function ga(e,t){return e===t}function Z(e,t){return K(function(r,o){return t?t(r[e],o[e]):r[e]===o[e]})}function qo(e){return e===void 0&&(e=xa),y(function(t,r){var o=!1;t.subscribe(T(r,function(n){o=!0,r.next(n)},function(){return o?r.complete():r.error(e())}))})}function xa(){return new ir}function ne(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return function(r){return Ue(r,I.apply(void 0,q([],N(e))))}}function L(e){return y(function(t,r){try{t.subscribe(r)}finally{r.add(e)}})}function Ae(e,t){var r=arguments.length>=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?De(t):qo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return S}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e<o.length&&o.shift()},function(){var n,i;try{for(var a=he(o),s=a.next();!s.done;s=a.next()){var p=s.value;r.next(p)}}catch(c){n={error:c}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(n)throw n.error}}r.complete()},void 0,function(){o=null}))})}function Qo(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e),o=Bt(e,1/0);return e=wt(e),y(function(n,i){Et(o)(ue(q([n],N(e)),r)).subscribe(i)})}function Re(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Qo.apply(void 0,q([],N(e)))}function ct(e){var t,r=1/0,o;return e!=null&&(typeof e=="object"?(t=e.count,r=t===void 0?1/0:t,o=e.delay):r=e),r<=0?function(){return S}:y(function(n,i){var a=0,s,p=function(){if(s==null||s.unsubscribe(),s=null,o!=null){var l=typeof o=="number"?Me(o):W(o(a)),f=T(i,function(){f.unsubscribe(),c()});l.subscribe(f)}else c()},c=function(){var l=!1;s=n.subscribe(T(i,void 0,function(){++a<r?s?p():l=!0:i.complete()})),l&&p()};c()})}function jr(e,t){return y(No(e,t,arguments.length>=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var 
H=l;ie(),H==null||H.unsubscribe()};return y(function(H,ft){h++,!A&&!w&&te();var qe=u=u!=null?u:r();ft.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),qe.subscribe(ft),!l&&h>0&&(l=new at({next:function(je){return qe.next(je)},error:function(je){A=!0,te(),f=Wr(ie,n,je),qe.error(je)},complete:function(){w=!0,te(),f=Wr(ie,a),qe.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;o<arguments.length;o++)r[o-2]=arguments[o];if(t===!0){e();return}if(t!==!1){var n=new at({next:function(){n.unsubscribe(),e()}});return W(t.apply(void 0,q([],N(r)))).subscribe(n)}}function G(e,t,r){var o,n,i,a,s=!1;return e&&typeof e=="object"?(o=e.bufferSize,a=o===void 0?1/0:o,n=e.windowTime,t=n===void 0?1/0:n,i=e.refCount,s=i===void 0?!1:i,r=e.scheduler):a=e!=null?e:1/0,pe({connector:function(){return new Ct(a,t,r)},resetOnError:!0,resetOnComplete:!1,resetOnRefCountZero:s})}function Ce(e){return b(function(t,r){return e<=r})}function Ur(e){return y(function(t,r){var o=!1,n=T(r,function(){n==null||n.unsubscribe(),o=!0},be);W(e).subscribe(n),t.subscribe(T(r,function(i){return o&&r.next(i)}))})}function Q(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e);return y(function(o,n){(r?Ue(e,o,r):Ue(e,o)).subscribe(n)})}function v(e,t){return y(function(r,o){var n=null,i=0,a=!1,s=function(){return a&&!n&&o.complete()};r.subscribe(T(o,function(p){n==null||n.unsubscribe();var c=0,l=i++;W(e(p,l)).subscribe(n=T(o,function(f){return o.next(t?t(p,f,l,c++):f)},function(){n=null,s()}))},function(){a=!0,s()}))})}function U(e){return y(function(t,r){W(e).subscribe(T(r,function(){return r.complete()},be)),!r.closed&&t.subscribe(r)})}function Dr(e,t){return t===void 0&&(t=!1),y(function(r,o){var n=0;r.subscribe(T(o,function(i){var a=e(i,n++);(a||t)&&o.next(i),!a&&o.complete()}))})}function E(e,t,r){var o=k(e)||t||r?{next:e,error:t,complete:r}:e;return o?y(function(n,i){var a;(a=o.subscribe)===null||a===void 0||a.call(o);var s=!0;n.subscribe(T(i,function(p){var c;(c=o.next)===null||c===void 0||c.call(o,p),i.next(p)},function(){var p;s=!1,(p=o.complete)===null||p===void 0||p.call(o),i.complete()},function(p){var c;s=!1,(c=o.error)===null||c===void 0||c.call(o,p),i.error(p)},function(){var p,c;s&&((p=o.unsubscribe)===null||p===void 0||p.call(o)),(c=o.finalize)===null||c===void 0||c.call(o)}))}):le}function Ko(e,t){return y(function(r,o){var n=t!=null?t:{},i=n.leading,a=i===void 0?!0:i,s=n.trailing,p=s===void 0?!1:s,c=!1,l=null,f=null,u=!1,h=function(){f==null||f.unsubscribe(),f=null,p&&(te(),u&&o.complete())},w=function(){f=null,u&&o.complete()},A=function(ie){return f=W(e(ie)).subscribe(T(o,h,w))},te=function(){if(c){c=!1;var ie=l;l=null,o.next(ie),!u&&A(ie)}};r.subscribe(T(o,function(ie){c=!0,l=ie,!(f&&!f.closed)&&(a?te():A(ie))},function(){u=!0,!(p&&c&&f&&!f.closed)&&o.complete()}))})}function pt(e,t,r){t===void 0&&(t=se);var o=Me(e,t);return Ko(function(){return o},r)}function ee(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=Xe(e);return y(function(o,n){for(var i=e.length,a=new Array(i),s=e.map(function(){return!1}),p=!1,c=function(f){W(e[f]).subscribe(T(n,function(u){a[f]=u,!p&&!s[f]&&(s[f]=!0,(p=s.every(le))&&(s=null))},be))},l=0;l<i;l++)c(l);o.subscribe(T(n,function(f){if(p){var u=q([f],N(a));n.next(r?r.apply(void 0,q([],N(u))):u)}}))})}function Yo(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return y(function(r,o){st.apply(void 0,q([r],N(e))).subscribe(o)})}function Vr(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Yo.apply(void 
0,q([],N(e)))}function Bo(){let e=new Ct(1);return d(document,"DOMContentLoaded",{once:!0}).subscribe(()=>e.next(document)),e}function P(e,t=document){return Array.from(t.querySelectorAll(e))}function R(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Ie(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var ya=O(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Ie()||document.body),G(1));function et(e){return ya.pipe(m(t=>e.contains(t)),K())}function $t(e,t){return C(()=>O(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?kt(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Go(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Go(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Go(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Tt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),O(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Jo=new g,Ea=C(()=>typeof ResizeObserver=="undefined"?Tt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Jo.next(t)))),v(e=>O(Ye,I(e)).pipe(L(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return Ea.pipe(E(r=>r.observe(t)),v(r=>Jo.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function St(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Xo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ve(e){return{x:e.offsetLeft,y:e.offsetTop}}function Zo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function en(e){return O(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ve(e)),Q(Ve(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ne(e){return O(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var tn=new g,wa=C(()=>I(new IntersectionObserver(e=>{for(let t of e)tn.next(t)},{threshold:0}))).pipe(v(e=>O(Ye,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return wa.pipe(E(t=>t.observe(e)),v(t=>tn.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function rn(e,t=16){return Ne(e).pipe(m(({y:r})=>{let o=ce(e),n=St(e);return r>=n.height-o.height-t}),K())}var lr={drawer:R("[data-md-toggle=drawer]"),search:R("[data-md-toggle=search]")};function on(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function ze(e){let t=lr[e];return 
d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function Ta(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Sa(){return O(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function nn(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:on("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!Ta(o,r)}return!0}),pe());return Sa().pipe(v(t=>t?S:e))}function xe(){return new URL(location.href)}function lt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function an(){return new g}function sn(){return location.hash.slice(1)}function cn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Oa(e){return O(d(window,"hashchange"),e).pipe(m(sn),Q(sn()),b(t=>t.length>0),G(1))}function pn(e){return Oa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function Pt(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function ln(){let e=matchMedia("print");return O(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():S))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Fe(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function fn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function un(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function dn(){return O(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(un),Q(un()))}function hn(){return{width:innerWidth,height:innerHeight}}function bn(){return d(window,"resize",{passive:!0}).pipe(m(hn),Q(hn()))}function vn(){return z([dn(),bn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ve(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Ma(e){return d(e,"message",t=>t.data)}function La(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function gn(e,t=new Worker(e)){let r=Ma(t),o=La(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Re(r.pipe(U(i))),pe())}var _a=R("#__config"),Ot=JSON.parse(_a.textContent);Ot.base=`${new URL(Ot.base,xe())}`;function ye(){return Ot}function B(e){return Ot.features.includes(e)}function Ee(e,t){return typeof 
t!="undefined"?Ot.translations[e].replace("#",t.toString()):Ot.translations[e]}function Se(e,t=document){return R(`[data-md-component=${e}]`,t)}function ae(e,t=document){return P(`[data-md-component=${e}]`,t)}function Aa(e){let t=R(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>R(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function xn(e){if(!B("announce.dismiss")||!e.childElementCount)return S;if(!e.hidden){let t=R(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),Aa(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>$({ref:e},r)))})}function Ca(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function yn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Ca(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))}function Rt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function En(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function wn(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function Tn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}var On=Lt(qr());function Qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,(0,On.default)(c))," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Mn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.score<t);a===-1&&(a=r.length);let s=r.slice(0,a),p=r.slice(a),c=[Qr(i,2|+(!n&&a===0)),...s.map(l=>Qr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>Qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Ln(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Kr(e){let t=`tabbed-control tabbed-control--${e}`;return 
x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function _n(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function $a(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function An(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map($a)))}var Pa=0;function Ra(e){let t=z([et(e),$t(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Xo(e)).pipe(oe(Ne),pt(1),ke(t),m(()=>Zo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function Ia(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Pa++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(kt(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:S),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>$t(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(R(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),ve(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),Ra(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>$({ref:e},c)))})}function mt(e,{viewport$:t},r=document.body){return Ia(e,{content$:new F(o=>{let n=e.title,i=En(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Fa(e,t){let r=C(()=>z([en(e),Ne(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Cn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),O(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Ie())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Fa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))})}function ja(e){return e.tagName==="CODE"?P(".c, .c1, .cm",e):[e]}function Wa(e){let t=[];for(let r of ja(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function Hn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Wa(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,wn(p,i)),s.replaceWith(a.get(p)))}return a.size===0?S:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([R(".md-typeset",f),R(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?Hn(f,u):Hn(u,f)}),O(...[...a].map(([,l])=>Cn(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function $n(e,t){return C(()=>{let r=kn(e);return typeof r!="undefined"?fr(r,e,t):S})}var Pn=Lt(Br());var Ua=0;function Rn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Rn(t)}}function Da(e){return ge(e).pipe(m(({width:t})=>({scrollable:St(e).width>t})),Z("scrollable"))}function In(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Pn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Ua++}`;let l=Tn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(mt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=Rn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let 
l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:S)))}}return P(":scope > span[id]",e).length&&e.classList.add("md-code__content"),Da(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>$({ref:e},c)),Re(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Va(e,{target$:t,print$:r}){let o=!0;return O(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Fn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Va(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}var jn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node 
circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,za=0;function qa(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function Wn(e){return 
e.classList.remove("mermaid"),Gr||(Gr=qa().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:jn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>so(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${za++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Un=x("table");function Dn(e){return e.replaceWith(Un),Un.replaceWith(_n(e)),I({ref:e})}function Qa(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>d(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Vn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.x<h.x||f.x+u>h.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of P("[data-tabs]"))for(let A of P(":scope > input",w)){let te=R(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Qa(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function Nn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>$n(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > code",e).map(n=>In(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Dn(n)),...P("details",e).map(n=>Fn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Vn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ka(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function zn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ka(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ya=0;function 
Ba(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ya++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ba(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Ga({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[n<i,i]),Z(0)),r=z([e,t]).pipe(b(([{offset:n},[,i]])=>Math.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Qn(e,t){return C(()=>z([ge(e),Ga(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Kn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),ke(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>qn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>$({ref:e},a)),Re(i.pipe(U(n))))})}function Ja(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function Yn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Ja(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>$({ref:e},n)))})}function Bn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Xa(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return 
I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Gn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;s<t.length;s++){let p=t[s].nextElementSibling;p instanceof HTMLElement&&(p.hidden=a.index!==s)}__md_set("__palette",a)}),d(e,"keydown").pipe(b(a=>a.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Xa(t).pipe(U(n.pipe(Ce(1))),ct(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>$({ref:e},a)))})}function Jn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Lt(Br());function Za(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Xn({alert$:e}){Jr.default.isSupported()&&new F(t=>{new Jr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Za(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Zn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function es(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[Zn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Zn(new URL(s),t))}}return r}function ur(e){return fn(new URL("sitemap.xml",e)).pipe(m(t=>es(t,new URL(e))),de(()=>I(new Map)))}function ts(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ei(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return t}function ti(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function rs(e){for(let o 
of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ei(document);for(let[o,n]of ei(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return Ue(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),X(),ne(document))}function ri({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ti);let i=d(document.body,"click").pipe(ke(n),v(([p,c])=>ts(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>mn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ti),v(rs),pe());return O(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",cn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var oi=Lt(qr());function ni(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}<mark data-md-highlight>${a}</mark>`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,oi.default)(a).replace(i,o).replace(/<\/mark>(\s+)<mark[^>]*>/img,"$1")}}function Ft(e){return e.type===1}function dr(e){return e.type===3}function ii(e,t){let r=gn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ai({document$:e}){let t=ye(),r=Fe(new URL("../versions.json",t.base)).pipe(de(()=>S)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(p))}}return S}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(An(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let 
s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function is(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(Ft)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function si(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(Ft)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),is(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function ci(e,{worker$:t,query$:r}){let o=new g,n=rn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(Ft)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?S:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>$({ref:e},l)))}function as(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function pi(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),as(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>$({ref:e},n)))}function li(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(d(n,"keydown"),d(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(ke(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g,"&nbsp;")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function mi(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ii(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let 
u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=si(i,{worker$:n});return O(s,ci(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>pi(p,{query$:s})),...ae("search-suggest",e).map(p=>li(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function fi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ni(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function ss(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Xr(e,o){var n=o,{header$:t}=n,r=ao(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue(P("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(ve(se),m(()=>l),U(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),ss(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>$({ref:e},l)))})}function ui(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(Fe(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),Fe(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Fe(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function di(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(Fe(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),Fe(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function hi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return ui(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return di(r,o)}return S}var cs;function ps(e){return cs||(cs=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return 
hi(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function bi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Ln(o)),t.classList.add("md-source__repository--active")}),ps(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function vi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ls(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}function ms(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),ke(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,p.next.length-s.next.length)}))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),ke(o.pipe(ve(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),ct({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ms(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))})}function fs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),ct({delay:250}),m(a=>({hidden:a})))}function xi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new 
g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),fs(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))}function yi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),oe(r=>mt(r,{viewport$:t}))).subscribe()}function Ei({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function us(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function wi({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(us),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ti({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ds(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",Zr.base)}`).pipe(m(()=>__index),G(1)):Fe(new URL("search/search_index.json",Zr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Bo(),Wt=an(),Mt=pn(Wt),eo=nn(),Oe=vn(),hr=Pt("(min-width: 960px)"),Oi=Pt("(min-width: 1220px)"),Mi=ln(),Zr=ye(),Li=document.forms.namedItem("search")?ds():Ye,to=new g;Xn({alert$:to});var ro=new g;B("navigation.instant")&&ri({location$:Wt,viewport$:Oe,progress$:ro}).subscribe(ot);var Si;((Si=Zr.version)==null?void 0:Si.provider)==="mike"&&ai({document$:ot});O(Wt,Mt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});eo.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof 
t!="undefined"&&lt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&lt(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});yi({viewport$:Oe,document$:ot});Ei({document$:ot,tablet$:hr});wi({document$:ot});Ti({viewport$:Oe,tablet$:hr});var rt=Qn(Se("header"),{viewport$:Oe}),jt=ot.pipe(m(()=>Se("main")),v(e=>Bn(e,{viewport$:Oe,header$:rt})),G(1)),hs=O(...ae("consent").map(e=>yn(e,{target$:Mt})),...ae("dialog").map(e=>zn(e,{alert$:to})),...ae("header").map(e=>Kn(e,{viewport$:Oe,header$:rt,main$:jt})),...ae("palette").map(e=>Gn(e)),...ae("progress").map(e=>Jn(e,{progress$:ro})),...ae("search").map(e=>mi(e,{index$:Li,keyboard$:eo})),...ae("source").map(e=>bi(e))),bs=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>Nn(e,{viewport$:Oe,target$:Mt,print$:Mi})),...ae("content").map(e=>B("search.highlight")?fi(e,{index$:Li,location$:Wt}):S),...ae("header-title").map(e=>Yn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Oi,()=>Xr(e,{viewport$:Oe,header$:rt,main$:jt})):Nr(hr,()=>Xr(e,{viewport$:Oe,header$:rt,main$:jt}))),...ae("tabs").map(e=>vi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:jt,target$:Mt})),...ae("top").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:jt,target$:Mt})))),_i=ot.pipe(v(()=>bs),Re(hs),G(1));_i.subscribe();window.document$=ot;window.location$=Wt;window.target$=Mt;window.keyboard$=eo;window.viewport$=Oe;window.tablet$=hr;window.screen$=Oi;window.print$=Mi;window.alert$=to;window.progress$=ro;window.component$=_i;})();
+//# sourceMappingURL=bundle.af256bd8.min.js.map
+
diff --git a/v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js.map b/v10.0.X/assets/javascripts/bundle.af256bd8.min.js.map
similarity index 66%
rename from v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js.map
rename to v10.0.X/assets/javascripts/bundle.af256bd8.min.js.map
index 82635852aef..0501d11727f 100644
--- a/v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js.map
+++ b/v10.0.X/assets/javascripts/bundle.af256bd8.min.js.map
@@ -1,7 +1,7 @@
 {
   "version": 3,
-  "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", 
"node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", 
"node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", 
"src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"],
-  "sourcesContent": ["(function (global, factory) {\n  typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n  typeof define === 'function' && define.amd ? define(factory) :\n  (factory());\n}(this, (function () { 'use strict';\n\n  /**\n   * Applies the :focus-visible polyfill at the given scope.\n   * A scope in this case is either the top-level Document or a Shadow Root.\n   *\n   * @param {(Document|ShadowRoot)} scope\n   * @see https://github.com/WICG/focus-visible\n   */\n  function applyFocusVisiblePolyfill(scope) {\n    var hadKeyboardEvent = true;\n    var hadFocusVisibleRecently = false;\n    var hadFocusVisibleRecentlyTimeout = null;\n\n    var inputTypesAllowlist = {\n      text: true,\n      search: true,\n      url: true,\n      tel: true,\n      email: true,\n      password: true,\n      number: true,\n      date: true,\n      month: true,\n      week: true,\n      time: true,\n      datetime: true,\n      'datetime-local': true\n    };\n\n    /**\n     * Helper function for legacy browsers and iframes which sometimes focus\n     * elements like document, body, and non-interactive SVG.\n     * @param {Element} el\n     */\n    function isValidFocusTarget(el) {\n      if (\n        el &&\n        el !== document &&\n        el.nodeName !== 'HTML' &&\n        el.nodeName !== 'BODY' &&\n        'classList' in el &&\n        'contains' in el.classList\n      ) {\n        return true;\n      }\n      return false;\n    }\n\n    /**\n     * Computes whether the given element should automatically trigger the\n     * `focus-visible` class being added, i.e. whether it should always match\n     * `:focus-visible` when focused.\n     * @param {Element} el\n     * @return {boolean}\n     */\n    function focusTriggersKeyboardModality(el) {\n      var type = el.type;\n      var tagName = el.tagName;\n\n      if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n        return true;\n      }\n\n      if (tagName === 'TEXTAREA' && !el.readOnly) {\n        return true;\n      }\n\n      if (el.isContentEditable) {\n        return true;\n      }\n\n      return false;\n    }\n\n    /**\n     * Add the `focus-visible` class to the given element if it was not added by\n     * the author.\n     * @param {Element} el\n     */\n    function addFocusVisibleClass(el) {\n      if (el.classList.contains('focus-visible')) {\n        return;\n      }\n      el.classList.add('focus-visible');\n      el.setAttribute('data-focus-visible-added', '');\n    }\n\n    /**\n     * Remove the `focus-visible` class from the given element if it was not\n     * originally added by the author.\n     * @param {Element} el\n     */\n    function removeFocusVisibleClass(el) {\n      if (!el.hasAttribute('data-focus-visible-added')) {\n        return;\n      }\n      el.classList.remove('focus-visible');\n      el.removeAttribute('data-focus-visible-added');\n    }\n\n    /**\n     * If the most recent user interaction was via the keyboard;\n     * and the key press did not include a meta, alt/option, or control key;\n     * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n     * Apply `focus-visible` to any current active element and keep track\n     * of our keyboard modality state with `hadKeyboardEvent`.\n     * @param {KeyboardEvent} e\n     */\n    function onKeyDown(e) {\n      if (e.metaKey || e.altKey || e.ctrlKey) {\n        return;\n      }\n\n      if (isValidFocusTarget(scope.activeElement)) {\n        addFocusVisibleClass(scope.activeElement);\n      }\n\n      hadKeyboardEvent = true;\n    }\n\n    /**\n     * If at any point a user clicks with a pointing device, ensure that we change\n     * the modality away from keyboard.\n     * This avoids the situation where a user presses a key on an already focused\n     * element, and then clicks on a different element, focusing it with a\n     * pointing device, while we still think we're in keyboard modality.\n     * @param {Event} e\n     */\n    function onPointerDown(e) {\n      hadKeyboardEvent = false;\n    }\n\n    /**\n     * On `focus`, add the `focus-visible` class to the target if:\n     * - the target received focus as a result of keyboard navigation, or\n     * - the event target is an element that will likely require interaction\n     *   via the keyboard (e.g. a text box)\n     * @param {Event} e\n     */\n    function onFocus(e) {\n      // Prevent IE from focusing the document or HTML element.\n      if (!isValidFocusTarget(e.target)) {\n        return;\n      }\n\n      if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n        addFocusVisibleClass(e.target);\n      }\n    }\n\n    /**\n     * On `blur`, remove the `focus-visible` class from the target.\n     * @param {Event} e\n     */\n    function onBlur(e) {\n      if (!isValidFocusTarget(e.target)) {\n        return;\n      }\n\n      if (\n        e.target.classList.contains('focus-visible') ||\n        e.target.hasAttribute('data-focus-visible-added')\n      ) {\n        // To detect a tab/window switch, we look for a blur event followed\n        // rapidly by a visibility change.\n        // If we don't see a visibility change within 100ms, it's probably a\n        // regular focus change.\n        hadFocusVisibleRecently = true;\n        window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n        hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n          hadFocusVisibleRecently = false;\n        }, 100);\n        removeFocusVisibleClass(e.target);\n      }\n    }\n\n    /**\n     * If the user changes tabs, keep track of whether or not the previously\n     * focused element had .focus-visible.\n     * @param {Event} e\n     */\n    function onVisibilityChange(e) {\n      if (document.visibilityState === 'hidden') {\n        // If the tab becomes active again, the browser will handle calling focus\n        // on the element (Safari actually calls it twice).\n        // If this tab change caused a blur on an element with focus-visible,\n        // re-apply the class when the user switches back to the tab.\n        if (hadFocusVisibleRecently) {\n          hadKeyboardEvent = true;\n        }\n        addInitialPointerMoveListeners();\n      }\n    }\n\n    /**\n     * Add a group of listeners to detect usage of any pointing devices.\n     * These listeners will be added when the polyfill first loads, and anytime\n     * the window is blurred, so that they are active when the window regains\n     * focus.\n     */\n    function addInitialPointerMoveListeners() {\n      document.addEventListener('mousemove', onInitialPointerMove);\n      
document.addEventListener('mousedown', onInitialPointerMove);\n      document.addEventListener('mouseup', onInitialPointerMove);\n      document.addEventListener('pointermove', onInitialPointerMove);\n      document.addEventListener('pointerdown', onInitialPointerMove);\n      document.addEventListener('pointerup', onInitialPointerMove);\n      document.addEventListener('touchmove', onInitialPointerMove);\n      document.addEventListener('touchstart', onInitialPointerMove);\n      document.addEventListener('touchend', onInitialPointerMove);\n    }\n\n    function removeInitialPointerMoveListeners() {\n      document.removeEventListener('mousemove', onInitialPointerMove);\n      document.removeEventListener('mousedown', onInitialPointerMove);\n      document.removeEventListener('mouseup', onInitialPointerMove);\n      document.removeEventListener('pointermove', onInitialPointerMove);\n      document.removeEventListener('pointerdown', onInitialPointerMove);\n      document.removeEventListener('pointerup', onInitialPointerMove);\n      document.removeEventListener('touchmove', onInitialPointerMove);\n      document.removeEventListener('touchstart', onInitialPointerMove);\n      document.removeEventListener('touchend', onInitialPointerMove);\n    }\n\n    /**\n     * When the polfyill first loads, assume the user is in keyboard modality.\n     * If any event is received from a pointing device (e.g. mouse, pointer,\n     * touch), turn off keyboard modality.\n     * This accounts for situations where focus enters the page from the URL bar.\n     * @param {Event} e\n     */\n    function onInitialPointerMove(e) {\n      // Work around a Safari quirk that fires a mousemove on <html> whenever the\n      // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n      if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n        return;\n      }\n\n      hadKeyboardEvent = false;\n      removeInitialPointerMoveListeners();\n    }\n\n    // For some kinds of state, we are interested in changes at the global scope\n    // only. For example, global pointer input, global key presses and global\n    // visibility change should affect the state at every scope:\n    document.addEventListener('keydown', onKeyDown, true);\n    document.addEventListener('mousedown', onPointerDown, true);\n    document.addEventListener('pointerdown', onPointerDown, true);\n    document.addEventListener('touchstart', onPointerDown, true);\n    document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n    addInitialPointerMoveListeners();\n\n    // For focus and blur, we specifically care about state changes in the local\n    // scope. This is because focus / blur events that originate from within a\n    // shadow root are not re-dispatched from the host element if it was already\n    // the active element in its own scope:\n    scope.addEventListener('focus', onFocus, true);\n    scope.addEventListener('blur', onBlur, true);\n\n    // We detect that a node is a ShadowRoot by ensuring that it is a\n    // DocumentFragment and also has a host property. This check covers native\n    // implementation and polyfill implementation transparently. If we only cared\n    // about the native implementation, we could just check if the scope was\n    // an instance of a ShadowRoot.\n    if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n      // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n      // have a root element to add a class to. 
So, we add this attribute to the\n      // host element instead:\n      scope.host.setAttribute('data-js-focus-visible', '');\n    } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n      document.documentElement.classList.add('js-focus-visible');\n      document.documentElement.setAttribute('data-js-focus-visible', '');\n    }\n  }\n\n  // It is important to wrap all references to global window and document in\n  // these checks to support server-side rendering use cases\n  // @see https://github.com/WICG/focus-visible/issues/199\n  if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n    // Make the polyfill helper globally available. This can be used as a signal\n    // to interested libraries that wish to coordinate with the polyfill for e.g.,\n    // applying the polyfill to a shadow root:\n    window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n    // Notify interested libraries of the polyfill's presence, in case the\n    // polyfill was loaded lazily:\n    var event;\n\n    try {\n      event = new CustomEvent('focus-visible-polyfill-ready');\n    } catch (error) {\n      // IE11 does not support using CustomEvent as a constructor directly:\n      event = document.createEvent('CustomEvent');\n      event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n    }\n\n    window.dispatchEvent(event);\n  }\n\n  if (typeof document !== 'undefined') {\n    // Apply the polyfill to the global document, so that no JavaScript\n    // coordination is required to use the polyfill in the top-level document:\n    applyFocusVisiblePolyfill(document);\n  }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n  \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n  try {\n    return document.execCommand(type);\n  } catch (err) {\n    return false;\n  }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n  var selectedText = select_default()(target);\n  command('cut');\n  return 
selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n  var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n  var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n  fakeElement.style.fontSize = '12pt'; // Reset box model\n\n  fakeElement.style.border = '0';\n  fakeElement.style.padding = '0';\n  fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n  fakeElement.style.position = 'absolute';\n  fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n  var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n  fakeElement.style.top = \"\".concat(yPosition, \"px\");\n  fakeElement.setAttribute('readonly', '');\n  fakeElement.value = value;\n  return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n  var fakeElement = createFakeElement(value);\n  options.container.appendChild(fakeElement);\n  var selectedText = select_default()(fakeElement);\n  command('copy');\n  fakeElement.remove();\n  return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n  var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n    container: document.body\n  };\n  var selectedText = '';\n\n  if (typeof target === 'string') {\n    selectedText = fakeCopyAction(target, options);\n  } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n    // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n    selectedText = fakeCopyAction(target.value, options);\n  } else {\n    selectedText = select_default()(target);\n    command('copy');\n  }\n\n  return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n  var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n  // Defines base properties passed from constructor.\n  var _options$action = options.action,\n      action = _options$action === void 0 ? 
'copy' : _options$action,\n      container = options.container,\n      target = options.target,\n      text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n  if (action !== 'copy' && action !== 'cut') {\n    throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n  } // Sets the `target` property using an element that will be have its content copied.\n\n\n  if (target !== undefined) {\n    if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n      if (action === 'copy' && target.hasAttribute('disabled')) {\n        throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n      }\n\n      if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n        throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n      }\n    } else {\n      throw new Error('Invalid \"target\" value, use a valid Element');\n    }\n  } // Define selection strategy based on `text` property.\n\n\n  if (text) {\n    return actions_copy(text, {\n      container: container\n    });\n  } // Defines which selection strategy based on `target` property.\n\n\n  if (target) {\n    return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n      container: container\n    });\n  }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n  var attribute = \"data-clipboard-\".concat(suffix);\n\n  if (!element.hasAttribute(attribute)) {\n    return;\n  }\n\n  return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n  _inherits(Clipboard, _Emitter);\n\n  var _super = _createSuper(Clipboard);\n\n  /**\n   * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n   * @param {Object} options\n   */\n  function Clipboard(trigger, options) {\n    var _this;\n\n    _classCallCheck(this, Clipboard);\n\n    _this = _super.call(this);\n\n    _this.resolveOptions(options);\n\n    _this.listenClick(trigger);\n\n    return _this;\n  }\n  /**\n   * Defines if attributes would be resolved using internal setter functions\n   * or custom functions that were passed in the constructor.\n   * @param {Object} options\n   */\n\n\n  _createClass(Clipboard, [{\n    key: \"resolveOptions\",\n    value: function resolveOptions() {\n      var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n      this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n      this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n      this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n      this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n    }\n    /**\n     * Adds a click event listener to the passed trigger.\n     * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n     */\n\n  }, {\n    key: \"listenClick\",\n    value: function listenClick(trigger) {\n      var _this2 = this;\n\n      this.listener = listen_default()(trigger, 'click', function (e) {\n        return _this2.onClick(e);\n      });\n    }\n    /**\n     * Defines a new `ClipboardAction` on each click event.\n     * @param {Event} e\n     */\n\n  }, {\n    key: \"onClick\",\n    value: function onClick(e) {\n      var trigger = e.delegateTarget || e.currentTarget;\n      var action = this.action(trigger) || 'copy';\n      var text = actions_default({\n        action: action,\n        container: this.container,\n        target: this.target(trigger),\n        text: this.text(trigger)\n      }); // Fires an event based on the copy operation result.\n\n      this.emit(text ? 
'success' : 'error', {\n        action: action,\n        text: text,\n        trigger: trigger,\n        clearSelection: function clearSelection() {\n          if (trigger) {\n            trigger.focus();\n          }\n\n          window.getSelection().removeAllRanges();\n        }\n      });\n    }\n    /**\n     * Default `action` lookup function.\n     * @param {Element} trigger\n     */\n\n  }, {\n    key: \"defaultAction\",\n    value: function defaultAction(trigger) {\n      return getAttributeValue('action', trigger);\n    }\n    /**\n     * Default `target` lookup function.\n     * @param {Element} trigger\n     */\n\n  }, {\n    key: \"defaultTarget\",\n    value: function defaultTarget(trigger) {\n      var selector = getAttributeValue('target', trigger);\n\n      if (selector) {\n        return document.querySelector(selector);\n      }\n    }\n    /**\n     * Allow fire programmatically a copy action\n     * @param {String|HTMLElement} target\n     * @param {Object} options\n     * @returns Text copied.\n     */\n\n  }, {\n    key: \"defaultText\",\n\n    /**\n     * Default `text` lookup function.\n     * @param {Element} trigger\n     */\n    value: function defaultText(trigger) {\n      return getAttributeValue('text', trigger);\n    }\n    /**\n     * Destroy lifecycle.\n     */\n\n  }, {\n    key: \"destroy\",\n    value: function destroy() {\n      this.listener.destroy();\n    }\n  }], [{\n    key: \"copy\",\n    value: function copy(target) {\n      var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n        container: document.body\n      };\n      return actions_copy(target, options);\n    }\n    /**\n     * Allow fire programmatically a cut action\n     * @param {String|HTMLElement} target\n     * @returns Text cutted.\n     */\n\n  }, {\n    key: \"cut\",\n    value: function cut(target) {\n      return actions_cut(target);\n    }\n    /**\n     * Returns the support of the given action, or all actions if no action is\n     * given.\n     * @param {String} [action]\n     */\n\n  }, {\n    key: \"isSupported\",\n    value: function isSupported() {\n      var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n      var actions = typeof action === 'string' ? 
[action] : action;\n      var support = !!document.queryCommandSupported;\n      actions.forEach(function (action) {\n        support = support && !!document.queryCommandSupported(action);\n      });\n      return support;\n    }\n  }]);\n\n  return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n    var proto = Element.prototype;\n\n    proto.matches = proto.matchesSelector ||\n                    proto.mozMatchesSelector ||\n                    proto.msMatchesSelector ||\n                    proto.oMatchesSelector ||\n                    proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n    while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n        if (typeof element.matches === 'function' &&\n            element.matches(selector)) {\n          return element;\n        }\n        element = element.parentNode;\n    }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n    var listenerFn = listener.apply(this, arguments);\n\n    element.addEventListener(type, listenerFn, useCapture);\n\n    return {\n        destroy: function() {\n            element.removeEventListener(type, listenerFn, useCapture);\n        }\n    }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n    // Handle the regular Element usage\n    if (typeof elements.addEventListener === 'function') {\n        return _delegate.apply(null, arguments);\n    }\n\n    // Handle Element-less usage, it defaults to global delegation\n    if (typeof type === 'function') {\n        // Use `document` as the first parameter, then apply arguments\n        // This is a short way to .unshift `arguments` without running into deoptimizations\n        return _delegate.bind(null, document).apply(null, arguments);\n    }\n\n    // Handle Selector-based usage\n    if (typeof elements === 'string') {\n        elements = document.querySelectorAll(elements);\n    }\n\n    // Handle Array-like based usage\n    return Array.prototype.map.call(elements, function (element) {\n        return _delegate(element, selector, type, callback, useCapture);\n    });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n    return function(e) {\n        e.delegateTarget = closest(e.target, selector);\n\n        if (e.delegateTarget) {\n            callback.call(element, 
e);\n        }\n    }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n    return value !== undefined\n        && value instanceof HTMLElement\n        && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n    var type = Object.prototype.toString.call(value);\n\n    return value !== undefined\n        && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n        && ('length' in value)\n        && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n    return typeof value === 'string'\n        || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n    var type = Object.prototype.toString.call(value);\n\n    return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n    if (!target && !type && !callback) {\n        throw new Error('Missing required arguments');\n    }\n\n    if (!is.string(type)) {\n        throw new TypeError('Second argument must be a String');\n    }\n\n    if (!is.fn(callback)) {\n        throw new TypeError('Third argument must be a Function');\n    }\n\n    if (is.node(target)) {\n        return listenNode(target, type, callback);\n    }\n    else if (is.nodeList(target)) {\n        return listenNodeList(target, type, callback);\n    }\n    else if (is.string(target)) {\n        return listenSelector(target, type, callback);\n    }\n    else {\n        throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n    }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n    node.addEventListener(type, callback);\n\n    return {\n        destroy: function() {\n            node.removeEventListener(type, callback);\n        }\n    }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n    Array.prototype.forEach.call(nodeList, function(node) {\n        node.addEventListener(type, callback);\n    });\n\n    return {\n        destroy: function() {\n            Array.prototype.forEach.call(nodeList, function(node) {\n                node.removeEventListener(type, callback);\n            });\n        }\n    }\n}\n\n/**\n * Add an event listener to a selector\n * 
and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n    return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n    var selectedText;\n\n    if (element.nodeName === 'SELECT') {\n        element.focus();\n\n        selectedText = element.value;\n    }\n    else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n        var isReadOnly = element.hasAttribute('readonly');\n\n        if (!isReadOnly) {\n            element.setAttribute('readonly', '');\n        }\n\n        element.select();\n        element.setSelectionRange(0, element.value.length);\n\n        if (!isReadOnly) {\n            element.removeAttribute('readonly');\n        }\n\n        selectedText = element.value;\n    }\n    else {\n        if (element.hasAttribute('contenteditable')) {\n            element.focus();\n        }\n\n        var selection = window.getSelection();\n        var range = document.createRange();\n\n        range.selectNodeContents(element);\n        selection.removeAllRanges();\n        selection.addRange(range);\n\n        selectedText = selection.toString();\n    }\n\n    return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n  // Keep this empty so it's easier to inherit from\n  // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n  on: function (name, callback, ctx) {\n    var e = this.e || (this.e = {});\n\n    (e[name] || (e[name] = [])).push({\n      fn: callback,\n      ctx: ctx\n    });\n\n    return this;\n  },\n\n  once: function (name, callback, ctx) {\n    var self = this;\n    function listener () {\n      self.off(name, listener);\n      callback.apply(ctx, arguments);\n    };\n\n    listener._ = callback\n    return this.on(name, listener, ctx);\n  },\n\n  emit: function (name) {\n    var data = [].slice.call(arguments, 1);\n    var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n    var i = 0;\n    var len = evtArr.length;\n\n    for (i; i < len; i++) {\n      evtArr[i].fn.apply(evtArr[i].ctx, data);\n    }\n\n    return this;\n  },\n\n  off: function (name, callback) {\n    var e = this.e || (this.e = {});\n    var evts = e[name];\n    var liveEvents = [];\n\n    if (evts && callback) {\n      for (var i = 0, len = evts.length; i < len; i++) {\n        if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n          liveEvents.push(evts[i]);\n      }\n    }\n\n    // Remove event from queue to prevent memory leak\n    // Suggested by https://github.com/lazd\n    // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n    (liveEvents.length)\n      ? 
e[name] = liveEvents\n      : delete e[name];\n\n    return this;\n  }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param  {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n  var str = '' + string;\n  var match = matchHtmlRegExp.exec(str);\n\n  if (!match) {\n    return str;\n  }\n\n  var escape;\n  var html = 
'';\n  var index = 0;\n  var lastIndex = 0;\n\n  for (index = match.index; index < str.length; index++) {\n    switch (str.charCodeAt(index)) {\n      case 34: // \"\n        escape = '&quot;';\n        break;\n      case 38: // &\n        escape = '&amp;';\n        break;\n      case 39: // '\n        escape = '&#39;';\n        break;\n      case 60: // <\n        escape = '&lt;';\n        break;\n      case 62: // >\n        escape = '&gt;';\n        break;\n      default:\n        continue;\n    }\n\n    if (lastIndex !== index) {\n      html += str.substring(lastIndex, index);\n    }\n\n    lastIndex = index + 1;\n    html += escape;\n  }\n\n  return lastIndex !== index\n    ? html + str.substring(lastIndex, index)\n    : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n  EMPTY,\n  NEVER,\n  Observable,\n  Subject,\n  defer,\n  delay,\n  filter,\n  map,\n  merge,\n  mergeWith,\n  shareReplay,\n  switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n  at,\n  getActiveElement,\n  getOptionalElement,\n  requestJSON,\n  setLocation,\n  setToggle,\n  watchDocument,\n  watchKeyboard,\n  watchLocation,\n  watchLocationTarget,\n  watchMedia,\n  watchPrint,\n  watchScript,\n  watchViewport\n} from \"./browser\"\nimport {\n  getComponentElement,\n  getComponentElements,\n  mountAnnounce,\n  mountBackToTop,\n  mountConsent,\n  mountContent,\n  mountDialog,\n  mountHeader,\n  mountHeaderTitle,\n  mountPalette,\n  mountProgress,\n  mountSearch,\n  mountSearchHiglight,\n  mountSidebar,\n  mountSource,\n  mountTableOfContents,\n  mountTabs,\n  watchHeader,\n  watchMain\n} from \"./components\"\nimport {\n  SearchIndex,\n  setupClipboardJS,\n  setupInstantNavigation,\n  setupVersionSelector\n} from \"./integrations\"\nimport {\n  patchEllipsis,\n  patchIndeterminate,\n  patchScrollfix,\n  patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable<SearchIndex> {\n  if (location.protocol === \"file:\") {\n    return watchScript(\n      `${new URL(\"search/search_index.js\", config.base)}`\n    )\n    
  .pipe(\n        // @ts-ignore - @todo fix typings\n        map(() => __index),\n        shareReplay(1)\n      )\n  } else {\n    return requestJSON<SearchIndex>(\n      new URL(\"search/search_index.json\", config.base)\n    )\n  }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$   = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$   = watchMedia(\"(min-width: 960px)\")\nconst screen$   = watchMedia(\"(min-width: 1220px)\")\nconst print$    = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n  ? fetchSearchIndex()\n  : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject<string>()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject<number>()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n  setupInstantNavigation({ location$, viewport$, progress$ })\n    .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n  setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n  .pipe(\n    delay(125)\n  )\n    .subscribe(() => {\n      setToggle(\"drawer\", false)\n      setToggle(\"search\", false)\n    })\n\n/* Set up global keyboard handlers */\nkeyboard$\n  .pipe(\n    filter(({ mode }) => mode === \"global\")\n  )\n    .subscribe(key => {\n      switch (key.type) {\n\n        /* Go to previous page */\n        case \"p\":\n        case \",\":\n          const prev = getOptionalElement<HTMLLinkElement>(\"link[rel=prev]\")\n          if (typeof prev !== \"undefined\")\n            setLocation(prev)\n          break\n\n        /* Go to next page */\n        case \"n\":\n        case \".\":\n          const next = getOptionalElement<HTMLLinkElement>(\"link[rel=next]\")\n          if (typeof next !== \"undefined\")\n            setLocation(next)\n          break\n\n        /* Expand navigation, see https://bit.ly/3ZjG5io */\n        case \"Enter\":\n          const active = getActiveElement()\n          if (active instanceof HTMLLabelElement)\n            active.click()\n      }\n    })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n  .pipe(\n    map(() => getComponentElement(\"main\")),\n    switchMap(el => watchMain(el, { viewport$, header$ })),\n    shareReplay(1)\n  )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n  /* Consent */\n  ...getComponentElements(\"consent\")\n    .map(el => mountConsent(el, { target$ })),\n\n  /* Dialog */\n  ...getComponentElements(\"dialog\")\n    .map(el => mountDialog(el, { alert$ })),\n\n  /* Header */\n  ...getComponentElements(\"header\")\n    
.map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n  /* Color palette */\n  ...getComponentElements(\"palette\")\n    .map(el => mountPalette(el)),\n\n  /* Progress bar */\n  ...getComponentElements(\"progress\")\n    .map(el => mountProgress(el, { progress$ })),\n\n  /* Search */\n  ...getComponentElements(\"search\")\n    .map(el => mountSearch(el, { index$, keyboard$ })),\n\n  /* Repository information */\n  ...getComponentElements(\"source\")\n    .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n  /* Announcement bar */\n  ...getComponentElements(\"announce\")\n    .map(el => mountAnnounce(el)),\n\n  /* Content */\n  ...getComponentElements(\"content\")\n    .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n  /* Search highlighting */\n  ...getComponentElements(\"content\")\n    .map(el => feature(\"search.highlight\")\n      ? mountSearchHiglight(el, { index$, location$ })\n      : EMPTY\n    ),\n\n  /* Header title */\n  ...getComponentElements(\"header-title\")\n    .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n  /* Sidebar */\n  ...getComponentElements(\"sidebar\")\n    .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n      ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n      : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n    ),\n\n  /* Navigation tabs */\n  ...getComponentElements(\"tabs\")\n    .map(el => mountTabs(el, { viewport$, header$ })),\n\n  /* Table of contents */\n  ...getComponentElements(\"toc\")\n    .map(el => mountTableOfContents(el, {\n      viewport$, header$, main$, target$\n    })),\n\n  /* Back-to-top button */\n  ...getComponentElements(\"top\")\n    .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n  .pipe(\n    switchMap(() => content$),\n    mergeWith(control$),\n    shareReplay(1)\n  )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$  = document$          /* Document observable */\nwindow.location$  = location$          /* Location subject */\nwindow.target$    = target$            /* Location target observable */\nwindow.keyboard$  = keyboard$          /* Keyboard observable */\nwindow.viewport$  = viewport$          /* Viewport observable */\nwindow.tablet$    = tablet$            /* Media tablet observable */\nwindow.screen$    = screen$            /* Media screen observable */\nwindow.print$     = print$             /* Media print observable */\nwindow.alert$     = alert$             /* Alert subject */\nwindow.progress$  = progress$          /* Progress indicator subject */\nwindow.component$ = component$         /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n    extendStatics = Object.setPrototypeOf ||\r\n        ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n        function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n    return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n    if (typeof b !== \"function\" && b !== null)\r\n        throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n    extendStatics(d, b);\r\n    function __() { this.constructor = d; }\r\n    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n    __assign = Object.assign || function __assign(t) {\r\n        for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n            s = arguments[i];\r\n            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n        }\r\n        return t;\r\n    }\r\n    return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n    var t = {};\r\n    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n        t[p] = s[p];\r\n    if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n                t[p[i]] = s[p[i]];\r\n        }\r\n    return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n    if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n    return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n    return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n    if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n    return new (P || (P = Promise))(function (resolve, reject) {\r\n        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n        function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n        function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n        step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n    });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n    return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n    function verb(n) { return function (v) { return step([n, v]); }; }\r\n    function step(op) {\r\n        if (f) throw new TypeError(\"Generator is already executing.\");\r\n        while (_) try {\r\n            if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n            if (y = 0, t) op = [op[0] & 2, t.value];\r\n            switch (op[0]) {\r\n                case 0: case 1: t = op; break;\r\n                case 4: _.label++; return { value: op[1], done: false };\r\n                case 5: _.label++; y = op[1]; op = [0]; continue;\r\n                case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n                default:\r\n                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n                    if (t[2]) _.ops.pop();\r\n                    _.trys.pop(); continue;\r\n            }\r\n            op = body.call(thisArg, _);\r\n        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n    }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n    if (k2 === undefined) k2 = k;\r\n    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n    if (k2 === undefined) k2 = k;\r\n    o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n    for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n    var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n    if (m) return m.call(o);\r\n    if (o && typeof o.length === \"number\") return {\r\n        next: function () {\r\n            if (o && i >= o.length) o = void 0;\r\n            return { value: o && o[i++], done: !o };\r\n        }\r\n    };\r\n    throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n    var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n    if (!m) return o;\r\n    var i = m.call(o), r, ar = [], e;\r\n    try {\r\n        while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n    }\r\n    catch (error) { e = { error: error }; }\r\n    finally {\r\n        try {\r\n            if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n        }\r\n        finally { if (e) throw e.error; }\r\n    }\r\n    return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n    for (var ar = [], i = 0; i < arguments.length; i++)\r\n        ar = ar.concat(__read(arguments[i]));\r\n    return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n    for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n    for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n        for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n            r[k] = a[j];\r\n    return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n    if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n        if (ar || !(i in from)) {\r\n            if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n            ar[i] = from[i];\r\n        }\r\n    }\r\n    return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n    return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n    if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n    var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n    return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n    function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n    function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n    function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n    function fulfill(value) { resume(\"next\", value); }\r\n    function reject(value) { resume(\"throw\", value); }\r\n    function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n    var i, p;\r\n    return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n    function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n    if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n    var m = o[Symbol.asyncIterator], i;\r\n    return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n    if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n    return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n    Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n    o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n    if (mod && mod.__esModule) return mod;\r\n    var result = {};\r\n    if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n    __setModuleDefault(result, mod);\r\n    return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n    return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n    if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n    if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n    return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n    if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n    if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n    if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n    return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n  return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass<T>(createImpl: (_super: any) => any): T {\n  const _super = (instance: any) => {\n    Error.call(instance);\n    instance.stack = new Error().stack;\n  };\n\n  const ctorFunc = createImpl(_super);\n  ctorFunc.prototype = Object.create(Error.prototype);\n  ctorFunc.prototype.constructor = ctorFunc;\n  return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n  readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n  /**\n   * @deprecated Internal implementation detail. Do not construct error instances.\n   * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n   */\n  new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n  (_super) =>\n    function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n      _super(this);\n      this.message = errors\n        ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n  ')}`\n        : '';\n      this.name = 'UnsubscriptionError';\n      this.errors = errors;\n    }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove<T>(arr: T[] | undefined | null, item: T) {\n  if (arr) {\n    const index = arr.indexOf(item);\n    0 <= index && arr.splice(index, 1);\n  }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n  /** @nocollapse */\n  public static EMPTY = (() => {\n    const empty = new Subscription();\n    empty.closed = true;\n    return empty;\n  })();\n\n  /**\n   * A flag to indicate whether this Subscription has already been unsubscribed.\n   */\n  public closed = false;\n\n  private _parentage: Subscription[] | Subscription | null = null;\n\n  /**\n   * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n   * list occurs in the {@link #add} and {@link #remove} methods.\n   */\n  private _finalizers: Exclude<TeardownLogic, void>[] | null = null;\n\n  /**\n   * @param initialTeardown A function executed first as part of the finalization\n   * process that is kicked off when {@link #unsubscribe} is called.\n   */\n  constructor(private initialTeardown?: () => void) {}\n\n  /**\n   * Disposes the resources held by the subscription. 
May, for instance, cancel\n   * an ongoing Observable execution or cancel any other type of work that\n   * started when the Subscription was created.\n   * @return {void}\n   */\n  unsubscribe(): void {\n    let errors: any[] | undefined;\n\n    if (!this.closed) {\n      this.closed = true;\n\n      // Remove this from it's parents.\n      const { _parentage } = this;\n      if (_parentage) {\n        this._parentage = null;\n        if (Array.isArray(_parentage)) {\n          for (const parent of _parentage) {\n            parent.remove(this);\n          }\n        } else {\n          _parentage.remove(this);\n        }\n      }\n\n      const { initialTeardown: initialFinalizer } = this;\n      if (isFunction(initialFinalizer)) {\n        try {\n          initialFinalizer();\n        } catch (e) {\n          errors = e instanceof UnsubscriptionError ? e.errors : [e];\n        }\n      }\n\n      const { _finalizers } = this;\n      if (_finalizers) {\n        this._finalizers = null;\n        for (const finalizer of _finalizers) {\n          try {\n            execFinalizer(finalizer);\n          } catch (err) {\n            errors = errors ?? [];\n            if (err instanceof UnsubscriptionError) {\n              errors = [...errors, ...err.errors];\n            } else {\n              errors.push(err);\n            }\n          }\n        }\n      }\n\n      if (errors) {\n        throw new UnsubscriptionError(errors);\n      }\n    }\n  }\n\n  /**\n   * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n   * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n   * because it has already been unsubscribed, then whatever finalizer is passed to it\n   * will automatically be executed (unless the finalizer itself is also a closed subscription).\n   *\n   * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n   * subscription to a any subscription will result in no operation. (A noop).\n   *\n   * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n   * operation at all. (A noop).\n   *\n   * `Subscription` instances that are added to this instance will automatically remove themselves\n   * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n   * will need to be removed manually with {@link #remove}\n   *\n   * @param teardown The finalization logic to add to this subscription.\n   */\n  add(teardown: TeardownLogic): void {\n    // Only add the finalizer if it's not undefined\n    // and don't add a subscription to itself.\n    if (teardown && teardown !== this) {\n      if (this.closed) {\n        // If this subscription is already closed,\n        // execute whatever finalizer is handed to it automatically.\n        execFinalizer(teardown);\n      } else {\n        if (teardown instanceof Subscription) {\n          // We don't add closed subscriptions, and we don't add the same subscription\n          // twice. Subscription unsubscribe is idempotent.\n          if (teardown.closed || teardown._hasParent(this)) {\n            return;\n          }\n          teardown._addParent(this);\n        }\n        (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n      }\n    }\n  }\n\n  /**\n   * Checks to see if a this subscription already has a particular parent.\n   * This will signal that this subscription has already been added to the parent in question.\n   * @param parent the parent to check for\n   */\n  private _hasParent(parent: Subscription) {\n    const { _parentage } = this;\n    return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n  }\n\n  /**\n   * Adds a parent to this subscription so it can be removed from the parent if it\n   * unsubscribes on it's own.\n   *\n   * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n   * @param parent The parent subscription to add\n   */\n  private _addParent(parent: Subscription) {\n    const { _parentage } = this;\n    this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n  }\n\n  /**\n   * Called on a child when it is removed via {@link #remove}.\n   * @param parent The parent to remove\n   */\n  private _removeParent(parent: Subscription) {\n    const { _parentage } = this;\n    if (_parentage === parent) {\n      this._parentage = null;\n    } else if (Array.isArray(_parentage)) {\n      arrRemove(_parentage, parent);\n    }\n  }\n\n  /**\n   * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n   *\n   * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n   * from every other `Subscription` they have been added to. This means that using the `remove` method\n   * is not a common thing and should be used thoughtfully.\n   *\n   * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n   * more than once, you will need to call `remove` the same number of times to remove all instances.\n   *\n   * All finalizer instances are removed to free up memory upon unsubscription.\n   *\n   * @param teardown The finalizer to remove from this subscription\n   */\n  remove(teardown: Exclude<TeardownLogic, void>): void {\n    const { _finalizers } = this;\n    _finalizers && arrRemove(_finalizers, teardown);\n\n    if (teardown instanceof Subscription) {\n      teardown._removeParent(this);\n    }\n  }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n  return (\n    value instanceof Subscription ||\n    (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n  );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n  if (isFunction(finalizer)) {\n    finalizer();\n  } else {\n    finalizer.unsubscribe();\n  }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n  onUnhandledError: null,\n  onStoppedNotification: null,\n  Promise: undefined,\n  useDeprecatedSynchronousErrorHandling: false,\n  useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n  /**\n   * A registration point for unhandled errors from RxJS. 
These are errors that\n   * cannot were not handled by consuming code in the usual subscription path. For\n   * example, if you have this configured, and you subscribe to an observable without\n   * providing an error handler, errors from that subscription will end up here. This\n   * will _always_ be called asynchronously on another job in the runtime. This is because\n   * we do not want errors thrown in this user-configured handler to interfere with the\n   * behavior of the library.\n   */\n  onUnhandledError: ((err: any) => void) | null;\n\n  /**\n   * A registration point for notifications that cannot be sent to subscribers because they\n   * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n   * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n   * might want a different behavior. For example, with sources that attempt to report errors\n   * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n   * This will _always_ be called asynchronously on another job in the runtime. This is because\n   * we do not want errors thrown in this user-configured handler to interfere with the\n   * behavior of the library.\n   */\n  onStoppedNotification: ((notification: ObservableNotification<any>, subscriber: Subscriber<any>) => void) | null;\n\n  /**\n   * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n   * methods.\n   *\n   * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n   * Promise constructor. If you need a Promise implementation other than native promises,\n   * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n   */\n  Promise?: PromiseConstructorLike;\n\n  /**\n   * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n   * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n   * call in a try/catch block. It also enables producer interference, a nasty bug\n   * where a multicast can be broken for all observers by a downstream consumer with\n   * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n   * FOR MIGRATION REASONS.\n   *\n   * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n   * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n   * behaviors described above. Will be removed in v8.\n   */\n  useDeprecatedSynchronousErrorHandling: boolean;\n\n  /**\n   * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n   * `unsubscribe()` via `this` context in `next` functions created in observers passed\n   * to `subscribe`.\n   *\n   * This is being removed because the performance was severely problematic, and it could also cause\n   * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n   * their `this` context overwritten.\n   *\n   * @deprecated As of version 8, RxJS will no longer support altering the\n   * context of next functions provided as part of an observer to Subscribe. Instead,\n   * you will have access to a subscription or a signal or token that will allow you to do things like\n   * unsubscribe and test closed status. 
Will be removed in v8.\n   */\n  useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n  setTimeout: SetTimeoutFunction;\n  clearTimeout: ClearTimeoutFunction;\n  delegate:\n    | {\n        setTimeout: SetTimeoutFunction;\n        clearTimeout: ClearTimeoutFunction;\n      }\n    | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n  // When accessing the delegate, use the variable rather than `this` so that\n  // the functions can be called without being bound to the provider.\n  setTimeout(handler: () => void, timeout?: number, ...args) {\n    const { delegate } = timeoutProvider;\n    if (delegate?.setTimeout) {\n      return delegate.setTimeout(handler, timeout, ...args);\n    }\n    return setTimeout(handler, timeout, ...args);\n  },\n  clearTimeout(handle) {\n    const { delegate } = timeoutProvider;\n    return (delegate?.clearTimeout || clearTimeout)(handle as any);\n  },\n  delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n  timeoutProvider.setTimeout(() => {\n    const { onUnhandledError } = config;\n    if (onUnhandledError) {\n      // Execute the user-configured error handler.\n      onUnhandledError(err);\n    } else {\n      // Throw so it is picked up by the runtime's uncaught error mechanism.\n      throw err;\n    }\n  });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n  return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification<T>(value: T) {\n  return createNotification('N', value, undefined) as NextNotification<T>;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n  return {\n    kind,\n    value,\n    error,\n  };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n  if (config.useDeprecatedSynchronousErrorHandling) {\n    const isRoot = !context;\n    if (isRoot) {\n      context = { errorThrown: false, error: null };\n    }\n    cb();\n    if (isRoot) {\n      const { errorThrown, error } = context!;\n      context = null;\n      if (errorThrown) {\n        throw error;\n      }\n    }\n  } else {\n    // This is the general non-deprecated path for everyone that\n    // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n    cb();\n  }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n  if (config.useDeprecatedSynchronousErrorHandling && context) {\n    context.errorThrown = true;\n    context.error = err;\n  }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber<T>\n */\nexport class Subscriber<T> extends Subscription implements Observer<T> {\n  /**\n   * A static factory for a Subscriber, given a (potentially partial) definition\n   * of an Observer.\n   * @param next The `next` callback of an Observer.\n   * @param error The `error` callback of an\n   * Observer.\n   * @param complete The `complete` callback of an\n   * Observer.\n   * @return A Subscriber wrapping the (partially defined)\n   * Observer represented by the given arguments.\n   * @nocollapse\n   * @deprecated Do not use. Will be removed in v8. 
There is no replacement for this\n   * method, and there is no reason to be creating instances of `Subscriber` directly.\n   * If you have a specific use case, please file an issue.\n   */\n  static create<T>(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber<T> {\n    return new SafeSubscriber(next, error, complete);\n  }\n\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  protected isStopped: boolean = false;\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  protected destination: Subscriber<any> | Observer<any>; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n   */\n  constructor(destination?: Subscriber<any> | Observer<any>) {\n    super();\n    if (destination) {\n      this.destination = destination;\n      // Automatically chain subscriptions together here.\n      // if destination is a Subscription, then it is a Subscriber.\n      if (isSubscription(destination)) {\n        destination.add(this);\n      }\n    } else {\n      this.destination = EMPTY_OBSERVER;\n    }\n  }\n\n  /**\n   * The {@link Observer} callback to receive notifications of type `next` from\n   * the Observable, with a value. The Observable may call this method 0 or more\n   * times.\n   * @param {T} [value] The `next` value.\n   * @return {void}\n   */\n  next(value?: T): void {\n    if (this.isStopped) {\n      handleStoppedNotification(nextNotification(value), this);\n    } else {\n      this._next(value!);\n    }\n  }\n\n  /**\n   * The {@link Observer} callback to receive notifications of type `error` from\n   * the Observable, with an attached `Error`. Notifies the Observer that\n   * the Observable has experienced an error condition.\n   * @param {any} [err] The `error` exception.\n   * @return {void}\n   */\n  error(err?: any): void {\n    if (this.isStopped) {\n      handleStoppedNotification(errorNotification(err), this);\n    } else {\n      this.isStopped = true;\n      this._error(err);\n    }\n  }\n\n  /**\n   * The {@link Observer} callback to receive a valueless notification of type\n   * `complete` from the Observable. Notifies the Observer that the Observable\n   * has finished sending push-based notifications.\n   * @return {void}\n   */\n  complete(): void {\n    if (this.isStopped) {\n      handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n    } else {\n      this.isStopped = true;\n      this._complete();\n    }\n  }\n\n  unsubscribe(): void {\n    if (!this.closed) {\n      this.isStopped = true;\n      super.unsubscribe();\n      this.destination = null!;\n    }\n  }\n\n  protected _next(value: T): void {\n    this.destination.next(value);\n  }\n\n  protected _error(err: any): void {\n    try {\n      this.destination.error(err);\n    } finally {\n      this.unsubscribe();\n    }\n  }\n\n  protected _complete(): void {\n    try {\n      this.destination.complete();\n    } finally {\n      this.unsubscribe();\n    }\n  }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind<Fn extends (...args: any[]) => any>(fn: Fn, thisArg: any): Fn {\n  return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver<T> implements Observer<T> {\n  constructor(private partialObserver: Partial<Observer<T>>) {}\n\n  next(value: T): void {\n    const { partialObserver } = this;\n    if (partialObserver.next) {\n      try {\n        partialObserver.next(value);\n      } catch (error) {\n        handleUnhandledError(error);\n      }\n    }\n  }\n\n  error(err: any): void {\n    const { partialObserver } = this;\n    if (partialObserver.error) {\n      try {\n        partialObserver.error(err);\n      } catch (error) {\n        handleUnhandledError(error);\n      }\n    } else {\n      handleUnhandledError(err);\n    }\n  }\n\n  complete(): void {\n    const { partialObserver } = this;\n    if (partialObserver.complete) {\n      try {\n        partialObserver.complete();\n      } catch (error) {\n        handleUnhandledError(error);\n      }\n    }\n  }\n}\n\nexport class SafeSubscriber<T> extends Subscriber<T> {\n  constructor(\n    observerOrNext?: Partial<Observer<T>> | ((value: T) => void) | null,\n    error?: ((e?: any) => void) | null,\n    complete?: (() => void) | null\n  ) {\n    super();\n\n    let partialObserver: Partial<Observer<T>>;\n    if (isFunction(observerOrNext) || !observerOrNext) {\n      // The first argument is a function, not an observer. The next\n      // two arguments *could* be observers, or they could be empty.\n      partialObserver = {\n        next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n        error: error ?? undefined,\n        complete: complete ?? undefined,\n      };\n    } else {\n      // The first argument is a partial observer.\n      let context: any;\n      if (this && config.useDeprecatedNextContext) {\n        // This is a deprecated path that made `this.unsubscribe()` available in\n        // next handler functions passed to subscribe. This only exists behind a flag\n        // now, as it is *very* slow.\n        context = Object.create(observerOrNext);\n        context.unsubscribe = () => this.unsubscribe();\n        partialObserver = {\n          next: observerOrNext.next && bind(observerOrNext.next, context),\n          error: observerOrNext.error && bind(observerOrNext.error, context),\n          complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n        };\n      } else {\n        // The \"normal\" path. 
Just use the partial observer directly.\n        partialObserver = observerOrNext;\n      }\n    }\n\n    // Wrap the partial observer to ensure it's a full observer, and\n    // make sure proper error handling is accounted for.\n    this.destination = new ConsumerObserver(partialObserver);\n  }\n}\n\nfunction handleUnhandledError(error: any) {\n  if (config.useDeprecatedSynchronousErrorHandling) {\n    captureError(error);\n  } else {\n    // Ideal path, we report this as an unhandled error,\n    // which is thrown on a new call stack.\n    reportUnhandledError(error);\n  }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n  throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification<any>, subscriber: Subscriber<any>) {\n  const { onStoppedNotification } = config;\n  onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly<Observer<any>> & { closed: true } = {\n  closed: true,\n  next: noop,\n  error: defaultErrorHandler,\n  complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `<T>(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n *   map(i => range(i)),\n *   mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n *   next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n *   next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity<T>(x: T): T {\n  return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe<T, A>(fn1: UnaryFunction<T, A>): UnaryFunction<T, A>;\nexport function pipe<T, A, B>(fn1: UnaryFunction<T, A>, fn2: UnaryFunction<A, B>): UnaryFunction<T, B>;\nexport function pipe<T, A, B, C>(fn1: UnaryFunction<T, A>, fn2: UnaryFunction<A, B>, fn3: UnaryFunction<B, C>): UnaryFunction<T, C>;\nexport function pipe<T, A, B, C, D>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>\n): UnaryFunction<T, D>;\nexport function pipe<T, A, B, C, D, E>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>\n): UnaryFunction<T, E>;\nexport function pipe<T, A, B, C, D, E, F>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>\n): UnaryFunction<T, F>;\nexport function pipe<T, A, B, C, D, E, F, G>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>\n): UnaryFunction<T, G>;\nexport function pipe<T, A, B, C, D, E, F, G, H>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>,\n  fn8: UnaryFunction<G, H>\n): UnaryFunction<T, H>;\nexport function pipe<T, A, B, C, D, E, F, G, H, I>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>,\n  fn8: UnaryFunction<G, H>,\n  fn9: UnaryFunction<H, I>\n): UnaryFunction<T, I>;\nexport function pipe<T, A, B, C, D, E, F, G, H, I>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>,\n  fn8: UnaryFunction<G, H>,\n  fn9: UnaryFunction<H, I>,\n  ...fns: UnaryFunction<any, any>[]\n): UnaryFunction<T, unknown>;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on.  
\n */\nexport function pipe(...fns: Array<UnaryFunction<any, any>>): UnaryFunction<any, any> {\n  return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray<T, R>(fns: Array<UnaryFunction<T, R>>): UnaryFunction<T, R> {\n  if (fns.length === 0) {\n    return identity as UnaryFunction<any, any>;\n  }\n\n  if (fns.length === 1) {\n    return fns[0];\n  }\n\n  return function piped(input: T): R {\n    return fns.reduce((prev: any, fn: UnaryFunction<T, R>) => fn(prev), input as any);\n  };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable<T>\n */\nexport class Observable<T> implements Subscribable<T> {\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   */\n  source: Observable<any> | undefined;\n\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   */\n  operator: Operator<any, T> | undefined;\n\n  /**\n   * @constructor\n   * @param {Function} subscribe the function that is called when the Observable is\n   * initially subscribed to. This function is given a Subscriber, to which new values\n   * can be `next`ed, or an `error` method can be called to raise an error, or\n   * `complete` can be called to notify of a successful completion.\n   */\n  constructor(subscribe?: (this: Observable<T>, subscriber: Subscriber<T>) => TeardownLogic) {\n    if (subscribe) {\n      this._subscribe = subscribe;\n    }\n  }\n\n  // HACK: Since TypeScript inherits static properties too, we have to\n  // fight against TypeScript here so Subject can have a different static create signature\n  /**\n   * Creates a new Observable by calling the Observable constructor\n   * @owner Observable\n   * @method create\n   * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n   * @return {Observable} a new observable\n   * @nocollapse\n   * @deprecated Use `new Observable()` instead. Will be removed in v8.\n   */\n  static create: (...args: any[]) => any = <T>(subscribe?: (subscriber: Subscriber<T>) => TeardownLogic) => {\n    return new Observable<T>(subscribe);\n  };\n\n  /**\n   * Creates a new Observable, with this Observable instance as the source, and the passed\n   * operator defined as the new observable's operator.\n   * @method lift\n   * @param operator the operator defining the operation to take on the observable\n   * @return a new observable with the Operator applied\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   * If you have implemented an operator using `lift`, it is recommended that you create an\n   * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n   * scratch\" section here: https://rxjs.dev/guide/operators\n   */\n  lift<R>(operator?: Operator<T, R>): Observable<R> {\n    const observable = new Observable<R>();\n    observable.source = this;\n    observable.operator = operator;\n    return observable;\n  }\n\n  subscribe(observerOrNext?: Partial<Observer<T>> | ((value: T) => void)): Subscription;\n  /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n  subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n  /**\n   * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n   *\n   * <span class=\"informal\">Use it when you have all these Observables, but still nothing is happening.</span>\n   *\n   * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n   * might be for example a function that you passed to Observable's constructor, but most of the time it is\n   * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n   * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n   * the thought.\n   *\n   * Apart from starting the execution of an Observable, this method allows you to listen for values\n   * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n   * of the following ways.\n   *\n   * The first way is creating an object that implements {@link Observer} interface. It should have methods\n   * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n   * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n   * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n   * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n   * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n   * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n   * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n   * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n   * an `error` method to avoid missing thrown errors.\n   *\n   * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n   * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n   * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n   * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n   * since `subscribe` recognizes these functions by where they were placed in function call. 
When it comes\n   * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n   *\n   * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n   * and you also handled emissions internally by using operators (e.g. using `tap`).\n   *\n   * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n   * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n   * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n   * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n   *\n   * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n   * It is an Observable itself that decides when these functions will be called. For example {@link of}\n   * by default emits all its values synchronously. Always check documentation for how given Observable\n   * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n   *\n   * #### Examples\n   *\n   * Subscribe with an {@link guide/observer Observer}\n   *\n   * ```ts\n   * import { of } from 'rxjs';\n   *\n   * const sumObserver = {\n   *   sum: 0,\n   *   next(value) {\n   *     console.log('Adding: ' + value);\n   *     this.sum = this.sum + value;\n   *   },\n   *   error() {\n   *     // We actually could just remove this method,\n   *     // since we do not really care about errors right now.\n   *   },\n   *   complete() {\n   *     console.log('Sum equals: ' + this.sum);\n   *   }\n   * };\n   *\n   * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n   *   .subscribe(sumObserver);\n   *\n   * // Logs:\n   * // 'Adding: 1'\n   * // 'Adding: 2'\n   * // 'Adding: 3'\n   * // 'Sum equals: 6'\n   * ```\n   *\n   * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n   *\n   * ```ts\n   * import { of } from 'rxjs'\n   *\n   * let sum = 0;\n   *\n   * of(1, 2, 3).subscribe(\n   *   value => {\n   *     console.log('Adding: ' + value);\n   *     sum = sum + value;\n   *   },\n   *   undefined,\n   *   () => console.log('Sum equals: ' + sum)\n   * );\n   *\n   * // Logs:\n   * // 'Adding: 1'\n   * // 'Adding: 2'\n   * // 'Adding: 3'\n   * // 'Sum equals: 6'\n   * ```\n   *\n   * Cancel a subscription\n   *\n   * ```ts\n   * import { interval } from 'rxjs';\n   *\n   * const subscription = interval(1000).subscribe({\n   *   next(num) {\n   *     console.log(num)\n   *   },\n   *   complete() {\n   *     // Will not be called, even when cancelling subscription.\n   *     console.log('completed!');\n   *   }\n   * });\n   *\n   * setTimeout(() => {\n   *   subscription.unsubscribe();\n   *   console.log('unsubscribed!');\n   * }, 2500);\n   *\n   * // Logs:\n   * // 0 after 1s\n   * // 1 after 2s\n   * // 'unsubscribed!' after 2.5s\n   * ```\n   *\n   * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n   * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n   * Observable.\n   * @param {Function} error (optional) A handler for a terminal event resulting from an error. 
If no error handler is provided,\n   * the error will be thrown asynchronously as unhandled.\n   * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n   * @return {Subscription} a subscription reference to the registered handlers\n   * @method subscribe\n   */\n  subscribe(\n    observerOrNext?: Partial<Observer<T>> | ((value: T) => void) | null,\n    error?: ((error: any) => void) | null,\n    complete?: (() => void) | null\n  ): Subscription {\n    const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n    errorContext(() => {\n      const { operator, source } = this;\n      subscriber.add(\n        operator\n          ? // We're dealing with a subscription in the\n            // operator chain to one of our lifted operators.\n            operator.call(subscriber, source)\n          : source\n          ? // If `source` has a value, but `operator` does not, something that\n            // had intimate knowledge of our API, like our `Subject`, must have\n            // set it. We're going to just call `_subscribe` directly.\n            this._subscribe(subscriber)\n          : // In all other cases, we're likely wrapping a user-provided initializer\n            // function, so we need to catch errors and handle them appropriately.\n            this._trySubscribe(subscriber)\n      );\n    });\n\n    return subscriber;\n  }\n\n  /** @internal */\n  protected _trySubscribe(sink: Subscriber<T>): TeardownLogic {\n    try {\n      return this._subscribe(sink);\n    } catch (err) {\n      // We don't need to return anything in this case,\n      // because it's just going to try to `add()` to a subscription\n      // above.\n      sink.error(err);\n    }\n  }\n\n  /**\n   * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n   * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n   *\n   * **WARNING**: Only use this with observables you *know* will complete. If the source\n   * observable does not complete, you will end up with a promise that is hung up, and\n   * potentially all of the state of an async function hanging out in memory. 
To avoid\n   * this situation, look into adding something like {@link timeout}, {@link take},\n   * {@link takeWhile}, or {@link takeUntil} amongst others.\n   *\n   * #### Example\n   *\n   * ```ts\n   * import { interval, take } from 'rxjs';\n   *\n   * const source$ = interval(1000).pipe(take(4));\n   *\n   * async function getTotal() {\n   *   let total = 0;\n   *\n   *   await source$.forEach(value => {\n   *     total += value;\n   *     console.log('observable -> ' + value);\n   *   });\n   *\n   *   return total;\n   * }\n   *\n   * getTotal().then(\n   *   total => console.log('Total: ' + total)\n   * );\n   *\n   * // Expected:\n   * // 'observable -> 0'\n   * // 'observable -> 1'\n   * // 'observable -> 2'\n   * // 'observable -> 3'\n   * // 'Total: 6'\n   * ```\n   *\n   * @param next a handler for each value emitted by the observable\n   * @return a promise that either resolves on observable completion or\n   *  rejects with the handled error\n   */\n  forEach(next: (value: T) => void): Promise<void>;\n\n  /**\n   * @param next a handler for each value emitted by the observable\n   * @param promiseCtor a constructor function used to instantiate the Promise\n   * @return a promise that either resolves on observable completion or\n   *  rejects with the handled error\n   * @deprecated Passing a Promise constructor will no longer be available\n   * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n   * little benefit. If you need this functionality, it is recommended that you either\n   * polyfill Promise, or you create an adapter to convert the returned native promise\n   * to whatever promise implementation you wanted. Will be removed in v8.\n   */\n  forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise<void>;\n\n  forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise<void> {\n    promiseCtor = getPromiseCtor(promiseCtor);\n\n    return new promiseCtor<void>((resolve, reject) => {\n      const subscriber = new SafeSubscriber<T>({\n        next: (value) => {\n          try {\n            next(value);\n          } catch (err) {\n            reject(err);\n            subscriber.unsubscribe();\n          }\n        },\n        error: reject,\n        complete: resolve,\n      });\n      this.subscribe(subscriber);\n    }) as Promise<void>;\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<any>): TeardownLogic {\n    return this.source?.subscribe(subscriber);\n  }\n\n  /**\n   * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n   * @method Symbol.observable\n   * @return {Observable} this instance of the observable\n   */\n  [Symbol_observable]() {\n    return this;\n  }\n\n  /* tslint:disable:max-line-length */\n  pipe(): Observable<T>;\n  pipe<A>(op1: OperatorFunction<T, A>): Observable<A>;\n  pipe<A, B>(op1: OperatorFunction<T, A>, op2: OperatorFunction<A, B>): Observable<B>;\n  pipe<A, B, C>(op1: OperatorFunction<T, A>, op2: OperatorFunction<A, B>, op3: OperatorFunction<B, C>): Observable<C>;\n  pipe<A, B, C, D>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>\n  ): Observable<D>;\n  pipe<A, B, C, D, E>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>\n  ): Observable<E>;\n  pipe<A, B, C, D, E, F>(\n    op1: 
OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>\n  ): Observable<F>;\n  pipe<A, B, C, D, E, F, G>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>\n  ): Observable<G>;\n  pipe<A, B, C, D, E, F, G, H>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>,\n    op8: OperatorFunction<G, H>\n  ): Observable<H>;\n  pipe<A, B, C, D, E, F, G, H, I>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>,\n    op8: OperatorFunction<G, H>,\n    op9: OperatorFunction<H, I>\n  ): Observable<I>;\n  pipe<A, B, C, D, E, F, G, H, I>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>,\n    op8: OperatorFunction<G, H>,\n    op9: OperatorFunction<H, I>,\n    ...operations: OperatorFunction<any, any>[]\n  ): Observable<unknown>;\n  /* tslint:enable:max-line-length */\n\n  /**\n   * Used to stitch together functional operators into a chain.\n   * @method pipe\n   * @return {Observable} the Observable result of all of the operators having\n   * been called in the order they were passed in.\n   *\n   * ## Example\n   *\n   * ```ts\n   * import { interval, filter, map, scan } from 'rxjs';\n   *\n   * interval(1000)\n   *   .pipe(\n   *     filter(x => x % 2 === 0),\n   *     map(x => x + x),\n   *     scan((acc, x) => acc + x)\n   *   )\n   *   .subscribe(x => console.log(x));\n   * ```\n   */\n  pipe(...operations: OperatorFunction<any, any>[]): Observable<any> {\n    return pipeFromArray(operations)(this);\n  }\n\n  /* tslint:disable:max-line-length */\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(): Promise<T | undefined>;\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(PromiseCtor: typeof Promise): Promise<T | undefined>;\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(PromiseCtor: PromiseConstructorLike): Promise<T | undefined>;\n  /* tslint:enable:max-line-length */\n\n  /**\n   * Subscribe to this Observable and get a Promise resolving on\n   * `complete` with the last emission (if any).\n   *\n   * **WARNING**: Only use this with observables you *know* will complete. If the source\n   * observable does not complete, you will end up with a promise that is hung up, and\n   * potentially all of the state of an async function hanging out in memory. 
To avoid\n   * this situation, look into adding something like {@link timeout}, {@link take},\n   * {@link takeWhile}, or {@link takeUntil} amongst others.\n   *\n   * @method toPromise\n   * @param [promiseCtor] a constructor function used to instantiate\n   * the Promise\n   * @return A Promise that resolves with the last value emit, or\n   * rejects on an error. If there were no emissions, Promise\n   * resolves with undefined.\n   * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n   */\n  toPromise(promiseCtor?: PromiseConstructorLike): Promise<T | undefined> {\n    promiseCtor = getPromiseCtor(promiseCtor);\n\n    return new promiseCtor((resolve, reject) => {\n      let value: T | undefined;\n      this.subscribe(\n        (x: T) => (value = x),\n        (err: any) => reject(err),\n        () => resolve(value)\n      );\n    }) as Promise<T | undefined>;\n  }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n  return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver<T>(value: any): value is Observer<T> {\n  return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber<T>(value: any): value is Subscriber<T> {\n  return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType<typeof Observable>['lift'] } {\n  return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate<T, R>(\n  init: (liftedSource: Observable<T>, subscriber: Subscriber<R>) => (() => void) | void\n): OperatorFunction<T, R> {\n  return (source: Observable<T>) => {\n    if (hasLift(source)) {\n      return source.lift(function (this: Subscriber<R>, liftedSource: Observable<T>) {\n        try {\n          return init(liftedSource, this);\n        } catch (err) {\n          this.error(err);\n        }\n      });\n    }\n    throw new TypeError('Unable to lift unknown Observable type');\n  };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
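The deprecation notices above name `firstValueFrom` and `lastValueFrom` as the replacements for `toPromise`. A minimal migration sketch (illustrative only, assuming the standard `rxjs` root exports):

```ts
import { interval, take, firstValueFrom, lastValueFrom } from 'rxjs';

// take(4) guarantees completion, so the returned promises cannot hang.
const source$ = interval(1000).pipe(take(4));

async function main() {
  const first = await firstValueFrom(source$); // resolves with 0 on the first emission
  const last = await lastValueFrom(source$);   // resolves with 3 when the source completes
  console.log({ first, last });
}

main();
```

Unlike `toPromise`, which resolves with `undefined` when the source completes without emitting (as documented above), these helpers reject on an empty source unless a default value is configured.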
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber<T>(\n  destination: Subscriber<any>,\n  onNext?: (value: T) => void,\n  onComplete?: () => void,\n  onError?: (err: any) => void,\n  onFinalize?: () => void\n): Subscriber<T> {\n  return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber<T> extends Subscriber<T> {\n  /**\n   * Creates an instance of an `OperatorSubscriber`.\n   * @param destination The downstream subscriber.\n   * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n   * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n   * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n   * and send to the `destination` error handler.\n   * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n   * this handler are sent to the `destination` error handler.\n   * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n   * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n   * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n   * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n   * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n   * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n   */\n  constructor(\n    destination: Subscriber<any>,\n    onNext?: (value: T) => void,\n    onComplete?: () => void,\n    onError?: (err: any) => void,\n    private onFinalize?: () => void,\n    private shouldUnsubscribe?: () => boolean\n  ) {\n    // It's important - for performance reasons - that all of this class's\n    // members are initialized and that they are always initialized in the same\n    // order. This will ensure that all OperatorSubscriber instances have the\n    // same hidden class in V8. This, in turn, will help keep the number of\n    // hidden classes involved in property accesses within the base class as\n    // low as possible. If the number of hidden classes involved exceeds four,\n    // the property accesses will become megamorphic and performance penalties\n    // will be incurred - i.e. inline caches won't be used.\n    //\n    // The reasons for ensuring all instances have the same hidden class are\n    // further discussed in this blog post from Benedikt Meurer:\n    // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n    super(destination);\n    this._next = onNext\n      ? function (this: OperatorSubscriber<T>, value: T) {\n          try {\n            onNext(value);\n          } catch (err) {\n            destination.error(err);\n          }\n        }\n      : super._next;\n    this._error = onError\n      ? 
function (this: OperatorSubscriber<T>, err: any) {\n          try {\n            onError(err);\n          } catch (err) {\n            // Send any errors that occur down stream.\n            destination.error(err);\n          } finally {\n            // Ensure finalization.\n            this.unsubscribe();\n          }\n        }\n      : super._error;\n    this._complete = onComplete\n      ? function (this: OperatorSubscriber<T>) {\n          try {\n            onComplete();\n          } catch (err) {\n            // Send any errors that occur down stream.\n            destination.error(err);\n          } finally {\n            // Ensure finalization.\n            this.unsubscribe();\n          }\n        }\n      : super._complete;\n  }\n\n  unsubscribe() {\n    if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n      const { closed } = this;\n      super.unsubscribe();\n      // Execute additional teardown if we have any and we didn't already do so.\n      !closed && this.onFinalize?.();\n    }\n  }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n  schedule(callback: FrameRequestCallback): Subscription;\n  requestAnimationFrame: typeof requestAnimationFrame;\n  cancelAnimationFrame: typeof cancelAnimationFrame;\n  delegate:\n    | {\n        requestAnimationFrame: typeof requestAnimationFrame;\n        cancelAnimationFrame: typeof cancelAnimationFrame;\n      }\n    | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n  // When accessing the delegate, use the variable rather than `this` so that\n  // the functions can be called without being bound to the provider.\n  schedule(callback) {\n    let request = requestAnimationFrame;\n    let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n    const { delegate } = animationFrameProvider;\n    if (delegate) {\n      request = delegate.requestAnimationFrame;\n      cancel = delegate.cancelAnimationFrame;\n    }\n    const handle = request((timestamp) => {\n      // Clear the cancel function. The request has been fulfilled, so\n      // attempting to cancel the request upon unsubscription would be\n      // pointless.\n      cancel = undefined;\n      callback(timestamp);\n    });\n    return new Subscription(() => cancel?.(handle));\n  },\n  requestAnimationFrame(...args) {\n    const { delegate } = animationFrameProvider;\n    return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n  },\n  cancelAnimationFrame(...args) {\n    const { delegate } = animationFrameProvider;\n    return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n  },\n  delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n  /**\n   * @deprecated Internal implementation detail. 
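The `operate`/`createOperatorSubscriber` helpers above are internal, but the shape they produce is simply an `OperatorFunction<T, R>`: a function from a source `Observable<T>` to a result `Observable<R>`. A rough sketch of a hand-rolled operator using only the public API (the operator name `multiplyBy` is made up for illustration; the library itself builds its operators with the internal helpers shown above):

```ts
import { Observable, OperatorFunction, of } from 'rxjs';

// Hypothetical operator: multiplies every number it receives by `factor`.
function multiplyBy(factor: number): OperatorFunction<number, number> {
  return (source: Observable<number>) =>
    new Observable<number>(subscriber =>
      source.subscribe({
        next: value => subscriber.next(value * factor),
        error: err => subscriber.error(err),
        complete: () => subscriber.complete(),
      })
    );
}

of(1, 2, 3).pipe(multiplyBy(10)).subscribe(x => console.log(x)); // 10, 20, 30
```

Returning the inner subscription from the constructor callback wires up teardown, so unsubscribing from the result also unsubscribes from the source.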
Do not construct error instances.\n   * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n   */\n  new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n  (_super) =>\n    function ObjectUnsubscribedErrorImpl(this: any) {\n      _super(this);\n      this.name = 'ObjectUnsubscribedError';\n      this.message = 'object unsubscribed';\n    }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject<T> extends Observable<T> implements SubscriptionLike {\n  closed = false;\n\n  private currentObservers: Observer<T>[] | null = null;\n\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  observers: Observer<T>[] = [];\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  isStopped = false;\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  hasError = false;\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  thrownError: any = null;\n\n  /**\n   * Creates a \"subject\" by basically gluing an observer to an observable.\n   *\n   * @nocollapse\n   * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n   */\n  static create: (...args: any[]) => any = <T>(destination: Observer<T>, source: Observable<T>): AnonymousSubject<T> => {\n    return new AnonymousSubject<T>(destination, source);\n  };\n\n  constructor() {\n    // NOTE: This must be here to obscure Observable's constructor.\n    super();\n  }\n\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n  lift<R>(operator: Operator<T, R>): Observable<R> {\n    const subject = new AnonymousSubject(this, this);\n    subject.operator = operator as any;\n    return subject as any;\n  }\n\n  /** @internal */\n  protected _throwIfClosed() {\n    if (this.closed) {\n      throw new ObjectUnsubscribedError();\n    }\n  }\n\n  next(value: T) {\n    errorContext(() => {\n      this._throwIfClosed();\n      if (!this.isStopped) {\n        if (!this.currentObservers) {\n          this.currentObservers = Array.from(this.observers);\n        }\n        for (const observer of this.currentObservers) {\n          observer.next(value);\n        }\n      }\n    });\n  }\n\n  error(err: any) {\n    errorContext(() => {\n      this._throwIfClosed();\n      if (!this.isStopped) {\n        this.hasError = this.isStopped = true;\n        this.thrownError = err;\n        const { observers } = this;\n        while (observers.length) {\n          observers.shift()!.error(err);\n        }\n      }\n    });\n  }\n\n  complete() {\n    errorContext(() => {\n      this._throwIfClosed();\n      if (!this.isStopped) {\n        this.isStopped = true;\n        const { observers } = this;\n        while (observers.length) {\n          observers.shift()!.complete();\n        }\n      }\n    });\n  }\n\n  unsubscribe() {\n    this.isStopped = this.closed = true;\n    this.observers = this.currentObservers = null!;\n  }\n\n  get observed() {\n    return this.observers?.length > 0;\n  }\n\n  /** @internal */\n  protected _trySubscribe(subscriber: Subscriber<T>): TeardownLogic {\n    this._throwIfClosed();\n    return super._trySubscribe(subscriber);\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    this._throwIfClosed();\n    this._checkFinalizedStatuses(subscriber);\n    return this._innerSubscribe(subscriber);\n  }\n\n  /** @internal */\n  protected _innerSubscribe(subscriber: Subscriber<any>) {\n    const { hasError, isStopped, observers } = this;\n    if (hasError || isStopped) {\n      return EMPTY_SUBSCRIPTION;\n    }\n    this.currentObservers = null;\n    observers.push(subscriber);\n    return new Subscription(() => {\n      this.currentObservers = null;\n      arrRemove(observers, subscriber);\n    });\n  }\n\n  /** @internal */\n  protected _checkFinalizedStatuses(subscriber: Subscriber<any>) {\n    const { hasError, thrownError, isStopped } = this;\n    if (hasError) {\n      subscriber.error(thrownError);\n    } else if (isStopped) {\n      subscriber.complete();\n    }\n  }\n\n  /**\n   * Creates a new Observable with this Subject as the source. You can do this\n   * to create custom Observer-side logic of the Subject and conceal it from\n   * code that uses the Observable.\n   * @return {Observable} Observable that the Subject casts to\n   */\n  asObservable(): Observable<T> {\n    const observable: any = new Observable<T>();\n    observable.source = this;\n    return observable;\n  }\n}\n\n/**\n * @class AnonymousSubject<T>\n */\nexport class AnonymousSubject<T> extends Subject<T> {\n  constructor(\n    /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
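For illustration (not part of the bundled source), a small sketch of the multicasting behaviour implemented by `next` above, together with the `ObjectUnsubscribedError` guard that `_throwIfClosed` applies after `unsubscribe`:

```ts
import { Subject, ObjectUnsubscribedError } from 'rxjs';

const subject = new Subject<number>();

// Every value pushed with next() is multicast to all current observers.
subject.subscribe(v => console.log('observer A:', v));
subject.subscribe(v => console.log('observer B:', v));

subject.next(1); // observer A: 1, observer B: 1
subject.next(2); // observer A: 2, observer B: 2

subject.unsubscribe();

try {
  subject.next(3); // _throwIfClosed() rejects use after unsubscribe()
} catch (err) {
  console.log(err instanceof ObjectUnsubscribedError); // true
}
```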
*/\n    public destination?: Observer<T>,\n    source?: Observable<T>\n  ) {\n    super();\n    this.source = source;\n  }\n\n  next(value: T) {\n    this.destination?.next?.(value);\n  }\n\n  error(err: any) {\n    this.destination?.error?.(err);\n  }\n\n  complete() {\n    this.destination?.complete?.();\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    return this.source?.subscribe(subscriber) ?? EMPTY_SUBSCRIPTION;\n  }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject<T>\n */\nexport class BehaviorSubject<T> extends Subject<T> {\n  constructor(private _value: T) {\n    super();\n  }\n\n  get value(): T {\n    return this.getValue();\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    const subscription = super._subscribe(subscriber);\n    !subscription.closed && subscriber.next(this._value);\n    return subscription;\n  }\n\n  getValue(): T {\n    const { hasError, thrownError, _value } = this;\n    if (hasError) {\n      throw thrownError;\n    }\n    this._throwIfClosed();\n    return _value;\n  }\n\n  next(value: T): void {\n    super.next((this._value = value));\n  }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n  delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n  now() {\n    // Use the variable rather than `this` so that the function can be called\n    // without being bound to the provider.\n    return (dateTimestampProvider.delegate || Date).now();\n  },\n  delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. 
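A brief sketch of the `BehaviorSubject` behaviour defined above (illustrative only; `temperature$` is a made-up name): the constructor takes the initial value, each new subscriber immediately receives the current value, and `getValue()` exposes it synchronously.

```ts
import { BehaviorSubject } from 'rxjs';

// Requires an initial value; replays the current value to each new subscriber.
const temperature$ = new BehaviorSubject(21);

temperature$.subscribe(t => console.log('A sees', t)); // A sees 21 (immediately)
temperature$.next(23);                                  // A sees 23

temperature$.subscribe(t => console.log('B sees', t)); // B sees 23 (current value)
console.log(temperature$.getValue());                   // 23
```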
So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject<T> extends Subject<T> {\n  private _buffer: (T | number)[] = [];\n  private _infiniteTimeWindow = true;\n\n  /**\n   * @param bufferSize The size of the buffer to replay on subscription\n   * @param windowTime The amount of time the buffered items will stay buffered\n   * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n   * calculate the amount of time something has been buffered.\n   */\n  constructor(\n    private _bufferSize = Infinity,\n    private _windowTime = Infinity,\n    private _timestampProvider: TimestampProvider = dateTimestampProvider\n  ) {\n    super();\n    this._infiniteTimeWindow = _windowTime === Infinity;\n    this._bufferSize = Math.max(1, _bufferSize);\n    this._windowTime = Math.max(1, _windowTime);\n  }\n\n  next(value: T): void {\n    const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n    if (!isStopped) {\n      _buffer.push(value);\n      !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n    }\n    this._trimBuffer();\n    super.next(value);\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    this._throwIfClosed();\n    this._trimBuffer();\n\n    const subscription = this._innerSubscribe(subscriber);\n\n    const { _infiniteTimeWindow, _buffer } = this;\n    // We use a copy here, so reentrant code does not mutate our array while we're\n    // emitting it to a new subscriber.\n    const copy = _buffer.slice();\n    for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n      subscriber.next(copy[i] as T);\n    }\n\n    this._checkFinalizedStatuses(subscriber);\n\n    return subscription;\n  }\n\n  private _trimBuffer() {\n    const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n    // If we don't have an infinite buffer size, and we're over the length,\n    // use splice to truncate the old buffer values off. Note that we have to\n    // double the size for instances where we're not using an infinite time window\n    // because we're storing the values and the timestamps in the same array.\n    const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n    _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n    // Now, if we're not in an infinite time window, remove all values where the time is\n    // older than what is allowed.\n    if (!_infiniteTimeWindow) {\n      const now = _timestampProvider.now();\n      let last = 0;\n      // Search the array for the first timestamp that isn't expired and\n      // truncate the buffer up to that point.\n      for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n        last = i;\n      }\n      last && _buffer.splice(0, last + 1);\n    }\n  }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action<T> extends Subscription {\n *   new (scheduler: Scheduler, work: (state?: T) => void);\n *   schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action<T>\n */\nexport class Action<T> extends Subscription {\n  constructor(scheduler: Scheduler, work: (this: SchedulerAction<T>, state?: T) => void) {\n    super();\n  }\n  /**\n   * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n   * some context object, `state`. May happen at some point in the future,\n   * according to the `delay` parameter, if specified.\n   * @param {T} [state] Some contextual data that the `work` function uses when\n   * called by the Scheduler.\n   * @param {number} [delay] Time to wait before executing the work, where the\n   * time unit is implicit and defined by the Scheduler.\n   * @return {void}\n   */\n  public schedule(state?: T, delay: number = 0): Subscription {\n    return this;\n  }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n  setInterval: SetIntervalFunction;\n  clearInterval: ClearIntervalFunction;\n  delegate:\n    | {\n        setInterval: SetIntervalFunction;\n        clearInterval: ClearIntervalFunction;\n      }\n    | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n  // When accessing the delegate, use the variable rather than `this` so that\n  // the functions can be called without being bound to the provider.\n  setInterval(handler: () => void, timeout?: number, ...args) {\n    const { delegate } = intervalProvider;\n    if (delegate?.setInterval) {\n      return delegate.setInterval(handler, timeout, ...args);\n    }\n    return setInterval(handler, timeout, ...args);\n  },\n  clearInterval(handle) {\n    const { delegate } = intervalProvider;\n    return (delegate?.clearInterval || clearInterval)(handle as any);\n  },\n  delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction<T> extends Action<T> {\n  public id: TimerHandle 
| undefined;\n  public state?: T;\n  // @ts-ignore: Property has no initializer and is not definitely assigned\n  public delay: number;\n  protected pending: boolean = false;\n\n  constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  public schedule(state?: T, delay: number = 0): Subscription {\n    if (this.closed) {\n      return this;\n    }\n\n    // Always replace the current state with the new state.\n    this.state = state;\n\n    const id = this.id;\n    const scheduler = this.scheduler;\n\n    //\n    // Important implementation note:\n    //\n    // Actions only execute once by default, unless rescheduled from within the\n    // scheduled callback. This allows us to implement single and repeat\n    // actions via the same code path, without adding API surface area, as well\n    // as mimic traditional recursion but across asynchronous boundaries.\n    //\n    // However, JS runtimes and timers distinguish between intervals achieved by\n    // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n    // serial `setTimeout` calls can be individually delayed, which delays\n    // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n    // guarantee the interval callback will be invoked more precisely to the\n    // interval period, regardless of load.\n    //\n    // Therefore, we use `setInterval` to schedule single and repeat actions.\n    // If the action reschedules itself with the same delay, the interval is not\n    // canceled. If the action doesn't reschedule, or reschedules with a\n    // different delay, the interval will be canceled after scheduled callback\n    // execution.\n    //\n    if (id != null) {\n      this.id = this.recycleAsyncId(scheduler, id, delay);\n    }\n\n    // Set the pending flag indicating that this action has been scheduled, or\n    // has recursively rescheduled itself.\n    this.pending = true;\n\n    this.delay = delay;\n    // If this action has already an async Id, don't request a new one.\n    this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n    return this;\n  }\n\n  protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n    return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n  }\n\n  protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n    // If this action is rescheduled with the same delay time, don't clear the interval id.\n    if (delay != null && this.delay === delay && this.pending === false) {\n      return id;\n    }\n    // Otherwise, if the action's delay time is different from the current delay,\n    // or the action has been rescheduled before it's executed, clear the interval id\n    if (id != null) {\n      intervalProvider.clearInterval(id);\n    }\n\n    return undefined;\n  }\n\n  /**\n   * Immediately executes this action and the `work` it contains.\n   * @return {any}\n   */\n  public execute(state: T, delay: number): any {\n    if (this.closed) {\n      return new Error('executing a cancelled action');\n    }\n\n    this.pending = false;\n    const error = this._execute(state, delay);\n    if (error) {\n      return error;\n    } else if (this.pending === false && this.id != null) {\n      // Dequeue if the action didn't reschedule itself. 
Don't call\n      // unsubscribe(), because the action could reschedule later.\n      // For example:\n      // ```\n      // scheduler.schedule(function doWork(counter) {\n      //   /* ... I'm a busy worker bee ... */\n      //   var originalAction = this;\n      //   /* wait 100ms before rescheduling the action */\n      //   setTimeout(function () {\n      //     originalAction.schedule(counter + 1);\n      //   }, 100);\n      // }, 1000);\n      // ```\n      this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n    }\n  }\n\n  protected _execute(state: T, _delay: number): any {\n    let errored: boolean = false;\n    let errorValue: any;\n    try {\n      this.work(state);\n    } catch (e) {\n      errored = true;\n      // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n      // return here, we can't have it return \"\" or 0 or false.\n      // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n      errorValue = e ? e : new Error('Scheduled action threw falsy error');\n    }\n    if (errored) {\n      this.unsubscribe();\n      return errorValue;\n    }\n  }\n\n  unsubscribe() {\n    if (!this.closed) {\n      const { id, scheduler } = this;\n      const { actions } = scheduler;\n\n      this.work = this.state = this.scheduler = null!;\n      this.pending = false;\n\n      arrRemove(actions, this);\n      if (id != null) {\n        this.id = this.recycleAsyncId(scheduler, id, null);\n      }\n\n      this.delay = null!;\n      super.unsubscribe();\n    }\n  }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n *   now(): number;\n *   schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n  public static now: () => number = dateTimestampProvider.now;\n\n  constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n    this.now = now;\n  }\n\n  /**\n   * A getter method that returns a number representing the current time\n   * (at the time this function was called) according to the scheduler's own\n   * internal clock.\n   * @return {number} A number that represents the current time. May or may not\n   * have a relation to wall-clock time. May or may not refer to a time unit\n   * (e.g. milliseconds).\n   */\n  public now: () => number;\n\n  /**\n   * Schedules a function, `work`, for execution. May happen at some point in\n   * the future, according to the `delay` parameter, if specified. 
May be passed\n   * some context object, `state`, which will be passed to the `work` function.\n   *\n   * The given arguments will be processed an stored as an Action object in a\n   * queue of actions.\n   *\n   * @param {function(state: ?T): ?Subscription} work A function representing a\n   * task, or some unit of work to be executed by the Scheduler.\n   * @param {number} [delay] Time to wait before executing the work, where the\n   * time unit is implicit and defined by the Scheduler itself.\n   * @param {T} [state] Some contextual data that the `work` function uses when\n   * called by the Scheduler.\n   * @return {Subscription} A subscription in order to be able to unsubscribe\n   * the scheduled work.\n   */\n  public schedule<T>(work: (this: SchedulerAction<T>, state?: T) => void, delay: number = 0, state?: T): Subscription {\n    return new this.schedulerActionCtor<T>(this, work).schedule(state, delay);\n  }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n  public actions: Array<AsyncAction<any>> = [];\n  /**\n   * A flag to indicate whether the Scheduler is currently executing a batch of\n   * queued actions.\n   * @type {boolean}\n   * @internal\n   */\n  public _active: boolean = false;\n  /**\n   * An internal ID used to track the latest asynchronous task such as those\n   * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n   * others.\n   * @type {any}\n   * @internal\n   */\n  public _scheduled: TimerHandle | undefined;\n\n  constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n    super(SchedulerAction, now);\n  }\n\n  public flush(action: AsyncAction<any>): void {\n    const { actions } = this;\n\n    if (this._active) {\n      actions.push(action);\n      return;\n    }\n\n    let error: any;\n    this._active = true;\n\n    do {\n      if ((error = action.execute(action.state, action.delay))) {\n        break;\n      }\n    } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n    this._active = false;\n\n    if (error) {\n      while ((action = actions.shift()!)) {\n        action.unsubscribe();\n      }\n      throw error;\n    }\n  }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * <span class=\"informal\">Schedule task as if you used setTimeout(task, duration)</span>\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n *   console.log(state);\n *   this.schedule(state + 1, 1000); // `this` references currently executing Action,\n *                                   // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction<T> extends AsyncAction<T> {\n  constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  public schedule(state?: T, delay: number = 0): Subscription {\n    if (delay > 0) {\n      return super.schedule(state, delay);\n    }\n    this.delay = delay;\n    this.state = state;\n    this.scheduler.flush(this);\n    return this;\n  }\n\n  public execute(state: T, delay: number): any {\n    return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n  }\n\n  protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n    // If delay exists and is greater than 0, or if the delay is null (the\n    // action wasn't rescheduled) but was originally scheduled as an async\n    // action, then recycle as an async action.\n\n    if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n      return super.requestAsyncId(scheduler, id, delay);\n    }\n\n    // Otherwise flush the scheduler starting with this action.\n    scheduler.flush(this);\n\n    // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n    // `TimerHandle`, and generally the return value here isn't really used. 
So the\n    // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n    // as opposed to refactoring every other instanceo of `requestAsyncId`.\n    return 0;\n  }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * <span class=\"informal\">Put every next task on a queue, instead of executing it immediately</span>\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n *   queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n *   console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n *   if (state !== 0) {\n *     console.log('before', state);\n *     this.schedule(state - 1); // `this` references currently executing Action,\n *                               // which we reschedule with new state\n *     console.log('after', state);\n *   }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction<T> extends AsyncAction<T> {\n  constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n    // If delay is greater than 0, request as an async action.\n    if (delay !== null && delay > 0) {\n      return super.requestAsyncId(scheduler, id, delay);\n    }\n    // Push the action to the end of the scheduler queue.\n    scheduler.actions.push(this);\n    // If an animation frame has already been requested, don't request another\n    // one. If an animation frame hasn't been requested yet, request one. 
Return\n    // the current animation frame request id.\n    return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n  }\n\n  protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n    // If delay exists and is greater than 0, or if the delay is null (the\n    // action wasn't rescheduled) but was originally scheduled as an async\n    // action, then recycle as an async action.\n    if (delay != null ? delay > 0 : this.delay > 0) {\n      return super.recycleAsyncId(scheduler, id, delay);\n    }\n    // If the scheduler queue has no remaining actions with the same async id,\n    // cancel the requested animation frame and set the scheduled flag to\n    // undefined so the next AnimationFrameAction will request its own.\n    const { actions } = scheduler;\n    if (id != null && actions[actions.length - 1]?.id !== id) {\n      animationFrameProvider.cancelAnimationFrame(id as number);\n      scheduler._scheduled = undefined;\n    }\n    // Return undefined so the action knows to request a new async id if it's rescheduled.\n    return undefined;\n  }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n  public flush(action?: AsyncAction<any>): void {\n    this._active = true;\n    // The async id that effects a call to flush is stored in _scheduled.\n    // Before executing an action, it's necessary to check the action's async\n    // id to determine whether it's supposed to be executed in the current\n    // flush.\n    // Previous implementations of this method used a count to determine this,\n    // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n    // are removed from the actions array and that can shift actions that are\n    // scheduled to be executed in a subsequent flush into positions at which\n    // they are executed within the current flush.\n    const flushId = this._scheduled;\n    this._scheduled = undefined;\n\n    const { actions } = this;\n    let error: any;\n    action = action || actions.shift()!;\n\n    do {\n      if ((error = action.execute(action.state, action.delay))) {\n        break;\n      }\n    } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n    this._active = false;\n\n    if (error) {\n      while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n        action.unsubscribe();\n      }\n      throw error;\n    }\n  }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * <span class=\"informal\">Perform task when `window.requestAnimationFrame` would fire</span>\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html: <div style=\"background: #0ff;\"></div>\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n *   div.style.height = height + \"px\";\n *\n *   this.schedule(height + 1);  // `this` references currently executing Action,\n *                               // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * <span class=\"informal\">Just emits 'complete', and nothing else.</span>\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n *   next: () => console.log('Next'),\n *   complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n *   mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable<never>((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n  return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n  return new Observable<never>((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n  return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last<T>(arr: T[]): T | undefined {\n  return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n  return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n  return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n  return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = (<T>(x: any): x is ArrayLike<T> => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike<any> {\n  return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable<any> {\n  return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable<T>(obj: any): obj is AsyncIterable<T> {\n  return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n  // TODO: We should create error codes that can be looked up, so this can be less verbose.\n  return new TypeError(\n    `You provided ${\n      input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n    } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n  );\n}\n", "export function getSymbolIterator(): symbol {\n  if (typeof Symbol !== 'function' || !Symbol.iterator) {\n    return '@@iterator' as any;\n  }\n\n  return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable<any> {\n  return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator<T>(readableStream: ReadableStreamLike<T>): AsyncGenerator<T> {\n  const reader = readableStream.getReader();\n  try {\n    while (true) {\n      const { value, done } = await reader.read();\n      if (done) {\n        return;\n      }\n      yield value!;\n    }\n  } finally {\n    reader.releaseLock();\n  }\n}\n\nexport function isReadableStreamLike<T>(obj: any): obj is ReadableStreamLike<T> {\n  // We don't want to use instanceof checks because they would return\n  // false for instances from another Realm, like an <iframe>.\n  return isFunction(obj?.getReader);\n}\n", "import { isArrayLike } from '../util/isArrayLike';\nimport { isPromise } from '../util/isPromise';\nimport { Observable } from '../Observable';\nimport { ObservableInput, ObservedValueOf, ReadableStreamLike } from '../types';\nimport { isInteropObservable } from '../util/isInteropObservable';\nimport { isAsyncIterable } from '../util/isAsyncIterable';\nimport { createInvalidObservableTypeError } from '../util/throwUnobservableError';\nimport { isIterable } from '../util/isIterable';\nimport { isReadableStreamLike, readableStreamLikeToAsyncGenerator } from '../util/isReadableStreamLike';\nimport { Subscriber } from '../Subscriber';\nimport { isFunction } from '../util/isFunction';\nimport { reportUnhandledError } from '../util/reportUnhandledError';\nimport { observable as Symbol_observable } from '../symbol/observable';\n\nexport function innerFrom<O extends ObservableInput<any>>(input: O): Observable<ObservedValueOf<O>>;\nexport function innerFrom<T>(input: ObservableInput<T>): Observable<T> {\n  if (input instanceof Observable) {\n    return input;\n  }\n  if (input != null) {\n    if (isInteropObservable(input)) {\n      return fromInteropObservable(input);\n    }\n    if (isArrayLike(input)) {\n      return fromArrayLike(input);\n    }\n    if (isPromise(input)) {\n      return fromPromise(input);\n    }\n    if (isAsyncIterable(input)) {\n      return fromAsyncIterable(input);\n    }\n    if (isIterable(input)) {\n      return fromIterable(input);\n    }\n    if (isReadableStreamLike(input)) {\n      return fromReadableStreamLike(input);\n    }\n  }\n\n  throw createInvalidObservableTypeError(input);\n}\n\n/**\n * Creates an RxJS Observable from an object that implements `Symbol.observable`.\n * @param obj An object that properly implements `Symbol.observable`.\n */\nexport function fromInteropObservable<T>(obj: any) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    const obs = obj[Symbol_observable]();\n    if (isFunction(obs.subscribe)) {\n      return obs.subscribe(subscriber);\n    }\n    // Should be caught by observable subscribe function error handling.\n    throw new TypeError('Provided object does not correctly implement 
Symbol.observable');\n  });\n}\n\n/**\n * Synchronously emits the values of an array like and completes.\n * This is exported because there are creation functions and operators that need to\n * make direct use of the same logic, and there's no reason to make them run through\n * `from` conditionals because we *know* they're dealing with an array.\n * @param array The array to emit values from\n */\nexport function fromArrayLike<T>(array: ArrayLike<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    // Loop over the array and emit each value. Note two things here:\n    // 1. We're making sure that the subscriber is not closed on each loop.\n    //    This is so we don't continue looping over a very large array after\n    //    something like a `take`, `takeWhile`, or other synchronous unsubscription\n    //    has already unsubscribed.\n    // 2. In this form, reentrant code can alter that array we're looping over.\n    //    This is a known issue, but considered an edge case. The alternative would\n    //    be to copy the array before executing the loop, but this has\n    //    performance implications.\n    for (let i = 0; i < array.length && !subscriber.closed; i++) {\n      subscriber.next(array[i]);\n    }\n    subscriber.complete();\n  });\n}\n\nexport function fromPromise<T>(promise: PromiseLike<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    promise\n      .then(\n        (value) => {\n          if (!subscriber.closed) {\n            subscriber.next(value);\n            subscriber.complete();\n          }\n        },\n        (err: any) => subscriber.error(err)\n      )\n      .then(null, reportUnhandledError);\n  });\n}\n\nexport function fromIterable<T>(iterable: Iterable<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    for (const value of iterable) {\n      subscriber.next(value);\n      if (subscriber.closed) {\n        return;\n      }\n    }\n    subscriber.complete();\n  });\n}\n\nexport function fromAsyncIterable<T>(asyncIterable: AsyncIterable<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    process(asyncIterable, subscriber).catch((err) => subscriber.error(err));\n  });\n}\n\nexport function fromReadableStreamLike<T>(readableStream: ReadableStreamLike<T>) {\n  return fromAsyncIterable(readableStreamLikeToAsyncGenerator(readableStream));\n}\n\nasync function process<T>(asyncIterable: AsyncIterable<T>, subscriber: Subscriber<T>) {\n  for await (const value of asyncIterable) {\n    subscriber.next(value);\n    // A side-effect may have closed our subscriber,\n    // check before the next iteration.\n    if (subscriber.closed) {\n      return;\n    }\n  }\n  subscriber.complete();\n}\n", "import { Subscription } from '../Subscription';\nimport { SchedulerAction, SchedulerLike } from '../types';\n\nexport function executeSchedule(\n  parentSubscription: Subscription,\n  scheduler: SchedulerLike,\n  work: () => void,\n  delay: number,\n  repeat: true\n): void;\nexport function executeSchedule(\n  parentSubscription: Subscription,\n  scheduler: SchedulerLike,\n  work: () => void,\n  delay?: number,\n  repeat?: false\n): Subscription;\n\nexport function executeSchedule(\n  parentSubscription: Subscription,\n  scheduler: SchedulerLike,\n  work: () => void,\n  delay = 0,\n  repeat = false\n): Subscription | void {\n  const scheduleSubscription = scheduler.schedule(function (this: SchedulerAction<any>) {\n    work();\n    if (repeat) {\n      parentSubscription.add(this.schedule(null, delay));\n    } else 
{\n      this.unsubscribe();\n    }\n  }, delay);\n\n  parentSubscription.add(scheduleSubscription);\n\n  if (!repeat) {\n    // Because user-land scheduler implementations are unlikely to properly reuse\n    // Actions for repeat scheduling, we can't trust that the returned subscription\n    // will control repeat subscription scenarios. So we're trying to avoid using them\n    // incorrectly within this library.\n    return scheduleSubscription;\n  }\n}\n", "/** @prettier */\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { executeSchedule } from '../util/executeSchedule';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Re-emits all notifications from source Observable with specified scheduler.\n *\n * <span class=\"informal\">Ensure a specific scheduler is used, from outside of an Observable.</span>\n *\n * `observeOn` is an operator that accepts a scheduler as a first parameter, which will be used to reschedule\n * notifications emitted by the source Observable. It might be useful, if you do not have control over\n * internal scheduler of a given Observable, but want to control when its values are emitted nevertheless.\n *\n * Returned Observable emits the same notifications (nexted values, complete and error events) as the source Observable,\n * but rescheduled with provided scheduler. Note that this doesn't mean that source Observables internal\n * scheduler will be replaced in any way. Original scheduler still will be used, but when the source Observable emits\n * notification, it will be immediately scheduled again - this time with scheduler passed to `observeOn`.\n * An anti-pattern would be calling `observeOn` on Observable that emits lots of values synchronously, to split\n * that emissions into asynchronous chunks. For this to happen, scheduler would have to be passed into the source\n * Observable directly (usually into the operator that creates it). `observeOn` simply delays notifications a\n * little bit more, to ensure that they are emitted at expected moments.\n *\n * As a matter of fact, `observeOn` accepts second parameter, which specifies in milliseconds with what delay notifications\n * will be emitted. The main difference between {@link delay} operator and `observeOn` is that `observeOn`\n * will delay all notifications - including error notifications - while `delay` will pass through error\n * from source Observable immediately when it is emitted. 
In general it is highly recommended to use `delay` operator\n * for any kind of delaying of values in the stream, while using `observeOn` to specify which scheduler should be used\n * for notification emissions in general.\n *\n * ## Example\n *\n * Ensure values in subscribe are called just before browser repaint\n *\n * ```ts\n * import { interval, observeOn, animationFrameScheduler } from 'rxjs';\n *\n * const someDiv = document.createElement('div');\n * someDiv.style.cssText = 'width: 200px;background: #09c';\n * document.body.appendChild(someDiv);\n * const intervals = interval(10);      // Intervals are scheduled\n *                                      // with async scheduler by default...\n * intervals.pipe(\n *   observeOn(animationFrameScheduler) // ...but we will observe on animationFrame\n * )                                    // scheduler to ensure smooth animation.\n * .subscribe(val => {\n *   someDiv.style.height = val + 'px';\n * });\n * ```\n *\n * @see {@link delay}\n *\n * @param scheduler Scheduler that will be used to reschedule notifications from source Observable.\n * @param delay Number of milliseconds that states with what delay every notification should be rescheduled.\n * @return A function that returns an Observable that emits the same\n * notifications as the source Observable, but with provided scheduler.\n */\nexport function observeOn<T>(scheduler: SchedulerLike, delay = 0): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => executeSchedule(subscriber, scheduler, () => subscriber.next(value), delay),\n        () => executeSchedule(subscriber, scheduler, () => subscriber.complete(), delay),\n        (err) => executeSchedule(subscriber, scheduler, () => subscriber.error(err), delay)\n      )\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { operate } from '../util/lift';\n\n/**\n * Asynchronously subscribes Observers to this Observable on the specified {@link SchedulerLike}.\n *\n * With `subscribeOn` you can decide what type of scheduler a specific Observable will be using when it is subscribed to.\n *\n * Schedulers control the speed and order of emissions to observers from an Observable stream.\n *\n * ![](subscribeOn.png)\n *\n * ## Example\n *\n * Given the following code:\n *\n * ```ts\n * import { of, merge } from 'rxjs';\n *\n * const a = of(1, 2, 3);\n * const b = of(4, 5, 6);\n *\n * merge(a, b).subscribe(console.log);\n *\n * // Outputs\n * // 1\n * // 2\n * // 3\n * // 4\n * // 5\n * // 6\n * ```\n *\n * Both Observable `a` and `b` will emit their values directly and synchronously once they are subscribed to.\n *\n * If we instead use the `subscribeOn` operator declaring that we want to use the {@link asyncScheduler} for values emitted by Observable `a`:\n *\n * ```ts\n * import { of, subscribeOn, asyncScheduler, merge } from 'rxjs';\n *\n * const a = of(1, 2, 3).pipe(subscribeOn(asyncScheduler));\n * const b = of(4, 5, 6);\n *\n * merge(a, b).subscribe(console.log);\n *\n * // Outputs\n * // 4\n * // 5\n * // 6\n * // 1\n * // 2\n * // 3\n * ```\n *\n * The reason for this is that Observable `b` emits its values directly and synchronously like before\n * but the emissions from `a` are scheduled on the event loop because we are now using the {@link asyncScheduler} for that specific Observable.\n *\n * @param scheduler The {@link SchedulerLike} to perform subscription actions 
on.\n * @param delay A delay to pass to the scheduler to delay subscriptions\n * @return A function that returns an Observable modified so that its\n * subscriptions happen on the specified {@link SchedulerLike}.\n */\nexport function subscribeOn<T>(scheduler: SchedulerLike, delay: number = 0): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    subscriber.add(scheduler.schedule(() => source.subscribe(subscriber), delay));\n  });\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { observeOn } from '../operators/observeOn';\nimport { subscribeOn } from '../operators/subscribeOn';\nimport { InteropObservable, SchedulerLike } from '../types';\n\nexport function scheduleObservable<T>(input: InteropObservable<T>, scheduler: SchedulerLike) {\n  return innerFrom(input).pipe(subscribeOn(scheduler), observeOn(scheduler));\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { observeOn } from '../operators/observeOn';\nimport { subscribeOn } from '../operators/subscribeOn';\nimport { SchedulerLike } from '../types';\n\nexport function schedulePromise<T>(input: PromiseLike<T>, scheduler: SchedulerLike) {\n  return innerFrom(input).pipe(subscribeOn(scheduler), observeOn(scheduler));\n}\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\nexport function scheduleArray<T>(input: ArrayLike<T>, scheduler: SchedulerLike) {\n  return new Observable<T>((subscriber) => {\n    // The current array index.\n    let i = 0;\n    // Start iterating over the array like on a schedule.\n    return scheduler.schedule(function () {\n      if (i === input.length) {\n        // If we have hit the end of the array like in the\n        // previous job, we can complete.\n        subscriber.complete();\n      } else {\n        // Otherwise let's next the value at the current index,\n        // then increment our index.\n        subscriber.next(input[i++]);\n        // If the last emission didn't cause us to close the subscriber\n        // (via take or some side effect), reschedule the job and we'll\n        // make another pass.\n        if (!subscriber.closed) {\n          this.schedule();\n        }\n      }\n    });\n  });\n}\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\nimport { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from '../util/isFunction';\nimport { executeSchedule } from '../util/executeSchedule';\n\n/**\n * Used in {@link scheduled} to create an observable from an Iterable.\n * @param input The iterable to create an observable from\n * @param scheduler The scheduler to use\n */\nexport function scheduleIterable<T>(input: Iterable<T>, scheduler: SchedulerLike) {\n  return new Observable<T>((subscriber) => {\n    let iterator: Iterator<T, T>;\n\n    // Schedule the initial creation of the iterator from\n    // the iterable. 
This is so the code in the iterable is\n    // not called until the scheduled job fires.\n    executeSchedule(subscriber, scheduler, () => {\n      // Create the iterator.\n      iterator = (input as any)[Symbol_iterator]();\n\n      executeSchedule(\n        subscriber,\n        scheduler,\n        () => {\n          let value: T;\n          let done: boolean | undefined;\n          try {\n            // Pull the value out of the iterator\n            ({ value, done } = iterator.next());\n          } catch (err) {\n            // We got an error while pulling from the iterator\n            subscriber.error(err);\n            return;\n          }\n\n          if (done) {\n            // If it is \"done\" we just complete. This mimics the\n            // behavior of JavaScript's `for..of` consumption of\n            // iterables, which will not emit the value from an iterator\n            // result of `{ done: true: value: 'here' }`.\n            subscriber.complete();\n          } else {\n            // The iterable is not done, emit the value.\n            subscriber.next(value);\n          }\n        },\n        0,\n        true\n      );\n    });\n\n    // During finalization, if we see this iterator has a `return` method,\n    // then we know it is a Generator, and not just an Iterator. So we call\n    // the `return()` function. This will ensure that any `finally { }` blocks\n    // inside of the generator we can hit will be hit properly.\n    return () => isFunction(iterator?.return) && iterator.return();\n  });\n}\n", "import { SchedulerLike } from '../types';\nimport { Observable } from '../Observable';\nimport { executeSchedule } from '../util/executeSchedule';\n\nexport function scheduleAsyncIterable<T>(input: AsyncIterable<T>, scheduler: SchedulerLike) {\n  if (!input) {\n    throw new Error('Iterable cannot be null');\n  }\n  return new Observable<T>((subscriber) => {\n    executeSchedule(subscriber, scheduler, () => {\n      const iterator = input[Symbol.asyncIterator]();\n      executeSchedule(\n        subscriber,\n        scheduler,\n        () => {\n          iterator.next().then((result) => {\n            if (result.done) {\n              // This will remove the subscriptions from\n              // the parent subscription.\n              subscriber.complete();\n            } else {\n              subscriber.next(result.value);\n            }\n          });\n        },\n        0,\n        true\n      );\n    });\n  });\n}\n", "import { SchedulerLike, ReadableStreamLike } from '../types';\nimport { Observable } from '../Observable';\nimport { scheduleAsyncIterable } from './scheduleAsyncIterable';\nimport { readableStreamLikeToAsyncGenerator } from '../util/isReadableStreamLike';\n\nexport function scheduleReadableStreamLike<T>(input: ReadableStreamLike<T>, scheduler: SchedulerLike): Observable<T> {\n  return scheduleAsyncIterable(readableStreamLikeToAsyncGenerator(input), scheduler);\n}\n", "import { scheduleObservable } from './scheduleObservable';\nimport { schedulePromise } from './schedulePromise';\nimport { scheduleArray } from './scheduleArray';\nimport { scheduleIterable } from './scheduleIterable';\nimport { scheduleAsyncIterable } from './scheduleAsyncIterable';\nimport { isInteropObservable } from '../util/isInteropObservable';\nimport { isPromise } from '../util/isPromise';\nimport { isArrayLike } from '../util/isArrayLike';\nimport { isIterable } from '../util/isIterable';\nimport { ObservableInput, SchedulerLike } from '../types';\nimport { Observable } from 
'../Observable';\nimport { isAsyncIterable } from '../util/isAsyncIterable';\nimport { createInvalidObservableTypeError } from '../util/throwUnobservableError';\nimport { isReadableStreamLike } from '../util/isReadableStreamLike';\nimport { scheduleReadableStreamLike } from './scheduleReadableStreamLike';\n\n/**\n * Converts from a common {@link ObservableInput} type to an observable where subscription and emissions\n * are scheduled on the provided scheduler.\n *\n * @see {@link from}\n * @see {@link of}\n *\n * @param input The observable, array, promise, iterable, etc you would like to schedule\n * @param scheduler The scheduler to use to schedule the subscription and emissions from\n * the returned observable.\n */\nexport function scheduled<T>(input: ObservableInput<T>, scheduler: SchedulerLike): Observable<T> {\n  if (input != null) {\n    if (isInteropObservable(input)) {\n      return scheduleObservable(input, scheduler);\n    }\n    if (isArrayLike(input)) {\n      return scheduleArray(input, scheduler);\n    }\n    if (isPromise(input)) {\n      return schedulePromise(input, scheduler);\n    }\n    if (isAsyncIterable(input)) {\n      return scheduleAsyncIterable(input, scheduler);\n    }\n    if (isIterable(input)) {\n      return scheduleIterable(input, scheduler);\n    }\n    if (isReadableStreamLike(input)) {\n      return scheduleReadableStreamLike(input, scheduler);\n    }\n  }\n  throw createInvalidObservableTypeError(input);\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInput, SchedulerLike, ObservedValueOf } from '../types';\nimport { scheduled } from '../scheduled/scheduled';\nimport { innerFrom } from './innerFrom';\n\nexport function from<O extends ObservableInput<any>>(input: O): Observable<ObservedValueOf<O>>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function from<O extends ObservableInput<any>>(input: O, scheduler: SchedulerLike | undefined): Observable<ObservedValueOf<O>>;\n\n/**\n * Creates an Observable from an Array, an array-like object, a Promise, an iterable object, or an Observable-like object.\n *\n * <span class=\"informal\">Converts almost anything to an Observable.</span>\n *\n * ![](from.png)\n *\n * `from` converts various other objects and data types into Observables. It also converts a Promise, an array-like, or an\n * <a href=\"https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols#iterable\" target=\"_blank\">iterable</a>\n * object into an Observable that emits the items in that promise, array, or iterable. A String, in this context, is treated\n * as an array of characters. 
Observable-like objects (contains a function named with the ES2015 Symbol for Observable) can also be\n * converted through this operator.\n *\n * ## Examples\n *\n * Converts an array to an Observable\n *\n * ```ts\n * import { from } from 'rxjs';\n *\n * const array = [10, 20, 30];\n * const result = from(array);\n *\n * result.subscribe(x => console.log(x));\n *\n * // Logs:\n * // 10\n * // 20\n * // 30\n * ```\n *\n * Convert an infinite iterable (from a generator) to an Observable\n *\n * ```ts\n * import { from, take } from 'rxjs';\n *\n * function* generateDoubles(seed) {\n *    let i = seed;\n *    while (true) {\n *      yield i;\n *      i = 2 * i; // double it\n *    }\n * }\n *\n * const iterator = generateDoubles(3);\n * const result = from(iterator).pipe(take(10));\n *\n * result.subscribe(x => console.log(x));\n *\n * // Logs:\n * // 3\n * // 6\n * // 12\n * // 24\n * // 48\n * // 96\n * // 192\n * // 384\n * // 768\n * // 1536\n * ```\n *\n * With `asyncScheduler`\n *\n * ```ts\n * import { from, asyncScheduler } from 'rxjs';\n *\n * console.log('start');\n *\n * const array = [10, 20, 30];\n * const result = from(array, asyncScheduler);\n *\n * result.subscribe(x => console.log(x));\n *\n * console.log('end');\n *\n * // Logs:\n * // 'start'\n * // 'end'\n * // 10\n * // 20\n * // 30\n * ```\n *\n * @see {@link fromEvent}\n * @see {@link fromEventPattern}\n *\n * @param {ObservableInput<T>} A subscription object, a Promise, an Observable-like,\n * an Array, an iterable, or an array-like object to be converted.\n * @param {SchedulerLike} An optional {@link SchedulerLike} on which to schedule the emission of values.\n * @return {Observable<T>}\n */\nexport function from<T>(input: ObservableInput<T>, scheduler?: SchedulerLike): Observable<T> {\n  return scheduler ? scheduled(input, scheduler) : innerFrom(input);\n}\n", "import { SchedulerLike, ValueFromArray } from '../types';\nimport { Observable } from '../Observable';\nimport { popScheduler } from '../util/args';\nimport { from } from './from';\n\n// Devs are more likely to pass null or undefined than they are a scheduler\n// without accompanying values. To make things easier for (naughty) devs who\n// use the `strictNullChecks: false` TypeScript compiler option, these\n// overloads with explicit null and undefined values are included.\n\nexport function of(value: null): Observable<null>;\nexport function of(value: undefined): Observable<undefined>;\n\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function of(scheduler: SchedulerLike): Observable<never>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function of<A extends readonly unknown[]>(...valuesAndScheduler: [...A, SchedulerLike]): Observable<ValueFromArray<A>>;\n\nexport function of(): Observable<never>;\n/** @deprecated Do not specify explicit type parameters. Signatures with type parameters that cannot be inferred will be removed in v8. 
*/\nexport function of<T>(): Observable<T>;\nexport function of<T>(value: T): Observable<T>;\nexport function of<A extends readonly unknown[]>(...values: A): Observable<ValueFromArray<A>>;\n\n/**\n * Converts the arguments to an observable sequence.\n *\n * <span class=\"informal\">Each argument becomes a `next` notification.</span>\n *\n * ![](of.png)\n *\n * Unlike {@link from}, it does not do any flattening and emits each argument in whole\n * as a separate `next` notification.\n *\n * ## Examples\n *\n * Emit the values `10, 20, 30`\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * of(10, 20, 30)\n *   .subscribe({\n *     next: value => console.log('next:', value),\n *     error: err => console.log('error:', err),\n *     complete: () => console.log('the end'),\n *   });\n *\n * // Outputs\n * // next: 10\n * // next: 20\n * // next: 30\n * // the end\n * ```\n *\n * Emit the array `[1, 2, 3]`\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * of([1, 2, 3])\n *   .subscribe({\n *     next: value => console.log('next:', value),\n *     error: err => console.log('error:', err),\n *     complete: () => console.log('the end'),\n *   });\n *\n * // Outputs\n * // next: [1, 2, 3]\n * // the end\n * ```\n *\n * @see {@link from}\n * @see {@link range}\n *\n * @param {...T} values A comma separated list of arguments you want to be emitted\n * @return {Observable} An Observable that emits the arguments\n * described above and then completes.\n */\nexport function of<T>(...args: Array<T | SchedulerLike>): Observable<T> {\n  const scheduler = popScheduler(args);\n  return from(args as T[], scheduler);\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { SchedulerLike } from '../types';\nimport { isFunction } from '../util/isFunction';\n\n/**\n * Creates an observable that will create an error instance and push it to the consumer as an error\n * immediately upon subscription.\n *\n * <span class=\"informal\">Just errors and does nothing else</span>\n *\n * ![](throw.png)\n *\n * This creation function is useful for creating an observable that will create an error and error every\n * time it is subscribed to. Generally, inside of most operators when you might want to return an errored\n * observable, this is unnecessary. 
In most cases, such as in the inner return of {@link concatMap},\n * {@link mergeMap}, {@link defer}, and many others, you can simply throw the error, and RxJS will pick\n * that up and notify the consumer of the error.\n *\n * ## Example\n *\n * Create a simple observable that will create a new error with a timestamp and log it\n * and the message every time you subscribe to it\n *\n * ```ts\n * import { throwError } from 'rxjs';\n *\n * let errorCount = 0;\n *\n * const errorWithTimestamp$ = throwError(() => {\n *   const error: any = new Error(`This is error number ${ ++errorCount }`);\n *   error.timestamp = Date.now();\n *   return error;\n * });\n *\n * errorWithTimestamp$.subscribe({\n *   error: err => console.log(err.timestamp, err.message)\n * });\n *\n * errorWithTimestamp$.subscribe({\n *   error: err => console.log(err.timestamp, err.message)\n * });\n *\n * // Logs the timestamp and a new error message for each subscription\n * ```\n *\n * ### Unnecessary usage\n *\n * Using `throwError` inside of an operator or creation function\n * with a callback, is usually not necessary\n *\n * ```ts\n * import { of, concatMap, timer, throwError } from 'rxjs';\n *\n * const delays$ = of(1000, 2000, Infinity, 3000);\n *\n * delays$.pipe(\n *   concatMap(ms => {\n *     if (ms < 10000) {\n *       return timer(ms);\n *     } else {\n *       // This is probably overkill.\n *       return throwError(() => new Error(`Invalid time ${ ms }`));\n *     }\n *   })\n * )\n * .subscribe({\n *   next: console.log,\n *   error: console.error\n * });\n * ```\n *\n * You can just throw the error instead\n *\n * ```ts\n * import { of, concatMap, timer } from 'rxjs';\n *\n * const delays$ = of(1000, 2000, Infinity, 3000);\n *\n * delays$.pipe(\n *   concatMap(ms => {\n *     if (ms < 10000) {\n *       return timer(ms);\n *     } else {\n *       // Cleaner and easier to read for most folks.\n *       throw new Error(`Invalid time ${ ms }`);\n *     }\n *   })\n * )\n * .subscribe({\n *   next: console.log,\n *   error: console.error\n * });\n * ```\n *\n * @param errorFactory A factory function that will create the error instance that is pushed.\n */\nexport function throwError(errorFactory: () => any): Observable<never>;\n\n/**\n * Returns an observable that will error with the specified error immediately upon subscription.\n *\n * @param error The error instance to emit\n * @deprecated Support for passing an error value will be removed in v8. Instead, pass a factory function to `throwError(() => new Error('test'))`. This is\n * because it will create the error at the moment it should be created and capture a more appropriate stack trace. 
If\n * for some reason you need to create the error ahead of time, you can still do that: `const err = new Error('test'); throwError(() => err);`.\n */\nexport function throwError(error: any): Observable<never>;\n\n/**\n * Notifies the consumer of an error using a given scheduler by scheduling it at delay `0` upon subscription.\n *\n * @param errorOrErrorFactory An error instance or error factory\n * @param scheduler A scheduler to use to schedule the error notification\n * @deprecated The `scheduler` parameter will be removed in v8.\n * Use `throwError` in combination with {@link observeOn}: `throwError(() => new Error('test')).pipe(observeOn(scheduler));`.\n * Details: https://rxjs.dev/deprecations/scheduler-argument\n */\nexport function throwError(errorOrErrorFactory: any, scheduler: SchedulerLike): Observable<never>;\n\nexport function throwError(errorOrErrorFactory: any, scheduler?: SchedulerLike): Observable<never> {\n  const errorFactory = isFunction(errorOrErrorFactory) ? errorOrErrorFactory : () => errorOrErrorFactory;\n  const init = (subscriber: Subscriber<never>) => subscriber.error(errorFactory());\n  return new Observable(scheduler ? (subscriber) => scheduler.schedule(init as any, 0, subscriber) : init);\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface EmptyError extends Error {}\n\nexport interface EmptyErrorCtor {\n  /**\n   * @deprecated Internal implementation detail. Do not construct error instances.\n   * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n   */\n  new (): EmptyError;\n}\n\n/**\n * An error thrown when an Observable or a sequence was queried but has no\n * elements.\n *\n * @see {@link first}\n * @see {@link last}\n * @see {@link single}\n * @see {@link firstValueFrom}\n * @see {@link lastValueFrom}\n *\n * @class EmptyError\n */\nexport const EmptyError: EmptyErrorCtor = createErrorClass((_super) => function EmptyErrorImpl(this: any) {\n  _super(this);\n  this.name = 'EmptyError';\n  this.message = 'no elements in sequence';\n});\n", "/**\n * Checks to see if a value is not only a `Date` object,\n * but a *valid* `Date` object that can be converted to a\n * number. For example, `new Date('blah')` is indeed an\n * `instanceof Date`, however it cannot be converted to a\n * number.\n */\nexport function isValidDate(value: any): value is Date {\n  return value instanceof Date && !isNaN(value as any);\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\nexport function map<T, R>(project: (value: T, index: number) => R): OperatorFunction<T, R>;\n/** @deprecated Use a closure instead of a `thisArg`. Signatures accepting a `thisArg` will be removed in v8. 
*/\nexport function map<T, R, A>(project: (this: A, value: T, index: number) => R, thisArg: A): OperatorFunction<T, R>;\n\n/**\n * Applies a given `project` function to each value emitted by the source\n * Observable, and emits the resulting values as an Observable.\n *\n * <span class=\"informal\">Like [Array.prototype.map()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/map),\n * it passes each source value through a transformation function to get\n * corresponding output values.</span>\n *\n * ![](map.png)\n *\n * Similar to the well known `Array.prototype.map` function, this operator\n * applies a projection to each value and emits that projection in the output\n * Observable.\n *\n * ## Example\n *\n * Map every click to the `clientX` position of that click\n *\n * ```ts\n * import { fromEvent, map } from 'rxjs';\n *\n * const clicks = fromEvent<PointerEvent>(document, 'click');\n * const positions = clicks.pipe(map(ev => ev.clientX));\n *\n * positions.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link mapTo}\n * @see {@link pluck}\n *\n * @param {function(value: T, index: number): R} project The function to apply\n * to each `value` emitted by the source Observable. The `index` parameter is\n * the number `i` for the i-th emission that has happened since the\n * subscription, starting from the number `0`.\n * @param {any} [thisArg] An optional argument to define what `this` is in the\n * `project` function.\n * @return A function that returns an Observable that emits the values from the\n * source Observable transformed by the given `project` function.\n */\nexport function map<T, R>(project: (value: T, index: number) => R, thisArg?: any): OperatorFunction<T, R> {\n  return operate((source, subscriber) => {\n    // The index of the value from the source. Used with projection.\n    let index = 0;\n    // Subscribe to the source, all errors and completions are sent along\n    // to the consumer.\n    source.subscribe(\n      createOperatorSubscriber(subscriber, (value: T) => {\n        // Call the projection function with the appropriate this context,\n        // and send the resulting value to the consumer.\n        subscriber.next(project.call(thisArg, value, index++));\n      })\n    );\n  });\n}\n", "import { OperatorFunction } from \"../types\";\nimport { map } from \"../operators/map\";\n\nconst { isArray } = Array;\n\nfunction callOrApply<T, R>(fn: ((...values: T[]) => R), args: T|T[]): R {\n    return isArray(args) ? fn(...args) : fn(args);\n}\n\n/**\n * Used in several -- mostly deprecated -- situations where we need to \n * apply a list of arguments or a single argument to a result selector.\n */\nexport function mapOneOrManyArgs<T, R>(fn: ((...values: T[]) => R)): OperatorFunction<T|T[], R> {\n    return map(args => callOrApply(fn, args))\n}", "const { isArray } = Array;\nconst { getPrototypeOf, prototype: objectProto, keys: getKeys } = Object;\n\n/**\n * Used in functions where either a list of arguments, a single array of arguments, or a\n * dictionary of arguments can be returned. 
Returns an object with an `args` property with\n * the arguments in an array, if it is a dictionary, it will also return the `keys` in another\n * property.\n */\nexport function argsArgArrayOrObject<T, O extends Record<string, T>>(args: T[] | [O] | [T[]]): { args: T[]; keys: string[] | null } {\n  if (args.length === 1) {\n    const first = args[0];\n    if (isArray(first)) {\n      return { args: first, keys: null };\n    }\n    if (isPOJO(first)) {\n      const keys = getKeys(first);\n      return {\n        args: keys.map((key) => first[key]),\n        keys,\n      };\n    }\n  }\n\n  return { args: args as T[], keys: null };\n}\n\nfunction isPOJO(obj: any): obj is object {\n  return obj && typeof obj === 'object' && getPrototypeOf(obj) === objectProto;\n}\n", "export function createObject(keys: string[], values: any[]) {\n  return keys.reduce((result, key, i) => ((result[key] = values[i]), result), {} as any);\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInput, SchedulerLike, ObservedValueOf, ObservableInputTuple } from '../types';\nimport { argsArgArrayOrObject } from '../util/argsArgArrayOrObject';\nimport { Subscriber } from '../Subscriber';\nimport { from } from './from';\nimport { identity } from '../util/identity';\nimport { Subscription } from '../Subscription';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\nimport { popResultSelector, popScheduler } from '../util/args';\nimport { createObject } from '../util/createObject';\nimport { createOperatorSubscriber } from '../operators/OperatorSubscriber';\nimport { AnyCatcher } from '../AnyCatcher';\nimport { executeSchedule } from '../util/executeSchedule';\n\n// combineLatest(any)\n// We put this first because we need to catch cases where the user has supplied\n// _exactly `any`_ as the argument. Since `any` literally matches _anything_,\n// we don't want it to randomly hit one of the other type signatures below,\n// as we have no idea at build-time what type we should be returning when given an any.\n\n/**\n * You have passed `any` here, we can't figure out if it is\n * an array or an object, so you're getting `unknown`. Use better types.\n * @param arg Something typed as `any`\n */\nexport function combineLatest<T extends AnyCatcher>(arg: T): Observable<unknown>;\n\n// combineLatest([a, b, c])\nexport function combineLatest(sources: []): Observable<never>;\nexport function combineLatest<A extends readonly unknown[]>(sources: readonly [...ObservableInputTuple<A>]): Observable<A>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[], R>(\n  sources: readonly [...ObservableInputTuple<A>],\n  resultSelector: (...values: A) => R,\n  scheduler: SchedulerLike\n): Observable<R>;\nexport function combineLatest<A extends readonly unknown[], R>(\n  sources: readonly [...ObservableInputTuple<A>],\n  resultSelector: (...values: A) => R\n): Observable<R>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[]>(\n  sources: readonly [...ObservableInputTuple<A>],\n  scheduler: SchedulerLike\n): Observable<A>;\n\n// combineLatest(a, b, c)\n/** @deprecated Pass an array of sources instead. The rest-parameters signature will be removed in v8. 
Details: https://rxjs.dev/deprecations/array-argument */\nexport function combineLatest<A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): Observable<A>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[], R>(\n  ...sourcesAndResultSelectorAndScheduler: [...ObservableInputTuple<A>, (...values: A) => R, SchedulerLike]\n): Observable<R>;\n/** @deprecated Pass an array of sources instead. The rest-parameters signature will be removed in v8. Details: https://rxjs.dev/deprecations/array-argument */\nexport function combineLatest<A extends readonly unknown[], R>(\n  ...sourcesAndResultSelector: [...ObservableInputTuple<A>, (...values: A) => R]\n): Observable<R>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[]>(\n  ...sourcesAndScheduler: [...ObservableInputTuple<A>, SchedulerLike]\n): Observable<A>;\n\n// combineLatest({a, b, c})\nexport function combineLatest(sourcesObject: { [K in any]: never }): Observable<never>;\nexport function combineLatest<T extends Record<string, ObservableInput<any>>>(\n  sourcesObject: T\n): Observable<{ [K in keyof T]: ObservedValueOf<T[K]> }>;\n\n/**\n * Combines multiple Observables to create an Observable whose values are\n * calculated from the latest values of each of its input Observables.\n *\n * <span class=\"informal\">Whenever any input Observable emits a value, it\n * computes a formula using the latest values from all the inputs, then emits\n * the output of that formula.</span>\n *\n * ![](combineLatest.png)\n *\n * `combineLatest` combines the values from all the Observables passed in the\n * observables array. This is done by subscribing to each Observable in order and,\n * whenever any Observable emits, collecting an array of the most recent\n * values from each Observable. So if you pass `n` Observables to this operator,\n * the returned Observable will always emit an array of `n` values, in an order\n * corresponding to the order of the passed Observables (the value from the first Observable\n * will be at index 0 of the array and so on).\n *\n * Static version of `combineLatest` accepts an array of Observables. Note that an array of\n * Observables is a good choice, if you don't know beforehand how many Observables\n * you will combine. Passing an empty array will result in an Observable that\n * completes immediately.\n *\n * To ensure the output array always has the same length, `combineLatest` will\n * actually wait for all input Observables to emit at least once,\n * before it starts emitting results. This means if some Observable emits\n * values before other Observables started emitting, all these values but the last\n * will be lost. On the other hand, if some Observable does not emit a value but\n * completes, resulting Observable will complete at the same moment without\n * emitting anything, since it will now be impossible to include a value from the\n * completed Observable in the resulting array. 
Also, if some input Observable does\n * not emit any value and never completes, `combineLatest` will also never emit\n * and never complete, since, again, it will wait for all streams to emit some\n * value.\n *\n * If at least one Observable was passed to `combineLatest` and all passed Observables\n * emitted something, the resulting Observable will complete when all combined\n * streams complete. So even if some Observable completes, the result of\n * `combineLatest` will still emit values when other Observables do. In case\n * of a completed Observable, its value from now on will always be the last\n * emitted value. On the other hand, if any Observable errors, `combineLatest`\n * will error immediately as well, and all other Observables will be unsubscribed.\n *\n * ## Examples\n *\n * Combine two timer Observables\n *\n * ```ts\n * import { timer, combineLatest } from 'rxjs';\n *\n * const firstTimer = timer(0, 1000); // emit 0, 1, 2... after every second, starting from now\n * const secondTimer = timer(500, 1000); // emit 0, 1, 2... after every second, starting 0,5s from now\n * const combinedTimers = combineLatest([firstTimer, secondTimer]);\n * combinedTimers.subscribe(value => console.log(value));\n * // Logs\n * // [0, 0] after 0.5s\n * // [1, 0] after 1s\n * // [1, 1] after 1.5s\n * // [2, 1] after 2s\n * ```\n *\n * Combine a dictionary of Observables\n *\n * ```ts\n * import { of, delay, startWith, combineLatest } from 'rxjs';\n *\n * const observables = {\n *   a: of(1).pipe(delay(1000), startWith(0)),\n *   b: of(5).pipe(delay(5000), startWith(0)),\n *   c: of(10).pipe(delay(10000), startWith(0))\n * };\n * const combined = combineLatest(observables);\n * combined.subscribe(value => console.log(value));\n * // Logs\n * // { a: 0, b: 0, c: 0 } immediately\n * // { a: 1, b: 0, c: 0 } after 1s\n * // { a: 1, b: 5, c: 0 } after 5s\n * // { a: 1, b: 5, c: 10 } after 10s\n * ```\n *\n * Combine an array of Observables\n *\n * ```ts\n * import { of, delay, startWith, combineLatest } from 'rxjs';\n *\n * const observables = [1, 5, 10].map(\n *   n => of(n).pipe(\n *     delay(n * 1000), // emit 0 and then emit n after n seconds\n *     startWith(0)\n *   )\n * );\n * const combined = combineLatest(observables);\n * combined.subscribe(value => console.log(value));\n * // Logs\n * // [0, 0, 0] immediately\n * // [1, 0, 0] after 1s\n * // [1, 5, 0] after 5s\n * // [1, 5, 10] after 10s\n * ```\n *\n * Use map operator to dynamically calculate the Body-Mass Index\n *\n * ```ts\n * import { of, combineLatest, map } from 'rxjs';\n *\n * const weight = of(70, 72, 76, 79, 75);\n * const height = of(1.76, 1.77, 1.78);\n * const bmi = combineLatest([weight, height]).pipe(\n *   map(([w, h]) => w / (h * h)),\n * );\n * bmi.subscribe(x => console.log('BMI is ' + x));\n *\n * // With output to console:\n * // BMI is 24.212293388429753\n * // BMI is 23.93948099205209\n * // BMI is 23.671253629592222\n * ```\n *\n * @see {@link combineLatestAll}\n * @see {@link merge}\n * @see {@link withLatestFrom}\n *\n * @param {ObservableInput} [observables] An array of input Observables to combine with each other.\n * An array of Observables must be given as the first argument.\n * @param {function} [project] An optional function to project the values from\n * the combined latest values into a new value on the output Observable.\n * @param {SchedulerLike} [scheduler=null] The {@link SchedulerLike} to use for subscribing to\n * each input Observable.\n * @return {Observable} An Observable of projected values 
from the most recent\n * values from each input Observable, or an array of the most recent values from\n * each input Observable.\n */\nexport function combineLatest<O extends ObservableInput<any>, R>(...args: any[]): Observable<R> | Observable<ObservedValueOf<O>[]> {\n  const scheduler = popScheduler(args);\n  const resultSelector = popResultSelector(args);\n\n  const { args: observables, keys } = argsArgArrayOrObject(args);\n\n  if (observables.length === 0) {\n    // If no observables are passed, or someone has passed an empty array\n    // of observables, or even an empty object POJO, we need to just\n    // complete (EMPTY), but we have to honor the scheduler provided if any.\n    return from([], scheduler as any);\n  }\n\n  const result = new Observable<ObservedValueOf<O>[]>(\n    combineLatestInit(\n      observables as ObservableInput<ObservedValueOf<O>>[],\n      scheduler,\n      keys\n        ? // A handler for scrubbing the array of args into a dictionary.\n          (values) => createObject(keys, values)\n        : // A passthrough to just return the array\n          identity\n    )\n  );\n\n  return resultSelector ? (result.pipe(mapOneOrManyArgs(resultSelector)) as Observable<R>) : result;\n}\n\nexport function combineLatestInit(\n  observables: ObservableInput<any>[],\n  scheduler?: SchedulerLike,\n  valueTransform: (values: any[]) => any = identity\n) {\n  return (subscriber: Subscriber<any>) => {\n    // The outer subscription. We're capturing this in a function\n    // because we may have to schedule it.\n    maybeSchedule(\n      scheduler,\n      () => {\n        const { length } = observables;\n        // A store for the values each observable has emitted so far. We match observable to value on index.\n        const values = new Array(length);\n        // The number of currently active subscriptions, as they complete, we decrement this number to see if\n        // we are all done combining values, so we can complete the result.\n        let active = length;\n        // The number of inner sources that still haven't emitted the first value\n        // We need to track this because all sources need to emit one value in order\n        // to start emitting values.\n        let remainingFirstValues = length;\n        // The loop to kick off subscription. 
We're keying everything on index `i` to relate the observables passed\n        // in to the slot in the output array or the key in the array of keys in the output dictionary.\n        for (let i = 0; i < length; i++) {\n          maybeSchedule(\n            scheduler,\n            () => {\n              const source = from(observables[i], scheduler as any);\n              let hasFirstValue = false;\n              source.subscribe(\n                createOperatorSubscriber(\n                  subscriber,\n                  (value) => {\n                    // When we get a value, record it in our set of values.\n                    values[i] = value;\n                    if (!hasFirstValue) {\n                      // If this is our first value, record that.\n                      hasFirstValue = true;\n                      remainingFirstValues--;\n                    }\n                    if (!remainingFirstValues) {\n                      // We're not waiting for any more\n                      // first values, so we can emit!\n                      subscriber.next(valueTransform(values.slice()));\n                    }\n                  },\n                  () => {\n                    if (!--active) {\n                      // We only complete the result if we have no more active\n                      // inner observables.\n                      subscriber.complete();\n                    }\n                  }\n                )\n              );\n            },\n            subscriber\n          );\n        }\n      },\n      subscriber\n    );\n  };\n}\n\n/**\n * A small utility to handle the couple of locations where we want to schedule if a scheduler was provided,\n * but we don't if there was no scheduler.\n */\nfunction maybeSchedule(scheduler: SchedulerLike | undefined, execute: () => void, subscription: Subscription) {\n  if (scheduler) {\n    executeSchedule(subscription, scheduler, execute);\n  } else {\n    execute();\n  }\n}\n", "import { Observable } from '../Observable';\nimport { innerFrom } from '../observable/innerFrom';\nimport { Subscriber } from '../Subscriber';\nimport { ObservableInput, SchedulerLike } from '../types';\nimport { executeSchedule } from '../util/executeSchedule';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * A process embodying the general \"merge\" strategy. 
This is used in\n * `mergeMap` and `mergeScan` because the logic is otherwise nearly identical.\n * @param source The original source observable\n * @param subscriber The consumer subscriber\n * @param project The projection function to get our inner sources\n * @param concurrent The number of concurrent inner subscriptions\n * @param onBeforeNext Additional logic to apply before nexting to our consumer\n * @param expand If `true` this will perform an \"expand\" strategy, which differs only\n * in that it recurses, and the inner subscription must be schedule-able.\n * @param innerSubScheduler A scheduler to use to schedule inner subscriptions,\n * this is to support the expand strategy, mostly, and should be deprecated\n */\nexport function mergeInternals<T, R>(\n  source: Observable<T>,\n  subscriber: Subscriber<R>,\n  project: (value: T, index: number) => ObservableInput<R>,\n  concurrent: number,\n  onBeforeNext?: (innerValue: R) => void,\n  expand?: boolean,\n  innerSubScheduler?: SchedulerLike,\n  additionalFinalizer?: () => void\n) {\n  // Buffered values, in the event of going over our concurrency limit\n  const buffer: T[] = [];\n  // The number of active inner subscriptions.\n  let active = 0;\n  // An index to pass to our accumulator function\n  let index = 0;\n  // Whether or not the outer source has completed.\n  let isComplete = false;\n\n  /**\n   * Checks to see if we can complete our result or not.\n   */\n  const checkComplete = () => {\n    // If the outer has completed, and nothing is left in the buffer,\n    // and we don't have any active inner subscriptions, then we can\n    // Emit the state and complete.\n    if (isComplete && !buffer.length && !active) {\n      subscriber.complete();\n    }\n  };\n\n  // If we're under our concurrency limit, just start the inner subscription, otherwise buffer and wait.\n  const outerNext = (value: T) => (active < concurrent ? doInnerSub(value) : buffer.push(value));\n\n  const doInnerSub = (value: T) => {\n    // If we're expanding, we need to emit the outer values and the inner values\n    // as the inners will \"become outers\" in a way as they are recursively fed\n    // back to the projection mechanism.\n    expand && subscriber.next(value as any);\n\n    // Increment the number of active subscriptions so we can track it\n    // against our concurrency limit later.\n    active++;\n\n    // A flag used to show that the inner observable completed.\n    // This is checked during finalization to see if we should\n    // move to the next item in the buffer, if there is on.\n    let innerComplete = false;\n\n    // Start our inner subscription.\n    innerFrom(project(value, index++)).subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (innerValue) => {\n          // `mergeScan` has additional handling here. For example\n          // taking the inner value and updating state.\n          onBeforeNext?.(innerValue);\n\n          if (expand) {\n            // If we're expanding, then just recurse back to our outer\n            // handler. 
It will emit the value first thing.\n            outerNext(innerValue as any);\n          } else {\n            // Otherwise, emit the inner value.\n            subscriber.next(innerValue);\n          }\n        },\n        () => {\n          // Flag that we have completed, so we know to check the buffer\n          // during finalization.\n          innerComplete = true;\n        },\n        // Errors are passed to the destination.\n        undefined,\n        () => {\n          // During finalization, if the inner completed (it wasn't errored or\n          // cancelled), then we want to try the next item in the buffer if\n          // there is one.\n          if (innerComplete) {\n            // We have to wrap this in a try/catch because it happens during\n            // finalization, possibly asynchronously, and we want to pass\n            // any errors that happen (like in a projection function) to\n            // the outer Subscriber.\n            try {\n              // INNER SOURCE COMPLETE\n              // Decrement the active count to ensure that the next time\n              // we try to call `doInnerSub`, the number is accurate.\n              active--;\n              // If we have more values in the buffer, try to process those\n              // Note that this call will increment `active` ahead of the\n              // next conditional, if there were any more inner subscriptions\n              // to start.\n              while (buffer.length && active < concurrent) {\n                const bufferedValue = buffer.shift()!;\n                // Particularly for `expand`, we need to check to see if a scheduler was provided\n                // for when we want to start our inner subscription. Otherwise, we just start\n                // are next inner subscription.\n                if (innerSubScheduler) {\n                  executeSchedule(subscriber, innerSubScheduler, () => doInnerSub(bufferedValue));\n                } else {\n                  doInnerSub(bufferedValue);\n                }\n              }\n              // Check to see if we can complete, and complete if so.\n              checkComplete();\n            } catch (err) {\n              subscriber.error(err);\n            }\n          }\n        }\n      )\n    );\n  };\n\n  // Subscribe to our source observable.\n  source.subscribe(\n    createOperatorSubscriber(subscriber, outerNext, () => {\n      // Outer completed, make a note of it, and check to see if we can complete everything.\n      isComplete = true;\n      checkComplete();\n    })\n  );\n\n  // Additional finalization (for when the destination is torn down).\n  // Other finalization is added implicitly via subscription above.\n  return () => {\n    additionalFinalizer?.();\n  };\n}\n", "import { ObservableInput, OperatorFunction, ObservedValueOf } from '../types';\nimport { map } from './map';\nimport { innerFrom } from '../observable/innerFrom';\nimport { operate } from '../util/lift';\nimport { mergeInternals } from './mergeInternals';\nimport { isFunction } from '../util/isFunction';\n\n/* tslint:disable:max-line-length */\nexport function mergeMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  concurrent?: number\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. 
Details: https://rxjs.dev/deprecations/resultSelector */\nexport function mergeMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: undefined,\n  concurrent?: number\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. Details: https://rxjs.dev/deprecations/resultSelector */\nexport function mergeMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: (outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R,\n  concurrent?: number\n): OperatorFunction<T, R>;\n/* tslint:enable:max-line-length */\n\n/**\n * Projects each source value to an Observable which is merged in the output\n * Observable.\n *\n * <span class=\"informal\">Maps each value to an Observable, then flattens all of\n * these inner Observables using {@link mergeAll}.</span>\n *\n * ![](mergeMap.png)\n *\n * Returns an Observable that emits items based on applying a function that you\n * supply to each item emitted by the source Observable, where that function\n * returns an Observable, and then merging those resulting Observables and\n * emitting the results of this merger.\n *\n * ## Example\n *\n * Map and flatten each letter to an Observable ticking every 1 second\n *\n * ```ts\n * import { of, mergeMap, interval, map } from 'rxjs';\n *\n * const letters = of('a', 'b', 'c');\n * const result = letters.pipe(\n *   mergeMap(x => interval(1000).pipe(map(i => x + i)))\n * );\n *\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // a0\n * // b0\n * // c0\n * // a1\n * // b1\n * // c1\n * // continues to list a, b, c every second with respective ascending integers\n * ```\n *\n * @see {@link concatMap}\n * @see {@link exhaustMap}\n * @see {@link merge}\n * @see {@link mergeAll}\n * @see {@link mergeMapTo}\n * @see {@link mergeScan}\n * @see {@link switchMap}\n *\n * @param {function(value: T, ?index: number): ObservableInput} project A function\n * that, when applied to an item emitted by the source Observable, returns an\n * Observable.\n * @param {number} [concurrent=Infinity] Maximum number of input\n * Observables being subscribed to concurrently.\n * @return A function that returns an Observable that emits the result of\n * applying the projection function (and the optional deprecated\n * `resultSelector`) to each item emitted by the source Observable and merging\n * the results of the Observables obtained from this transformation.\n */\nexport function mergeMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector?: ((outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R) | number,\n  concurrent: number = Infinity\n): OperatorFunction<T, ObservedValueOf<O> | R> {\n  if (isFunction(resultSelector)) {\n    // DEPRECATED PATH\n    return mergeMap((a, i) => map((b: any, ii: number) => resultSelector(a, b, i, ii))(innerFrom(project(a, i))), concurrent);\n  } else if (typeof resultSelector === 'number') {\n    concurrent = resultSelector;\n  }\n\n  return operate((source, subscriber) => mergeInternals(source, subscriber, project, concurrent));\n}\n", "import { mergeMap } from './mergeMap';\nimport { identity } from '../util/identity';\nimport { OperatorFunction, ObservableInput, ObservedValueOf } from '../types';\n\n/**\n * Converts a higher-order Observable into a first-order Observable 
which\n * concurrently delivers all values that are emitted on the inner Observables.\n *\n * <span class=\"informal\">Flattens an Observable-of-Observables.</span>\n *\n * ![](mergeAll.png)\n *\n * `mergeAll` subscribes to an Observable that emits Observables, also known as\n * a higher-order Observable. Each time it observes one of these emitted inner\n * Observables, it subscribes to that and delivers all the values from the\n * inner Observable on the output Observable. The output Observable only\n * completes once all inner Observables have completed. Any error delivered by\n * a inner Observable will be immediately emitted on the output Observable.\n *\n * ## Examples\n *\n * Spawn a new interval Observable for each click event, and blend their outputs as one Observable\n *\n * ```ts\n * import { fromEvent, map, interval, mergeAll } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const higherOrder = clicks.pipe(map(() => interval(1000)));\n * const firstOrder = higherOrder.pipe(mergeAll());\n *\n * firstOrder.subscribe(x => console.log(x));\n * ```\n *\n * Count from 0 to 9 every second for each click, but only allow 2 concurrent timers\n *\n * ```ts\n * import { fromEvent, map, interval, take, mergeAll } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const higherOrder = clicks.pipe(\n *   map(() => interval(1000).pipe(take(10)))\n * );\n * const firstOrder = higherOrder.pipe(mergeAll(2));\n *\n * firstOrder.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link combineLatestAll}\n * @see {@link concatAll}\n * @see {@link exhaustAll}\n * @see {@link merge}\n * @see {@link mergeMap}\n * @see {@link mergeMapTo}\n * @see {@link mergeScan}\n * @see {@link switchAll}\n * @see {@link switchMap}\n * @see {@link zipAll}\n *\n * @param {number} [concurrent=Infinity] Maximum number of inner\n * Observables being subscribed to concurrently.\n * @return A function that returns an Observable that emits values coming from\n * all the inner Observables emitted by the source Observable.\n */\nexport function mergeAll<O extends ObservableInput<any>>(concurrent: number = Infinity): OperatorFunction<O, ObservedValueOf<O>> {\n  return mergeMap(identity, concurrent);\n}\n", "import { mergeAll } from './mergeAll';\nimport { OperatorFunction, ObservableInput, ObservedValueOf } from '../types';\n\n/**\n * Converts a higher-order Observable into a first-order Observable by\n * concatenating the inner Observables in order.\n *\n * <span class=\"informal\">Flattens an Observable-of-Observables by putting one\n * inner Observable after the other.</span>\n *\n * ![](concatAll.svg)\n *\n * Joins every Observable emitted by the source (a higher-order Observable), in\n * a serial fashion. 
It subscribes to each inner Observable only after the\n * previous inner Observable has completed, and merges all of their values into\n * the returned observable.\n *\n * __Warning:__ If the source Observable emits Observables quickly and\n * endlessly, and the inner Observables it emits generally complete slower than\n * the source emits, you can run into memory issues as the incoming Observables\n * collect in an unbounded buffer.\n *\n * Note: `concatAll` is equivalent to `mergeAll` with concurrency parameter set\n * to `1`.\n *\n * ## Example\n *\n * For each click event, tick every second from 0 to 3, with no concurrency\n *\n * ```ts\n * import { fromEvent, map, interval, take, concatAll } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const higherOrder = clicks.pipe(\n *   map(() => interval(1000).pipe(take(4)))\n * );\n * const firstOrder = higherOrder.pipe(concatAll());\n * firstOrder.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // (results are not concurrent)\n * // For every click on the \"document\" it will emit values 0 to 3 spaced\n * // on a 1000ms interval\n * // one click = 1000ms-> 0 -1000ms-> 1 -1000ms-> 2 -1000ms-> 3\n * ```\n *\n * @see {@link combineLatestAll}\n * @see {@link concat}\n * @see {@link concatMap}\n * @see {@link concatMapTo}\n * @see {@link exhaustAll}\n * @see {@link mergeAll}\n * @see {@link switchAll}\n * @see {@link switchMap}\n * @see {@link zipAll}\n *\n * @return A function that returns an Observable emitting values from all the\n * inner Observables concatenated.\n */\nexport function concatAll<O extends ObservableInput<any>>(): OperatorFunction<O, ObservedValueOf<O>> {\n  return mergeAll(1);\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInputTuple, SchedulerLike } from '../types';\nimport { concatAll } from '../operators/concatAll';\nimport { popScheduler } from '../util/args';\nimport { from } from './from';\n\nexport function concat<T extends readonly unknown[]>(...inputs: [...ObservableInputTuple<T>]): Observable<T[number]>;\nexport function concat<T extends readonly unknown[]>(\n  ...inputsAndScheduler: [...ObservableInputTuple<T>, SchedulerLike]\n): Observable<T[number]>;\n\n/**\n * Creates an output Observable which sequentially emits all values from the first given\n * Observable and then moves on to the next.\n *\n * <span class=\"informal\">Concatenates multiple Observables together by\n * sequentially emitting their values, one Observable after the other.</span>\n *\n * ![](concat.png)\n *\n * `concat` joins multiple Observables together, by subscribing to them one at a time and\n * merging their results into the output Observable. You can pass either an array of\n * Observables, or put them directly as arguments. Passing an empty array will result\n * in Observable that completes immediately.\n *\n * `concat` will subscribe to first input Observable and emit all its values, without\n * changing or affecting them in any way. When that Observable completes, it will\n * subscribe to then next Observable passed and, again, emit its values. This will be\n * repeated, until the operator runs out of Observables. When last input Observable completes,\n * `concat` will complete as well. At any given moment only one Observable passed to operator\n * emits values. If you would like to emit values from passed Observables concurrently, check out\n * {@link merge} instead, especially with optional `concurrent` parameter. 
As a matter of fact,\n * `concat` is an equivalent of `merge` operator with `concurrent` parameter set to `1`.\n *\n * Note that if some input Observable never completes, `concat` will also never complete\n * and Observables following the one that did not complete will never be subscribed. On the other\n * hand, if some Observable simply completes immediately after it is subscribed, it will be\n * invisible for `concat`, which will just move on to the next Observable.\n *\n * If any Observable in chain errors, instead of passing control to the next Observable,\n * `concat` will error immediately as well. Observables that would be subscribed after\n * the one that emitted error, never will.\n *\n * If you pass to `concat` the same Observable many times, its stream of values\n * will be \"replayed\" on every subscription, which means you can repeat given Observable\n * as many times as you like. If passing the same Observable to `concat` 1000 times becomes tedious,\n * you can always use {@link repeat}.\n *\n * ## Examples\n *\n * Concatenate a timer counting from 0 to 3 with a synchronous sequence from 1 to 10\n *\n * ```ts\n * import { interval, take, range, concat } from 'rxjs';\n *\n * const timer = interval(1000).pipe(take(4));\n * const sequence = range(1, 10);\n * const result = concat(timer, sequence);\n * result.subscribe(x => console.log(x));\n *\n * // results in:\n * // 0 -1000ms-> 1 -1000ms-> 2 -1000ms-> 3 -immediate-> 1 ... 10\n * ```\n *\n * Concatenate 3 Observables\n *\n * ```ts\n * import { interval, take, concat } from 'rxjs';\n *\n * const timer1 = interval(1000).pipe(take(10));\n * const timer2 = interval(2000).pipe(take(6));\n * const timer3 = interval(500).pipe(take(10));\n *\n * const result = concat(timer1, timer2, timer3);\n * result.subscribe(x => console.log(x));\n *\n * // results in the following:\n * // (Prints to console sequentially)\n * // -1000ms-> 0 -1000ms-> 1 -1000ms-> ... 9\n * // -2000ms-> 0 -2000ms-> 1 -2000ms-> ... 5\n * // -500ms-> 0 -500ms-> 1 -500ms-> ... 9\n * ```\n *\n * Concatenate the same Observable to repeat it\n *\n * ```ts\n * import { interval, take, concat } from 'rxjs';\n *\n * const timer = interval(1000).pipe(take(2));\n *\n * concat(timer, timer) // concatenating the same Observable!\n *   .subscribe({\n *     next: value => console.log(value),\n *     complete: () => console.log('...and it is done!')\n *   });\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 0 after 3s\n * // 1 after 4s\n * // '...and it is done!' also after 4s\n * ```\n *\n * @see {@link concatAll}\n * @see {@link concatMap}\n * @see {@link concatMapTo}\n * @see {@link startWith}\n * @see {@link endWith}\n *\n * @param args Input Observables to concatenate.\n */\nexport function concat(...args: any[]): Observable<unknown> {\n  return concatAll()(from(args, popScheduler(args)));\n}\n", "import { Observable } from '../Observable';\nimport { ObservedValueOf, ObservableInput } from '../types';\nimport { innerFrom } from './innerFrom';\n\n/**\n * Creates an Observable that, on subscribe, calls an Observable factory to\n * make an Observable for each new Observer.\n *\n * <span class=\"informal\">Creates the Observable lazily, that is, only when it\n * is subscribed.\n * </span>\n *\n * ![](defer.png)\n *\n * `defer` allows you to create an Observable only when the Observer\n * subscribes. 
It waits until an Observer subscribes to it, calls the given\n * factory function to get an Observable -- where a factory function typically\n * generates a new Observable -- and subscribes the Observer to this Observable.\n * In case the factory function returns a falsy value, then EMPTY is used as\n * Observable instead. Last but not least, an exception during the factory\n * function call is transferred to the Observer by calling `error`.\n *\n * ## Example\n *\n * Subscribe to either an Observable of clicks or an Observable of interval, at random\n *\n * ```ts\n * import { defer, fromEvent, interval } from 'rxjs';\n *\n * const clicksOrInterval = defer(() => {\n *   return Math.random() > 0.5\n *     ? fromEvent(document, 'click')\n *     : interval(1000);\n * });\n * clicksOrInterval.subscribe(x => console.log(x));\n *\n * // Results in the following behavior:\n * // If the result of Math.random() is greater than 0.5 it will listen\n * // for clicks anywhere on the \"document\"; when document is clicked it\n * // will log a MouseEvent object to the console. If the result is less\n * // than 0.5 it will emit ascending numbers, one every second(1000ms).\n * ```\n *\n * @see {@link Observable}\n *\n * @param {function(): ObservableInput} observableFactory The Observable\n * factory function to invoke for each Observer that subscribes to the output\n * Observable. May also return a Promise, which will be converted on the fly\n * to an Observable.\n * @return {Observable} An Observable whose Observers' subscriptions trigger\n * an invocation of the given Observable factory function.\n */\nexport function defer<R extends ObservableInput<any>>(observableFactory: () => R): Observable<ObservedValueOf<R>> {\n  return new Observable<ObservedValueOf<R>>((subscriber) => {\n    innerFrom(observableFactory()).subscribe(subscriber);\n  });\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { Observable } from '../Observable';\nimport { mergeMap } from '../operators/mergeMap';\nimport { isArrayLike } from '../util/isArrayLike';\nimport { isFunction } from '../util/isFunction';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\n\n// These constants are used to create handler registry functions using array mapping below.\nconst nodeEventEmitterMethods = ['addListener', 'removeListener'] as const;\nconst eventTargetMethods = ['addEventListener', 'removeEventListener'] as const;\nconst jqueryMethods = ['on', 'off'] as const;\n\nexport interface NodeStyleEventEmitter {\n  addListener(eventName: string | symbol, handler: NodeEventHandler): this;\n  removeListener(eventName: string | symbol, handler: NodeEventHandler): this;\n}\n\nexport type NodeEventHandler = (...args: any[]) => void;\n\n// For APIs that implement `addListener` and `removeListener` methods that may\n// not use the same arguments or return EventEmitter values\n// such as React Native\nexport interface NodeCompatibleEventEmitter {\n  addListener(eventName: string, handler: NodeEventHandler): void | {};\n  removeListener(eventName: string, handler: NodeEventHandler): void | {};\n}\n\n// Use handler types like those in @types/jquery. 
See:\n// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/847731ba1d7fa6db6b911c0e43aa0afe596e7723/types/jquery/misc.d.ts#L6395\nexport interface JQueryStyleEventEmitter<TContext, T> {\n  on(eventName: string, handler: (this: TContext, t: T, ...args: any[]) => any): void;\n  off(eventName: string, handler: (this: TContext, t: T, ...args: any[]) => any): void;\n}\n\nexport interface EventListenerObject<E> {\n  handleEvent(evt: E): void;\n}\n\nexport interface HasEventTargetAddRemove<E> {\n  addEventListener(\n    type: string,\n    listener: ((evt: E) => void) | EventListenerObject<E> | null,\n    options?: boolean | AddEventListenerOptions\n  ): void;\n  removeEventListener(\n    type: string,\n    listener: ((evt: E) => void) | EventListenerObject<E> | null,\n    options?: EventListenerOptions | boolean\n  ): void;\n}\n\nexport interface EventListenerOptions {\n  capture?: boolean;\n  passive?: boolean;\n  once?: boolean;\n}\n\nexport interface AddEventListenerOptions extends EventListenerOptions {\n  once?: boolean;\n  passive?: boolean;\n}\n\nexport function fromEvent<T>(target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>, eventName: string): Observable<T>;\nexport function fromEvent<T, R>(\n  target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>,\n  eventName: string,\n  resultSelector: (event: T) => R\n): Observable<R>;\nexport function fromEvent<T>(\n  target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>,\n  eventName: string,\n  options: EventListenerOptions\n): Observable<T>;\nexport function fromEvent<T, R>(\n  target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>,\n  eventName: string,\n  options: EventListenerOptions,\n  resultSelector: (event: T) => R\n): Observable<R>;\n\nexport function fromEvent(target: NodeStyleEventEmitter | ArrayLike<NodeStyleEventEmitter>, eventName: string): Observable<unknown>;\n/** @deprecated Do not specify explicit type parameters. Signatures with type parameters that cannot be inferred will be removed in v8. */\nexport function fromEvent<T>(target: NodeStyleEventEmitter | ArrayLike<NodeStyleEventEmitter>, eventName: string): Observable<T>;\nexport function fromEvent<R>(\n  target: NodeStyleEventEmitter | ArrayLike<NodeStyleEventEmitter>,\n  eventName: string,\n  resultSelector: (...args: any[]) => R\n): Observable<R>;\n\nexport function fromEvent(\n  target: NodeCompatibleEventEmitter | ArrayLike<NodeCompatibleEventEmitter>,\n  eventName: string\n): Observable<unknown>;\n/** @deprecated Do not specify explicit type parameters. Signatures with type parameters that cannot be inferred will be removed in v8. 
*/\nexport function fromEvent<T>(target: NodeCompatibleEventEmitter | ArrayLike<NodeCompatibleEventEmitter>, eventName: string): Observable<T>;\nexport function fromEvent<R>(\n  target: NodeCompatibleEventEmitter | ArrayLike<NodeCompatibleEventEmitter>,\n  eventName: string,\n  resultSelector: (...args: any[]) => R\n): Observable<R>;\n\nexport function fromEvent<T>(\n  target: JQueryStyleEventEmitter<any, T> | ArrayLike<JQueryStyleEventEmitter<any, T>>,\n  eventName: string\n): Observable<T>;\nexport function fromEvent<T, R>(\n  target: JQueryStyleEventEmitter<any, T> | ArrayLike<JQueryStyleEventEmitter<any, T>>,\n  eventName: string,\n  resultSelector: (value: T, ...args: any[]) => R\n): Observable<R>;\n\n/**\n * Creates an Observable that emits events of a specific type coming from the\n * given event target.\n *\n * <span class=\"informal\">Creates an Observable from DOM events, or Node.js\n * EventEmitter events or others.</span>\n *\n * ![](fromEvent.png)\n *\n * `fromEvent` accepts as a first argument event target, which is an object with methods\n * for registering event handler functions. As a second argument it takes string that indicates\n * type of event we want to listen for. `fromEvent` supports selected types of event targets,\n * which are described in detail below. If your event target does not match any of the ones listed,\n * you should use {@link fromEventPattern}, which can be used on arbitrary APIs.\n * When it comes to APIs supported by `fromEvent`, their methods for adding and removing event\n * handler functions have different names, but they all accept a string describing event type\n * and function itself, which will be called whenever said event happens.\n *\n * Every time resulting Observable is subscribed, event handler function will be registered\n * to event target on given event type. When that event fires, value\n * passed as a first argument to registered function will be emitted by output Observable.\n * When Observable is unsubscribed, function will be unregistered from event target.\n *\n * Note that if event target calls registered function with more than one argument, second\n * and following arguments will not appear in resulting stream. In order to get access to them,\n * you can pass to `fromEvent` optional project function, which will be called with all arguments\n * passed to event handler. Output Observable will then emit value returned by project function,\n * instead of the usual value.\n *\n * Remember that event targets listed below are checked via duck typing. It means that\n * no matter what kind of object you have and no matter what environment you work in,\n * you can safely use `fromEvent` on that object if it exposes described methods (provided\n * of course they behave as was described above). 
So for example if Node.js library exposes\n * event target which has the same method names as DOM EventTarget, `fromEvent` is still\n * a good choice.\n *\n * If the API you use is more callback then event handler oriented (subscribed\n * callback function fires only once and thus there is no need to manually\n * unregister it), you should use {@link bindCallback} or {@link bindNodeCallback}\n * instead.\n *\n * `fromEvent` supports following types of event targets:\n *\n * **DOM EventTarget**\n *\n * This is an object with `addEventListener` and `removeEventListener` methods.\n *\n * In the browser, `addEventListener` accepts - apart from event type string and event\n * handler function arguments - optional third parameter, which is either an object or boolean,\n * both used for additional configuration how and when passed function will be called. When\n * `fromEvent` is used with event target of that type, you can provide this values\n * as third parameter as well.\n *\n * **Node.js EventEmitter**\n *\n * An object with `addListener` and `removeListener` methods.\n *\n * **JQuery-style event target**\n *\n * An object with `on` and `off` methods\n *\n * **DOM NodeList**\n *\n * List of DOM Nodes, returned for example by `document.querySelectorAll` or `Node.childNodes`.\n *\n * Although this collection is not event target in itself, `fromEvent` will iterate over all Nodes\n * it contains and install event handler function in every of them. When returned Observable\n * is unsubscribed, function will be removed from all Nodes.\n *\n * **DOM HtmlCollection**\n *\n * Just as in case of NodeList it is a collection of DOM nodes. Here as well event handler function is\n * installed and removed in each of elements.\n *\n *\n * ## Examples\n *\n * Emit clicks happening on the DOM document\n *\n * ```ts\n * import { fromEvent } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * clicks.subscribe(x => console.log(x));\n *\n * // Results in:\n * // MouseEvent object logged to console every time a click\n * // occurs on the document.\n * ```\n *\n * Use `addEventListener` with capture option\n *\n * ```ts\n * import { fromEvent } from 'rxjs';\n *\n * const div = document.createElement('div');\n * div.style.cssText = 'width: 200px; height: 200px; background: #09c;';\n * document.body.appendChild(div);\n *\n * // note optional configuration parameter which will be passed to addEventListener\n * const clicksInDocument = fromEvent(document, 'click', { capture: true });\n * const clicksInDiv = fromEvent(div, 'click');\n *\n * clicksInDocument.subscribe(() => console.log('document'));\n * clicksInDiv.subscribe(() => console.log('div'));\n *\n * // By default events bubble UP in DOM tree, so normally\n * // when we would click on div in document\n * // \"div\" would be logged first and then \"document\".\n * // Since we specified optional `capture` option, document\n * // will catch event when it goes DOWN DOM tree, so console\n * // will log \"document\" and then \"div\".\n * ```\n *\n * @see {@link bindCallback}\n * @see {@link bindNodeCallback}\n * @see {@link fromEventPattern}\n *\n * @param {FromEventTarget<T>} target The DOM EventTarget, Node.js\n * EventEmitter, JQuery-like event target, NodeList or HTMLCollection to attach the event handler to.\n * @param {string} eventName The event name of interest, being emitted by the\n * `target`.\n * @param {EventListenerOptions} [options] Options to pass through to addEventListener\n * @return {Observable<T>}\n */\nexport function 
fromEvent<T>(\n  target: any,\n  eventName: string,\n  options?: EventListenerOptions | ((...args: any[]) => T),\n  resultSelector?: (...args: any[]) => T\n): Observable<T> {\n  if (isFunction(options)) {\n    resultSelector = options;\n    options = undefined;\n  }\n  if (resultSelector) {\n    return fromEvent<T>(target, eventName, options as EventListenerOptions).pipe(mapOneOrManyArgs(resultSelector));\n  }\n\n  // Figure out our add and remove methods. In order to do this,\n  // we are going to analyze the target in a preferred order, if\n  // the target matches a given signature, we take the two \"add\" and \"remove\"\n  // method names and apply them to a map to create opposite versions of the\n  // same function. This is because they all operate in duplicate pairs,\n  // `addListener(name, handler)`, `removeListener(name, handler)`, for example.\n  // The call only differs by method name, as to whether or not you're adding or removing.\n  const [add, remove] =\n    // If it is an EventTarget, we need to use a slightly different method than the other two patterns.\n    isEventTarget(target)\n      ? eventTargetMethods.map((methodName) => (handler: any) => target[methodName](eventName, handler, options as EventListenerOptions))\n      : // In all other cases, the call pattern is identical with the exception of the method names.\n      isNodeStyleEventEmitter(target)\n      ? nodeEventEmitterMethods.map(toCommonHandlerRegistry(target, eventName))\n      : isJQueryStyleEventEmitter(target)\n      ? jqueryMethods.map(toCommonHandlerRegistry(target, eventName))\n      : [];\n\n  // If add is falsy, it's because we didn't match a pattern above.\n  // Check to see if it is an ArrayLike, because if it is, we want to\n  // try to apply fromEvent to all of it's items. We do this check last,\n  // because there are may be some types that are both ArrayLike *and* implement\n  // event registry points, and we'd rather delegate to that when possible.\n  if (!add) {\n    if (isArrayLike(target)) {\n      return mergeMap((subTarget: any) => fromEvent(subTarget, eventName, options as EventListenerOptions))(\n        innerFrom(target)\n      ) as Observable<T>;\n    }\n  }\n\n  // If add is falsy and we made it here, it's because we didn't\n  // match any valid target objects above.\n  if (!add) {\n    throw new TypeError('Invalid event target');\n  }\n\n  return new Observable<T>((subscriber) => {\n    // The handler we are going to register. Forwards the event object, by itself, or\n    // an array of arguments to the event handler, if there is more than one argument,\n    // to the consumer.\n    const handler = (...args: any[]) => subscriber.next(1 < args.length ? args : args[0]);\n    // Do the work of adding the handler to the target.\n    add(handler);\n    // When we finalize, we want to remove the handler and free up memory.\n    return () => remove!(handler);\n  });\n}\n\n/**\n * Used to create `add` and `remove` functions to register and unregister event handlers\n * from a target in the most common handler pattern, where there are only two arguments.\n * (e.g.  
`on(name, fn)`, `off(name, fn)`, `addListener(name, fn)`, or `removeListener(name, fn)`)\n * @param target The target we're calling methods on\n * @param eventName The event name for the event we're creating register or unregister functions for\n */\nfunction toCommonHandlerRegistry(target: any, eventName: string) {\n  return (methodName: string) => (handler: any) => target[methodName](eventName, handler);\n}\n\n/**\n * Checks to see if the target implements the required node-style EventEmitter methods\n * for adding and removing event handlers.\n * @param target the object to check\n */\nfunction isNodeStyleEventEmitter(target: any): target is NodeStyleEventEmitter {\n  return isFunction(target.addListener) && isFunction(target.removeListener);\n}\n\n/**\n * Checks to see if the target implements the required jQuery-style EventEmitter methods\n * for adding and removing event handlers.\n * @param target the object to check\n */\nfunction isJQueryStyleEventEmitter(target: any): target is JQueryStyleEventEmitter<any, any> {\n  return isFunction(target.on) && isFunction(target.off);\n}\n\n/**\n * Checks to see if the target implements the required EventTarget methods\n * for adding and removing event handlers.\n * @param target the object to check\n */\nfunction isEventTarget(target: any): target is HasEventTargetAddRemove<any> {\n  return isFunction(target.addEventListener) && isFunction(target.removeEventListener);\n}\n", "import { Observable } from '../Observable';\nimport { isFunction } from '../util/isFunction';\nimport { NodeEventHandler } from './fromEvent';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\n\n/* tslint:disable:max-line-length */\nexport function fromEventPattern<T>(\n  addHandler: (handler: NodeEventHandler) => any,\n  removeHandler?: (handler: NodeEventHandler, signal?: any) => void\n): Observable<T>;\nexport function fromEventPattern<T>(\n  addHandler: (handler: NodeEventHandler) => any,\n  removeHandler?: (handler: NodeEventHandler, signal?: any) => void,\n  resultSelector?: (...args: any[]) => T\n): Observable<T>;\n/* tslint:enable:max-line-length */\n\n/**\n * Creates an Observable from an arbitrary API for registering event handlers.\n *\n * <span class=\"informal\">When that method for adding event handler was something {@link fromEvent}\n * was not prepared for.</span>\n *\n * ![](fromEventPattern.png)\n *\n * `fromEventPattern` allows you to convert into an Observable any API that supports registering handler functions\n * for events. It is similar to {@link fromEvent}, but far\n * more flexible. In fact, all use cases of {@link fromEvent} could be easily handled by\n * `fromEventPattern` (although in slightly more verbose way).\n *\n * This operator accepts as a first argument an `addHandler` function, which will be injected with\n * handler parameter. That handler is actually an event handler function that you now can pass\n * to API expecting it. `addHandler` will be called whenever Observable\n * returned by the operator is subscribed, so registering handler in API will not\n * necessarily happen when `fromEventPattern` is called.\n *\n * After registration, every time an event that we listen to happens,\n * Observable returned by `fromEventPattern` will emit value that event handler\n * function was called with. 
Note that if event handler was called with more\n * than one argument, second and following arguments will not appear in the Observable.\n *\n * If API you are using allows to unregister event handlers as well, you can pass to `fromEventPattern`\n * another function - `removeHandler` - as a second parameter. It will be injected\n * with the same handler function as before, which now you can use to unregister\n * it from the API. `removeHandler` will be called when consumer of resulting Observable\n * unsubscribes from it.\n *\n * In some APIs unregistering is actually handled differently. Method registering an event handler\n * returns some kind of token, which is later used to identify which function should\n * be unregistered or it itself has method that unregisters event handler.\n * If that is the case with your API, make sure token returned\n * by registering method is returned by `addHandler`. Then it will be passed\n * as a second argument to `removeHandler`, where you will be able to use it.\n *\n * If you need access to all event handler parameters (not only the first one),\n * or you need to transform them in any way, you can call `fromEventPattern` with optional\n * third parameter - project function which will accept all arguments passed to\n * event handler when it is called. Whatever is returned from project function will appear on\n * resulting stream instead of usual event handlers first argument. This means\n * that default project can be thought of as function that takes its first parameter\n * and ignores the rest.\n *\n * ## Examples\n *\n * Emits clicks happening on the DOM document\n *\n * ```ts\n * import { fromEventPattern } from 'rxjs';\n *\n * function addClickHandler(handler) {\n *   document.addEventListener('click', handler);\n * }\n *\n * function removeClickHandler(handler) {\n *   document.removeEventListener('click', handler);\n * }\n *\n * const clicks = fromEventPattern(\n *   addClickHandler,\n *   removeClickHandler\n * );\n * clicks.subscribe(x => console.log(x));\n *\n * // Whenever you click anywhere in the browser, DOM MouseEvent\n * // object will be logged.\n * ```\n *\n * Use with API that returns cancellation token\n *\n * ```ts\n * import { fromEventPattern } from 'rxjs';\n *\n * const token = someAPI.registerEventHandler(function() {});\n * someAPI.unregisterEventHandler(token); // this APIs cancellation method accepts\n *                                        // not handler itself, but special token.\n *\n * const someAPIObservable = fromEventPattern(\n *   function(handler) { return someAPI.registerEventHandler(handler); }, // Note that we return the token here...\n *   function(handler, token) { someAPI.unregisterEventHandler(token); }  // ...to then use it here.\n * );\n * ```\n *\n * Use with project function\n *\n * ```ts\n * import { fromEventPattern } from 'rxjs';\n *\n * someAPI.registerEventHandler((eventType, eventMessage) => {\n *   console.log(eventType, eventMessage); // Logs 'EVENT_TYPE' 'EVENT_MESSAGE' to console.\n * });\n *\n * const someAPIObservable = fromEventPattern(\n *   handler => someAPI.registerEventHandler(handler),\n *   handler => someAPI.unregisterEventHandler(handler)\n *   (eventType, eventMessage) => eventType + ' --- ' + eventMessage // without that function only 'EVENT_TYPE'\n * );                                                                // would be emitted by the Observable\n *\n * someAPIObservable.subscribe(value => console.log(value));\n *\n * // Logs:\n * // 'EVENT_TYPE --- EVENT_MESSAGE'\n * 
```\n *\n * @see {@link fromEvent}\n * @see {@link bindCallback}\n * @see {@link bindNodeCallback}\n *\n * @param {function(handler: Function): any} addHandler A function that takes\n * a `handler` function as argument and attaches it somehow to the actual\n * source of events.\n * @param {function(handler: Function, token?: any): void} [removeHandler] A function that\n * takes a `handler` function as an argument and removes it from the event source. If `addHandler`\n * returns some kind of token, `removeHandler` function will have it as a second parameter.\n * @param {function(...args: any): T} [project] A function to\n * transform results. It takes the arguments from the event handler and\n * should return a single value.\n * @return {Observable<T>} Observable which, when an event happens, emits first parameter\n * passed to registered event handler. Alternatively it emits whatever project function returns\n * at that moment.\n */\nexport function fromEventPattern<T>(\n  addHandler: (handler: NodeEventHandler) => any,\n  removeHandler?: (handler: NodeEventHandler, signal?: any) => void,\n  resultSelector?: (...args: any[]) => T\n): Observable<T | T[]> {\n  if (resultSelector) {\n    return fromEventPattern<T>(addHandler, removeHandler).pipe(mapOneOrManyArgs(resultSelector));\n  }\n\n  return new Observable<T | T[]>((subscriber) => {\n    const handler = (...e: T[]) => subscriber.next(e.length === 1 ? e[0] : e);\n    const retValue = addHandler(handler);\n    return isFunction(removeHandler) ? () => removeHandler(handler, retValue) : undefined;\n  });\n}\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\nimport { async as asyncScheduler } from '../scheduler/async';\nimport { isScheduler } from '../util/isScheduler';\nimport { isValidDate } from '../util/isDate';\n\n/**\n * Creates an observable that will wait for a specified time period, or exact date, before\n * emitting the number 0.\n *\n * <span class=\"informal\">Used to emit a notification after a delay.</span>\n *\n * This observable is useful for creating delays in code, or racing against other values\n * for ad-hoc timeouts.\n *\n * The `delay` is specified by default in milliseconds, however providing a custom scheduler could\n * create a different behavior.\n *\n * ## Examples\n *\n * Wait 3 seconds and start another observable\n *\n * You might want to use `timer` to delay subscription to an\n * observable by a set amount of time. Here we use a timer with\n * {@link concatMapTo} or {@link concatMap} in order to wait\n * a few seconds and start a subscription to a source.\n *\n * ```ts\n * import { of, timer, concatMap } from 'rxjs';\n *\n * // This could be any observable\n * const source = of(1, 2, 3);\n *\n * timer(3000)\n *   .pipe(concatMap(() => source))\n *   .subscribe(console.log);\n * ```\n *\n * Take all values until the start of the next minute\n *\n * Using a `Date` as the trigger for the first emission, you can\n * do things like wait until midnight to fire an event, or in this case,\n * wait until a new minute starts (chosen so the example wouldn't take\n * too long to run) in order to stop watching a stream. 
Leveraging\n * {@link takeUntil}.\n *\n * ```ts\n * import { interval, takeUntil, timer } from 'rxjs';\n *\n * // Build a Date object that marks the\n * // next minute.\n * const currentDate = new Date();\n * const startOfNextMinute = new Date(\n *   currentDate.getFullYear(),\n *   currentDate.getMonth(),\n *   currentDate.getDate(),\n *   currentDate.getHours(),\n *   currentDate.getMinutes() + 1\n * );\n *\n * // This could be any observable stream\n * const source = interval(1000);\n *\n * const result = source.pipe(\n *   takeUntil(timer(startOfNextMinute))\n * );\n *\n * result.subscribe(console.log);\n * ```\n *\n * ### Known Limitations\n *\n * - The {@link asyncScheduler} uses `setTimeout` which has limitations for how far in the future it can be scheduled.\n *\n * - If a `scheduler` is provided that returns a timestamp other than an epoch from `now()`, and\n * a `Date` object is passed to the `dueTime` argument, the calculation for when the first emission\n * should occur will be incorrect. In this case, it would be best to do your own calculations\n * ahead of time, and pass a `number` in as the `dueTime`.\n *\n * @param due If a `number`, the amount of time in milliseconds to wait before emitting.\n * If a `Date`, the exact time at which to emit.\n * @param scheduler The scheduler to use to schedule the delay. Defaults to {@link asyncScheduler}.\n */\nexport function timer(due: number | Date, scheduler?: SchedulerLike): Observable<0>;\n\n/**\n * Creates an observable that starts an interval after a specified delay, emitting incrementing numbers -- starting at `0` --\n * on each interval after words.\n *\n * The `delay` and `intervalDuration` are specified by default in milliseconds, however providing a custom scheduler could\n * create a different behavior.\n *\n * ## Example\n *\n * ### Start an interval that starts right away\n *\n * Since {@link interval} waits for the passed delay before starting,\n * sometimes that's not ideal. You may want to start an interval immediately.\n * `timer` works well for this. Here we have both side-by-side so you can\n * see them in comparison.\n *\n * Note that this observable will never complete.\n *\n * ```ts\n * import { timer, interval } from 'rxjs';\n *\n * timer(0, 1000).subscribe(n => console.log('timer', n));\n * interval(1000).subscribe(n => console.log('interval', n));\n * ```\n *\n * ### Known Limitations\n *\n * - The {@link asyncScheduler} uses `setTimeout` which has limitations for how far in the future it can be scheduled.\n *\n * - If a `scheduler` is provided that returns a timestamp other than an epoch from `now()`, and\n * a `Date` object is passed to the `dueTime` argument, the calculation for when the first emission\n * should occur will be incorrect. In this case, it would be best to do your own calculations\n * ahead of time, and pass a `number` in as the `startDue`.\n * @param startDue If a `number`, is the time to wait before starting the interval.\n * If a `Date`, is the exact time at which to start the interval.\n * @param intervalDuration The delay between each value emitted in the interval. Passing a\n * negative number here will result in immediate completion after the first value is emitted, as though\n * no `intervalDuration` was passed at all.\n * @param scheduler The scheduler to use to schedule the delay. 
Defaults to {@link asyncScheduler}.\n */\nexport function timer(startDue: number | Date, intervalDuration: number, scheduler?: SchedulerLike): Observable<number>;\n\n/**\n * @deprecated The signature allowing `undefined` to be passed for `intervalDuration` will be removed in v8. Use the `timer(dueTime, scheduler?)` signature instead.\n */\nexport function timer(dueTime: number | Date, unused: undefined, scheduler?: SchedulerLike): Observable<0>;\n\nexport function timer(\n  dueTime: number | Date = 0,\n  intervalOrScheduler?: number | SchedulerLike,\n  scheduler: SchedulerLike = asyncScheduler\n): Observable<number> {\n  // Since negative intervalDuration is treated as though no\n  // interval was specified at all, we start with a negative number.\n  let intervalDuration = -1;\n\n  if (intervalOrScheduler != null) {\n    // If we have a second argument, and it's a scheduler,\n    // override the scheduler we had defaulted. Otherwise,\n    // it must be an interval.\n    if (isScheduler(intervalOrScheduler)) {\n      scheduler = intervalOrScheduler;\n    } else {\n      // Note that this *could* be negative, in which case\n      // it's like not passing an intervalDuration at all.\n      intervalDuration = intervalOrScheduler;\n    }\n  }\n\n  return new Observable((subscriber) => {\n    // If a valid date is passed, calculate how long to wait before\n    // executing the first value... otherwise, if it's a number just schedule\n    // that many milliseconds (or scheduler-specified unit size) in the future.\n    let due = isValidDate(dueTime) ? +dueTime - scheduler!.now() : dueTime;\n\n    if (due < 0) {\n      // Ensure we don't schedule in the future.\n      due = 0;\n    }\n\n    // The incrementing value we emit.\n    let n = 0;\n\n    // Start the timer.\n    return scheduler.schedule(function () {\n      if (!subscriber.closed) {\n        // Emit the next value and increment.\n        subscriber.next(n++);\n\n        if (0 <= intervalDuration) {\n          // If we have a interval after the initial timer,\n          // reschedule with the period.\n          this.schedule(undefined, intervalDuration);\n        } else {\n          // We didn't have an interval. So just complete.\n          subscriber.complete();\n        }\n      }\n    }, due);\n  });\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInput, ObservableInputTuple, SchedulerLike } from '../types';\nimport { mergeAll } from '../operators/mergeAll';\nimport { innerFrom } from './innerFrom';\nimport { EMPTY } from './empty';\nimport { popNumber, popScheduler } from '../util/args';\nimport { from } from './from';\n\nexport function merge<A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): Observable<A[number]>;\nexport function merge<A extends readonly unknown[]>(...sourcesAndConcurrency: [...ObservableInputTuple<A>, number?]): Observable<A[number]>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `mergeAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function merge<A extends readonly unknown[]>(\n  ...sourcesAndScheduler: [...ObservableInputTuple<A>, SchedulerLike?]\n): Observable<A[number]>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `mergeAll`. 
Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function merge<A extends readonly unknown[]>(\n  ...sourcesAndConcurrencyAndScheduler: [...ObservableInputTuple<A>, number?, SchedulerLike?]\n): Observable<A[number]>;\n\n/**\n * Creates an output Observable which concurrently emits all values from every\n * given input Observable.\n *\n * <span class=\"informal\">Flattens multiple Observables together by blending\n * their values into one Observable.</span>\n *\n * ![](merge.png)\n *\n * `merge` subscribes to each given input Observable (as arguments), and simply\n * forwards (without doing any transformation) all the values from all the input\n * Observables to the output Observable. The output Observable only completes\n * once all input Observables have completed. Any error delivered by an input\n * Observable will be immediately emitted on the output Observable.\n *\n * ## Examples\n *\n * Merge together two Observables: 1s interval and clicks\n *\n * ```ts\n * import { merge, fromEvent, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const timer = interval(1000);\n * const clicksOrTimer = merge(clicks, timer);\n * clicksOrTimer.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // timer will emit ascending values, one every second(1000ms) to console\n * // clicks logs MouseEvents to console every time the \"document\" is clicked\n * // Since the two streams are merged you see these happening\n * // as they occur.\n * ```\n *\n * Merge together 3 Observables, but run only 2 concurrently\n *\n * ```ts\n * import { interval, take, merge } from 'rxjs';\n *\n * const timer1 = interval(1000).pipe(take(10));\n * const timer2 = interval(2000).pipe(take(6));\n * const timer3 = interval(500).pipe(take(10));\n *\n * const concurrent = 2; // the argument\n * const merged = merge(timer1, timer2, timer3, concurrent);\n * merged.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // - First timer1 and timer2 will run concurrently\n * // - timer1 will emit a value every 1000ms for 10 iterations\n * // - timer2 will emit a value every 2000ms for 6 iterations\n * // - after timer1 hits its max iteration, timer2 will\n * //   continue, and timer3 will start to run concurrently with timer2\n * // - when timer2 hits its max iteration it terminates, and\n * //   timer3 will continue to emit a value every 500ms until it is complete\n * ```\n *\n * @see {@link mergeAll}\n * @see {@link mergeMap}\n * @see {@link mergeMapTo}\n * @see {@link mergeScan}\n *\n * @param {...ObservableInput} observables Input Observables to merge together.\n * @param {number} [concurrent=Infinity] Maximum number of input\n * Observables being subscribed to concurrently.\n * @param {SchedulerLike} [scheduler=null] The {@link SchedulerLike} to use for managing\n * concurrency of input Observables.\n * @return {Observable} an Observable that emits items that are the result of\n * every input Observable.\n */\nexport function merge(...args: (ObservableInput<unknown> | number | SchedulerLike)[]): Observable<unknown> {\n  const scheduler = popScheduler(args);\n  const concurrent = popNumber(args, Infinity);\n  const sources = args as ObservableInput<unknown>[];\n  return !sources.length\n    ? // No source provided\n      EMPTY\n    : sources.length === 1\n    ? // One source? 
Just return it.\n      innerFrom(sources[0])\n    : // Merge all sources\n      mergeAll(concurrent)(from(sources, scheduler));\n}\n", "import { Observable } from '../Observable';\nimport { noop } from '../util/noop';\n\n/**\n * An Observable that emits no items to the Observer and never completes.\n *\n * ![](never.png)\n *\n * A simple Observable that emits neither values nor errors nor the completion\n * notification. It can be used for testing purposes or for composing with other\n * Observables. Please note that by never emitting a complete notification, this\n * Observable keeps the subscription from being disposed automatically.\n * Subscriptions need to be manually disposed.\n *\n * ##  Example\n *\n * Emit the number 7, then never emit anything else (not even complete)\n *\n * ```ts\n * import { NEVER, startWith } from 'rxjs';\n *\n * const info = () => console.log('Will not be called');\n *\n * const result = NEVER.pipe(startWith(7));\n * result.subscribe({\n *   next: x => console.log(x),\n *   error: info,\n *   complete: info\n * });\n * ```\n *\n * @see {@link Observable}\n * @see {@link EMPTY}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const NEVER = new Observable<never>(noop);\n\n/**\n * @deprecated Replaced with the {@link NEVER} constant. Will be removed in v8.\n */\nexport function never() {\n  return NEVER;\n}\n", "const { isArray } = Array;\n\n/**\n * Used in operators and functions that accept either a list of arguments, or an array of arguments\n * as a single argument.\n */\nexport function argsOrArgArray<T>(args: (T | T[])[]): T[] {\n  return args.length === 1 && isArray(args[0]) ? args[0] : (args as T[]);\n}\n", "import { OperatorFunction, MonoTypeOperatorFunction, TruthyTypesOf } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/** @deprecated Use a closure instead of a `thisArg`. Signatures accepting a `thisArg` will be removed in v8. */\nexport function filter<T, S extends T, A>(predicate: (this: A, value: T, index: number) => value is S, thisArg: A): OperatorFunction<T, S>;\nexport function filter<T, S extends T>(predicate: (value: T, index: number) => value is S): OperatorFunction<T, S>;\nexport function filter<T>(predicate: BooleanConstructor): OperatorFunction<T, TruthyTypesOf<T>>;\n/** @deprecated Use a closure instead of a `thisArg`. Signatures accepting a `thisArg` will be removed in v8. 
*/\nexport function filter<T, A>(predicate: (this: A, value: T, index: number) => boolean, thisArg: A): MonoTypeOperatorFunction<T>;\nexport function filter<T>(predicate: (value: T, index: number) => boolean): MonoTypeOperatorFunction<T>;\n\n/**\n * Filter items emitted by the source Observable by only emitting those that\n * satisfy a specified predicate.\n *\n * <span class=\"informal\">Like\n * [Array.prototype.filter()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/filter),\n * it only emits a value from the source if it passes a criterion function.</span>\n *\n * ![](filter.png)\n *\n * Similar to the well-known `Array.prototype.filter` method, this operator\n * takes values from the source Observable, passes them through a `predicate`\n * function and only emits those values that yielded `true`.\n *\n * ## Example\n *\n * Emit only click events whose target was a DIV element\n *\n * ```ts\n * import { fromEvent, filter } from 'rxjs';\n *\n * const div = document.createElement('div');\n * div.style.cssText = 'width: 200px; height: 200px; background: #09c;';\n * document.body.appendChild(div);\n *\n * const clicks = fromEvent(document, 'click');\n * const clicksOnDivs = clicks.pipe(filter(ev => (<HTMLElement>ev.target).tagName === 'DIV'));\n * clicksOnDivs.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link distinct}\n * @see {@link distinctUntilChanged}\n * @see {@link distinctUntilKeyChanged}\n * @see {@link ignoreElements}\n * @see {@link partition}\n * @see {@link skip}\n *\n * @param predicate A function that\n * evaluates each value emitted by the source Observable. If it returns `true`,\n * the value is emitted, if `false` the value is not passed to the output\n * Observable. The `index` parameter is the number `i` for the i-th source\n * emission that has happened since the subscription, starting from the number\n * `0`.\n * @param thisArg An optional argument to determine the value of `this`\n * in the `predicate` function.\n * @return A function that returns an Observable that emits items from the\n * source Observable that satisfy the specified `predicate`.\n */\nexport function filter<T>(predicate: (value: T, index: number) => boolean, thisArg?: any): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    // An index passed to our predicate function on each call.\n    let index = 0;\n\n    // Subscribe to the source, all errors and completions are\n    // forwarded to the consumer.\n    source.subscribe(\n      // Call the predicate with the appropriate `this` context,\n      // if the predicate returns `true`, then send the value\n      // to the consumer.\n      createOperatorSubscriber(subscriber, (value) => predicate.call(thisArg, value, index++) && subscriber.next(value))\n    );\n  });\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInputTuple } from '../types';\nimport { innerFrom } from './innerFrom';\nimport { argsOrArgArray } from '../util/argsOrArgArray';\nimport { EMPTY } from './empty';\nimport { createOperatorSubscriber } from '../operators/OperatorSubscriber';\nimport { popResultSelector } from '../util/args';\n\nexport function zip<A extends readonly unknown[]>(sources: [...ObservableInputTuple<A>]): Observable<A>;\nexport function zip<A extends readonly unknown[], R>(\n  sources: [...ObservableInputTuple<A>],\n  resultSelector: (...values: A) => R\n): Observable<R>;\nexport function zip<A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): 
Observable<A>;\nexport function zip<A extends readonly unknown[], R>(\n  ...sourcesAndResultSelector: [...ObservableInputTuple<A>, (...values: A) => R]\n): Observable<R>;\n\n/**\n * Combines multiple Observables to create an Observable whose values are calculated from the values, in order, of each\n * of its input Observables.\n *\n * If the last parameter is a function, this function is used to compute the created value from the input values.\n * Otherwise, an array of the input values is returned.\n *\n * ## Example\n *\n * Combine age and name from different sources\n *\n * ```ts\n * import { of, zip, map } from 'rxjs';\n *\n * const age$ = of(27, 25, 29);\n * const name$ = of('Foo', 'Bar', 'Beer');\n * const isDev$ = of(true, true, false);\n *\n * zip(age$, name$, isDev$).pipe(\n *   map(([age, name, isDev]) => ({ age, name, isDev }))\n * )\n * .subscribe(x => console.log(x));\n *\n * // Outputs\n * // { age: 27, name: 'Foo', isDev: true }\n * // { age: 25, name: 'Bar', isDev: true }\n * // { age: 29, name: 'Beer', isDev: false }\n * ```\n *\n * @param sources\n * @return {Observable<R>}\n */\nexport function zip(...args: unknown[]): Observable<unknown> {\n  const resultSelector = popResultSelector(args);\n\n  const sources = argsOrArgArray(args) as Observable<unknown>[];\n\n  return sources.length\n    ? new Observable<unknown[]>((subscriber) => {\n        // A collection of buffers of values from each source.\n        // Keyed by the same index with which the sources were passed in.\n        let buffers: unknown[][] = sources.map(() => []);\n\n        // An array of flags of whether or not the sources have completed.\n        // This is used to check to see if we should complete the result.\n        // Keyed by the same index with which the sources were passed in.\n        let completed = sources.map(() => false);\n\n        // When everything is done, release the arrays above.\n        subscriber.add(() => {\n          buffers = completed = null!;\n        });\n\n        // Loop over our sources and subscribe to each one. The index `i` is\n        // especially important here, because we use it in closures below to\n        // access the related buffers and completion properties\n        for (let sourceIndex = 0; !subscriber.closed && sourceIndex < sources.length; sourceIndex++) {\n          innerFrom(sources[sourceIndex]).subscribe(\n            createOperatorSubscriber(\n              subscriber,\n              (value) => {\n                buffers[sourceIndex].push(value);\n                // if every buffer has at least one value in it, then we\n                // can shift out the oldest value from each buffer and emit\n                // them as an array.\n                if (buffers.every((buffer) => buffer.length)) {\n                  const result: any = buffers.map((buffer) => buffer.shift()!);\n                  // Emit the array. If theres' a result selector, use that.\n                  subscriber.next(resultSelector ? resultSelector(...result) : result);\n                  // If any one of the sources is both complete and has an empty buffer\n                  // then we complete the result. This is because we cannot possibly have\n                  // any more values to zip together.\n                  if (buffers.some((buffer, i) => !buffer.length && completed[i])) {\n                    subscriber.complete();\n                  }\n                }\n              },\n              () => {\n                // This source completed. 
Since we only want to take the\n        // last N values, we can't emit until we're sure we're not getting\n        // any more values.\n        let buffer: T[] = [];\n        source.subscribe(\n          createOperatorSubscriber(\n            subscriber,\n            (value) => {\n              // Add the most recent value onto the end of our buffer.\n              buffer.push(value);\n              // If our buffer is now larger than the number of values we\n              // want to take, we remove the oldest value from the buffer.\n              count < buffer.length && buffer.shift();\n            },\n            () => {\n              // The source completed, we now know what are last values\n              // are, emit them in the order they were received.\n              for (const value of buffer) {\n                subscriber.next(value);\n              }\n              subscriber.complete();\n            },\n            // Errors are passed through to the consumer\n            undefined,\n            () => {\n              // During finalization release the values in our buffer.\n              buffer = null!;\n            }\n          )\n        );\n      });\n}\n", "import { ObservableInput, ObservableInputTuple, OperatorFunction, SchedulerLike } from '../types';\nimport { operate } from '../util/lift';\nimport { argsOrArgArray } from '../util/argsOrArgArray';\nimport { mergeAll } from './mergeAll';\nimport { popNumber, popScheduler } from '../util/args';\nimport { from } from '../observable/from';\n\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): OperatorFunction<T, T | A[number]>;\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(\n  ...sourcesAndConcurrency: [...ObservableInputTuple<A>, number]\n): OperatorFunction<T, T | A[number]>;\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(\n  ...sourcesAndScheduler: [...ObservableInputTuple<A>, SchedulerLike]\n): OperatorFunction<T, T | A[number]>;\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(\n  ...sourcesAndConcurrencyAndScheduler: [...ObservableInputTuple<A>, number, SchedulerLike]\n): OperatorFunction<T, T | A[number]>;\n\nexport function merge<T>(...args: unknown[]): OperatorFunction<T, unknown> {\n  const scheduler = popScheduler(args);\n  const concurrent = popNumber(args, Infinity);\n  args = argsOrArgArray(args);\n\n  return operate((source, subscriber) => {\n    mergeAll(concurrent)(from([source, ...(args as ObservableInput<T>[])], scheduler)).subscribe(subscriber);\n  });\n}\n", "import { ObservableInputTuple, OperatorFunction } from '../types';\nimport { merge } from './merge';\n\n/**\n * Merge the values from all observables to a single observable result.\n *\n * Creates an observable, that when subscribed to, subscribes to the source\n * observable, and all other sources provided as arguments. 
All values from\n * every source are emitted from the resulting subscription.\n *\n * When all sources complete, the resulting observable will complete.\n *\n * When any source errors, the resulting observable will error.\n *\n * ## Example\n *\n * Joining all outputs from multiple user input event streams\n *\n * ```ts\n * import { fromEvent, map, mergeWith } from 'rxjs';\n *\n * const clicks$ = fromEvent(document, 'click').pipe(map(() => 'click'));\n * const mousemoves$ = fromEvent(document, 'mousemove').pipe(map(() => 'mousemove'));\n * const dblclicks$ = fromEvent(document, 'dblclick').pipe(map(() => 'dblclick'));\n *\n * mousemoves$\n *   .pipe(mergeWith(clicks$, dblclicks$))\n *   .subscribe(x => console.log(x));\n *\n * // result (assuming user interactions)\n * // 'mousemove'\n * // 'mousemove'\n * // 'mousemove'\n * // 'click'\n * // 'click'\n * // 'dblclick'\n * ```\n *\n * @see {@link merge}\n *\n * @param otherSources the sources to combine the current source with.\n * @return A function that returns an Observable that merges the values from\n * all given Observables.\n */\nexport function mergeWith<T, A extends readonly unknown[]>(\n  ...otherSources: [...ObservableInputTuple<A>]\n): OperatorFunction<T, T | A[number]> {\n  return merge(...otherSources);\n}\n", "import { Subscription } from '../Subscription';\nimport { EMPTY } from '../observable/empty';\nimport { operate } from '../util/lift';\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { timer } from '../observable/timer';\n\nexport interface RepeatConfig {\n  /**\n   * The number of times to repeat the source. Defaults to `Infinity`.\n   */\n  count?: number;\n\n  /**\n   * If a `number`, will delay the repeat of the source by that number of milliseconds.\n   * If a function, it will provide the number of times the source has been subscribed to,\n   * and the return value should be a valid observable input that will notify when the source\n   * should be repeated. If the notifier observable is empty, the result will complete.\n   */\n  delay?: number | ((count: number) => ObservableInput<any>);\n}\n\n/**\n * Returns an Observable that will resubscribe to the source stream when the source stream completes.\n *\n * <span class=\"informal\">Repeats all values emitted on the source. It's like {@link retry}, but for non error cases.</span>\n *\n * ![](repeat.png)\n *\n * Repeat will output values from a source until the source completes, then it will resubscribe to the\n * source a specified number of times, with a specified delay. Repeat can be particularly useful in\n * combination with closing operators like {@link take}, {@link takeUntil}, {@link first}, or {@link takeWhile},\n * as it can be used to restart a source again from scratch.\n *\n * Repeat is very similar to {@link retry}, where {@link retry} will resubscribe to the source in the error case, but\n * `repeat` will resubscribe if the source completes.\n *\n * Note that `repeat` will _not_ catch errors. 
Use {@link retry} for that.\n *\n * - `repeat(0)` returns an empty observable\n * - `repeat()` will repeat forever\n * - `repeat({ delay: 200 })` will repeat forever, with a delay of 200ms between repetitions.\n * - `repeat({ count: 2, delay: 400 })` will repeat twice, with a delay of 400ms between repetitions.\n * - `repeat({ delay: (count) => timer(count * 1000) })` will repeat forever, but will have a delay that grows by one second for each repetition.\n *\n * ## Example\n *\n * Repeat a message stream\n *\n * ```ts\n * import { of, repeat } from 'rxjs';\n *\n * const source = of('Repeat message');\n * const result = source.pipe(repeat(3));\n *\n * result.subscribe(x => console.log(x));\n *\n * // Results\n * // 'Repeat message'\n * // 'Repeat message'\n * // 'Repeat message'\n * ```\n *\n * Repeat 3 values, 2 times\n *\n * ```ts\n * import { interval, take, repeat } from 'rxjs';\n *\n * const source = interval(1000);\n * const result = source.pipe(take(3), repeat(2));\n *\n * result.subscribe(x => console.log(x));\n *\n * // Results every second\n * // 0\n * // 1\n * // 2\n * // 0\n * // 1\n * // 2\n * ```\n *\n * Defining two complex repeats with delays on the same source.\n * Note that the second repeat cannot be called until the first\n * repeat as exhausted it's count.\n *\n * ```ts\n * import { defer, of, repeat } from 'rxjs';\n *\n * const source = defer(() => {\n *    return of(`Hello, it is ${new Date()}`)\n * });\n *\n * source.pipe(\n *    // Repeat 3 times with a delay of 1 second between repetitions\n *    repeat({\n *      count: 3,\n *      delay: 1000,\n *    }),\n *\n *    // *Then* repeat forever, but with an exponential step-back\n *    // maxing out at 1 minute.\n *    repeat({\n *      delay: (count) => timer(Math.min(60000, 2 ^ count * 1000))\n *    })\n * )\n * ```\n *\n * @see {@link repeatWhen}\n * @see {@link retry}\n *\n * @param count The number of times the source Observable items are repeated, a count of 0 will yield\n * an empty Observable.\n */\nexport function repeat<T>(countOrConfig?: number | RepeatConfig): MonoTypeOperatorFunction<T> {\n  let count = Infinity;\n  let delay: RepeatConfig['delay'];\n\n  if (countOrConfig != null) {\n    if (typeof countOrConfig === 'object') {\n      ({ count = Infinity, delay } = countOrConfig);\n    } else {\n      count = countOrConfig;\n    }\n  }\n\n  return count <= 0\n    ? () => EMPTY\n    : operate((source, subscriber) => {\n        let soFar = 0;\n        let sourceSub: Subscription | null;\n\n        const resubscribe = () => {\n          sourceSub?.unsubscribe();\n          sourceSub = null;\n          if (delay != null) {\n            const notifier = typeof delay === 'number' ? 
timer(delay) : innerFrom(delay(soFar));\n            const notifierSubscriber = createOperatorSubscriber(subscriber, () => {\n              notifierSubscriber.unsubscribe();\n              subscribeToSource();\n            });\n            notifier.subscribe(notifierSubscriber);\n          } else {\n            subscribeToSource();\n          }\n        };\n\n        const subscribeToSource = () => {\n          let syncUnsub = false;\n          sourceSub = source.subscribe(\n            createOperatorSubscriber(subscriber, undefined, () => {\n              if (++soFar < count) {\n                if (sourceSub) {\n                  resubscribe();\n                } else {\n                  syncUnsub = true;\n                }\n              } else {\n                subscriber.complete();\n              }\n            })\n          );\n\n          if (syncUnsub) {\n            resubscribe();\n          }\n        };\n\n        subscribeToSource();\n      });\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { scanInternals } from './scanInternals';\n\nexport function scan<V, A = V>(accumulator: (acc: A | V, value: V, index: number) => A): OperatorFunction<V, V | A>;\nexport function scan<V, A>(accumulator: (acc: A, value: V, index: number) => A, seed: A): OperatorFunction<V, A>;\nexport function scan<V, A, S>(accumulator: (acc: A | S, value: V, index: number) => A, seed: S): OperatorFunction<V, A>;\n\n// TODO: link to a \"redux pattern\" section in the guide (location TBD)\n\n/**\n * Useful for encapsulating and managing state. Applies an accumulator (or \"reducer function\")\n * to each value from the source after an initial state is established -- either via\n * a `seed` value (second argument), or from the first value from the source.\n *\n * <span class=\"informal\">It's like {@link reduce}, but emits the current\n * accumulation state after each update</span>\n *\n * ![](scan.png)\n *\n * This operator maintains an internal state and emits it after processing each value as follows:\n *\n * 1. First value arrives\n *   - If a `seed` value was supplied (as the second argument to `scan`), let `state = seed` and `value = firstValue`.\n *   - If NO `seed` value was supplied (no second argument), let `state = firstValue` and go to 3.\n * 2. Let `state = accumulator(state, value)`.\n *   - If an error is thrown by `accumulator`, notify the consumer of an error. The process ends.\n * 3. Emit `state`.\n * 4. Next value arrives, let `value = nextValue`, go to 2.\n *\n * ## Examples\n *\n * An average of previous numbers. This example shows how\n * not providing a `seed` can prime the stream with the\n * first value from the source.\n *\n * ```ts\n * import { of, scan, map } from 'rxjs';\n *\n * const numbers$ = of(1, 2, 3);\n *\n * numbers$\n *   .pipe(\n *     // Get the sum of the numbers coming in.\n *     scan((total, n) => total + n),\n *     // Get the average by dividing the sum by the total number\n *     // received so far (which is 1 more than the zero-based index).\n *     map((sum, index) => sum / (index + 1))\n *   )\n *   .subscribe(console.log);\n * ```\n *\n * The Fibonacci sequence. This example shows how you can use\n * a seed to prime accumulation process. Also... you know... Fibonacci.\n * So important to like, computers and stuff that its whiteboarded\n * in job interviews. Now you can show them the Rx version! 
(Please don't, haha)\n *\n * ```ts\n * import { interval, scan, map, startWith } from 'rxjs';\n *\n * const firstTwoFibs = [0, 1];\n * // An endless stream of Fibonacci numbers.\n * const fibonacci$ = interval(1000).pipe(\n *   // Scan to get the fibonacci numbers (after 0, 1)\n *   scan(([a, b]) => [b, a + b], firstTwoFibs),\n *   // Get the second number in the tuple, it's the one you calculated\n *   map(([, n]) => n),\n *   // Start with our first two digits :)\n *   startWith(...firstTwoFibs)\n * );\n *\n * fibonacci$.subscribe(console.log);\n * ```\n *\n * @see {@link expand}\n * @see {@link mergeScan}\n * @see {@link reduce}\n * @see {@link switchScan}\n *\n * @param accumulator A \"reducer function\". This will be called for each value after an initial state is\n * acquired.\n * @param seed The initial state. If this is not provided, the first value from the source will\n * be used as the initial state, and emitted without going through the accumulator. All subsequent values\n * will be processed by the accumulator function. If this is provided, all values will go through\n * the accumulator function.\n * @return A function that returns an Observable of the accumulated values.\n */\nexport function scan<V, A, S>(accumulator: (acc: V | A | S, value: V, index: number) => A, seed?: S): OperatorFunction<V, V | A> {\n  // providing a seed of `undefined` *should* be valid and trigger\n  // hasSeed! so don't use `seed !== undefined` checks!\n  // For this reason, we have to check it here at the original call site\n  // otherwise inside Operator/Subscriber we won't know if `undefined`\n  // means they didn't provide anything or if they literally provided `undefined`\n  return operate(scanInternals(accumulator, seed as S, arguments.length >= 2, true));\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { Subject } from '../Subject';\nimport { SafeSubscriber } from '../Subscriber';\nimport { Subscription } from '../Subscription';\nimport { MonoTypeOperatorFunction, SubjectLike, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\n\nexport interface ShareConfig<T> {\n  /**\n   * The factory used to create the subject that will connect the source observable to\n   * multicast consumers.\n   */\n  connector?: () => SubjectLike<T>;\n  /**\n   * If `true`, the resulting observable will reset internal state on error from source and return to a \"cold\" state. This\n   * allows the resulting observable to be \"retried\" in the event of an error.\n   * If `false`, when an error comes from the source it will push the error into the connecting subject, and the subject\n   * will remain the connecting subject, meaning the resulting observable will not go \"cold\" again, and subsequent retries\n   * or resubscriptions will resubscribe to that same subject. In all cases, RxJS subjects will emit the same error again, however\n   * {@link ReplaySubject} will also push its buffered values before pushing the error.\n   * It is also possible to pass a notifier factory returning an `ObservableInput` instead which grants more fine-grained\n   * control over how and when the reset should happen. This allows behaviors like conditional or delayed resets.\n   */\n  resetOnError?: boolean | ((error: any) => ObservableInput<any>);\n  /**\n   * If `true`, the resulting observable will reset internal state on completion from source and return to a \"cold\" state. 
This\n   * allows the resulting observable to be \"repeated\" after it is done.\n   * If `false`, when the source completes, it will push the completion through the connecting subject, and the subject\n   * will remain the connecting subject, meaning the resulting observable will not go \"cold\" again, and subsequent repeats\n   * or resubscriptions will resubscribe to that same subject.\n   * It is also possible to pass a notifier factory returning an `ObservableInput` instead which grants more fine-grained\n   * control over how and when the reset should happen. This allows behaviors like conditional or delayed resets.\n   */\n  resetOnComplete?: boolean | (() => ObservableInput<any>);\n  /**\n   * If `true`, when the number of subscribers to the resulting observable reaches zero due to those subscribers unsubscribing, the\n   * internal state will be reset and the resulting observable will return to a \"cold\" state. This means that the next\n   * time the resulting observable is subscribed to, a new subject will be created and the source will be subscribed to\n   * again.\n   * If `false`, when the number of subscribers to the resulting observable reaches zero due to unsubscription, the subject\n   * will remain connected to the source, and new subscriptions to the result will be connected through that same subject.\n   * It is also possible to pass a notifier factory returning an `ObservableInput` instead which grants more fine-grained\n   * control over how and when the reset should happen. This allows behaviors like conditional or delayed resets.\n   */\n  resetOnRefCountZero?: boolean | (() => ObservableInput<any>);\n}\n\nexport function share<T>(): MonoTypeOperatorFunction<T>;\n\nexport function share<T>(options: ShareConfig<T>): MonoTypeOperatorFunction<T>;\n\n/**\n * Returns a new Observable that multicasts (shares) the original Observable. As long as there is at least one\n * Subscriber this Observable will be subscribed and emitting data. When all subscribers have unsubscribed it will\n * unsubscribe from the source Observable. Because the Observable is multicasting it makes the stream `hot`.\n * This is an alias for `multicast(() => new Subject()), refCount()`.\n *\n * The subscription to the underlying source Observable can be reset (unsubscribe and resubscribe for new subscribers),\n * if the subscriber count to the shared observable drops to 0, or if the source Observable errors or completes. It is\n * possible to use notifier factories for the resets to allow for behaviors like conditional or delayed resets. Please\n * note that resetting on error or complete of the source Observable does not behave like a transparent retry or restart\n * of the source because the error or complete will be forwarded to all subscribers and their subscription will be\n * closed. Only new subscribers after a reset on error or complete happened will cause a fresh subscription to the\n * source. 
To achieve transparent retries or restarts pipe the source through appropriate operators before sharing.\n *\n * ![](share.png)\n *\n * ## Example\n *\n * Generate new multicast Observable from the `source` Observable value\n *\n * ```ts\n * import { interval, tap, map, take, share } from 'rxjs';\n *\n * const source = interval(1000).pipe(\n *   tap(x => console.log('Processing: ', x)),\n *   map(x => x * x),\n *   take(6),\n *   share()\n * );\n *\n * source.subscribe(x => console.log('subscription 1: ', x));\n * source.subscribe(x => console.log('subscription 2: ', x));\n *\n * // Logs:\n * // Processing: 0\n * // subscription 1: 0\n * // subscription 2: 0\n * // Processing: 1\n * // subscription 1: 1\n * // subscription 2: 1\n * // Processing: 2\n * // subscription 1: 4\n * // subscription 2: 4\n * // Processing: 3\n * // subscription 1: 9\n * // subscription 2: 9\n * // Processing: 4\n * // subscription 1: 16\n * // subscription 2: 16\n * // Processing: 5\n * // subscription 1: 25\n * // subscription 2: 25\n * ```\n *\n * ## Example with notifier factory: Delayed reset\n *\n * ```ts\n * import { interval, take, share, timer } from 'rxjs';\n *\n * const source = interval(1000).pipe(\n *   take(3),\n *   share({\n *     resetOnRefCountZero: () => timer(1000)\n *   })\n * );\n *\n * const subscriptionOne = source.subscribe(x => console.log('subscription 1: ', x));\n * setTimeout(() => subscriptionOne.unsubscribe(), 1300);\n *\n * setTimeout(() => source.subscribe(x => console.log('subscription 2: ', x)), 1700);\n *\n * setTimeout(() => source.subscribe(x => console.log('subscription 3: ', x)), 5000);\n *\n * // Logs:\n * // subscription 1:  0\n * // (subscription 1 unsubscribes here)\n * // (subscription 2 subscribes here ~400ms later, source was not reset)\n * // subscription 2:  1\n * // subscription 2:  2\n * // (subscription 2 unsubscribes here)\n * // (subscription 3 subscribes here ~2000ms later, source did reset before)\n * // subscription 3:  0\n * // subscription 3:  1\n * // subscription 3:  2\n * ```\n *\n * @see {@link shareReplay}\n *\n * @return A function that returns an Observable that mirrors the source.\n */\nexport function share<T>(options: ShareConfig<T> = {}): MonoTypeOperatorFunction<T> {\n  const { connector = () => new Subject<T>(), resetOnError = true, resetOnComplete = true, resetOnRefCountZero = true } = options;\n  // It's necessary to use a wrapper here, as the _operator_ must be\n  // referentially transparent. 
Otherwise, it cannot be used in calls to the\n  // static `pipe` function - to create a partial pipeline.\n  //\n  // The _operator function_ - the function returned by the _operator_ - will\n  // not be referentially transparent - as it shares its source - but the\n  // _operator function_ is called when the complete pipeline is composed via a\n  // call to a source observable's `pipe` method - not when the static `pipe`\n  // function is called.\n  return (wrapperSource) => {\n    let connection: SafeSubscriber<T> | undefined;\n    let resetConnection: Subscription | undefined;\n    let subject: SubjectLike<T> | undefined;\n    let refCount = 0;\n    let hasCompleted = false;\n    let hasErrored = false;\n\n    const cancelReset = () => {\n      resetConnection?.unsubscribe();\n      resetConnection = undefined;\n    };\n    // Used to reset the internal state to a \"cold\"\n    // state, as though it had never been subscribed to.\n    const reset = () => {\n      cancelReset();\n      connection = subject = undefined;\n      hasCompleted = hasErrored = false;\n    };\n    const resetAndUnsubscribe = () => {\n      // We need to capture the connection before\n      // we reset (if we need to reset).\n      const conn = connection;\n      reset();\n      conn?.unsubscribe();\n    };\n\n    return operate<T, T>((source, subscriber) => {\n      refCount++;\n      if (!hasErrored && !hasCompleted) {\n        cancelReset();\n      }\n\n      // Create the subject if we don't have one yet. Grab a local reference to\n      // it as well, which avoids non-null assertions when using it and, if we\n      // connect to it now, then error/complete need a reference after it was\n      // reset.\n      const dest = (subject = subject ?? connector());\n\n      // Add the finalization directly to the subscriber - instead of returning it -\n      // so that the handling of the subscriber's unsubscription will be wired\n      // up _before_ the subscription to the source occurs. This is done so that\n      // the assignment to the source connection's `closed` property will be seen\n      // by synchronous firehose sources.\n      subscriber.add(() => {\n        refCount--;\n\n        // If we're resetting on refCount === 0, and it's 0, we only want to do\n        // that on \"unsubscribe\", really. 
Resetting on error or completion is a different\n        // configuration.\n        if (refCount === 0 && !hasErrored && !hasCompleted) {\n          resetConnection = handleReset(resetAndUnsubscribe, resetOnRefCountZero);\n        }\n      });\n\n      // The following line adds the subscription to the subscriber passed.\n      // Basically, `subscriber === dest.subscribe(subscriber)` is `true`.\n      dest.subscribe(subscriber);\n\n      if (\n        !connection &&\n        // Check this shareReplay is still activate - it can be reset to 0\n        // and be \"unsubscribed\" _before_ it actually subscribes.\n        // If we were to subscribe then, it'd leak and get stuck.\n        refCount > 0\n      ) {\n        // We need to create a subscriber here - rather than pass an observer and\n        // assign the returned subscription to connection - because it's possible\n        // for reentrant subscriptions to the shared observable to occur and in\n        // those situations we want connection to be already-assigned so that we\n        // don't create another connection to the source.\n        connection = new SafeSubscriber({\n          next: (value) => dest.next(value),\n          error: (err) => {\n            hasErrored = true;\n            cancelReset();\n            resetConnection = handleReset(reset, resetOnError, err);\n            dest.error(err);\n          },\n          complete: () => {\n            hasCompleted = true;\n            cancelReset();\n            resetConnection = handleReset(reset, resetOnComplete);\n            dest.complete();\n          },\n        });\n        innerFrom(source).subscribe(connection);\n      }\n    })(wrapperSource);\n  };\n}\n\nfunction handleReset<T extends unknown[] = never[]>(\n  reset: () => void,\n  on: boolean | ((...args: T) => ObservableInput<any>),\n  ...args: T\n): Subscription | undefined {\n  if (on === true) {\n    reset();\n    return;\n  }\n\n  if (on === false) {\n    return;\n  }\n\n  const onSubscriber = new SafeSubscriber({\n    next: () => {\n      onSubscriber.unsubscribe();\n      reset();\n    },\n  });\n\n  return innerFrom(on(...args)).subscribe(onSubscriber);\n}\n", "import { ReplaySubject } from '../ReplaySubject';\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { share } from './share';\n\nexport interface ShareReplayConfig {\n  bufferSize?: number;\n  windowTime?: number;\n  refCount: boolean;\n  scheduler?: SchedulerLike;\n}\n\nexport function shareReplay<T>(config: ShareReplayConfig): MonoTypeOperatorFunction<T>;\nexport function shareReplay<T>(bufferSize?: number, windowTime?: number, scheduler?: SchedulerLike): MonoTypeOperatorFunction<T>;\n\n/**\n * Share source and replay specified number of emissions on subscription.\n *\n * This operator is a specialization of `replay` that connects to a source observable\n * and multicasts through a `ReplaySubject` constructed with the specified arguments.\n * A successfully completed source will stay cached in the `shareReplay`ed observable forever,\n * but an errored source can be retried.\n *\n * ## Why use `shareReplay`?\n *\n * You generally want to use `shareReplay` when you have side-effects or taxing computations\n * that you do not wish to be executed amongst multiple subscribers.\n * It may also be valuable in situations where you know you will have late subscribers to\n * a stream that need access to previously emitted values.\n * This ability to replay values on subscription is what differentiates {@link share} and 
`shareReplay`.\n *\n * ## Reference counting\n *\n * By default `shareReplay` will use `refCount` of false, meaning that it will _not_ unsubscribe the\n * source when the reference counter drops to zero, i.e. the inner `ReplaySubject` will _not_ be unsubscribed\n * (and potentially run for ever).\n * This is the default as it is expected that `shareReplay` is often used to keep around expensive to setup\n * observables which we want to keep running instead of having to do the expensive setup again.\n *\n * As of RXJS version 6.4.0 a new overload signature was added to allow for manual control over what\n * happens when the operators internal reference counter drops to zero.\n * If `refCount` is true, the source will be unsubscribed from once the reference count drops to zero, i.e.\n * the inner `ReplaySubject` will be unsubscribed. All new subscribers will receive value emissions from a\n * new `ReplaySubject` which in turn will cause a new subscription to the source observable.\n *\n * ## Examples\n *\n * Example with a third subscriber coming late to the party\n *\n * ```ts\n * import { interval, take, shareReplay } from 'rxjs';\n *\n * const shared$ = interval(2000).pipe(\n *   take(6),\n *   shareReplay(3)\n * );\n *\n * shared$.subscribe(x => console.log('sub A: ', x));\n * shared$.subscribe(y => console.log('sub B: ', y));\n *\n * setTimeout(() => {\n *   shared$.subscribe(y => console.log('sub C: ', y));\n * }, 11000);\n *\n * // Logs:\n * // (after ~2000 ms)\n * // sub A: 0\n * // sub B: 0\n * // (after ~4000 ms)\n * // sub A: 1\n * // sub B: 1\n * // (after ~6000 ms)\n * // sub A: 2\n * // sub B: 2\n * // (after ~8000 ms)\n * // sub A: 3\n * // sub B: 3\n * // (after ~10000 ms)\n * // sub A: 4\n * // sub B: 4\n * // (after ~11000 ms, sub C gets the last 3 values)\n * // sub C: 2\n * // sub C: 3\n * // sub C: 4\n * // (after ~12000 ms)\n * // sub A: 5\n * // sub B: 5\n * // sub C: 5\n * ```\n *\n * Example for `refCount` usage\n *\n * ```ts\n * import { Observable, tap, interval, shareReplay, take } from 'rxjs';\n *\n * const log = <T>(name: string, source: Observable<T>) => source.pipe(\n *   tap({\n *     subscribe: () => console.log(`${ name }: subscribed`),\n *     next: value => console.log(`${ name }: ${ value }`),\n *     complete: () => console.log(`${ name }: completed`),\n *     finalize: () => console.log(`${ name }: unsubscribed`)\n *   })\n * );\n *\n * const obs$ = log('source', interval(1000));\n *\n * const shared$ = log('shared', obs$.pipe(\n *   shareReplay({ bufferSize: 1, refCount: true }),\n *   take(2)\n * ));\n *\n * shared$.subscribe(x => console.log('sub A: ', x));\n * shared$.subscribe(y => console.log('sub B: ', y));\n *\n * // PRINTS:\n * // shared: subscribed <-- reference count = 1\n * // source: subscribed\n * // shared: subscribed <-- reference count = 2\n * // source: 0\n * // shared: 0\n * // sub A: 0\n * // shared: 0\n * // sub B: 0\n * // source: 1\n * // shared: 1\n * // sub A: 1\n * // shared: completed <-- take(2) completes the subscription for sub A\n * // shared: unsubscribed <-- reference count = 1\n * // shared: 1\n * // sub B: 1\n * // shared: completed <-- take(2) completes the subscription for sub B\n * // shared: unsubscribed <-- reference count = 0\n * // source: unsubscribed <-- replaySubject unsubscribes from source observable because the reference count dropped to 0 and refCount is true\n *\n * // In case of refCount being false, the unsubscribe is never called on the source and the source would keep on emitting, even if no 
subscribers\n * // are listening.\n * // source: 2\n * // source: 3\n * // source: 4\n * // ...\n * ```\n *\n * @see {@link publish}\n * @see {@link share}\n * @see {@link publishReplay}\n *\n * @param configOrBufferSize Maximum element count of the replay buffer or {@link ShareReplayConfig configuration}\n * object.\n * @param windowTime Maximum time length of the replay buffer in milliseconds.\n * @param scheduler Scheduler where connected observers within the selector function\n * will be invoked on.\n * @return A function that returns an Observable sequence that contains the\n * elements of a sequence produced by multicasting the source sequence within a\n * selector function.\n */\nexport function shareReplay<T>(\n  configOrBufferSize?: ShareReplayConfig | number,\n  windowTime?: number,\n  scheduler?: SchedulerLike\n): MonoTypeOperatorFunction<T> {\n  let bufferSize: number;\n  let refCount = false;\n  if (configOrBufferSize && typeof configOrBufferSize === 'object') {\n    ({ bufferSize = Infinity, windowTime = Infinity, refCount = false, scheduler } = configOrBufferSize);\n  } else {\n    bufferSize = (configOrBufferSize ?? Infinity) as number;\n  }\n  return share<T>({\n    connector: () => new ReplaySubject(bufferSize, windowTime, scheduler),\n    resetOnError: true,\n    resetOnComplete: false,\n    resetOnRefCountZero: refCount,\n  });\n}\n", "import { MonoTypeOperatorFunction } from '../types';\nimport { filter } from './filter';\n\n/**\n * Returns an Observable that skips the first `count` items emitted by the source Observable.\n *\n * ![](skip.png)\n *\n * Skips the values until the sent notifications are equal or less than provided skip count. It raises\n * an error if skip count is equal or more than the actual number of emits and source raises an error.\n *\n * ## Example\n *\n * Skip the values before the emission\n *\n * ```ts\n * import { interval, skip } from 'rxjs';\n *\n * // emit every half second\n * const source = interval(500);\n * // skip the first 10 emitted values\n * const result = source.pipe(skip(10));\n *\n * result.subscribe(value => console.log(value));\n * // output: 10...11...12...13...\n * ```\n *\n * @see {@link last}\n * @see {@link skipWhile}\n * @see {@link skipUntil}\n * @see {@link skipLast}\n *\n * @param {Number} count - The number of times, items emitted by source Observable should be skipped.\n * @return A function that returns an Observable that skips the first `count`\n * values emitted by the source Observable.\n */\nexport function skip<T>(count: number): MonoTypeOperatorFunction<T> {\n  return filter((_, index) => count <= index);\n}\n", "import { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { noop } from '../util/noop';\n\n/**\n * Returns an Observable that skips items emitted by the source Observable until a second Observable emits an item.\n *\n * The `skipUntil` operator causes the observable stream to skip the emission of values until the passed in observable\n * emits the first value. 
This can be particularly useful in combination with user interactions, responses of HTTP\n * requests or waiting for specific times to pass by.\n *\n * ![](skipUntil.png)\n *\n * Internally, the `skipUntil` operator subscribes to the passed in `notifier` `ObservableInput` (which gets converted\n * to an Observable) in order to recognize the emission of its first value. When `notifier` emits next, the operator\n * unsubscribes from it and starts emitting the values of the *source* observable until it completes or errors. It\n * will never let the *source* observable emit any values if the `notifier` completes or throws an error without\n * emitting a value before.\n *\n * ## Example\n *\n * In the following example, all emitted values of the interval observable are skipped until the user clicks anywhere\n * within the page\n *\n * ```ts\n * import { interval, fromEvent, skipUntil } from 'rxjs';\n *\n * const intervalObservable = interval(1000);\n * const click = fromEvent(document, 'click');\n *\n * const emitAfterClick = intervalObservable.pipe(\n *   skipUntil(click)\n * );\n * // clicked at 4.6s. output: 5...6...7...8........ or\n * // clicked at 7.3s. output: 8...9...10..11.......\n * emitAfterClick.subscribe(value => console.log(value));\n * ```\n *\n * @see {@link last}\n * @see {@link skip}\n * @see {@link skipWhile}\n * @see {@link skipLast}\n *\n * @param notifier An `ObservableInput` that has to emit an item before the source Observable elements begin to\n * be mirrored by the resulting Observable.\n * @return A function that returns an Observable that skips items from the\n * source Observable until the `notifier` Observable emits an item, then emits the\n * remaining items.\n */\nexport function skipUntil<T>(notifier: ObservableInput<any>): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let taking = false;\n\n    const skipSubscriber = createOperatorSubscriber(\n      subscriber,\n      () => {\n        skipSubscriber?.unsubscribe();\n        taking = true;\n      },\n      noop\n    );\n\n    innerFrom(notifier).subscribe(skipSubscriber);\n\n    source.subscribe(createOperatorSubscriber(subscriber, (value) => taking && subscriber.next(value)));\n  });\n}\n", "import { concat } from '../observable/concat';\nimport { OperatorFunction, SchedulerLike, ValueFromArray } from '../types';\nimport { popScheduler } from '../util/args';\nimport { operate } from '../util/lift';\n\n// Devs are more likely to pass null or undefined than they are a scheduler\n// without accompanying values. To make things easier for (naughty) devs who\n// use the `strictNullChecks: false` TypeScript compiler option, these\n// overloads with explicit null and undefined values are included.\n\nexport function startWith<T>(value: null): OperatorFunction<T, T | null>;\nexport function startWith<T>(value: undefined): OperatorFunction<T, T | undefined>;\n\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `concatAll`. 
Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function startWith<T, A extends readonly unknown[] = T[]>(\n  ...valuesAndScheduler: [...A, SchedulerLike]\n): OperatorFunction<T, T | ValueFromArray<A>>;\nexport function startWith<T, A extends readonly unknown[] = T[]>(...values: A): OperatorFunction<T, T | ValueFromArray<A>>;\n\n/**\n * Returns an observable that, at the moment of subscription, will synchronously emit all\n * values provided to this operator, then subscribe to the source and mirror all of its emissions\n * to subscribers.\n *\n * This is a useful way to know when subscription has occurred on an existing observable.\n *\n * <span class=\"informal\">First emits its arguments in order, and then any\n * emissions from the source.</span>\n *\n * ![](startWith.png)\n *\n * ## Examples\n *\n * Emit a value when a timer starts.\n *\n * ```ts\n * import { timer, map, startWith } from 'rxjs';\n *\n * timer(1000)\n *   .pipe(\n *     map(() => 'timer emit'),\n *     startWith('timer start')\n *   )\n *   .subscribe(x => console.log(x));\n *\n * // results:\n * // 'timer start'\n * // 'timer emit'\n * ```\n *\n * @param values Items you want the modified Observable to emit first.\n * @return A function that returns an Observable that synchronously emits\n * provided values before subscribing to the source Observable.\n *\n * @see {@link endWith}\n * @see {@link finalize}\n * @see {@link concat}\n */\nexport function startWith<T, D>(...values: D[]): OperatorFunction<T, T | D> {\n  const scheduler = popScheduler(values);\n  return operate((source, subscriber) => {\n    // Here we can't pass `undefined` as a scheduler, because if we did, the\n    // code inside of `concat` would be confused by the `undefined`, and treat it\n    // like an invalid observable. So we have to split it two different ways.\n    (scheduler ? concat(values, source, scheduler) : concat(values, source)).subscribe(subscriber);\n  });\n}\n", "import { Subscriber } from '../Subscriber';\nimport { ObservableInput, OperatorFunction, ObservedValueOf } from '../types';\nimport { innerFrom } from '../observable/innerFrom';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/* tslint:disable:max-line-length */\nexport function switchMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. Details: https://rxjs.dev/deprecations/resultSelector */\nexport function switchMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: undefined\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. 
Details: https://rxjs.dev/deprecations/resultSelector */\nexport function switchMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: (outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R\n): OperatorFunction<T, R>;\n/* tslint:enable:max-line-length */\n\n/**\n * Projects each source value to an Observable which is merged in the output\n * Observable, emitting values only from the most recently projected Observable.\n *\n * <span class=\"informal\">Maps each value to an Observable, then flattens all of\n * these inner Observables using {@link switchAll}.</span>\n *\n * ![](switchMap.png)\n *\n * Returns an Observable that emits items based on applying a function that you\n * supply to each item emitted by the source Observable, where that function\n * returns an (so-called \"inner\") Observable. Each time it observes one of these\n * inner Observables, the output Observable begins emitting the items emitted by\n * that inner Observable. When a new inner Observable is emitted, `switchMap`\n * stops emitting items from the earlier-emitted inner Observable and begins\n * emitting items from the new one. It continues to behave like this for\n * subsequent inner Observables.\n *\n * ## Example\n *\n * Generate new Observable according to source Observable values\n *\n * ```ts\n * import { of, switchMap } from 'rxjs';\n *\n * const switched = of(1, 2, 3).pipe(switchMap(x => of(x, x ** 2, x ** 3)));\n * switched.subscribe(x => console.log(x));\n * // outputs\n * // 1\n * // 1\n * // 1\n * // 2\n * // 4\n * // 8\n * // 3\n * // 9\n * // 27\n * ```\n *\n * Restart an interval Observable on every click event\n *\n * ```ts\n * import { fromEvent, switchMap, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(switchMap(() => interval(1000)));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link concatMap}\n * @see {@link exhaustMap}\n * @see {@link mergeMap}\n * @see {@link switchAll}\n * @see {@link switchMapTo}\n *\n * @param {function(value: T, index: number): ObservableInput} project A function\n * that, when applied to an item emitted by the source Observable, returns an\n * Observable.\n * @return A function that returns an Observable that emits the result of\n * applying the projection function (and the optional deprecated\n * `resultSelector`) to each item emitted by the source Observable and taking\n * only the values from the most recently projected inner Observable.\n */\nexport function switchMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector?: (outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R\n): OperatorFunction<T, ObservedValueOf<O> | R> {\n  return operate((source, subscriber) => {\n    let innerSubscriber: Subscriber<ObservedValueOf<O>> | null = null;\n    let index = 0;\n    // Whether or not the source subscription has completed\n    let isComplete = false;\n\n    // We only complete the result if the source is complete AND we don't have an active inner subscription.\n    // This is called both when the source completes and when the inners complete.\n    const checkComplete = () => isComplete && !innerSubscriber && subscriber.complete();\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          // Cancel the previous inner subscription if there was one\n          
innerSubscriber?.unsubscribe();\n          let innerIndex = 0;\n          const outerIndex = index++;\n          // Start the next inner subscription\n          innerFrom(project(value, outerIndex)).subscribe(\n            (innerSubscriber = createOperatorSubscriber(\n              subscriber,\n              // When we get a new inner value, next it through. Note that this is\n              // handling the deprecate result selector here. This is because with this architecture\n              // it ends up being smaller than using the map operator.\n              (innerValue) => subscriber.next(resultSelector ? resultSelector(value, innerValue, outerIndex, innerIndex++) : innerValue),\n              () => {\n                // The inner has completed. Null out the inner subscriber to\n                // free up memory and to signal that we have no inner subscription\n                // currently.\n                innerSubscriber = null!;\n                checkComplete();\n              }\n            ))\n          );\n        },\n        () => {\n          isComplete = true;\n          checkComplete();\n        }\n      )\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { noop } from '../util/noop';\n\n/**\n * Emits the values emitted by the source Observable until a `notifier`\n * Observable emits a value.\n *\n * <span class=\"informal\">Lets values pass until a second Observable,\n * `notifier`, emits a value. Then, it completes.</span>\n *\n * ![](takeUntil.png)\n *\n * `takeUntil` subscribes and begins mirroring the source Observable. It also\n * monitors a second Observable, `notifier` that you provide. If the `notifier`\n * emits a value, the output Observable stops mirroring the source Observable\n * and completes. 
If the `notifier` doesn't emit any value and completes\n * then `takeUntil` will pass all values.\n *\n * ## Example\n *\n * Tick every second until the first click happens\n *\n * ```ts\n * import { interval, fromEvent, takeUntil } from 'rxjs';\n *\n * const source = interval(1000);\n * const clicks = fromEvent(document, 'click');\n * const result = source.pipe(takeUntil(clicks));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link take}\n * @see {@link takeLast}\n * @see {@link takeWhile}\n * @see {@link skip}\n *\n * @param {Observable} notifier The Observable whose first emitted value will\n * cause the output Observable of `takeUntil` to stop emitting values from the\n * source Observable.\n * @return A function that returns an Observable that emits the values from the\n * source Observable until `notifier` emits its first value.\n */\nexport function takeUntil<T>(notifier: ObservableInput<any>): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    innerFrom(notifier).subscribe(createOperatorSubscriber(subscriber, () => subscriber.complete(), noop));\n    !subscriber.closed && source.subscribe(subscriber);\n  });\n}\n", "import { OperatorFunction, MonoTypeOperatorFunction, TruthyTypesOf } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\nexport function takeWhile<T>(predicate: BooleanConstructor, inclusive: true): MonoTypeOperatorFunction<T>;\nexport function takeWhile<T>(predicate: BooleanConstructor, inclusive: false): OperatorFunction<T, TruthyTypesOf<T>>;\nexport function takeWhile<T>(predicate: BooleanConstructor): OperatorFunction<T, TruthyTypesOf<T>>;\nexport function takeWhile<T, S extends T>(predicate: (value: T, index: number) => value is S): OperatorFunction<T, S>;\nexport function takeWhile<T, S extends T>(predicate: (value: T, index: number) => value is S, inclusive: false): OperatorFunction<T, S>;\nexport function takeWhile<T>(predicate: (value: T, index: number) => boolean, inclusive?: boolean): MonoTypeOperatorFunction<T>;\n\n/**\n * Emits values emitted by the source Observable so long as each value satisfies\n * the given `predicate`, and then completes as soon as this `predicate` is not\n * satisfied.\n *\n * <span class=\"informal\">Takes values from the source only while they pass the\n * condition given. When the first value does not satisfy, it completes.</span>\n *\n * ![](takeWhile.png)\n *\n * `takeWhile` subscribes and begins mirroring the source Observable. Each value\n * emitted on the source is given to the `predicate` function which returns a\n * boolean, representing a condition to be satisfied by the source values. 
The\n * output Observable emits the source values until such time as the `predicate`\n * returns false, at which point `takeWhile` stops mirroring the source\n * Observable and completes the output Observable.\n *\n * ## Example\n *\n * Emit click events only while the clientX property is greater than 200\n *\n * ```ts\n * import { fromEvent, takeWhile } from 'rxjs';\n *\n * const clicks = fromEvent<PointerEvent>(document, 'click');\n * const result = clicks.pipe(takeWhile(ev => ev.clientX > 200));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link take}\n * @see {@link takeLast}\n * @see {@link takeUntil}\n * @see {@link skip}\n *\n * @param {function(value: T, index: number): boolean} predicate A function that\n * evaluates a value emitted by the source Observable and returns a boolean.\n * Also takes the (zero-based) index as the second argument.\n * @param {boolean} inclusive When set to `true` the value that caused\n * `predicate` to return `false` will also be emitted.\n * @return A function that returns an Observable that emits values from the\n * source Observable so long as each value satisfies the condition defined by\n * the `predicate`, then completes.\n */\nexport function takeWhile<T>(predicate: (value: T, index: number) => boolean, inclusive = false): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let index = 0;\n    source.subscribe(\n      createOperatorSubscriber(subscriber, (value) => {\n        const result = predicate(value, index++);\n        (result || inclusive) && subscriber.next(value);\n        !result && subscriber.complete();\n      })\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction, Observer } from '../types';\nimport { isFunction } from '../util/isFunction';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { identity } from '../util/identity';\n\n/**\n * An extension to the {@link Observer} interface used only by the {@link tap} operator.\n *\n * It provides a useful set of callbacks a user can register to do side-effects in\n * cases other than what the usual {@link Observer} callbacks are\n * ({@link guide/glossary-and-semantics#next next},\n * {@link guide/glossary-and-semantics#error error} and/or\n * {@link guide/glossary-and-semantics#complete complete}).\n *\n * ## Example\n *\n * ```ts\n * import { fromEvent, switchMap, tap, interval, take } from 'rxjs';\n *\n * const source$ = fromEvent(document, 'click');\n * const result$ = source$.pipe(\n *   switchMap((_, i) => i % 2 === 0\n *     ? 
fromEvent(document, 'mousemove').pipe(\n *         tap({\n *           subscribe: () => console.log('Subscribed to the mouse move events after click #' + i),\n *           unsubscribe: () => console.log('Mouse move events #' + i + ' unsubscribed'),\n *           finalize: () => console.log('Mouse move events #' + i + ' finalized')\n *         })\n *       )\n *     : interval(1_000).pipe(\n *         take(5),\n *         tap({\n *           subscribe: () => console.log('Subscribed to the 1-second interval events after click #' + i),\n *           unsubscribe: () => console.log('1-second interval events #' + i + ' unsubscribed'),\n *           finalize: () => console.log('1-second interval events #' + i + ' finalized')\n *         })\n *       )\n *   )\n * );\n *\n * const subscription = result$.subscribe({\n *   next: console.log\n * });\n *\n * setTimeout(() => {\n *   console.log('Unsubscribe after 60 seconds');\n *   subscription.unsubscribe();\n * }, 60_000);\n * ```\n */\nexport interface TapObserver<T> extends Observer<T> {\n  /**\n   * The callback that `tap` operator invokes at the moment when the source Observable\n   * gets subscribed to.\n   */\n  subscribe: () => void;\n  /**\n   * The callback that `tap` operator invokes when an explicit\n   * {@link guide/glossary-and-semantics#unsubscription unsubscribe} happens. It won't get invoked on\n   * `error` or `complete` events.\n   */\n  unsubscribe: () => void;\n  /**\n   * The callback that `tap` operator invokes when any kind of\n   * {@link guide/glossary-and-semantics#finalization finalization} happens - either when\n   * the source Observable `error`s or `complete`s or when it gets explicitly unsubscribed\n   * by the user. There is no difference in using this callback or the {@link finalize}\n   * operator, but if you're already using `tap` operator, you can use this callback\n   * instead. You'd get the same result in either case.\n   */\n  finalize: () => void;\n}\nexport function tap<T>(observerOrNext?: Partial<TapObserver<T>> | ((value: T) => void)): MonoTypeOperatorFunction<T>;\n/** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\nexport function tap<T>(\n  next?: ((value: T) => void) | null,\n  error?: ((error: any) => void) | null,\n  complete?: (() => void) | null\n): MonoTypeOperatorFunction<T>;\n\n/**\n * Used to perform side-effects for notifications from the source observable\n *\n * <span class=\"informal\">Used when you want to affect outside state with a notification without altering the notification</span>\n *\n * ![](tap.png)\n *\n * Tap is designed to allow the developer a designated place to perform side effects. While you _could_ perform side-effects\n * inside of a `map` or a `mergeMap`, that would make their mapping functions impure, which isn't always a big deal, but will\n * make it so you can't do things like memoize those functions. 
The `tap` operator is designed solely for such side-effects to\n * help you remove side-effects from other operations.\n *\n * For any notification, next, error, or complete, `tap` will call the appropriate callback you have provided to it, via a function\n * reference, or a partial observer, then pass that notification down the stream.\n *\n * The observable returned by `tap` is an exact mirror of the source, with one exception: Any error that occurs -- synchronously -- in a handler\n * provided to `tap` will be emitted as an error from the returned observable.\n *\n * > Be careful! You can mutate objects as they pass through the `tap` operator's handlers.\n *\n * The most common use of `tap` is actually for debugging. You can place a `tap(console.log)` anywhere\n * in your observable `pipe`, log out the notifications as they are emitted by the source returned by the previous\n * operation.\n *\n * ## Examples\n *\n * Check a random number before it is handled. Below is an observable that will use a random number between 0 and 1,\n * and emit `'big'` or `'small'` depending on the size of that number. But we wanted to log what the original number\n * was, so we have added a `tap(console.log)`.\n *\n * ```ts\n * import { of, tap, map } from 'rxjs';\n *\n * of(Math.random()).pipe(\n *   tap(console.log),\n *   map(n => n > 0.5 ? 'big' : 'small')\n * ).subscribe(console.log);\n * ```\n *\n * Using `tap` to analyze a value and force an error. Below is an observable where in our system we only\n * want to emit numbers 3 or less we get from another source. We can force our observable to error\n * using `tap`.\n *\n * ```ts\n * import { of, tap } from 'rxjs';\n *\n * const source = of(1, 2, 3, 4, 5);\n *\n * source.pipe(\n *   tap(n => {\n *     if (n > 3) {\n *       throw new TypeError(`Value ${ n } is greater than 3`);\n *     }\n *   })\n * )\n * .subscribe({ next: console.log, error: err => console.log(err.message) });\n * ```\n *\n * We want to know when an observable completes before moving on to the next observable. The system\n * below will emit a random series of `'X'` characters from 3 different observables in sequence. The\n * only way we know when one observable completes and moves to the next one, in this case, is because\n * we have added a `tap` with the side effect of logging to console.\n *\n * ```ts\n * import { of, concatMap, interval, take, map, tap } from 'rxjs';\n *\n * of(1, 2, 3).pipe(\n *   concatMap(n => interval(1000).pipe(\n *     take(Math.round(Math.random() * 10)),\n *     map(() => 'X'),\n *     tap({ complete: () => console.log(`Done with ${ n }`) })\n *   ))\n * )\n * .subscribe(console.log);\n * ```\n *\n * @see {@link finalize}\n * @see {@link TapObserver}\n *\n * @param observerOrNext A next handler or partial observer\n * @param error An error handler\n * @param complete A completion handler\n * @return A function that returns an Observable identical to the source, but\n * runs the specified Observer or callback(s) for each item.\n */\nexport function tap<T>(\n  observerOrNext?: Partial<TapObserver<T>> | ((value: T) => void) | null,\n  error?: ((e: any) => void) | null,\n  complete?: (() => void) | null\n): MonoTypeOperatorFunction<T> {\n  // We have to check to see not only if next is a function,\n  // but if error or complete were passed. This is because someone\n  // could technically call tap like `tap(null, fn)` or `tap(null, null, fn)`.\n  const tapObserver =\n    isFunction(observerOrNext) || error || complete\n      ? 
// tslint:disable-next-line: no-object-literal-type-assertion\n        ({ next: observerOrNext as Exclude<typeof observerOrNext, Partial<TapObserver<T>>>, error, complete } as Partial<TapObserver<T>>)\n      : observerOrNext;\n\n  return tapObserver\n    ? operate((source, subscriber) => {\n        tapObserver.subscribe?.();\n        let isUnsub = true;\n        source.subscribe(\n          createOperatorSubscriber(\n            subscriber,\n            (value) => {\n              tapObserver.next?.(value);\n              subscriber.next(value);\n            },\n            () => {\n              isUnsub = false;\n              tapObserver.complete?.();\n              subscriber.complete();\n            },\n            (err) => {\n              isUnsub = false;\n              tapObserver.error?.(err);\n              subscriber.error(err);\n            },\n            () => {\n              if (isUnsub) {\n                tapObserver.unsubscribe?.();\n              }\n              tapObserver.finalize?.();\n            }\n          )\n        );\n      })\n    : // Tap was called with no valid tap observer or handler\n      // (e.g. `tap(null, null, null)` or `tap(null)` or `tap()`)\n      // so we're going to just mirror the source.\n      identity;\n}\n", "import { Subscription } from '../Subscription';\n\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\n\n/**\n * An object interface used by {@link throttle} or {@link throttleTime} that ensure\n * configuration options of these operators.\n *\n * @see {@link throttle}\n * @see {@link throttleTime}\n */\nexport interface ThrottleConfig {\n  /**\n   * If `true`, the resulting Observable will emit the first value from the source\n   * Observable at the **start** of the \"throttling\" process (when starting an\n   * internal timer that prevents other emissions from the source to pass through).\n   * If `false`, it will not emit the first value from the source Observable at the\n   * start of the \"throttling\" process.\n   *\n   * If not provided, defaults to: `true`.\n   */\n  leading?: boolean;\n  /**\n   * If `true`, the resulting Observable will emit the last value from the source\n   * Observable at the **end** of the \"throttling\" process (when ending an internal\n   * timer that prevents other emissions from the source to pass through).\n   * If `false`, it will not emit the last value from the source Observable at the\n   * end of the \"throttling\" process.\n   *\n   * If not provided, defaults to: `false`.\n   */\n  trailing?: boolean;\n}\n\n/**\n * Emits a value from the source Observable, then ignores subsequent source\n * values for a duration determined by another Observable, then repeats this\n * process.\n *\n * <span class=\"informal\">It's like {@link throttleTime}, but the silencing\n * duration is determined by a second Observable.</span>\n *\n * ![](throttle.svg)\n *\n * `throttle` emits the source Observable values on the output Observable\n * when its internal timer is disabled, and ignores source values when the timer\n * is enabled. Initially, the timer is disabled. As soon as the first source\n * value arrives, it is forwarded to the output Observable, and then the timer\n * is enabled by calling the `durationSelector` function with the source value,\n * which returns the \"duration\" Observable. 
When the duration Observable emits a\n * value, the timer is disabled, and this process repeats for the\n * next source value.\n *\n * ## Example\n *\n * Emit clicks at a rate of at most one click per second\n *\n * ```ts\n * import { fromEvent, throttle, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(throttle(() => interval(1000)));\n *\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link audit}\n * @see {@link debounce}\n * @see {@link delayWhen}\n * @see {@link sample}\n * @see {@link throttleTime}\n *\n * @param durationSelector A function that receives a value from the source\n * Observable, for computing the silencing duration for each source value,\n * returned as an `ObservableInput`.\n * @param config A configuration object to define `leading` and `trailing`\n * behavior. Defaults to `{ leading: true, trailing: false }`.\n * @return A function that returns an Observable that performs the throttle\n * operation to limit the rate of emissions from the source.\n */\nexport function throttle<T>(durationSelector: (value: T) => ObservableInput<any>, config?: ThrottleConfig): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    const { leading = true, trailing = false } = config ?? {};\n    let hasValue = false;\n    let sendValue: T | null = null;\n    let throttled: Subscription | null = null;\n    let isComplete = false;\n\n    const endThrottling = () => {\n      throttled?.unsubscribe();\n      throttled = null;\n      if (trailing) {\n        send();\n        isComplete && subscriber.complete();\n      }\n    };\n\n    const cleanupThrottling = () => {\n      throttled = null;\n      isComplete && subscriber.complete();\n    };\n\n    const startThrottle = (value: T) =>\n      (throttled = innerFrom(durationSelector(value)).subscribe(createOperatorSubscriber(subscriber, endThrottling, cleanupThrottling)));\n\n    const send = () => {\n      if (hasValue) {\n        // Ensure we clear out our value and hasValue flag\n        // before we emit, otherwise reentrant code can cause\n        // issues here.\n        hasValue = false;\n        const value = sendValue!;\n        sendValue = null;\n        // Emit the value.\n        subscriber.next(value);\n        !isComplete && startThrottle(value);\n      }\n    };\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        // Regarding the presence of throttled.closed in the following\n        // conditions, if a synchronous duration selector is specified - weird,\n        // but legal - an already-closed subscription will be assigned to\n        // throttled, so the subscription's closed property needs to be checked,\n        // too.\n        (value) => {\n          hasValue = true;\n          sendValue = value;\n          !(throttled && !throttled.closed) && (leading ? 
send() : startThrottle(value));\n        },\n        () => {\n          isComplete = true;\n          !(trailing && hasValue && throttled && !throttled.closed) && subscriber.complete();\n        }\n      )\n    );\n  });\n}\n", "import { asyncScheduler } from '../scheduler/async';\nimport { throttle, ThrottleConfig } from './throttle';\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { timer } from '../observable/timer';\n\n/**\n * Emits a value from the source Observable, then ignores subsequent source\n * values for `duration` milliseconds, then repeats this process.\n *\n * <span class=\"informal\">Lets a value pass, then ignores source values for the\n * next `duration` milliseconds.</span>\n *\n * ![](throttleTime.png)\n *\n * `throttleTime` emits the source Observable values on the output Observable\n * when its internal timer is disabled, and ignores source values when the timer\n * is enabled. Initially, the timer is disabled. As soon as the first source\n * value arrives, it is forwarded to the output Observable, and then the timer\n * is enabled. After `duration` milliseconds (or the time unit determined\n * internally by the optional `scheduler`) has passed, the timer is disabled,\n * and this process repeats for the next source value. Optionally takes a\n * {@link SchedulerLike} for managing timers.\n *\n * ## Examples\n *\n * ### Limit click rate\n *\n * Emit clicks at a rate of at most one click per second\n *\n * ```ts\n * import { fromEvent, throttleTime } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(throttleTime(1000));\n *\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link auditTime}\n * @see {@link debounceTime}\n * @see {@link delay}\n * @see {@link sampleTime}\n * @see {@link throttle}\n *\n * @param duration Time to wait before emitting another value after\n * emitting the last value, measured in milliseconds or the time unit determined\n * internally by the optional `scheduler`.\n * @param scheduler The {@link SchedulerLike} to use for\n * managing the timers that handle the throttling. Defaults to {@link asyncScheduler}.\n * @param config A configuration object to define `leading` and\n * `trailing` behavior. 
Defaults to `{ leading: true, trailing: false }`.\n * @return A function that returns an Observable that performs the throttle\n * operation to limit the rate of emissions from the source.\n */\nexport function throttleTime<T>(\n  duration: number,\n  scheduler: SchedulerLike = asyncScheduler,\n  config?: ThrottleConfig\n): MonoTypeOperatorFunction<T> {\n  const duration$ = timer(duration, scheduler);\n  return throttle(() => duration$, config);\n}\n", "import { OperatorFunction, ObservableInputTuple } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { identity } from '../util/identity';\nimport { noop } from '../util/noop';\nimport { popResultSelector } from '../util/args';\n\nexport function withLatestFrom<T, O extends unknown[]>(...inputs: [...ObservableInputTuple<O>]): OperatorFunction<T, [T, ...O]>;\n\nexport function withLatestFrom<T, O extends unknown[], R>(\n  ...inputs: [...ObservableInputTuple<O>, (...value: [T, ...O]) => R]\n): OperatorFunction<T, R>;\n\n/**\n * Combines the source Observable with other Observables to create an Observable\n * whose values are calculated from the latest values of each, only when the\n * source emits.\n *\n * <span class=\"informal\">Whenever the source Observable emits a value, it\n * computes a formula using that value plus the latest values from other input\n * Observables, then emits the output of that formula.</span>\n *\n * ![](withLatestFrom.png)\n *\n * `withLatestFrom` combines each value from the source Observable (the\n * instance) with the latest values from the other input Observables only when\n * the source emits a value, optionally using a `project` function to determine\n * the value to be emitted on the output Observable. All input Observables must\n * emit at least one value before the output Observable will emit a value.\n *\n * ## Example\n *\n * On every click event, emit an array with the latest timer event plus the click event\n *\n * ```ts\n * import { fromEvent, interval, withLatestFrom } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const timer = interval(1000);\n * const result = clicks.pipe(withLatestFrom(timer));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link combineLatest}\n *\n * @param {ObservableInput} other An input Observable to combine with the source\n * Observable. More than one input Observables may be given as argument.\n * @param {Function} [project] Projection function for combining values\n * together. Receives all values in order of the Observables passed, where the\n * first parameter is a value from the source Observable. (e.g.\n * `a.pipe(withLatestFrom(b, c), map(([a1, b1, c1]) => a1 + b1 + c1))`). If this is not\n * passed, arrays will be emitted on the output Observable.\n * @return A function that returns an Observable of projected values from the\n * most recent values from each input Observable, or an array of the most\n * recent values from each input Observable.\n */\nexport function withLatestFrom<T, R>(...inputs: any[]): OperatorFunction<T, R | any[]> {\n  const project = popResultSelector(inputs) as ((...args: any[]) => R) | undefined;\n\n  return operate((source, subscriber) => {\n    const len = inputs.length;\n    const otherValues = new Array(len);\n    // An array of whether or not the other sources have emitted. 
Matched with them by index.\n    // TODO: At somepoint, we should investigate the performance implications here, and look\n    // into using a `Set()` and checking the `size` to see if we're ready.\n    let hasValue = inputs.map(() => false);\n    // Flipped true when we have at least one value from all other sources and\n    // we are ready to start emitting values.\n    let ready = false;\n\n    // Other sources. Note that here we are not checking `subscriber.closed`,\n    // this causes all inputs to be subscribed to, even if nothing can be emitted\n    // from them. This is an important distinction because subscription constitutes\n    // a side-effect.\n    for (let i = 0; i < len; i++) {\n      innerFrom(inputs[i]).subscribe(\n        createOperatorSubscriber(\n          subscriber,\n          (value) => {\n            otherValues[i] = value;\n            if (!ready && !hasValue[i]) {\n              // If we're not ready yet, flag to show this observable has emitted.\n              hasValue[i] = true;\n              // Intentionally terse code.\n              // If all of our other observables have emitted, set `ready` to `true`,\n              // so we know we can start emitting values, then clean up the `hasValue` array,\n              // because we don't need it anymore.\n              (ready = hasValue.every(identity)) && (hasValue = null!);\n            }\n          },\n          // Completing one of the other sources has\n          // no bearing on the completion of our result.\n          noop\n        )\n      );\n    }\n\n    // Source subscription\n    source.subscribe(\n      createOperatorSubscriber(subscriber, (value) => {\n        if (ready) {\n          // We have at least one value from the other sources. Go ahead and emit.\n          const values = [value, ...otherValues];\n          subscriber.next(project ? project(...values) : values);\n        }\n      })\n    );\n  });\n}\n", "import { zip as zipStatic } from '../observable/zip';\nimport { ObservableInput, ObservableInputTuple, OperatorFunction, Cons } from '../types';\nimport { operate } from '../util/lift';\n\n/** @deprecated Replaced with {@link zipWith}. Will be removed in v8. */\nexport function zip<T, A extends readonly unknown[]>(otherInputs: [...ObservableInputTuple<A>]): OperatorFunction<T, Cons<T, A>>;\n/** @deprecated Replaced with {@link zipWith}. Will be removed in v8. */\nexport function zip<T, A extends readonly unknown[], R>(\n  otherInputsAndProject: [...ObservableInputTuple<A>],\n  project: (...values: Cons<T, A>) => R\n): OperatorFunction<T, R>;\n/** @deprecated Replaced with {@link zipWith}. Will be removed in v8. */\nexport function zip<T, A extends readonly unknown[]>(...otherInputs: [...ObservableInputTuple<A>]): OperatorFunction<T, Cons<T, A>>;\n/** @deprecated Replaced with {@link zipWith}. Will be removed in v8. */\nexport function zip<T, A extends readonly unknown[], R>(\n  ...otherInputsAndProject: [...ObservableInputTuple<A>, (...values: Cons<T, A>) => R]\n): OperatorFunction<T, R>;\n\n/**\n * @deprecated Replaced with {@link zipWith}. 
Will be removed in v8.\n */\nexport function zip<T, R>(...sources: Array<ObservableInput<any> | ((...values: Array<any>) => R)>): OperatorFunction<T, any> {\n  return operate((source, subscriber) => {\n    zipStatic(source as ObservableInput<any>, ...(sources as Array<ObservableInput<any>>)).subscribe(subscriber);\n  });\n}\n", "import { ObservableInputTuple, OperatorFunction, Cons } from '../types';\nimport { zip } from './zip';\n\n/**\n * Subscribes to the source, and the observable inputs provided as arguments, and combines their values, by index, into arrays.\n *\n * What is meant by \"combine by index\": The first value from each will be made into a single array, then emitted,\n * then the second value from each will be combined into a single array and emitted, then the third value\n * from each will be combined into a single array and emitted, and so on.\n *\n * This will continue until it is no longer able to combine values of the same index into an array.\n *\n * After the last value from any one completed source is emitted in an array, the resulting observable will complete,\n * as there is no way to continue \"zipping\" values together by index.\n *\n * Use-cases for this operator are limited. There are memory concerns if one of the streams is emitting\n * values at a much faster rate than the others. Usage should likely be limited to streams that emit\n * at a similar pace, or finite streams of known length.\n *\n * In many cases, authors want `combineLatestWith` and not `zipWith`.\n *\n * @param otherInputs other observable inputs to collate values from.\n * @return A function that returns an Observable that emits items by index\n * combined from the source Observable and provided Observables, in form of an\n * array.\n */\nexport function zipWith<T, A extends readonly unknown[]>(...otherInputs: [...ObservableInputTuple<A>]): OperatorFunction<T, Cons<T, A>> {\n  return zip(...otherInputs);\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  ReplaySubject,\n  Subject,\n  fromEvent\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch document\n *\n * Documents are implemented as subjects, so all downstream observables are\n * automatically updated when a new document is emitted.\n *\n * @returns Document subject\n */\nexport function watchDocument(): Subject<Document> {\n  const document$ = new ReplaySubject<Document>(1)\n  fromEvent(document, \"DOMContentLoaded\", { once: true })\n    .subscribe(() => document$.next(document))\n\n  /* Return document */\n  return document$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve all elements matching the query selector\n *\n * @template T - Element type\n *\n * @param selector - Query selector\n * @param node - Node of reference\n *\n * @returns Elements\n */\nexport function getElements<T extends keyof HTMLElementTagNameMap>(\n  selector: T, node?: ParentNode\n): HTMLElementTagNameMap[T][]\n\nexport function getElements<T extends HTMLElement>(\n  selector: string, node?: ParentNode\n): T[]\n\nexport function getElements<T extends HTMLElement>(\n  selector: string, node: ParentNode = document\n): T[] {\n  return Array.from(node.querySelectorAll<T>(selector))\n}\n\n/**\n * Retrieve an element matching a query selector or throw a reference error\n *\n * Note that this function assumes that the element is present. 
If unsure if an\n * element is existent, use the `getOptionalElement` function instead.\n *\n * @template T - Element type\n *\n * @param selector - Query selector\n * @param node - Node of reference\n *\n * @returns Element\n */\nexport function getElement<T extends keyof HTMLElementTagNameMap>(\n  selector: T, node?: ParentNode\n): HTMLElementTagNameMap[T]\n\nexport function getElement<T extends HTMLElement>(\n  selector: string, node?: ParentNode\n): T\n\nexport function getElement<T extends HTMLElement>(\n  selector: string, node: ParentNode = document\n): T {\n  const el = getOptionalElement<T>(selector, node)\n  if (typeof el === \"undefined\")\n    throw new ReferenceError(\n      `Missing element: expected \"${selector}\" to be present`\n    )\n\n  /* Return element */\n  return el\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Retrieve an optional element matching the query selector\n *\n * @template T - Element type\n *\n * @param selector - Query selector\n * @param node - Node of reference\n *\n * @returns Element or nothing\n */\nexport function getOptionalElement<T extends keyof HTMLElementTagNameMap>(\n  selector: T, node?: ParentNode\n): HTMLElementTagNameMap[T] | undefined\n\nexport function getOptionalElement<T extends HTMLElement>(\n  selector: string, node?: ParentNode\n): T | undefined\n\nexport function getOptionalElement<T extends HTMLElement>(\n  selector: string, node: ParentNode = document\n): T | undefined {\n  return node.querySelector<T>(selector) || undefined\n}\n\n/**\n * Retrieve the currently active element\n *\n * @returns Element or nothing\n */\nexport function getActiveElement(): HTMLElement | undefined {\n  return (\n    document.activeElement?.shadowRoot?.activeElement as HTMLElement ??\n    document.activeElement as HTMLElement ??\n    undefined\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  debounceTime,\n  distinctUntilChanged,\n  fromEvent,\n  map,\n  merge,\n  shareReplay,\n  startWith\n} from \"rxjs\"\n\nimport { getActiveElement } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Focus observable\n *\n * Previously, this observer used `focus` and `blur` events to determine whether\n * an element is focused, but this doesn't work if there are focusable elements\n * within the elements itself. A better solutions are `focusin` and `focusout`\n * events, which bubble up the tree and allow for more fine-grained control.\n *\n * `debounceTime` is necessary, because when a focus change happens inside an\n * element, the observable would first emit `false` and then `true` again.\n */\nconst observer$ = merge(\n  fromEvent(document.body, \"focusin\"),\n  fromEvent(document.body, \"focusout\")\n)\n  .pipe(\n    debounceTime(1),\n    startWith(undefined),\n    map(() => getActiveElement() || document.body),\n    shareReplay(1)\n  )\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch element focus\n *\n * @param el - Element\n *\n * @returns Element focus observable\n */\nexport function watchElementFocus(\n  el: HTMLElement\n): Observable<boolean> {\n  return observer$\n    .pipe(\n      map(active => el.contains(active)),\n      distinctUntilChanged()\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  debounce,\n  defer,\n  fromEvent,\n  identity,\n  map,\n  merge,\n  startWith,\n  timer\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch element hover\n *\n * The second parameter allows to specify a timeout in milliseconds after which\n * the hover state will be reset to `false`. 
This is useful for tooltips which\n * should disappear after a certain amount of time, in order to allow the user\n * to move the cursor from the host to the tooltip.\n *\n * @param el - Element\n * @param timeout - Timeout\n *\n * @returns Element hover observable\n */\nexport function watchElementHover(\n  el: HTMLElement, timeout?: number\n): Observable<boolean> {\n  return defer(() => merge(\n    fromEvent(el, \"mouseenter\").pipe(map(() => true)),\n    fromEvent(el, \"mouseleave\").pipe(map(() => false))\n  )\n    .pipe(\n      timeout ? debounce(active => timer(+!active * timeout)) : identity,\n      startWith(el.matches(\":hover\"))\n    )\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { JSX as JSXInternal } from \"preact\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * HTML attributes\n */\ntype Attributes =\n  & JSXInternal.HTMLAttributes\n  & JSXInternal.SVGAttributes\n  & Record<string, any>\n\n/**\n * Child element\n */\ntype Child =\n  | ChildNode\n  | HTMLElement\n  | Text\n  | string\n  | number\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Append a child node to an element\n *\n * @param el - Element\n * @param child - Child node(s)\n */\nfunction appendChild(el: HTMLElement, child: Child | Child[]): void {\n\n  /* Handle primitive types (including raw HTML) */\n  if (typeof child === \"string\" || typeof child === \"number\") {\n    el.innerHTML += child.toString()\n\n  /* Handle nodes */\n  } else if (child instanceof Node) {\n    el.appendChild(child)\n\n  /* Handle nested children */\n  } else if (Array.isArray(child)) {\n    for (const node of child)\n      appendChild(el, node)\n  }\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * JSX factory\n *\n * @template T - Element type\n *\n * @param tag - HTML tag\n * @param attributes - HTML attributes\n * @param children - Child elements\n *\n * @returns Element\n */\nexport function h<T extends keyof HTMLElementTagNameMap>(\n  tag: T, attributes?: Attributes | 
null, ...children: Child[]\n): HTMLElementTagNameMap[T]\n\nexport function h<T extends h.JSX.Element>(\n  tag: string, attributes?: Attributes | null, ...children: Child[]\n): T\n\nexport function h<T extends h.JSX.Element>(\n  tag: string, attributes?: Attributes | null, ...children: Child[]\n): T {\n  const el = document.createElement(tag)\n\n  /* Set attributes, if any */\n  if (attributes)\n    for (const attr of Object.keys(attributes)) {\n      if (typeof attributes[attr] === \"undefined\")\n        continue\n\n      /* Set default attribute or boolean */\n      if (typeof attributes[attr] !== \"boolean\")\n        el.setAttribute(attr, attributes[attr])\n      else\n        el.setAttribute(attr, \"\")\n    }\n\n  /* Append child nodes */\n  for (const child of children)\n    appendChild(el, child)\n\n  /* Return element */\n  return el as T\n}\n\n/* ----------------------------------------------------------------------------\n * Namespace\n * ------------------------------------------------------------------------- */\n\nexport declare namespace h {\n  namespace JSX {\n    type Element = HTMLElement\n    type IntrinsicElements = JSXInternal.IntrinsicElements\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Round a number for display with repository facts\n *\n * This is a reverse-engineered version of GitHub's weird rounding algorithm\n * for stars, forks and all other numbers. 
While all numbers below `1,000` are\n * returned as-is, bigger numbers are converted to fixed numbers:\n *\n * - `1,049` => `1k`\n * - `1,050` => `1.1k`\n * - `1,949` => `1.9k`\n * - `1,950` => `2k`\n *\n * @param value - Original value\n *\n * @returns Rounded value\n */\nexport function round(value: number): string {\n  if (value > 999) {\n    const digits = +((value - 950) % 1000 > 99)\n    return `${((value + 0.000001) / 1000).toFixed(digits)}k`\n  } else {\n    return value.toString()\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  defer,\n  finalize,\n  fromEvent,\n  map,\n  merge,\n  switchMap,\n  take,\n  throwError\n} from \"rxjs\"\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create and load a `script` element\n *\n * This function returns an observable that will emit when the script was\n * successfully loaded, or throw an error if it wasn't.\n *\n * @param src - Script URL\n *\n * @returns Script observable\n */\nexport function watchScript(src: string): Observable<void> {\n  const script = h(\"script\", { src })\n  return defer(() => {\n    document.head.appendChild(script)\n    return merge(\n      fromEvent(script, \"load\"),\n      fromEvent(script, \"error\")\n        .pipe(\n          switchMap(() => (\n            throwError(() => new ReferenceError(`Invalid script: ${src}`))\n          ))\n        )\n    )\n      .pipe(\n        map(() => undefined),\n        finalize(() => document.head.removeChild(script)),\n        take(1)\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS 
PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  NEVER,\n  Observable,\n  Subject,\n  defer,\n  filter,\n  finalize,\n  map,\n  merge,\n  of,\n  shareReplay,\n  startWith,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { watchScript } from \"../../../script\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Element offset\n */\nexport interface ElementSize {\n  width: number                        /* Element width */\n  height: number                       /* Element height */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Resize observer entry subject\n */\nconst entry$ = new Subject<ResizeObserverEntry>()\n\n/**\n * Resize observer observable\n *\n * This observable will create a `ResizeObserver` on the first subscription\n * and will automatically terminate it when there are no more subscribers.\n * It's quite important to centralize observation in a single `ResizeObserver`,\n * as the performance difference can be quite dramatic, as the link shows.\n *\n * If the browser doesn't have a `ResizeObserver` implementation available, a\n * polyfill is automatically downloaded from unpkg.com. This is also compatible\n * with the built-in privacy plugin, which will download the polyfill and put\n * it alongside the built site for self-hosting.\n *\n * @see https://bit.ly/3iIYfEm - Google Groups on performance\n */\nconst observer$ = defer(() => (\n  typeof ResizeObserver === \"undefined\"\n    ? watchScript(\"https://unpkg.com/resize-observer-polyfill\")\n    : of(undefined)\n))\n  .pipe(\n    map(() => new ResizeObserver(entries => (\n      entries.forEach(entry => entry$.next(entry))\n    ))),\n    switchMap(observer => merge(NEVER, of(observer)).pipe(\n      finalize(() => observer.disconnect())\n    )),\n    shareReplay(1)\n  )\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element size\n *\n * @param el - Element\n *\n * @returns Element size\n */\nexport function getElementSize(\n  el: HTMLElement\n): ElementSize {\n  return {\n    width:  el.offsetWidth,\n    height: el.offsetHeight\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch element size\n *\n * This function returns an observable that subscribes to a single internal\n * instance of `ResizeObserver` upon subscription, and emit resize events until\n * termination. 
Note that this function should not be called with the same\n * element twice, as the first unsubscription will terminate observation.\n *\n * Sadly, we can't use the `DOMRect` objects returned by the observer, because\n * we need the emitted values to be consistent with `getElementSize`, which will\n * return the used values (rounded) and not actual values (unrounded). Thus, we\n * use the `offset*` properties. See the linked GitHub issue.\n *\n * @see https://bit.ly/3m0k3he - GitHub issue\n *\n * @param el - Element\n *\n * @returns Element size observable\n */\nexport function watchElementSize(\n  el: HTMLElement\n): Observable<ElementSize> {\n\n  // Compute target element - since inline elements cannot be observed by the\n  // current `ResizeObserver` implementation as provided by browsers, we need\n  // to determine the first containing parent element and use that one as a\n  // target, while we always compute the actual size from the element.\n  let target = el\n  while (target.clientWidth === 0)\n    if (target.parentElement)\n      target = target.parentElement\n    else\n      break\n\n  // Observe target element and recompute element size on resize - as described\n  // above, the target element is not necessarily the element of interest\n  return observer$.pipe(\n    tap(observer => observer.observe(target)),\n    switchMap(observer => entry$.pipe(\n      filter(entry => entry.target === target),\n      finalize(() => observer.unobserve(target))\n    )),\n    map(() => getElementSize(el)),\n    startWith(getElementSize(el))\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ElementSize } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element content size (= scroll width and height)\n *\n * @param el - Element\n *\n * @returns Element content size\n */\nexport function getElementContentSize(\n  el: HTMLElement\n): ElementSize {\n  return {\n    width:  el.scrollWidth,\n    height: el.scrollHeight\n  }\n}\n\n/**\n * Retrieve the overflowing container of an element, if any\n *\n * @param el - Element\n *\n * @returns Overflowing container or nothing\n */\nexport function getElementContainer(\n  el: HTMLElement\n): HTMLElement | undefined {\n  let parent = el.parentElement\n  while (parent)\n    if (\n      el.scrollWidth  <= parent.scrollWidth &&\n      el.scrollHeight <= parent.scrollHeight\n    )\n      parent = (el = parent).parentElement\n    else\n      break\n\n  /* Return overflowing container */\n  return parent ? el : undefined\n}\n\n/**\n * Retrieve all overflowing containers of an element, if any\n *\n * Note that this function has a slightly different behavior, so we should at\n * some point consider refactoring how overflowing containers are handled.\n *\n * @param el - Element\n *\n * @returns Overflowing containers\n */\nexport function getElementContainers(\n  el: HTMLElement\n): HTMLElement[] {\n  const containers: HTMLElement[] = []\n\n  // Walk up the DOM tree until we find an overflowing container\n  let parent = el.parentElement\n  while (parent) {\n    if (\n      el.clientWidth  > parent.clientWidth ||\n      el.clientHeight > parent.clientHeight\n    )\n      containers.push(parent)\n\n    // Continue with parent element\n    parent = (el = parent).parentElement\n  }\n\n  // If the page is short, the body might not be overflowing and there might be\n  // no other containers, which is why we need to make sure the body is present\n  if (containers.length === 0)\n    containers.push(document.documentElement)\n\n  // Return overflowing containers\n  return containers\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  animationFrameScheduler,\n  auditTime,\n  fromEvent,\n  map,\n  merge,\n  startWith\n} from \"rxjs\"\n\nimport { watchElementSize } from \"../../size\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Element offset\n */\nexport interface ElementOffset {\n  x: number                            /* Horizontal offset */\n  y: number                            /* Vertical offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element offset\n *\n * @param el - Element\n *\n * @returns Element offset\n */\nexport function getElementOffset(\n  el: HTMLElement\n): ElementOffset {\n  return {\n    x: el.offsetLeft,\n    y: el.offsetTop\n  }\n}\n\n/**\n * Retrieve absolute element offset\n *\n * @param el - Element\n *\n * @returns Element offset\n */\nexport function getElementOffsetAbsolute(\n  el: HTMLElement\n): ElementOffset {\n  const rect = el.getBoundingClientRect()\n  return {\n    x: rect.x + window.scrollX,\n    y: rect.y + window.scrollY\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch element offset\n *\n * @param el - Element\n *\n * @returns Element offset observable\n */\nexport function watchElementOffset(\n  el: HTMLElement\n): Observable<ElementOffset> {\n  return merge(\n    fromEvent(window, \"load\"),\n    fromEvent(window, \"resize\")\n  )\n    .pipe(\n      auditTime(0, animationFrameScheduler),\n      map(() => getElementOffset(el)),\n      startWith(getElementOffset(el))\n    )\n}\n\n/**\n * Watch absolute element offset\n *\n * @param el - Element\n *\n * @returns Element offset observable\n */\nexport function watchElementOffsetAbsolute(\n  el: HTMLElement\n): Observable<ElementOffset> {\n  return merge(\n    watchElementOffset(el),\n    watchElementSize(document.body) // @todo find a better way for this\n  )\n    .pipe(\n      map(() => getElementOffsetAbsolute(el)),\n      startWith(getElementOffsetAbsolute(el))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  animationFrameScheduler,\n  auditTime,\n  fromEvent,\n  map,\n  merge,\n  startWith\n} from \"rxjs\"\n\nimport { ElementOffset } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve element content offset (= scroll offset)\n *\n * @param el - Element\n *\n * @returns Element content offset\n */\nexport function getElementContentOffset(\n  el: HTMLElement\n): ElementOffset {\n  return {\n    x: el.scrollLeft,\n    y: el.scrollTop\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch element content offset\n *\n * @param el - Element\n *\n * @returns Element content offset observable\n */\nexport function watchElementContentOffset(\n  el: HTMLElement\n): Observable<ElementOffset> {\n  return merge(\n    fromEvent(el, \"scroll\"),\n    fromEvent(window, \"scroll\"),\n    fromEvent(window, \"resize\")\n  )\n    .pipe(\n      auditTime(0, animationFrameScheduler),\n      map(() => getElementContentOffset(el)),\n      startWith(getElementContentOffset(el))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  NEVER,\n  Observable,\n  Subject,\n  defer,\n  distinctUntilChanged,\n  filter,\n  finalize,\n  map,\n  merge,\n  of,\n  shareReplay,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport {\n  getElementContentSize,\n  getElementSize,\n  watchElementContentOffset\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Intersection observer entry subject\n */\nconst entry$ = new Subject<IntersectionObserverEntry>()\n\n/**\n * Intersection observer observable\n *\n * This observable will create an `IntersectionObserver` on first subscription\n * and will automatically terminate it when there are no more subscribers.\n *\n * @see https://bit.ly/3iIYfEm - Google Groups on performance\n */\nconst observer$ = defer(() => of(\n  new IntersectionObserver(entries => {\n    for (const entry of entries)\n      entry$.next(entry)\n  }, {\n    threshold: 0\n  })\n))\n  .pipe(\n    switchMap(observer => merge(NEVER, of(observer))\n      .pipe(\n        finalize(() => observer.disconnect())\n      )\n    ),\n    shareReplay(1)\n  )\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch element visibility\n *\n * @param el - Element\n *\n * @returns Element visibility observable\n */\nexport function watchElementVisibility(\n  el: HTMLElement\n): Observable<boolean> {\n  return observer$\n    .pipe(\n      tap(observer => observer.observe(el)),\n      switchMap(observer => entry$\n        .pipe(\n          filter(({ target }) => target === el),\n          finalize(() => observer.unobserve(el)),\n          map(({ isIntersecting }) => isIntersecting)\n        )\n      )\n    )\n}\n\n/**\n * Watch element boundary\n *\n * This function returns an observable which emits whether the bottom content\n * boundary (= scroll offset) of an element is within a certain threshold.\n *\n * @param el - Element\n * @param threshold - Threshold\n *\n * @returns Element boundary observable\n */\nexport function watchElementBoundary(\n  el: HTMLElement, threshold = 16\n): Observable<boolean> {\n  return watchElementContentOffset(el)\n    .pipe(\n      map(({ y }) => {\n        const visible = getElementSize(el)\n        const content = getElementContentSize(el)\n        return y >= (\n          content.height - visible.height - threshold\n        )\n      }),\n      distinctUntilChanged()\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the 
Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  fromEvent,\n  map,\n  startWith\n} from \"rxjs\"\n\nimport { getElement } from \"../element\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Toggle\n */\nexport type Toggle =\n  | \"drawer\"                           /* Toggle for drawer */\n  | \"search\"                           /* Toggle for search */\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Toggle map\n */\nconst toggles: Record<Toggle, HTMLInputElement> = {\n  drawer: getElement(\"[data-md-toggle=drawer]\"),\n  search: getElement(\"[data-md-toggle=search]\")\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve the value of a toggle\n *\n * @param name - Toggle\n *\n * @returns Toggle value\n */\nexport function getToggle(name: Toggle): boolean {\n  return toggles[name].checked\n}\n\n/**\n * Set toggle\n *\n * Simulating a click event seems to be the most cross-browser compatible way\n * of changing the value while also emitting a `change` event. 
Before, Material\n * used `CustomEvent` to programmatically change the value of a toggle, but this\n * is a much simpler and cleaner solution which doesn't require a polyfill.\n *\n * @param name - Toggle\n * @param value - Toggle value\n */\nexport function setToggle(name: Toggle, value: boolean): void {\n  if (toggles[name].checked !== value)\n    toggles[name].click()\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch toggle\n *\n * @param name - Toggle\n *\n * @returns Toggle value observable\n */\nexport function watchToggle(name: Toggle): Observable<boolean> {\n  const el = toggles[name]\n  return fromEvent(el, \"change\")\n    .pipe(\n      map(() => el.checked),\n      startWith(el.checked)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  filter,\n  fromEvent,\n  map,\n  merge,\n  share,\n  startWith,\n  switchMap\n} from \"rxjs\"\n\nimport { getActiveElement } from \"../element\"\nimport { getToggle } from \"../toggle\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Keyboard mode\n */\nexport type KeyboardMode =\n  | \"global\"                           /* Global */\n  | \"search\"                           /* Search is open */\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Keyboard\n */\nexport interface Keyboard {\n  mode: KeyboardMode                   /* Keyboard mode */\n  type: string                         /* Key type */\n  claim(): void                        /* Key claim */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Check whether an element may receive keyboard input\n *\n * @param el - Element\n * @param type - Key type\n *\n * @returns Test result\n */\nfunction isSusceptibleToKeyboard(\n  el: HTMLElement, type: string\n): boolean {\n  switch (el.constructor) {\n\n    /* Input elements */\n    case HTMLInputElement:\n      /* @ts-expect-error - omit unnecessary type cast */\n      if (el.type === \"radio\")\n        return /^Arrow/.test(type)\n      else\n        return true\n\n    /* Select element and textarea */\n    
case HTMLSelectElement:\n    case HTMLTextAreaElement:\n      return true\n\n    /* Everything else */\n    default:\n      return el.isContentEditable\n  }\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch composition events\n *\n * @returns Composition observable\n */\nexport function watchComposition(): Observable<boolean> {\n  return merge(\n    fromEvent(window, \"compositionstart\").pipe(map(() => true)),\n    fromEvent(window, \"compositionend\").pipe(map(() => false))\n  )\n    .pipe(\n      startWith(false)\n    )\n}\n\n/**\n * Watch keyboard\n *\n * @returns Keyboard observable\n */\nexport function watchKeyboard(): Observable<Keyboard> {\n  const keyboard$ = fromEvent<KeyboardEvent>(window, \"keydown\")\n    .pipe(\n      filter(ev => !(ev.metaKey || ev.ctrlKey)),\n      map(ev => ({\n        mode: getToggle(\"search\") ? \"search\" : \"global\",\n        type: ev.key,\n        claim() {\n          ev.preventDefault()\n          ev.stopPropagation()\n        }\n      } as Keyboard)),\n      filter(({ mode, type }) => {\n        if (mode === \"global\") {\n          const active = getActiveElement()\n          if (typeof active !== \"undefined\")\n            return !isSusceptibleToKeyboard(active, type)\n        }\n        return true\n      }),\n      share()\n    )\n\n  /* Don't emit during composition events - see https://bit.ly/3te3Wl8 */\n  return watchComposition()\n    .pipe(\n      switchMap(active => !active ? keyboard$ : EMPTY)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Subject } from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve location\n *\n * This function returns a `URL` object (and not `Location`) to normalize the\n * typings across the application. 
Furthermore, locations need to be tracked\n * without setting them and `Location` is a singleton which represents the\n * current location.\n *\n * @returns URL\n */\nexport function getLocation(): URL {\n  return new URL(location.href)\n}\n\n/**\n * Set location\n *\n * If instant navigation is enabled, this function creates a temporary anchor\n * element, sets the `href` attribute, appends it to the body, clicks it, and\n * then removes it again. The event will bubble up the DOM and trigger be\n * intercepted by the instant loading business logic.\n *\n * Note that we must append and remove the anchor element, or the event will\n * not bubble up the DOM, making it impossible to intercept it.\n *\n * @param url - URL to navigate to\n * @param navigate - Force navigation\n */\nexport function setLocation(\n  url: URL | HTMLLinkElement, navigate = false\n): void {\n  if (feature(\"navigation.instant\") && !navigate) {\n    const el = h(\"a\", { href: url.href })\n    document.body.appendChild(el)\n    el.click()\n    el.remove()\n\n  // If we're not using instant navigation, and the page should not be reloaded\n  // just instruct the browser to navigate to the given URL\n  } else {\n    location.href = url.href\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch location\n *\n * @returns Location subject\n */\nexport function watchLocation(): Subject<URL> {\n  return new Subject<URL>()\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  filter,\n  fromEvent,\n  map,\n  merge,\n  shareReplay,\n  startWith\n} from \"rxjs\"\n\nimport { getOptionalElement } from \"~/browser\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve location hash\n *\n * @returns Location hash\n */\nexport function getLocationHash(): string {\n  return location.hash.slice(1)\n}\n\n/**\n * Set location hash\n *\n * Setting a new fragment identifier via `location.hash` will have no effect\n * if the value doesn't change. 
When a new fragment identifier is set, we want\n * the browser to target the respective element at all times, which is why we\n * use this dirty little trick.\n *\n * @param hash - Location hash\n */\nexport function setLocationHash(hash: string): void {\n  const el = h(\"a\", { href: hash })\n  el.addEventListener(\"click\", ev => ev.stopPropagation())\n  el.click()\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch location hash\n *\n * @param location$ - Location observable\n *\n * @returns Location hash observable\n */\nexport function watchLocationHash(\n  location$: Observable<URL>\n): Observable<string> {\n  return merge(\n    fromEvent<HashChangeEvent>(window, \"hashchange\"),\n    location$\n  )\n    .pipe(\n      map(getLocationHash),\n      startWith(getLocationHash()),\n      filter(hash => hash.length > 0),\n      shareReplay(1)\n    )\n}\n\n/**\n * Watch location target\n *\n * @param location$ - Location observable\n *\n * @returns Location target observable\n */\nexport function watchLocationTarget(\n  location$: Observable<URL>\n): Observable<HTMLElement> {\n  return watchLocationHash(location$)\n    .pipe(\n      map(id => getOptionalElement(`[id=\"${id}\"]`)!),\n      filter(el => typeof el !== \"undefined\")\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  fromEvent,\n  fromEventPattern,\n  map,\n  merge,\n  startWith,\n  switchMap\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch media query\n *\n * Note that although `MediaQueryList.addListener` is deprecated we have to\n * use it, because it's the only way to ensure proper downward compatibility.\n *\n * @see https://bit.ly/3dUBH2m - GitHub issue\n *\n * @param query - Media query\n *\n * @returns Media observable\n */\nexport function watchMedia(query: string): Observable<boolean> {\n  const media = matchMedia(query)\n  return fromEventPattern<boolean>(next => (\n    media.addListener(() => next(media.matches))\n  ))\n    .pipe(\n      startWith(media.matches)\n    )\n}\n\n/**\n * Watch print mode\n *\n * @returns Print observable\n */\nexport function watchPrint(): Observable<boolean> {\n  const media = matchMedia(\"print\")\n  return merge(\n    fromEvent(window, \"beforeprint\").pipe(map(() => true)),\n    fromEvent(window, \"afterprint\").pipe(map(() => false))\n  )\n    .pipe(\n      startWith(media.matches)\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Toggle an observable with a media observable\n *\n * @template T - Data type\n *\n * @param query$ - Media observable\n * @param factory - Observable factory\n *\n * @returns Toggled observable\n */\nexport function at<T>(\n  query$: Observable<boolean>, factory: () => Observable<T>\n): Observable<T> {\n  return query$\n    .pipe(\n      switchMap(active => active ? factory() : EMPTY)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  map,\n  shareReplay,\n  switchMap\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Options\n */\ninterface Options {\n  progress$?: Subject<number>          // Progress subject\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch the given URL\n *\n * This function returns an observable that emits the response as a blob and\n * completes, or emits an error if the request failed. The caller can cancel\n * the request by unsubscribing at any time, which will automatically abort\n * the inflight request and complete the observable.\n *\n * Note that we use `XMLHTTPRequest` not because we're nostalgic, but because\n * it's the only way to get progress events for downloads and also allow for\n * cancellation of requests, as the official Fetch API does not support this\n * yet, even though we're already in 2024.\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Data observable\n */\nexport function request(\n  url: URL | string, options?: Options\n): Observable<Blob> {\n  return new Observable<Blob>(observer => {\n    const req = new XMLHttpRequest()\n    req.open(\"GET\", `${url}`)\n    req.responseType = \"blob\"\n\n    // Handle response\n    req.addEventListener(\"load\", () => {\n      if (req.status >= 200 && req.status < 300) {\n        observer.next(req.response)\n        observer.complete()\n\n      // Every response that is not in the 2xx range is considered an error\n      } else {\n        observer.error(new Error(req.statusText))\n      }\n    })\n\n    // Handle network errors\n    req.addEventListener(\"error\", () => {\n      observer.error(new Error(\"Network error\"))\n    })\n\n    // Handle aborted requests\n    req.addEventListener(\"abort\", () => {\n      observer.complete()\n    })\n\n    // Handle download progress\n    if (typeof options?.progress$ !== \"undefined\") {\n      req.addEventListener(\"progress\", event => {\n        if (event.lengthComputable) {\n          options.progress$!.next((event.loaded / event.total) * 100)\n\n        // Hack: Chromium doesn't report the total number of bytes if content\n        // is compressed, so we need this fallback - see https://t.ly/ZXofI\n        } else {\n          const length = req.getResponseHeader(\"Content-Length\") ?? 
0\n          options.progress$!.next((event.loaded / +length) * 100)\n        }\n      })\n\n      // Immediately set progress to 5% to indicate that we're loading\n      options.progress$.next(5)\n    }\n\n    // Send request and automatically abort request upon unsubscription\n    req.send()\n    return () => req.abort()\n  })\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Fetch JSON from the given URL\n *\n * @template T - Data type\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Data observable\n */\nexport function requestJSON<T>(\n  url: URL | string, options?: Options\n): Observable<T> {\n  return request(url, options)\n    .pipe(\n      switchMap(res => res.text()),\n      map(body => JSON.parse(body) as T),\n      shareReplay(1)\n    )\n}\n\n/**\n * Fetch HTML from the given URL\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Data observable\n */\nexport function requestHTML(\n  url: URL | string, options?: Options\n): Observable<Document> {\n  const dom = new DOMParser()\n  return request(url, options)\n    .pipe(\n      switchMap(res => res.text()),\n      map(res => dom.parseFromString(res, \"text/html\")),\n      shareReplay(1)\n    )\n}\n\n/**\n * Fetch XML from the given URL\n *\n * @param url - Request URL\n * @param options - Options\n *\n * @returns Data observable\n */\nexport function requestXML(\n  url: URL | string, options?: Options\n): Observable<Document> {\n  const dom = new DOMParser()\n  return request(url, options)\n    .pipe(\n      switchMap(res => res.text()),\n      map(res => dom.parseFromString(res, \"text/xml\")),\n      shareReplay(1)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  fromEvent,\n  map,\n  merge,\n  startWith\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport offset\n */\nexport interface ViewportOffset {\n  x: number                            /* Horizontal offset */\n  y: number                            /* Vertical offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve viewport offset\n *\n * On iOS Safari, viewport offset can be negative due to overflow scrolling.\n * As this may induce strange behaviors downstream, we'll just limit it to 0.\n *\n * @returns Viewport offset\n */\nexport function getViewportOffset(): ViewportOffset {\n  return {\n    x: Math.max(0, scrollX),\n    y: Math.max(0, scrollY)\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport offset\n *\n * @returns Viewport offset observable\n */\nexport function watchViewportOffset(): Observable<ViewportOffset> {\n  return merge(\n    fromEvent(window, \"scroll\", { passive: true }),\n    fromEvent(window, \"resize\", { passive: true })\n  )\n    .pipe(\n      map(getViewportOffset),\n      startWith(getViewportOffset())\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  fromEvent,\n  map,\n  startWith\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport size\n */\nexport interface ViewportSize {\n  width: number                        /* Viewport width */\n  height: number                       /* Viewport height */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve viewport size\n *\n * @returns Viewport size\n */\nexport function getViewportSize(): ViewportSize {\n  return {\n    width:  innerWidth,\n    height: innerHeight\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport size\n *\n * @returns Viewport size observable\n */\nexport function watchViewportSize(): Observable<ViewportSize> {\n  return fromEvent(window, \"resize\", { passive: true })\n    .pipe(\n      map(getViewportSize),\n      startWith(getViewportSize())\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  map,\n  shareReplay\n} from \"rxjs\"\n\nimport {\n  ViewportOffset,\n  watchViewportOffset\n} from \"../offset\"\nimport {\n  ViewportSize,\n  watchViewportSize\n} from \"../size\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport\n */\nexport interface Viewport {\n  offset: ViewportOffset               /* Viewport offset */\n  size: ViewportSize                   /* Viewport size */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport\n *\n * @returns Viewport observable\n */\nexport function watchViewport(): Observable<Viewport> {\n  return combineLatest([\n    watchViewportOffset(),\n    watchViewportSize()\n  ])\n    .pipe(\n      map(([offset, size]) => ({ offset, size })),\n      shareReplay(1)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  distinctUntilKeyChanged,\n  map\n} from \"rxjs\"\n\nimport { Header } from \"~/components\"\n\nimport { getElementOffset } from \"../../element\"\nimport { Viewport } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport relative to element\n *\n * @param el - Element\n * @param options - Options\n *\n * @returns Viewport observable\n */\nexport function watchViewportAt(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<Viewport> {\n  const size$ = viewport$\n    .pipe(\n      distinctUntilKeyChanged(\"size\")\n    )\n\n  /* Compute element offset */\n  const offset$ = combineLatest([size$, header$])\n    .pipe(\n      map(() => getElementOffset(el))\n    )\n\n  /* Compute relative viewport, return hot observable */\n  return combineLatest([header$, viewport$, offset$])\n    .pipe(\n      map(([{ height }, { offset, size }, { x, y }]) => ({\n        offset: {\n          x: offset.x - x,\n          y: offset.y - y + height\n        },\n        size\n      }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  endWith,\n  fromEvent,\n  ignoreElements,\n  mergeWith,\n  share,\n  takeUntil\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Worker message\n */\nexport interface WorkerMessage {\n  type: unknown                        /* Message type */\n  data?: unknown                       /* Message data */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create an observable for receiving from a web worker\n *\n * @template T - Data type\n *\n * @param worker - Web worker\n *\n * @returns Message observable\n */\nfunction recv<T>(worker: Worker): Observable<T> {\n  return fromEvent<MessageEvent<T>, T>(worker, \"message\", ev => ev.data)\n}\n\n/**\n * Create a subject for sending to a web worker\n *\n * @template T - Data type\n *\n * @param worker - Web worker\n *\n * @returns Message subject\n */\nfunction send<T>(worker: Worker): Subject<T> {\n  const send$ = new Subject<T>()\n  send$.subscribe(data => worker.postMessage(data))\n\n  /* Return message subject */\n  return send$\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a bidirectional communication channel to a web worker\n *\n * @template T - Data type\n *\n * @param url - Worker URL\n * @param worker - Worker\n *\n * @returns Worker subject\n */\nexport function watchWorker<T extends WorkerMessage>(\n  url: string, worker = new Worker(url)\n): Subject<T> {\n  const recv$ = recv<T>(worker)\n  const send$ = send<T>(worker)\n\n  /* Create worker subject and forward messages */\n  const worker$ = new Subject<T>()\n  worker$.subscribe(send$)\n\n  /* Return worker subject */\n  const done$ = send$.pipe(ignoreElements(), endWith(true))\n  return worker$\n    .pipe(\n      ignoreElements(),\n      mergeWith(recv$.pipe(takeUntil(done$))),\n      share()\n    ) as Subject<T>\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { getElement, getLocation } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Feature flag\n */\nexport type Flag =\n  | \"announce.dismiss\"                 /* Dismissable announcement bar */\n  | \"content.code.annotate\"            /* Code annotations */\n  | \"content.code.copy\"                /* Code copy button */\n  | \"content.lazy\"                     /* Lazy content elements */\n  | \"content.tabs.link\"                /* Link content tabs */\n  | \"content.tooltips\"                 /* Tooltips */\n  | \"header.autohide\"                  /* Hide header */\n  | \"navigation.expand\"                /* Automatic expansion */\n  | \"navigation.indexes\"               /* Section pages */\n  | \"navigation.instant\"               /* Instant navigation */\n  | \"navigation.instant.progress\"      /* Instant navigation progress */\n  | \"navigation.sections\"              /* Section navigation */\n  | \"navigation.tabs\"                  /* Tabs navigation */\n  | \"navigation.tabs.sticky\"           /* Tabs navigation (sticky) */\n  | \"navigation.top\"                   /* Back-to-top button */\n  | \"navigation.tracking\"              /* Anchor tracking */\n  | \"search.highlight\"                 /* Search highlighting */\n  | \"search.share\"                     /* Search sharing */\n  | \"search.suggest\"                   /* Search suggestions */\n  | \"toc.follow\"                       /* Following table of contents */\n  | \"toc.integrate\"                    /* Integrated table of contents */\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Translation\n */\nexport type Translation =\n  | \"clipboard.copy\"                   /* Copy to clipboard */\n  | \"clipboard.copied\"                 /* Copied to clipboard */\n  | \"search.result.placeholder\"        /* Type to start searching */\n  | \"search.result.none\"               /* No matching documents */\n  | \"search.result.one\"                /* 1 matching document */\n  | \"search.result.other\"              /* # matching documents */\n  | \"search.result.more.one\"           /* 1 more on this page */\n  | \"search.result.more.other\"         /* # more on this page */\n  | \"search.result.term.missing\"       /* Missing */\n  | \"select.version\"                   /* Version selector */\n\n/**\n * Translations\n */\nexport type Translations =\n  Record<Translation, string>\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Versioning\n */\nexport interface Versioning {\n  provider: \"mike\"                     /* Version provider */\n  default?: string | string[]          /* Default version */\n  alias?: boolean                      /* Show alias */\n}\n\n/**\n * Configuration\n */\nexport interface Config {\n  base: string                         /* Base URL */\n  features: Flag[]                     /* Feature flags */\n  translations: Translations           /* Translations */\n  search: string                       /* Search worker URL */\n  tags?: Record<string, string>        /* Tags mapping 
*/\n  version?: Versioning                 /* Versioning */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve global configuration and make base URL absolute\n */\nconst script = getElement(\"#__config\")\nconst config: Config = JSON.parse(script.textContent!)\nconfig.base = `${new URL(config.base, getLocation())}`\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve global configuration\n *\n * @returns Global configuration\n */\nexport function configuration(): Config {\n  return config\n}\n\n/**\n * Check whether a feature flag is enabled\n *\n * @param flag - Feature flag\n *\n * @returns Test result\n */\nexport function feature(flag: Flag): boolean {\n  return config.features.includes(flag)\n}\n\n/**\n * Retrieve the translation for the given key\n *\n * @param key - Key to be translated\n * @param value - Positional value, if any\n *\n * @returns Translation\n */\nexport function translation(\n  key: Translation, value?: string | number\n): string {\n  return typeof value !== \"undefined\"\n    ? config.translations[key].replace(\"#\", value.toString())\n    : config.translations[key]\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { getElement, getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Component type\n */\nexport type ComponentType =\n  | \"announce\"                         /* Announcement bar */\n  | \"container\"                        /* Container */\n  | \"consent\"                          /* Consent */\n  | \"content\"                          /* Content */\n  | \"dialog\"                           /* Dialog */\n  | \"header\"                           /* Header */\n  | \"header-title\"                     /* Header title */\n  | \"header-topic\"                     /* Header topic */\n  | \"main\"                             /* Main area */\n  | \"outdated\"                         /* Version warning */\n  | \"palette\"                          /* Color palette */\n  | \"progress\"                         /* Progress indicator */\n  | \"search\"                           /* Search */\n  | \"search-query\"                     /* Search input */\n  | \"search-result\"                    /* Search results */\n  | \"search-share\"                     /* Search sharing */\n  | \"search-suggest\"                   /* Search suggestions */\n  | \"sidebar\"                          /* Sidebar */\n  | \"skip\"                             /* Skip link */\n  | \"source\"                           /* Repository information */\n  | \"tabs\"                             /* Navigation tabs */\n  | \"toc\"                              /* Table of contents */\n  | \"top\"                              /* Back-to-top button */\n\n/**\n * Component\n *\n * @template T - Component type\n * @template U - Reference type\n */\nexport type Component<\n  T extends {} = {},\n  U extends HTMLElement = HTMLElement\n> =\n  T & {\n    ref: U                             /* Component reference */\n  }\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Component type map\n */\ninterface ComponentTypeMap {\n  \"announce\": HTMLElement              /* Announcement bar */\n  \"container\": HTMLElement             /* Container */\n  \"consent\": HTMLElement               /* Consent */\n  \"content\": HTMLElement               /* Content */\n  \"dialog\": HTMLElement                /* Dialog */\n  \"header\": HTMLElement                /* Header */\n  \"header-title\": HTMLElement          /* Header title */\n  \"header-topic\": HTMLElement          /* Header topic */\n  \"main\": HTMLElement                  /* Main area */\n  \"outdated\": HTMLElement              /* Version warning */\n  \"palette\": HTMLElement               /* Color palette */\n  \"progress\": HTMLElement              /* Progress indicator */\n  \"search\": HTMLElement                /* Search */\n  \"search-query\": HTMLInputElement     /* Search input */\n  \"search-result\": HTMLElement         /* Search results */\n  \"search-share\": HTMLAnchorElement    /* Search sharing */\n  \"search-suggest\": HTMLElement        /* Search suggestions */\n  \"sidebar\": HTMLElement         
      /* Sidebar */\n  \"skip\": HTMLAnchorElement            /* Skip link */\n  \"source\": HTMLAnchorElement          /* Repository information */\n  \"tabs\": HTMLElement                  /* Navigation tabs */\n  \"toc\": HTMLElement                   /* Table of contents */\n  \"top\": HTMLAnchorElement             /* Back-to-top button */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve the element for a given component or throw a reference error\n *\n * @template T - Component type\n *\n * @param type - Component type\n * @param node - Node of reference\n *\n * @returns Element\n */\nexport function getComponentElement<T extends ComponentType>(\n  type: T, node: ParentNode = document\n): ComponentTypeMap[T] {\n  return getElement(`[data-md-component=${type}]`, node)\n}\n\n/**\n * Retrieve all elements for a given component\n *\n * @template T - Component type\n *\n * @param type - Component type\n * @param node - Node of reference\n *\n * @returns Elements\n */\nexport function getComponentElements<T extends ComponentType>(\n  type: T, node: ParentNode = document\n): ComponentTypeMap[T][] {\n  return getElements(`[data-md-component=${type}]`, node)\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  finalize,\n  fromEvent,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { getElement } from \"~/browser\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Announcement bar\n */\nexport interface Announce {\n  hash: number                        /* Content hash */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch announcement bar\n *\n * @param el - Announcement bar element\n *\n * @returns Announcement bar observable\n */\nexport function watchAnnounce(\n  el: HTMLElement\n): Observable<Announce> {\n  const button = getElement(\".md-typeset > :first-child\", el)\n  return fromEvent(button, \"click\", { once: true })\n    .pipe(\n      map(() => getElement(\".md-typeset\", el)),\n      map(content => ({ hash: __md_hash(content.innerHTML) }))\n    )\n}\n\n/**\n * Mount announcement bar\n *\n * @param el - Announcement bar element\n *\n * @returns Announcement bar component observable\n */\nexport function mountAnnounce(\n  el: HTMLElement\n): Observable<Component<Announce>> {\n  if (!feature(\"announce.dismiss\") || !el.childElementCount)\n    return EMPTY\n\n  /* Support instant navigation - see https://t.ly/3FTme */\n  if (!el.hidden) {\n    const content = getElement(\".md-typeset\", el)\n    if (__md_hash(content.innerHTML) === __md_get(\"__announce\"))\n      el.hidden = true\n  }\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<Announce>()\n    push$.subscribe(({ hash }) => {\n      el.hidden = true\n\n      /* Persist preference in local storage */\n      __md_set<number>(\"__announce\", hash)\n    })\n\n    /* Create and return component */\n    return watchAnnounce(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  finalize,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Consent\n */\nexport interface Consent {\n  hidden: boolean                      /* Consent is hidden */\n}\n\n/**\n * Consent defaults\n */\nexport interface ConsentDefaults {\n  analytics?: boolean                  /* Consent for Analytics */\n  github?: boolean                     /* Consent for GitHub */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  target$: Observable<HTMLElement>     /* Target observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch consent\n *\n * @param el - Consent element\n * @param options - Options\n *\n * @returns Consent observable\n */\nexport function watchConsent(\n  el: HTMLElement, { target$ }: WatchOptions\n): Observable<Consent> {\n  return target$\n    .pipe(\n      map(target => ({ hidden: target !== el }))\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount consent\n *\n * @param el - Consent element\n * @param options - Options\n *\n * @returns Consent component observable\n */\nexport function mountConsent(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Consent>> {\n  const internal$ = new Subject<Consent>()\n  internal$.subscribe(({ hidden }) => {\n    el.hidden = hidden\n  })\n\n  /* Create and return component */\n  return watchConsent(el, options)\n    .pipe(\n      tap(state => internal$.next(state)),\n      finalize(() => internal$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ComponentChild } from \"preact\"\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tooltip style\n */\nexport type TooltipStyle =\n  | \"inline\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a tooltip\n *\n * @param id - Tooltip identifier\n * @param style - Tooltip style\n *\n * @returns Element\n */\nexport function renderTooltip(\n  id?: string, style?: TooltipStyle\n): HTMLElement {\n  if (style === \"inline\") { // @todo refactor control flow\n    return (\n      <div class=\"md-tooltip md-tooltip--inline\" id={id} role=\"tooltip\">\n        <div class=\"md-tooltip__inner md-typeset\"></div>\n      </div>\n    )\n  } else {\n    return (\n      <div class=\"md-tooltip\" id={id} role=\"tooltip\">\n        <div class=\"md-tooltip__inner md-typeset\"></div>\n      </div>\n    )\n  }\n}\n\n// @todo: rename\nexport function renderInlineTooltip2(\n  ...children: ComponentChild[]\n): HTMLElement {\n  return (\n    <div class=\"md-tooltip2\" role=\"tooltip\">\n      <div class=\"md-tooltip2__inner md-typeset\">\n        {children}\n      </div>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\nimport { renderTooltip } from \"../tooltip\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render an annotation\n *\n * @param id - Annotation identifier\n * @param prefix - Tooltip identifier prefix\n *\n * @returns Element\n */\nexport function renderAnnotation(\n  id: string | number, prefix?: string\n): HTMLElement {\n  prefix = prefix ? `${prefix}_annotation_${id}` : undefined\n\n  /* Render tooltip with anchor, if given */\n  if (prefix) {\n    const anchor = prefix ? 
`#${prefix}` : undefined\n    return (\n      <aside class=\"md-annotation\" tabIndex={0}>\n        {renderTooltip(prefix)}\n        <a href={anchor} class=\"md-annotation__index\" tabIndex={-1}>\n          <span data-md-annotation-id={id}></span>\n        </a>\n      </aside>\n    )\n  } else {\n    return (\n      <aside class=\"md-annotation\" tabIndex={0}>\n        {renderTooltip(prefix)}\n        <span class=\"md-annotation__index\" tabIndex={-1}>\n          <span data-md-annotation-id={id}></span>\n        </span>\n      </aside>\n    )\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { translation } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a 'copy-to-clipboard' button\n *\n * @param id - Unique identifier\n *\n * @returns Element\n */\nexport function renderClipboardButton(id: string): HTMLElement {\n  return (\n    <button\n      class=\"md-clipboard md-icon\"\n      title={translation(\"clipboard.copy\")}\n      data-clipboard-target={`#${id} > code`}\n    ></button>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ComponentChild } from \"preact\"\n\nimport { configuration, feature, translation } from \"~/_\"\nimport { SearchItem } from \"~/integrations/search\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Render flag\n */\nconst enum Flag {\n  TEASER = 1,                          /* Render teaser */\n  PARENT = 2                           /* Render as parent */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper function\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a search document\n *\n * @param document - Search document\n * @param flag - Render flags\n *\n * @returns Element\n */\nfunction renderSearchDocument(\n  document: SearchItem, flag: Flag\n): HTMLElement {\n  const parent = flag & Flag.PARENT\n  const teaser = flag & Flag.TEASER\n\n  /* Render missing query terms */\n  const missing = Object.keys(document.terms)\n    .filter(key => !document.terms[key])\n    .reduce<ComponentChild[]>((list, key) => [\n      ...list, <del>{key}</del>, \" \"\n    ], [])\n    .slice(0, -1)\n\n  /* Assemble query string for highlighting */\n  const config = configuration()\n  const url = new URL(document.location, config.base)\n  if (feature(\"search.highlight\"))\n    url.searchParams.set(\"h\", Object.entries(document.terms)\n      .filter(([, match]) => match)\n      .reduce((highlight, [value]) => `${highlight} ${value}`.trim(), \"\")\n    )\n\n  /* Render article or section, depending on flags */\n  const { tags } = configuration()\n  return (\n    <a href={`${url}`} class=\"md-search-result__link\" tabIndex={-1}>\n      <article\n        class=\"md-search-result__article md-typeset\"\n        data-md-score={document.score.toFixed(2)}\n      >\n        {parent > 0 && <div class=\"md-search-result__icon md-icon\"></div>}\n        {parent > 0 && <h1>{document.title}</h1>}\n        {parent <= 0 && <h2>{document.title}</h2>}\n        {teaser > 0 && document.text.length > 0 &&\n          document.text\n        }\n        {document.tags && document.tags.map(tag => {\n          const type = tags\n            ? tag in tags\n              ? 
`md-tag-icon md-tag--${tags[tag]}`\n              : \"md-tag-icon\"\n            : \"\"\n          return (\n            <span class={`md-tag ${type}`}>{tag}</span>\n          )\n        })}\n        {teaser > 0 && missing.length > 0 &&\n          <p class=\"md-search-result__terms\">\n            {translation(\"search.result.term.missing\")}: {...missing}\n          </p>\n        }\n      </article>\n    </a>\n  )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a search result\n *\n * @param result - Search result\n *\n * @returns Element\n */\nexport function renderSearchResultItem(\n  result: SearchItem[]\n): HTMLElement {\n  const threshold = result[0].score\n  const docs = [...result]\n\n  const config = configuration()\n\n  /* Find and extract parent article */\n  const parent = docs.findIndex(doc => {\n    const l = `${new URL(doc.location, config.base)}` // @todo hacky\n    return !l.includes(\"#\")\n  })\n  const [article] = docs.splice(parent, 1)\n\n  /* Determine last index above threshold */\n  let index = docs.findIndex(doc => doc.score < threshold)\n  if (index === -1)\n    index = docs.length\n\n  /* Partition sections */\n  const best = docs.slice(0, index)\n  const more = docs.slice(index)\n\n  /* Render children */\n  const children = [\n    renderSearchDocument(article, Flag.PARENT | +(!parent && index === 0)),\n    ...best.map(section => renderSearchDocument(section, Flag.TEASER)),\n    ...more.length ? [\n      <details class=\"md-search-result__more\">\n        <summary tabIndex={-1}>\n          <div>\n            {more.length > 0 && more.length === 1\n              ? translation(\"search.result.more.one\")\n              : translation(\"search.result.more.other\", more.length)\n            }\n          </div>\n        </summary>\n        {...more.map(section => renderSearchDocument(section, Flag.TEASER))}\n      </details>\n    ] : []\n  ]\n\n  /* Render search result */\n  return (\n    <li class=\"md-search-result__item\">\n      {children}\n    </li>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { SourceFacts } from \"~/components\"\nimport { h, round } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render repository facts\n *\n * @param facts - Repository facts\n *\n * @returns Element\n */\nexport function renderSourceFacts(facts: SourceFacts): HTMLElement {\n  return (\n    <ul class=\"md-source__facts\">\n      {Object.entries(facts).map(([key, value]) => (\n        <li class={`md-source__fact md-source__fact--${key}`}>\n          {typeof value === \"number\" ? round(value) : value}\n        </li>\n      ))}\n    </ul>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tabbed control type\n */\ntype TabbedControlType =\n  | \"prev\"\n  | \"next\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render control for content tabs\n *\n * @param type - Control type\n *\n * @returns Element\n */\nexport function renderTabbedControl(\n  type: TabbedControlType\n): HTMLElement {\n  const classes = `tabbed-control tabbed-control--${type}`\n  return (\n    <div class={classes} hidden>\n      <button class=\"tabbed-button\" tabIndex={-1} aria-hidden=\"true\"></button>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a table inside a wrapper to improve scrolling on mobile\n *\n * @param table - Table element\n *\n * @returns Element\n */\nexport function renderTable(table: HTMLElement): HTMLElement {\n  return (\n    <div class=\"md-typeset__scrollwrap\">\n      <div class=\"md-typeset__table\">\n        {table}\n      </div>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { configuration, translation } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Version properties\n */\nexport interface VersionProperties {\n  hidden?: boolean                     /* Version is hidden */\n}\n\n/**\n * Version\n */\nexport interface Version {\n  version: string                      /* Version identifier */\n  title: string                        /* Version title */\n  aliases: string[]                    /* Version aliases */\n  properties?: VersionProperties       /* Version properties */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a version\n *\n * @param version - Version\n *\n * @returns Element\n */\nfunction renderVersion(version: Version): HTMLElement {\n  const config = configuration()\n\n  /* Ensure trailing slash - see https://bit.ly/3rL5u3f */\n  const url = new URL(`../${version.version}/`, config.base)\n  return (\n    <li class=\"md-version__item\">\n      <a href={`${url}`} class=\"md-version__link\">\n        {version.title}\n        {config.version?.alias && version.aliases.length > 0 && (\n          <span class=\"md-version__alias\">\n            {version.aliases[0]}\n          </span>\n        )}\n      </a>\n    
</li>\n  )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a version selector\n *\n * @param versions - Versions\n * @param active - Active version\n *\n * @returns Element\n */\nexport function renderVersionSelector(\n  versions: Version[], active: Version\n): HTMLElement {\n  const config = configuration()\n  versions = versions.filter(version => !version.properties?.hidden)\n  return (\n    <div class=\"md-version\">\n      <button\n        class=\"md-version__current\"\n        aria-label={translation(\"select.version\")}\n      >\n        {active.title}\n        {config.version?.alias && active.aliases.length > 0 && (\n          <span class=\"md-version__alias\">\n            {active.aliases[0]}\n          </span>\n        )}\n      </button>\n      <ul class=\"md-version__list\">\n        {versions.map(renderVersion)}\n      </ul>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  BehaviorSubject,\n  EMPTY,\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  combineLatest,\n  debounce,\n  defer,\n  distinctUntilChanged,\n  endWith,\n  filter,\n  finalize,\n  first,\n  ignoreElements,\n  map,\n  mergeMap,\n  observeOn,\n  queueScheduler,\n  share,\n  startWith,\n  switchMap,\n  tap,\n  throttleTime,\n  timer,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  ElementOffset,\n  Viewport,\n  getElement,\n  getElementContainers,\n  getElementOffsetAbsolute,\n  getElementSize,\n  watchElementContentOffset,\n  watchElementFocus,\n  watchElementHover\n} from \"~/browser\"\nimport { renderInlineTooltip2 } from \"~/templates\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tooltip\n */\nexport interface Tooltip {\n  active: boolean                      // Tooltip is active\n  offset: ElementOffset                // Tooltip offset\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Dependencies\n */\ninterface Dependencies {\n  content$: Observable<HTMLElement>    // Tooltip content observable\n  viewport$: Observable<Viewport>      // Viewport observable\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for tooltips\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch tooltip\n *\n * This function tracks the tooltip host element, and deduces the active state\n * and offset of the tooltip from it. The active state is determined by whether\n * the host element is focused or hovered, and the offset is determined by the\n * host element's absolute position in the document.\n *\n * @param el - Tooltip host element\n *\n * @returns Tooltip observable\n */\nexport function watchTooltip2(\n  el: HTMLElement\n): Observable<Tooltip> {\n\n  // Compute whether tooltip should be shown - we need to watch both focus and\n  // hover events on the host element and emit if one of them is active. In case\n  // of a hover event, we keep the element visible for a short amount of time\n  // after the pointer left the host element for a better user experience.\n  const active$ =\n    combineLatest([\n      watchElementFocus(el),\n      watchElementHover(el)\n    ])\n      .pipe(\n        map(([focus, hover]) => focus || hover),\n        distinctUntilChanged()\n      )\n\n  // We need to determine all parent elements of the host element that are\n  // currently scrollable, as they might affect the position of the tooltip\n  // depending on their horizontal of vertical offset. 
We must track all of\n  // them and recompute the position of the tooltip if they change.\n  const offset$ =\n    defer(() => getElementContainers(el)).pipe(\n      mergeMap(watchElementContentOffset),\n      throttleTime(1),\n      map(() => getElementOffsetAbsolute(el))\n    )\n\n  // Only track parent elements and compute offset of the tooltip host if the\n  // tooltip should be shown - we defer the computation of the offset until the\n  // tooltip becomes active for the first time. This is necessary, because we\n  // must also keep the tooltip active as long as it is focused or hovered.\n  return active$.pipe(\n    first(active => active),\n    switchMap(() => combineLatest([active$, offset$])),\n    map(([active, offset]) => ({ active, offset })),\n    share()\n  )\n}\n\n/**\n * Mount tooltip\n *\n * This function renders a tooltip with the content from the provided `content$`\n * observable as passed via the dependencies. If the returned element has a role\n * of type `dialog`, the tooltip is considered to be interactive, and rendered\n * either above or below the host element, depending on the available space.\n *\n * If the returned element has a role of type `tooltip`, the tooltip is always\n * rendered below the host element and considered to be non-interactive. This\n * allows us to reuse the same positioning logic for both interactive and\n * non-interactive tooltips, as it is largely the same.\n *\n * @param el - Tooltip host element\n * @param dependencies - Dependencies\n *\n * @returns Tooltip component observable\n */\nexport function mountTooltip2(\n  el: HTMLElement, dependencies: Dependencies\n): Observable<Component<Tooltip>> {\n  const { content$, viewport$ } = dependencies\n\n  // Compute unique tooltip id - this is necessary to associate the tooltip host\n  // element with the tooltip element for ARIA purposes\n  const id = `__tooltip2_${sequence++}`\n\n  // Create component on subscription\n  return defer(() => {\n    const push$ = new Subject<Tooltip>()\n\n    // Create subject to track tooltip presence and visibility - we use another\n    // purely internal subject to track the tooltip's presence and visibility,\n    // as the tooltip should be visible if the host element or tooltip itself\n    // is focused or hovered to allow for smooth pointer migration\n    const show$ = new BehaviorSubject(false)\n    push$.pipe(ignoreElements(), endWith(false))\n      .subscribe(show$)\n\n    // Create observable controlling tooltip element - we create and attach the\n    // tooltip only if it is actually present, in order to keep the number of\n    // elements low. We need to keep the tooltip visible for a short time after\n    // the pointer left the host element or tooltip itself. For this, we use an\n    // inner subscription to the tooltip observable, which we terminate when the\n    // tooltip should not be shown, automatically removing the element. Moreover\n    // we use the queue scheduler, which will schedule synchronously in case the\n    // tooltip should be shown, and asynchronously if it should be hidden.\n    const node$ = show$.pipe(\n      debounce(active => timer(+!active * 250, queueScheduler)),\n      distinctUntilChanged(),\n      switchMap(active => active ? 
content$ : EMPTY),\n      tap(node => node.id = id),\n      share()\n    )\n\n    // Compute tooltip presence and visibility - the tooltip should be shown if\n    // the host element or the tooltip itself is focused or hovered\n    combineLatest([\n      push$.pipe(map(({ active }) => active)),\n      node$.pipe(\n        switchMap(node => watchElementHover(node, 250)),\n        startWith(false)\n      )\n    ])\n      .pipe(map(states => states.some(active => active)))\n      .subscribe(show$)\n\n    // Compute tooltip origin - we need to compute the tooltip origin depending\n    // on the position of the host element, the viewport size, as well as the\n    // actual size of the tooltip, if positioned above. The tooltip must about\n    // to be rendered for this to be correct, which is why we do it here.\n    const origin$ = show$.pipe(\n      filter(active => active),\n      withLatestFrom(node$, viewport$),\n      map(([_, node, { size }]) => {\n        const host = el.getBoundingClientRect()\n        const x = host.width / 2\n\n        // If the tooltip is non-interactive, we always render it below the\n        // actual element because all operating systems do it that way\n        if (node.role === \"tooltip\") {\n          return { x, y: 8 + host.height }\n\n        // Otherwise, we determine where there is more space, and render the\n        // tooltip either above or below the host element\n        } else if (host.y >= size.height / 2) {\n          const { height } = getElementSize(node)\n          return { x, y: -16 - height }\n        } else {\n          return { x, y: +16 + host.height }\n        }\n      })\n    )\n\n    // Update tooltip position - we always need to update the position of the\n    // tooltip, as it might change depending on the viewport offset of the host\n    combineLatest([node$, push$, origin$])\n      .subscribe(([node, { offset }, origin]) => {\n        node.style.setProperty(\"--md-tooltip-host-x\", `${offset.x}px`)\n        node.style.setProperty(\"--md-tooltip-host-y\", `${offset.y}px`)\n\n        // Update tooltip origin - this is mainly set to determine the position\n        // of the tooltip tail, to show the direction it is originating from\n        node.style.setProperty(\"--md-tooltip-x\", `${origin.x}px`)\n        node.style.setProperty(\"--md-tooltip-y\", `${origin.y}px`)\n\n        // Update tooltip render location, i.e., whether the tooltip is shown\n        // above or below the host element, depending on the available space\n        node.classList.toggle(\"md-tooltip2--top\",    origin.y <  0)\n        node.classList.toggle(\"md-tooltip2--bottom\", origin.y >= 0)\n      })\n\n    // Update tooltip width - we only explicitly set the width of the tooltip\n    // if it is non-interactive, in case it should always be rendered centered\n    show$.pipe(\n      filter(active => active),\n      withLatestFrom(node$, (_, node) => node),\n      filter(node => node.role === \"tooltip\")\n    )\n      .subscribe(node => {\n        const size = getElementSize(getElement(\":scope > *\", node))\n\n        // Set tooltip width and remove tail by setting it to a width of zero -\n        // if authors want to keep the tail, we can move this to CSS later\n        node.style.setProperty(\"--md-tooltip-width\", `${size.width}px`)\n        node.style.setProperty(\"--md-tooltip-tail\",  `${0}px`)\n      })\n\n    // Update tooltip visibility - we defer to the next animation frame, because\n    // the tooltip must first be added to the document before we make it 
appear,\n    // or it will appear instantly without delay. Additionally, we need to keep\n    // the tooltip visible for a short time after the pointer left the host.\n    show$.pipe(\n      distinctUntilChanged(),\n      observeOn(animationFrameScheduler),\n      withLatestFrom(node$)\n    )\n      .subscribe(([active, node]) => {\n        node.classList.toggle(\"md-tooltip2--active\", active)\n      })\n\n    // Set up ARIA attributes when tooltip is visible\n    combineLatest([\n      show$.pipe(filter(active => active)),\n      node$\n    ])\n      .subscribe(([_, node]) => {\n        if (node.role === \"dialog\") {\n          el.setAttribute(\"aria-controls\", id)\n          el.setAttribute(\"aria-haspopup\", \"dialog\")\n        } else {\n          el.setAttribute(\"aria-describedby\", id)\n        }\n      })\n\n    // Remove ARIA attributes when tooltip is hidden\n    show$.pipe(filter(active => !active))\n      .subscribe(() => {\n        el.removeAttribute(\"aria-controls\")\n        el.removeAttribute(\"aria-describedby\")\n        el.removeAttribute(\"aria-haspopup\")\n      })\n\n    // Create and return component\n    return watchTooltip2(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n\n// ----------------------------------------------------------------------------\n\n/**\n * Mount inline tooltip\n *\n * @todo refactor this function\n *\n * @param el - Tooltip host element\n * @param dependencies - Dependencies\n * @param container - Container\n *\n * @returns Tooltip component observable\n */\nexport function mountInlineTooltip2(\n  el: HTMLElement, { viewport$ }: { viewport$: Observable<Viewport> },\n  container = document.body\n): Observable<Component<Tooltip>> {\n  return mountTooltip2(el, {\n    content$: new Observable<HTMLElement>(observer => {\n      const title = el.title\n      const node = renderInlineTooltip2(title)\n      observer.next(node)\n      el.removeAttribute(\"title\")\n      // Append tooltip and remove on unsubscription\n      container.append(node)\n      return () => {\n        node.remove()\n        el.setAttribute(\"title\", title)\n      }\n    }),\n    viewport$\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  auditTime,\n  combineLatest,\n  debounceTime,\n  defer,\n  delay,\n  endWith,\n  filter,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  switchMap,\n  take,\n  takeUntil,\n  tap,\n  throttleTime,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  ElementOffset,\n  getActiveElement,\n  getElementSize,\n  watchElementContentOffset,\n  watchElementFocus,\n  watchElementOffset,\n  watchElementVisibility\n} from \"~/browser\"\n\nimport { Component } from \"../../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Annotation\n */\nexport interface Annotation {\n  active: boolean                      /* Annotation is active */\n  offset: ElementOffset                /* Annotation offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch annotation\n *\n * @param el - Annotation element\n * @param container - Containing element\n *\n * @returns Annotation observable\n */\nexport function watchAnnotation(\n  el: HTMLElement, container: HTMLElement\n): Observable<Annotation> {\n  const offset$ = defer(() => combineLatest([\n    watchElementOffset(el),\n    watchElementContentOffset(container)\n  ]))\n    .pipe(\n      map(([{ x, y }, scroll]): ElementOffset => {\n        const { width, height } = getElementSize(el)\n        return ({\n          x: x - scroll.x + width  / 2,\n          y: y - scroll.y + height / 2\n        })\n      })\n    )\n\n  /* Actively watch annotation on focus */\n  return watchElementFocus(el)\n    .pipe(\n      switchMap(active => offset$\n        .pipe(\n          map(offset => ({ active, offset })),\n          take(+!active || Infinity)\n        )\n      )\n    )\n}\n\n/**\n * Mount annotation\n *\n * @param el - Annotation element\n * @param container - Containing element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotation(\n  el: HTMLElement, container: HTMLElement, { target$ }: MountOptions\n): Observable<Component<Annotation>> {\n  const [tooltip, index] = Array.from(el.children)\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<Annotation>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ offset }) {\n        el.style.setProperty(\"--md-tooltip-x\", `${offset.x}px`)\n        el.style.setProperty(\"--md-tooltip-y\", `${offset.y}px`)\n      },\n\n      /* Handle complete */\n      complete() {\n        el.style.removeProperty(\"--md-tooltip-x\")\n        el.style.removeProperty(\"--md-tooltip-y\")\n      }\n    })\n\n    /* 
Start animation only when annotation is visible */\n    watchElementVisibility(el)\n      .pipe(\n        takeUntil(done$)\n      )\n        .subscribe(visible => {\n          el.toggleAttribute(\"data-md-visible\", visible)\n        })\n\n    /* Toggle tooltip presence to mitigate empty lines when copying */\n    merge(\n      push$.pipe(filter(({ active }) => active)),\n      push$.pipe(debounceTime(250), filter(({ active }) => !active))\n    )\n      .subscribe({\n\n        /* Handle emission */\n        next({ active }) {\n          if (active)\n            el.prepend(tooltip)\n          else\n            tooltip.remove()\n        },\n\n        /* Handle complete */\n        complete() {\n          el.prepend(tooltip)\n        }\n      })\n\n    /* Toggle tooltip visibility */\n    push$\n      .pipe(\n        auditTime(16, animationFrameScheduler)\n      )\n        .subscribe(({ active }) => {\n          tooltip.classList.toggle(\"md-tooltip--active\", active)\n        })\n\n    /* Track relative origin of tooltip */\n    push$\n      .pipe(\n        throttleTime(125, animationFrameScheduler),\n        filter(() => !!el.offsetParent),\n        map(() => el.offsetParent!.getBoundingClientRect()),\n        map(({ x }) => x)\n      )\n        .subscribe({\n\n          /* Handle emission */\n          next(origin) {\n            if (origin)\n              el.style.setProperty(\"--md-tooltip-0\", `${-origin}px`)\n            else\n              el.style.removeProperty(\"--md-tooltip-0\")\n          },\n\n          /* Handle complete */\n          complete() {\n            el.style.removeProperty(\"--md-tooltip-0\")\n          }\n        })\n\n    /* Allow to copy link without scrolling to anchor */\n    fromEvent<MouseEvent>(index, \"click\")\n      .pipe(\n        takeUntil(done$),\n        filter(ev => !(ev.metaKey || ev.ctrlKey))\n      )\n        .subscribe(ev => {\n          ev.stopPropagation()\n          ev.preventDefault()\n        })\n\n    /* Allow to open link in new tab or blur on close */\n    fromEvent<MouseEvent>(index, \"mousedown\")\n      .pipe(\n        takeUntil(done$),\n        withLatestFrom(push$)\n      )\n        .subscribe(([ev, { active }]) => {\n\n          /* Open in new tab */\n          if (ev.button !== 0 || ev.metaKey || ev.ctrlKey) {\n            ev.preventDefault()\n\n          /* Close annotation */\n          } else if (active) {\n            ev.preventDefault()\n\n            /* Focus parent annotation, if any */\n            const parent = el.parentElement!.closest(\".md-annotation\")\n            if (parent instanceof HTMLElement)\n              parent.focus()\n            else\n              getActiveElement()?.blur()\n          }\n        })\n\n    /* Open and focus annotation on location target */\n    target$\n      .pipe(\n        takeUntil(done$),\n        filter(target => target === tooltip),\n        delay(125)\n      )\n        .subscribe(() => el.focus())\n\n    /* Create and return component */\n    return watchAnnotation(el, container)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, 
modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  endWith,\n  finalize,\n  ignoreElements,\n  merge,\n  share,\n  takeUntil\n} from \"rxjs\"\n\nimport {\n  getElement,\n  getElements,\n  getOptionalElement\n} from \"~/browser\"\nimport { renderAnnotation } from \"~/templates\"\n\nimport { Component } from \"../../../_\"\nimport {\n  Annotation,\n  mountAnnotation\n} from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find all annotation hosts in the containing element\n *\n * @param container - Containing element\n *\n * @returns Annotation hosts\n */\nfunction findHosts(container: HTMLElement): HTMLElement[] {\n  return container.tagName === \"CODE\"\n    ? 
getElements(\".c, .c1, .cm\", container)\n    : [container]\n}\n\n/**\n * Find all annotation markers in the containing element\n *\n * @param container - Containing element\n *\n * @returns Annotation markers\n */\nfunction findMarkers(container: HTMLElement): Text[] {\n  const markers: Text[] = []\n  for (const el of findHosts(container)) {\n    const nodes: Text[] = []\n\n    /* Find all text nodes in current element */\n    const it = document.createNodeIterator(el, NodeFilter.SHOW_TEXT)\n    for (let node = it.nextNode(); node; node = it.nextNode())\n      nodes.push(node as Text)\n\n    /* Find all markers in each text node */\n    for (let text of nodes) {\n      let match: RegExpExecArray | null\n\n      /* Split text at marker and add to list */\n      while ((match = /(\\(\\d+\\))(!)?/.exec(text.textContent!))) {\n        const [, id, force] = match\n        if (typeof force === \"undefined\") {\n          const marker = text.splitText(match.index)\n          text = marker.splitText(id.length)\n          markers.push(marker)\n\n        /* Replace entire text with marker */\n        } else {\n          text.textContent = id\n          markers.push(text)\n          break\n        }\n      }\n    }\n  }\n  return markers\n}\n\n/**\n * Swap the child nodes of two elements\n *\n * @param source - Source element\n * @param target - Target element\n */\nfunction swap(source: HTMLElement, target: HTMLElement): void {\n  target.append(...Array.from(source.childNodes))\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount annotation list\n *\n * This function analyzes the containing code block and checks for markers\n * referring to elements in the given annotation list. If no markers are found,\n * the list is left untouched. 
Otherwise, list elements are rendered as\n * annotations inside the code block.\n *\n * @param el - Annotation list element\n * @param container - Containing element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotationList(\n  el: HTMLElement, container: HTMLElement, { target$, print$ }: MountOptions\n): Observable<Component<Annotation>> {\n\n  /* Compute prefix for tooltip anchors */\n  const parent = container.closest(\"[id]\")\n  const prefix = parent?.id\n\n  /* Find and replace all markers with empty annotations */\n  const annotations = new Map<string, HTMLElement>()\n  for (const marker of findMarkers(container)) {\n    const [, id] = marker.textContent!.match(/\\((\\d+)\\)/)!\n    if (getOptionalElement(`:scope > li:nth-child(${id})`, el)) {\n      annotations.set(id, renderAnnotation(id, prefix))\n      marker.replaceWith(annotations.get(id)!)\n    }\n  }\n\n  /* Keep list if there are no annotations to render */\n  if (annotations.size === 0)\n    return EMPTY\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n\n    /* Retrieve container pairs for swapping */\n    const pairs: [HTMLElement, HTMLElement][] = []\n    for (const [id, annotation] of annotations)\n      pairs.push([\n        getElement(\".md-typeset\", annotation),\n        getElement(`:scope > li:nth-child(${id})`, el)\n      ])\n\n    /* Handle print mode - see https://bit.ly/3rgPdpt */\n    print$.pipe(takeUntil(done$))\n      .subscribe(active => {\n        el.hidden = !active\n\n        /* Add class to discern list element */\n        el.classList.toggle(\"md-annotation-list\", active)\n\n        /* Show annotations in code block or list (print) */\n        for (const [inner, child] of pairs)\n          if (!active)\n            swap(child, inner)\n          else\n            swap(inner, child)\n      })\n\n    /* Create and return component */\n    return merge(...[...annotations]\n      .map(([, annotation]) => (\n        mountAnnotation(annotation, container, { target$ })\n      ))\n    )\n      .pipe(\n        finalize(() => push$.complete()),\n        share()\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { EMPTY, Observable, defer } from \"rxjs\"\n\nimport { Component } from \"../../../_\"\nimport { Annotation } from \"../_\"\nimport { mountAnnotationList } from \"../list\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find list element directly following a block\n *\n * @param el - Annotation block element\n *\n * @returns List element or nothing\n */\nfunction findList(el: HTMLElement): HTMLElement | undefined {\n  if (el.nextElementSibling) {\n    const sibling = el.nextElementSibling as HTMLElement\n    if (sibling.tagName === \"OL\")\n      return sibling\n\n    /* Skip empty paragraphs - see https://bit.ly/3r4ZJ2O */\n    else if (sibling.tagName === \"P\" && !sibling.children.length)\n      return findList(sibling)\n  }\n\n  /* Everything else */\n  return undefined\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount annotation block\n *\n * @param el - Annotation block element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotationBlock(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Annotation>> {\n  return defer(() => {\n    const list = findList(el)\n    return typeof list !== \"undefined\"\n      ? mountAnnotationList(list, el, options)\n      : EMPTY\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ClipboardJS from \"clipboard\"\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  filter,\n  finalize,\n  map,\n  mergeWith,\n  switchMap,\n  take,\n  takeLast,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  getElementContentSize,\n  getElements,\n  watchElementSize,\n  watchElementVisibility\n} from \"~/browser\"\nimport {\n  Tooltip,\n  mountInlineTooltip2\n} from \"~/components/tooltip2\"\nimport { renderClipboardButton } from \"~/templates\"\n\nimport { Component } from \"../../../_\"\nimport {\n  Annotation,\n  mountAnnotationList\n} from \"../../annotation\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Code block overflow\n */\nexport interface Overflow {\n  scrollable: boolean                  /* Code block overflows */\n}\n\n/**\n * Code block\n */\nexport type CodeBlock =\n  | Overflow\n  | Annotation\n  | Tooltip\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for code blocks\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find candidate list element directly following a code block\n *\n * @param el - Code block element\n *\n * @returns List element or nothing\n */\nfunction findCandidateList(el: HTMLElement): HTMLElement | undefined {\n  if (el.nextElementSibling) {\n    const sibling = el.nextElementSibling as HTMLElement\n    if (sibling.tagName === \"OL\")\n      return sibling\n\n    /* Skip empty paragraphs - see https://bit.ly/3r4ZJ2O */\n    else if (sibling.tagName === \"P\" && !sibling.children.length)\n      return findCandidateList(sibling)\n  }\n\n  /* Everything else */\n  return undefined\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch code block\n *\n * This function monitors size changes of the viewport, as well as switches of\n * content tabs with embedded code blocks, as both may trigger overflow.\n *\n * @param el - Code block element\n *\n * @returns Code block observable\n */\nexport function watchCodeBlock(\n  el: HTMLElement\n): Observable<Overflow> {\n  return watchElementSize(el)\n    .pipe(\n      map(({ width }) => {\n        const content = getElementContentSize(el)\n        return {\n          scrollable: content.width > width\n        }\n      }),\n      
distinctUntilKeyChanged(\"scrollable\")\n    )\n}\n\n/**\n * Mount code block\n *\n * This function ensures that an overflowing code block is focusable through\n * keyboard, so it can be scrolled without a mouse to improve on accessibility.\n * Furthermore, if code annotations are enabled, they are mounted if and only\n * if the code block is currently visible, e.g., not in a hidden content tab.\n *\n * Note that code blocks may be mounted eagerly or lazily. If they're mounted\n * lazily (on first visibility), code annotation anchor links will not work,\n * as they are evaluated on initial page load, and code annotations in general\n * might feel a little bumpier.\n *\n * @param el - Code block element\n * @param options - Options\n *\n * @returns Code block and annotation component observable\n */\nexport function mountCodeBlock(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<CodeBlock>> {\n  const { matches: hover } = matchMedia(\"(hover)\")\n\n  /* Defer mounting of code block - see https://bit.ly/3vHVoVD */\n  const factory$ = defer(() => {\n    const push$ = new Subject<Overflow>()\n    const done$ = push$.pipe(takeLast(1))\n    push$.subscribe(({ scrollable }) => {\n      if (scrollable && hover)\n        el.setAttribute(\"tabindex\", \"0\")\n      else\n        el.removeAttribute(\"tabindex\")\n    })\n\n    /* Render button for Clipboard.js integration */\n    const content$: Array<Observable<Component<CodeBlock>>> = []\n    if (ClipboardJS.isSupported()) {\n      if (el.closest(\".copy\") || (\n        feature(\"content.code.copy\") && !el.closest(\".no-copy\")\n      )) {\n        const parent = el.closest(\"pre\")!\n        parent.id = `__code_${sequence++}`\n\n        /* Mount tooltip, if enabled */\n        const button = renderClipboardButton(parent.id)\n        parent.insertBefore(button, el)\n        if (feature(\"content.tooltips\"))\n          content$.push(mountInlineTooltip2(button, { viewport$ }))\n      }\n    }\n\n    /* Handle code annotations */\n    const container = el.closest(\".highlight\")\n    if (container instanceof HTMLElement) {\n      const list = findCandidateList(container)\n\n      /* Mount code annotations, if enabled */\n      if (typeof list !== \"undefined\" && (\n        container.classList.contains(\"annotate\") ||\n        feature(\"content.code.annotate\")\n      )) {\n        const annotations$ = mountAnnotationList(list, el, options)\n        content$.push(\n          watchElementSize(container)\n            .pipe(\n              takeUntil(done$),\n              map(({ width, height }) => width && height),\n              distinctUntilChanged(),\n              switchMap(active => active ? 
annotations$ : EMPTY)\n            )\n        )\n      }\n    }\n\n    // If the code block has line spans, we can add this additional class to\n    // the code block element, which fixes the problem for highlighted code\n    // lines not stretching to the entirety of the screen when the code block\n    // overflows, e.g., on mobile - see\n    const spans = getElements(\":scope > span[id]\", el)\n    if (spans.length)\n      el.classList.add(\"md-code__content\")\n\n    /* Create and return component */\n    return watchCodeBlock(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state })),\n        mergeWith(...content$)\n      )\n  })\n\n  /* Mount code block lazily */\n  if (feature(\"content.lazy\"))\n    return watchElementVisibility(el)\n      .pipe(\n        filter(visible => visible),\n        take(1),\n        switchMap(() => factory$)\n      )\n\n  /* Mount code block */\n  return factory$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  filter,\n  finalize,\n  map,\n  merge,\n  tap\n} from \"rxjs\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Details\n */\nexport interface Details {\n  action: \"open\" | \"close\"             /* Details state */\n  reveal?: boolean                     /* Details is revealed */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch details\n *\n * @param el - Details element\n * @param options - Options\n *\n * @returns Details observable\n */\nexport function watchDetails(\n  el: HTMLDetailsElement, { target$, print$ }: WatchOptions\n): Observable<Details> {\n  let open = true\n  return merge(\n\n    /* Open and focus details on location target */\n    target$\n      .pipe(\n        map(target => target.closest(\"details:not([open])\")!),\n        filter(details => el === details),\n        map(() => ({\n          action: \"open\", reveal: true\n        }) as Details)\n      ),\n\n    /* Open details on print and close afterwards */\n    print$\n      .pipe(\n        filter(active => active || !open),\n        tap(() => open = el.open),\n        map(active => ({\n          action: active ? 
\"open\" : \"close\"\n        }) as Details)\n      )\n  )\n}\n\n/**\n * Mount details\n *\n * This function ensures that `details` tags are opened on anchor jumps and\n * prior to printing, so the whole content of the page is visible.\n *\n * @param el - Details element\n * @param options - Options\n *\n * @returns Details component observable\n */\nexport function mountDetails(\n  el: HTMLDetailsElement, options: MountOptions\n): Observable<Component<Details>> {\n  return defer(() => {\n    const push$ = new Subject<Details>()\n    push$.subscribe(({ action, reveal }) => {\n      el.toggleAttribute(\"open\", action === \"open\")\n      if (reveal)\n        el.scrollIntoView()\n    })\n\n    /* Create and return component */\n    return watchDetails(el, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", ".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel 
p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files 
(the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  map,\n  of,\n  shareReplay,\n  tap\n} from \"rxjs\"\n\nimport { watchScript } from \"~/browser\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\nimport themeCSS from \"./index.css\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mermaid diagram\n */\nexport interface Mermaid {}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Mermaid instance observable\n */\nlet mermaid$: Observable<void>\n\n/**\n * Global sequence number for diagrams\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch Mermaid script\n *\n * @returns Mermaid scripts observable\n */\nfunction fetchScripts(): Observable<void> {\n  return typeof mermaid === \"undefined\" || mermaid instanceof Element\n    ? 
watchScript(\"https://unpkg.com/mermaid@10/dist/mermaid.min.js\")\n    : of(undefined)\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount Mermaid diagram\n *\n * @param el - Code block element\n *\n * @returns Mermaid diagram component observable\n */\nexport function mountMermaid(\n  el: HTMLElement\n): Observable<Component<Mermaid>> {\n  el.classList.remove(\"mermaid\") // Hack: mitigate https://bit.ly/3CiN6Du\n  mermaid$ ||= fetchScripts()\n    .pipe(\n      tap(() => mermaid.initialize({\n        startOnLoad: false,\n        themeCSS,\n        sequence: {\n          actorFontSize: \"16px\", // Hack: mitigate https://bit.ly/3y0NEi3\n          messageFontSize: \"16px\",\n          noteFontSize: \"16px\"\n        }\n      })),\n      map(() => undefined),\n      shareReplay(1)\n    )\n\n  /* Render diagram */\n  mermaid$.subscribe(async () => {\n    el.classList.add(\"mermaid\") // Hack: mitigate https://bit.ly/3CiN6Du\n    const id = `__mermaid_${sequence++}`\n\n    /* Create host element to replace code block */\n    const host = h(\"div\", { class: \"mermaid\" })\n    const text = el.textContent\n\n    /* Render and inject diagram */\n    const { svg, fn } = await mermaid.render(id, text)\n\n    /* Create a shadow root and inject diagram */\n    const shadow = host.attachShadow({ mode: \"closed\" })\n    shadow.innerHTML = svg\n\n    /* Replace code block with diagram and bind functions */\n    el.replaceWith(host)\n    fn?.(shadow)\n  })\n\n  /* Create and return component */\n  return mermaid$\n    .pipe(\n      map(() => ({ ref: el }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Observable, of } from \"rxjs\"\n\nimport { renderTable } from \"~/templates\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Data table\n */\nexport interface DataTable {}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Sentinel for replacement\n */\nconst sentinel = h(\"table\")\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount data table\n *\n * This function wraps a data table in another scrollable container, so it can\n * be smoothly scrolled on smaller screen sizes and won't break the layout.\n *\n * @param el - Data table element\n *\n * @returns Data table component observable\n */\nexport function mountDataTable(\n  el: HTMLElement\n): Observable<Component<DataTable>> {\n  el.replaceWith(sentinel)\n  sentinel.replaceWith(renderTable(el))\n\n  /* Create and return component */\n  return of({ ref: el })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  asyncScheduler,\n  auditTime,\n  combineLatest,\n  defer,\n  endWith,\n  filter,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  skip,\n  startWith,\n  subscribeOn,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElement,\n  getElementContentOffset,\n  getElementContentSize,\n  getElementOffset,\n  getElementSize,\n  getElements,\n  watchElementContentOffset,\n  watchElementSize,\n  watchElementVisibility\n} from \"~/browser\"\nimport { renderTabbedControl } from \"~/templates\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Content tabs\n */\nexport interface ContentTabs {\n  active: HTMLLabelElement             /* Active tab label */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch content tabs\n *\n * @param inputs - Content tabs input elements\n *\n * @returns Content tabs observable\n */\nexport function watchContentTabs(\n  inputs: HTMLInputElement[]\n): Observable<ContentTabs> {\n  const initial = inputs.find(input => input.checked) || inputs[0]\n  return merge(...inputs.map(input => fromEvent(input, \"change\")\n    .pipe(\n      map(() => getElement<HTMLLabelElement>(`label[for=\"${input.id}\"]`))\n    )\n  ))\n    .pipe(\n      startWith(getElement<HTMLLabelElement>(`label[for=\"${initial.id}\"]`)),\n      map(active => ({ active }))\n    )\n}\n\n/**\n * Mount content tabs\n *\n * @param el - Content tabs element\n * @param options - Options\n *\n * @returns Content tabs component observable\n */\nexport function mountContentTabs(\n  el: HTMLElement, { viewport$, target$ }: MountOptions\n): Observable<Component<ContentTabs>> {\n  const container = getElement(\".tabbed-labels\", el)\n  const inputs = getElements<HTMLInputElement>(\":scope > input\", el)\n\n  /* Render content tab previous button for pagination */\n  const prev = renderTabbedControl(\"prev\")\n  el.append(prev)\n\n  /* Render content tab next button for pagination */\n  const next = renderTabbedControl(\"next\")\n  el.append(next)\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<ContentTabs>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    combineLatest([push$, watchElementSize(el), watchElementVisibility(el)])\n      .pipe(\n        takeUntil(done$),\n        auditTime(1, animationFrameScheduler)\n      )\n        .subscribe({\n\n          /* Handle emission */\n          next([{ 
active }, size]) {\n            const offset = getElementOffset(active)\n            const { width } = getElementSize(active)\n\n            /* Set tab indicator offset and width */\n            el.style.setProperty(\"--md-indicator-x\", `${offset.x}px`)\n            el.style.setProperty(\"--md-indicator-width\", `${width}px`)\n\n            /* Scroll container to active content tab */\n            const content = getElementContentOffset(container)\n            if (\n              offset.x         < content.x              ||\n              offset.x + width > content.x + size.width\n            )\n              container.scrollTo({\n                left: Math.max(0, offset.x - 16),\n                behavior: \"smooth\"\n              })\n          },\n\n          /* Handle complete */\n          complete() {\n            el.style.removeProperty(\"--md-indicator-x\")\n            el.style.removeProperty(\"--md-indicator-width\")\n          }\n        })\n\n    /* Hide content tab buttons on borders */\n    combineLatest([\n      watchElementContentOffset(container),\n      watchElementSize(container)\n    ])\n      .pipe(\n        takeUntil(done$)\n      )\n        .subscribe(([offset, size]) => {\n          const content = getElementContentSize(container)\n          prev.hidden = offset.x < 16\n          next.hidden = offset.x > content.width - size.width - 16\n        })\n\n    /* Paginate content tab container on click */\n    merge(\n      fromEvent(prev, \"click\").pipe(map(() => -1)),\n      fromEvent(next, \"click\").pipe(map(() => +1))\n    )\n      .pipe(\n        takeUntil(done$)\n      )\n        .subscribe(direction => {\n          const { width } = getElementSize(container)\n          container.scrollBy({\n            left: width * direction,\n            behavior: \"smooth\"\n          })\n        })\n\n    /* Switch to content tab target */\n    target$\n      .pipe(\n        takeUntil(done$),\n        filter(input => inputs.includes(input as HTMLInputElement))\n      )\n        .subscribe(input => input.click())\n\n    /* Add link to each content tab label */\n    container.classList.add(\"tabbed-labels--linked\")\n    for (const input of inputs) {\n      const label = getElement<HTMLLabelElement>(`label[for=\"${input.id}\"]`)\n      label.replaceChildren(h(\"a\", {\n        href: `#${label.htmlFor}`,\n        tabIndex: -1\n      }, ...Array.from(label.childNodes)))\n\n      /* Allow to copy link without scrolling to anchor */\n      fromEvent<MouseEvent>(label.firstElementChild!, \"click\")\n        .pipe(\n          takeUntil(done$),\n          filter(ev => !(ev.metaKey || ev.ctrlKey)),\n          tap(ev => {\n            ev.preventDefault()\n            ev.stopPropagation()\n          })\n        )\n          // @todo we might need to remove the anchor link on complete\n          .subscribe(() => {\n            history.replaceState({}, \"\", `#${label.htmlFor}`)\n            label.click()\n          })\n    }\n\n    /* Set up linking of content tabs, if enabled */\n    if (feature(\"content.tabs.link\"))\n      push$.pipe(\n        skip(1),\n        withLatestFrom(viewport$)\n      )\n        .subscribe(([{ active }, { offset }]) => {\n          const tab = active.innerText.trim()\n          if (active.hasAttribute(\"data-md-switching\")) {\n            active.removeAttribute(\"data-md-switching\")\n\n          /* Determine viewport offset of active tab */\n          } else {\n            const y = el.offsetTop - offset.y\n\n            /* Passively activate other tabs 
*/\n            for (const set of getElements(\"[data-tabs]\"))\n              for (const input of getElements<HTMLInputElement>(\n                \":scope > input\", set\n              )) {\n                const label = getElement(`label[for=\"${input.id}\"]`)\n                if (\n                  label !== active &&\n                  label.innerText.trim() === tab\n                ) {\n                  label.setAttribute(\"data-md-switching\", \"\")\n                  input.click()\n                  break\n                }\n              }\n\n            /* Bring active tab into view */\n            window.scrollTo({\n              top: el.offsetTop - y\n            })\n\n            /* Persist active tabs in local storage */\n            const tabs = __md_get<string[]>(\"__tabs\") || []\n            __md_set(\"__tabs\", [...new Set([tab, ...tabs])])\n          }\n        })\n\n    /* Pause media (audio, video) on switch - see https://bit.ly/3Bk6cel */\n    push$.pipe(takeUntil(done$))\n      .subscribe(() => {\n        for (const media of getElements<HTMLAudioElement>(\"audio, video\", el))\n          media.pause()\n      })\n\n    /* Create and return component */\n    return watchContentTabs(inputs)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n    .pipe(\n      subscribeOn(asyncScheduler)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Observable, merge } from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { Viewport, getElements } from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport {\n  Tooltip,\n  mountInlineTooltip2\n} from \"../../tooltip2\"\nimport {\n  Annotation,\n  mountAnnotationBlock\n} from \"../annotation\"\nimport {\n  CodeBlock,\n  mountCodeBlock\n} from \"../code\"\nimport {\n  Details,\n  mountDetails\n} from \"../details\"\nimport {\n  Mermaid,\n  mountMermaid\n} from \"../mermaid\"\nimport {\n  DataTable,\n  mountDataTable\n} from \"../table\"\nimport {\n  ContentTabs,\n  mountContentTabs\n} from \"../tabs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Content\n */\nexport type Content =\n  | Annotation\n  | CodeBlock\n  | ContentTabs\n  | DataTable\n  | Details\n  | Mermaid\n  | Tooltip\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount content\n *\n * This function mounts all components that are found in the content of the\n * actual article, including code blocks, data tables and details.\n *\n * @param el - Content element\n * @param options - Options\n *\n * @returns Content component observable\n */\nexport function mountContent(\n  el: HTMLElement, { viewport$, target$, print$ }: MountOptions\n): Observable<Component<Content>> {\n  return merge(\n\n    /* Annotations */\n    ...getElements(\".annotate:not(.highlight)\", el)\n      .map(child => mountAnnotationBlock(child, { target$, print$ })),\n\n    /* Code blocks */\n    ...getElements(\"pre:not(.mermaid) > code\", el)\n      .map(child => mountCodeBlock(child, { target$, print$ })),\n\n    /* Mermaid diagrams */\n    ...getElements(\"pre.mermaid\", el)\n      .map(child => mountMermaid(child)),\n\n    /* Data tables */\n    ...getElements(\"table:not([class])\", el)\n      .map(child => mountDataTable(child)),\n\n    /* Details */\n    ...getElements(\"details\", el)\n      .map(child => mountDetails(child, { target$, print$ })),\n\n    /* Content tabs */\n    ...getElements(\"[data-tabs]\", el)\n      .map(child => mountContentTabs(child, { viewport$, target$ })),\n\n    /* Tooltips */\n    ...getElements(\"[title]\", el)\n      .filter(() => feature(\"content.tooltips\"))\n      .map(child => mountInlineTooltip2(child, { viewport$ }))\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without 
restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  delay,\n  finalize,\n  map,\n  merge,\n  of,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { getElement } from \"~/browser\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Dialog\n */\nexport interface Dialog {\n  message: string                      /* Dialog message */\n  active: boolean                      /* Dialog is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  alert$: Subject<string>              /* Alert subject */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  alert$: Subject<string>              /* Alert subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch dialog\n *\n * @param _el - Dialog element\n * @param options - Options\n *\n * @returns Dialog observable\n */\nexport function watchDialog(\n  _el: HTMLElement, { alert$ }: WatchOptions\n): Observable<Dialog> {\n  return alert$\n    .pipe(\n      switchMap(message => merge(\n        of(true),\n        of(false).pipe(delay(2000))\n      )\n        .pipe(\n          map(active => ({ message, active }))\n        )\n      )\n    )\n}\n\n/**\n * Mount dialog\n *\n * This function reveals the dialog in the right corner when a new alert is\n * emitted through the subject that is passed as part of the options.\n *\n * @param el - Dialog element\n * @param options - Options\n *\n * @returns Dialog component observable\n */\nexport function mountDialog(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Dialog>> {\n  const inner = getElement(\".md-typeset\", el)\n  return defer(() => {\n    const push$ = new Subject<Dialog>()\n    push$.subscribe(({ message, active }) => {\n      el.classList.toggle(\"md-dialog--active\", active)\n      inner.textContent = message\n    })\n\n    /* Create and return component */\n    return watchDialog(el, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of 
charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  asyncScheduler,\n  auditTime,\n  combineLatest,\n  debounceTime,\n  defer,\n  distinctUntilChanged,\n  filter,\n  finalize,\n  map,\n  merge,\n  of,\n  subscribeOn,\n  tap,\n  throttleTime\n} from \"rxjs\"\n\nimport {\n  ElementOffset,\n  getElement,\n  getElementContainer,\n  getElementOffset,\n  getElementSize,\n  watchElementContentOffset,\n  watchElementFocus,\n  watchElementHover\n} from \"~/browser\"\nimport { renderTooltip } from \"~/templates\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tooltip\n */\nexport interface Tooltip {\n  active: boolean                      /* Tooltip is active */\n  offset: ElementOffset                /* Tooltip offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for tooltips\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch tooltip\n *\n * This function will append the tooltip temporarily to compute its width,\n * which is necessary for correct centering, and then removing it again.\n *\n * @param el - Tooltip element\n * @param host - Host element\n *\n * @returns Tooltip observable\n */\nexport function watchTooltip(\n  el: HTMLElement, host: HTMLElement\n): Observable<Tooltip> {\n  document.body.append(el)\n\n  /* Compute width and remove tooltip immediately */\n  const { width } = getElementSize(el)\n  el.style.setProperty(\"--md-tooltip-width\", `${width}px`)\n  el.remove()\n\n  /* Retrieve and watch containing element */\n  const container = getElementContainer(host)\n  const scroll$ =\n    typeof container !== \"undefined\"\n      ? 
watchElementContentOffset(container)\n      : of({ x: 0, y: 0 })\n\n  /* Compute tooltip visibility */\n  const active$ = merge(\n    watchElementFocus(host),\n    watchElementHover(host)\n  )\n    .pipe(\n      distinctUntilChanged()\n    )\n\n  /* Compute tooltip offset */\n  return combineLatest([active$, scroll$])\n    .pipe(\n      map(([active, scroll]) => {\n        let { x, y } = getElementOffset(host)\n        const size = getElementSize(host)\n\n        /**\n         * Experimental: fix handling of tables - see https://bit.ly/3TQEj5O\n         *\n         * If this proves to be a viable fix, we should refactor tooltip\n         * positioning and somehow streamline the current process. This might\n         * also fix positioning for annotations inside tables, which is another\n         * limitation.\n         */\n        const table = host.closest(\"table\")\n        if (table && host.parentElement) {\n          x += table.offsetLeft + host.parentElement.offsetLeft\n          y += table.offsetTop  + host.parentElement.offsetTop\n        }\n        return {\n          active,\n          offset: {\n            x: x - scroll.x + size.width  / 2 - width / 2,\n            y: y - scroll.y + size.height + 8\n          }\n        }\n      })\n    )\n}\n\n/**\n * Mount tooltip\n *\n * @param el - Host element\n *\n * @returns Tooltip component observable\n */\nexport function mountTooltip(\n  el: HTMLElement\n): Observable<Component<Tooltip>> {\n  const title = el.title\n  if (!title.length)\n    return EMPTY\n\n  /* Render tooltip and set title from host element */\n  const id = `__tooltip_${sequence++}`\n  const tooltip = renderTooltip(id, \"inline\")\n  const typeset = getElement(\".md-typeset\", tooltip)\n  typeset.innerHTML = title\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<Tooltip>()\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ offset }) {\n        tooltip.style.setProperty(\"--md-tooltip-x\", `${offset.x}px`)\n        tooltip.style.setProperty(\"--md-tooltip-y\", `${offset.y}px`)\n      },\n\n      /* Handle complete */\n      complete() {\n        tooltip.style.removeProperty(\"--md-tooltip-x\")\n        tooltip.style.removeProperty(\"--md-tooltip-y\")\n      }\n    })\n\n    /* Toggle tooltip presence to mitigate empty lines when copying */\n    merge(\n      push$.pipe(filter(({ active }) => active)),\n      push$.pipe(debounceTime(250), filter(({ active }) => !active))\n    )\n      .subscribe({\n\n        /* Handle emission */\n        next({ active }) {\n          if (active) {\n            el.insertAdjacentElement(\"afterend\", tooltip)\n            el.setAttribute(\"aria-describedby\", id)\n            el.removeAttribute(\"title\")\n          } else {\n            tooltip.remove()\n            el.removeAttribute(\"aria-describedby\")\n            el.setAttribute(\"title\", title)\n          }\n        },\n\n        /* Handle complete */\n        complete() {\n          tooltip.remove()\n          el.removeAttribute(\"aria-describedby\")\n          el.setAttribute(\"title\", title)\n        }\n      })\n\n    /* Toggle tooltip visibility */\n    push$\n      .pipe(\n        auditTime(16, animationFrameScheduler)\n      )\n        .subscribe(({ active }) => {\n          tooltip.classList.toggle(\"md-tooltip--active\", active)\n        })\n\n    // @todo - refactor positioning together with annotations \u2013 there are\n    // several things that overlap and are identical in handling\n\n    /* Track 
relative origin of tooltip */\n    push$\n      .pipe(\n        throttleTime(125, animationFrameScheduler),\n        filter(() => !!el.offsetParent),\n        map(() => el.offsetParent!.getBoundingClientRect()),\n        map(({ x }) => x)\n      )\n      .subscribe({\n\n        /* Handle emission */\n        next(origin) {\n          if (origin)\n            tooltip.style.setProperty(\"--md-tooltip-0\", `${-origin}px`)\n          else\n            tooltip.style.removeProperty(\"--md-tooltip-0\")\n        },\n\n        /* Handle complete */\n        complete() {\n          tooltip.style.removeProperty(\"--md-tooltip-0\")\n        }\n      })\n\n    /* Create and return component */\n    return watchTooltip(tooltip, el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n    .pipe(\n      subscribeOn(asyncScheduler)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  bufferCount,\n  combineLatest,\n  combineLatestWith,\n  defer,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  filter,\n  from,\n  ignoreElements,\n  map,\n  mergeMap,\n  mergeWith,\n  of,\n  shareReplay,\n  startWith,\n  switchMap,\n  takeUntil\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElements,\n  watchElementSize,\n  watchToggle\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Main } from \"../../main\"\nimport {\n  Tooltip,\n  mountTooltip\n} from \"../../tooltip\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Header\n */\nexport interface Header {\n  height: number                       /* Header visible height */\n  hidden: boolean                      /* Header is hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Compute whether the header is hidden\n *\n * If the user scrolls past a certain threshold, the header can be hidden when\n * scrolling down, and shown when scrolling up.\n *\n * @param options - Options\n *\n * @returns Toggle observable\n */\nfunction isHidden({ viewport$ }: WatchOptions): Observable<boolean> {\n  if (!feature(\"header.autohide\"))\n    return of(false)\n\n  /* Compute direction and turning point */\n  const direction$ = viewport$\n    .pipe(\n      map(({ offset: { y } }) => y),\n      bufferCount(2, 1),\n      map(([a, b]) => [a < b, b] as const),\n      distinctUntilKeyChanged(0)\n    )\n\n  /* Compute whether header should be hidden */\n  const hidden$ = combineLatest([viewport$, direction$])\n    .pipe(\n      filter(([{ offset }, [, y]]) => Math.abs(y - offset.y) > 100),\n      map(([, [direction]]) => direction),\n      distinctUntilChanged()\n    )\n\n  /* Compute threshold for hiding */\n  const search$ = watchToggle(\"search\")\n  return combineLatest([viewport$, search$])\n    .pipe(\n      map(([{ offset }, search]) => offset.y > 400 && !search),\n      distinctUntilChanged(),\n      switchMap(active => active ? 
hidden$ : of(false)),\n      startWith(false)\n    )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch header\n *\n * @param el - Header element\n * @param options - Options\n *\n * @returns Header observable\n */\nexport function watchHeader(\n  el: HTMLElement, options: WatchOptions\n): Observable<Header> {\n  return defer(() => combineLatest([\n    watchElementSize(el),\n    isHidden(options)\n  ]))\n    .pipe(\n      map(([{ height }, hidden]) => ({\n        height,\n        hidden\n      })),\n      distinctUntilChanged((a, b) => (\n        a.height === b.height &&\n        a.hidden === b.hidden\n      )),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount header\n *\n * This function manages the different states of the header, i.e. whether it's\n * hidden or rendered with a shadow. This depends heavily on the main area.\n *\n * @param el - Header element\n * @param options - Options\n *\n * @returns Header component observable\n */\nexport function mountHeader(\n  el: HTMLElement, { header$, main$ }: MountOptions\n): Observable<Component<Header | Tooltip>> {\n  return defer(() => {\n    const push$ = new Subject<Main>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    push$\n      .pipe(\n        distinctUntilKeyChanged(\"active\"),\n        combineLatestWith(header$)\n      )\n        .subscribe(([{ active }, { hidden }]) => {\n          el.classList.toggle(\"md-header--shadow\", active && !hidden)\n          el.hidden = hidden\n        })\n\n    /* Mount tooltips, if enabled */\n    const tooltips = from(getElements(\"[title]\", el))\n      .pipe(\n        filter(() => feature(\"content.tooltips\")),\n        mergeMap(child => mountTooltip(child))\n      )\n\n    /* Link to main area */\n    main$.subscribe(push$)\n\n    /* Create and return component */\n    return header$\n      .pipe(\n        takeUntil(done$),\n        map(state => ({ ref: el, ...state })),\n        mergeWith(tooltips.pipe(takeUntil(done$)))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  distinctUntilKeyChanged,\n  finalize,\n  map,\n  tap\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  getElementSize,\n  getOptionalElement,\n  watchViewportAt\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Header } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Header\n */\nexport interface HeaderTitle {\n  active: boolean                      /* Header title is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch header title\n *\n * @param el - Heading element\n * @param options - Options\n *\n * @returns Header title observable\n */\nexport function watchHeaderTitle(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<HeaderTitle> {\n  return watchViewportAt(el, { viewport$, header$ })\n    .pipe(\n      map(({ offset: { y } }) => {\n        const { height } = getElementSize(el)\n        return {\n          active: y >= height\n        }\n      }),\n      distinctUntilKeyChanged(\"active\")\n    )\n}\n\n/**\n * Mount header title\n *\n * This function swaps the header title from the site title to the title of the\n * current page when the user scrolls past the first headline.\n *\n * @param el - Header title element\n * @param options - Options\n *\n * @returns Header title component observable\n */\nexport function mountHeaderTitle(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<HeaderTitle>> {\n  return defer(() => {\n    const push$ = new Subject<HeaderTitle>()\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ active }) {\n        el.classList.toggle(\"md-header__title--active\", active)\n      },\n\n      /* Handle complete */\n      complete() {\n        el.classList.remove(\"md-header__title--active\")\n      }\n    })\n\n    /* Obtain headline, if any */\n    const heading = getOptionalElement(\".md-content h1\")\n    if (typeof heading === \"undefined\")\n      return EMPTY\n\n    /* Create and return component */\n    return watchHeaderTitle(heading, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and 
associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  map,\n  switchMap\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  watchElementSize\n} from \"~/browser\"\n\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Main area\n */\nexport interface Main {\n  offset: number                       /* Main area top offset */\n  height: number                       /* Main area visible height */\n  active: boolean                      /* Main area is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch main area\n *\n * This function returns an observable that computes the visual parameters of\n * the main area which depends on the viewport vertical offset and height, as\n * well as the height of the header element, if the header is fixed.\n *\n * @param el - Main area element\n * @param options - Options\n *\n * @returns Main area observable\n */\nexport function watchMain(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<Main> {\n\n  /* Compute necessary adjustment for header */\n  const adjust$ = header$\n    .pipe(\n      map(({ height }) => height),\n      distinctUntilChanged()\n    )\n\n  /* Compute the main area's top and bottom borders */\n  const border$ = adjust$\n    .pipe(\n      switchMap(() => watchElementSize(el)\n        .pipe(\n          map(({ height }) => ({\n            top:    el.offsetTop,\n            bottom: el.offsetTop + height\n          })),\n          distinctUntilKeyChanged(\"bottom\")\n        )\n      )\n    )\n\n  /* Compute the main area's offset, visible height and if we scrolled past */\n  return combineLatest([adjust$, border$, viewport$])\n    .pipe(\n      map(([header, { top, bottom }, { offset: { y }, size: { height } }]) => {\n        height = Math.max(0, height\n          - Math.max(0, top    - y,  header)\n          - Math.max(0, 
height + y - bottom)\n        )\n        return {\n          offset: top - header,\n          height,\n          active: top - header <= y\n        }\n      }),\n      distinctUntilChanged((a, b) => (\n        a.offset === b.offset &&\n        a.height === b.height &&\n        a.active === b.active\n      ))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  asyncScheduler,\n  defer,\n  filter,\n  finalize,\n  fromEvent,\n  map,\n  mergeMap,\n  observeOn,\n  of,\n  repeat,\n  shareReplay,\n  skip,\n  startWith,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { getElements, watchMedia } from \"~/browser\"\nimport { h } from \"~/utilities\"\n\nimport {\n  Component,\n  getComponentElement\n} from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Palette colors\n */\nexport interface PaletteColor {\n  media?: string                       /* Media query */\n  scheme?: string                      /* Color scheme */\n  primary?: string                     /* Primary color */\n  accent?: string                      /* Accent color */\n}\n\n/**\n * Palette\n */\nexport interface Palette {\n  index: number                        /* Palette index */\n  color: PaletteColor                  /* Palette colors */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch color palette\n *\n * @param inputs - Color palette element\n *\n * @returns Color palette observable\n */\nexport function watchPalette(\n  inputs: HTMLInputElement[]\n): Observable<Palette> {\n  const current = __md_get<Palette>(\"__palette\") || {\n    index: inputs.findIndex(input => matchMedia(\n      input.getAttribute(\"data-md-color-media\")!\n    ).matches)\n  }\n\n  /* Emit changes in color palette */\n  const index = Math.max(0, Math.min(current.index, inputs.length - 1))\n  return of(...inputs)\n    .pipe(\n      mergeMap(input => fromEvent(input, \"change\").pipe(map(() => input))),\n      startWith(inputs[index]),\n      map(input => ({\n        index: inputs.indexOf(input),\n        color: {\n          media:   input.getAttribute(\"data-md-color-media\"),\n         
 scheme:  input.getAttribute(\"data-md-color-scheme\"),\n          primary: input.getAttribute(\"data-md-color-primary\"),\n          accent:  input.getAttribute(\"data-md-color-accent\")\n        }\n      } as Palette)),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount color palette\n *\n * @param el - Color palette element\n *\n * @returns Color palette component observable\n */\nexport function mountPalette(\n  el: HTMLElement\n): Observable<Component<Palette>> {\n  const inputs = getElements<HTMLInputElement>(\"input\", el)\n  const meta = h(\"meta\", { name: \"theme-color\" })\n  document.head.appendChild(meta)\n\n  // Add color scheme meta tag\n  const scheme = h(\"meta\", { name: \"color-scheme\" })\n  document.head.appendChild(scheme)\n\n  /* Mount component on subscription */\n  const media$ = watchMedia(\"(prefers-color-scheme: light)\")\n  return defer(() => {\n    const push$ = new Subject<Palette>()\n    push$.subscribe(palette => {\n      document.body.setAttribute(\"data-md-color-switching\", \"\")\n\n      /* Retrieve color palette for system preference */\n      if (palette.color.media === \"(prefers-color-scheme)\") {\n        const media = matchMedia(\"(prefers-color-scheme: light)\")\n        const input = document.querySelector(media.matches\n          ? \"[data-md-color-media='(prefers-color-scheme: light)']\"\n          : \"[data-md-color-media='(prefers-color-scheme: dark)']\"\n        )!\n\n        /* Retrieve colors for system preference */\n        palette.color.scheme  = input.getAttribute(\"data-md-color-scheme\")!\n        palette.color.primary = input.getAttribute(\"data-md-color-primary\")!\n        palette.color.accent  = input.getAttribute(\"data-md-color-accent\")!\n      }\n\n      /* Set color palette */\n      for (const [key, value] of Object.entries(palette.color))\n        document.body.setAttribute(`data-md-color-${key}`, value)\n\n      /* Set toggle visibility */\n      for (let index = 0; index < inputs.length; index++) {\n        const label = inputs[index].nextElementSibling\n        if (label instanceof HTMLElement)\n          label.hidden = palette.index !== index\n      }\n\n      /* Persist preference in local storage */\n      __md_set(\"__palette\", palette)\n    })\n\n    // Handle color switch on Enter or Space - see https://t.ly/YIhVj\n    fromEvent<KeyboardEvent>(el, \"keydown\").pipe(\n      filter(ev => ev.key === \"Enter\"),\n      withLatestFrom(push$, (_, palette) => palette)\n    )\n      .subscribe(({ index }) => {\n        index = (index + 1) % inputs.length\n        inputs[index].click()\n        inputs[index].focus()\n      })\n\n    /* Update theme-color meta tag */\n    push$\n      .pipe(\n        map(() => {\n          const header = getComponentElement(\"header\")\n          const style  = window.getComputedStyle(header)\n\n          // Set color scheme\n          scheme.content = style.colorScheme\n\n          /* Return color in hexadecimal format */\n          return style.backgroundColor.match(/\\d+/g)!\n            .map(value => (+value).toString(16).padStart(2, \"0\"))\n            .join(\"\")\n        })\n      )\n        .subscribe(color => meta.content = `#${color}`)\n\n    /* Revert transition durations after color switch */\n    push$.pipe(observeOn(asyncScheduler))\n      .subscribe(() => {\n        document.body.removeAttribute(\"data-md-color-switching\")\n      })\n\n    /* Create and return component */\n    return watchPalette(inputs)\n      .pipe(\n        takeUntil(media$.pipe(skip(1))),\n        
repeat(),\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  finalize,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Progress indicator\n */\nexport interface Progress {\n  value: number                        // Progress value\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  progress$: Subject<number>           // Progress subject\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount progress indicator\n *\n * @param el - Progress indicator element\n * @param options - Options\n *\n * @returns Progress indicator component observable\n */\nexport function mountProgress(\n  el: HTMLElement, { progress$ }: MountOptions\n): Observable<Component<Progress>> {\n\n  // Mount component on subscription\n  return defer(() => {\n    const push$ = new Subject<Progress>()\n    push$.subscribe(({ value }) => {\n      el.style.setProperty(\"--md-progress-value\", `${value}`)\n    })\n\n    // Create and return component\n    return progress$\n      .pipe(\n        tap(value => push$.next({ value })),\n        finalize(() => push$.complete()),\n        map(value => ({ ref: el, value }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above 
copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ClipboardJS from \"clipboard\"\nimport {\n  Observable,\n  Subject,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport { getElement } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n  alert$: Subject<string>              /* Alert subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Extract text to copy\n *\n * @param el - HTML element\n *\n * @returns Extracted text\n */\nfunction extract(el: HTMLElement): string {\n  el.setAttribute(\"data-md-copying\", \"\")\n  const copy = el.closest(\"[data-copy]\")\n  const text = copy\n    ? copy.getAttribute(\"data-copy\")!\n    : el.innerText\n  el.removeAttribute(\"data-md-copying\")\n  return text.trimEnd()\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up Clipboard.js integration\n *\n * @param options - Options\n */\nexport function setupClipboardJS(\n  { alert$ }: SetupOptions\n): void {\n  if (ClipboardJS.isSupported()) {\n    new Observable<ClipboardJS.Event>(subscriber => {\n      new ClipboardJS(\"[data-clipboard-target], [data-clipboard-text]\", {\n        text: el => (\n          el.getAttribute(\"data-clipboard-text\")! 
||\n          extract(getElement(\n            el.getAttribute(\"data-clipboard-target\")!\n          ))\n        )\n      })\n        .on(\"success\", ev => subscriber.next(ev))\n    })\n      .pipe(\n        tap(ev => {\n          const trigger = ev.trigger as HTMLElement\n          trigger.focus()\n        }),\n        map(() => translation(\"clipboard.copied\"))\n      )\n        .subscribe(alert$)\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  catchError,\n  map,\n  of\n} from \"rxjs\"\n\nimport {\n  getElement,\n  getElements,\n  requestXML\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Sitemap, i.e. a list of URLs\n */\nexport type Sitemap = Map<string, URL[]>\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Resolve URL to the given base URL\n *\n * When serving the site with instant navigation, MkDocs will set the hostname\n * to the value as specified in `dev_addr`, but the browser allows for several\n * hostnames to be used: `localhost`, `127.0.0.1` or even `0.0.0.0`, depending\n * on configuration. This function resolves the URL to the given hostname.\n *\n * @param url - URL\n * @param base - Base URL\n *\n * @returns Resolved URL\n */\nfunction resolve(url: URL, base: URL) {\n  url.protocol = base.protocol\n  url.hostname = base.hostname\n  return url\n}\n\n/**\n * Extract sitemap from document\n *\n * This function extracts the URLs and alternate links from the document, and\n * associates alternate links to the original URL as found in `loc`, allowing\n * the browser to navigate to the correct page when switching languages. 
The\n * format of the sitemap is expected to adhere to:\n *\n * ``` xml\n * <urlset>\n *   <url>\n *     <loc>...</loc>\n *     <xhtml:link rel=\"alternate\" hreflang=\"en\" href=\"...\"/>\n *     <xhtml:link rel=\"alternate\" hreflang=\"de\" href=\"...\"/>\n *     ...\n *   </url>\n *   ...\n * </urlset>\n * ```\n *\n * @param document - Document\n * @param base - Base URL\n *\n * @returns Sitemap\n */\nfunction extract(document: Document, base: URL): Sitemap {\n  const sitemap: Sitemap = new Map()\n  for (const el of getElements(\"url\", document)) {\n    const url = getElement(\"loc\", el)\n\n    // Create entry for location and add it to the list of links\n    const links = [resolve(new URL(url.textContent!), base)]\n    sitemap.set(`${links[0]}`, links)\n\n    // Attach alternate links to current entry\n    for (const link of getElements(\"[rel=alternate]\", el)) {\n      const href = link.getAttribute(\"href\")\n      if (href != null)\n        links.push(resolve(new URL(href), base))\n    }\n  }\n\n  // Return sitemap\n  return sitemap\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch the sitemap for the given base URL\n *\n * If a network or parsing error occurs, we just default to an empty sitemap,\n * which means the caller should fall back to regular navigation.\n *\n * @param base - Base URL\n *\n * @returns Sitemap observable\n */\nexport function fetchSitemap(base: URL | string): Observable<Sitemap> {\n  return requestXML(new URL(\"sitemap.xml\", base))\n    .pipe(\n      map(document => extract(document, new URL(base))),\n      catchError(() => of(new Map())),\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  catchError,\n  combineLatestWith,\n  concat,\n  debounceTime,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  of,\n  share,\n  switchMap,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { configuration, feature } from \"~/_\"\nimport {\n  Viewport,\n  getElements,\n  getLocation,\n  getOptionalElement,\n  requestHTML,\n  setLocation,\n  setLocationHash\n} from \"~/browser\"\nimport { getComponentElement } from \"~/components\"\n\nimport { Sitemap, fetchSitemap } from \"../sitemap\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n  location$: Subject<URL>              // Location subject\n  viewport$: Observable<Viewport>      // Viewport observable\n  progress$: Subject<number>           // Progress subject\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Handle clicks on internal URLs while skipping external URLs\n *\n * @param ev - Mouse event\n * @param sitemap - Sitemap\n *\n * @returns URL observable\n */\nfunction handle(\n  ev: MouseEvent, sitemap: Sitemap\n): Observable<URL> {\n  if (!(ev.target instanceof Element))\n    return EMPTY\n\n  // Skip, as target is not within a link - clicks on non-link elements are\n  // also captured, which we need to exclude from processing\n  const el = ev.target.closest(\"a\")\n  if (el === null)\n    return EMPTY\n\n  // Skip, as link opens in new window - we now know we have captured a click\n  // on a link, but the link either has a `target` property defined, or the\n  // user pressed the `meta` or `ctrl` key to open it in a new window. Thus,\n  // we need to filter this event as well.\n  if (el.target || ev.metaKey || ev.ctrlKey)\n    return EMPTY\n\n  // Next, we must check if the URL is relevant for us, i.e., if it's an\n  // internal link to a page that is managed by MkDocs. Only then we can be\n  // sure that the structure of the page to be loaded adheres to the current\n  // document structure and can subsequently be injected into it without doing\n  // a full reload. For this reason, we must canonicalize the URL by removing\n  // all search parameters and hash fragments.\n  const url = new URL(el.href)\n  url.search = url.hash = \"\"\n\n  // Skip, if URL is not included in the sitemap - this could be the case when\n  // linking between versions or languages, or to another page that the author\n  // included as part of the build, but that is not managed by MkDocs. In that\n  // case we must not continue with instant navigation.\n  if (!sitemap.has(`${url}`))\n    return EMPTY\n\n  // We now know that we have a link to an internal page, so we prevent the\n  // browser from navigation and emit the URL for instant navigation. Note that\n  // this also includes anchor links, which means we need to implement anchor\n  // positioning ourselves. 
The reason for this is that if we wouldn't manage\n  // anchor links as well, scroll restoration will not work correctly (e.g.\n  // following an anchor link and scrolling).\n  ev.preventDefault()\n  return of(new URL(el.href))\n}\n\n/**\n * Create a map of head elements for lookup and replacement\n *\n * @param document - Document\n *\n * @returns Tag map\n */\nfunction head(document: Document): Map<string, HTMLElement> {\n  const tags = new Map<string, HTMLElement>()\n  for (const el of getElements(\":scope > *\", document.head))\n    tags.set(el.outerHTML, el)\n\n  // Return tag map\n  return tags\n}\n\n/**\n * Resolve relative URLs in the given document\n *\n * This function resolves relative `href` and `src` attributes, which can belong\n * to all sorts of tags, like meta tags, links, images, scripts and more.\n *\n * @param document - Document\n *\n * @returns Document observable\n */\nfunction resolve(document: Document): Observable<Document> {\n  for (const el of getElements(\"[href], [src]\", document))\n    for (const key of [\"href\", \"src\"]) {\n      const value = el.getAttribute(key)\n      if (value && !/^(?:[a-z]+:)?\\/\\//i.test(value)) {\n        // @ts-expect-error - trick: self-assign to resolve URL\n        el[key] = el[key]\n        break\n      }\n    }\n\n  // Return document observable\n  return of(document)\n}\n\n/**\n * Inject the contents of a document into the current one\n *\n * @param next - Next document\n *\n * @returns Document observable\n */\nfunction inject(next: Document): Observable<Document> {\n  for (const selector of [\n    \"[data-md-component=announce]\",\n    \"[data-md-component=container]\",\n    \"[data-md-component=header-topic]\",\n    \"[data-md-component=outdated]\",\n    \"[data-md-component=logo]\",\n    \"[data-md-component=skip]\",\n    ...feature(\"navigation.tabs.sticky\")\n      ? 
[\"[data-md-component=tabs]\"]\n      : []\n  ]) {\n    const source = getOptionalElement(selector)\n    const target = getOptionalElement(selector, next)\n    if (\n      typeof source !== \"undefined\" &&\n      typeof target !== \"undefined\"\n    ) {\n      source.replaceWith(target)\n    }\n  }\n\n  // Update meta tags\n  const tags = head(document)\n  for (const [html, el] of head(next))\n    if (tags.has(html))\n      tags.delete(html)\n    else\n      document.head.appendChild(el)\n\n  // Remove meta tags that are not present in the new document\n  for (const el of tags.values()) {\n    const name = el.getAttribute(\"name\")\n    // @todo - find a better way to handle attributes we add dynamically in\n    // other components without mounting components on every navigation, as\n    // this might impact overall performance - see https://t.ly/ehp_O\n    if (name !== \"theme-color\" && name !== \"color-scheme\")\n      el.remove()\n  }\n\n  // After components and meta tags were replaced, re-evaluate scripts\n  // that were provided by the author as part of Markdown files\n  const container = getComponentElement(\"container\")\n  return concat(getElements(\"script\", container))\n    .pipe(\n      switchMap(el => {\n        const script = next.createElement(\"script\")\n        if (el.src) {\n          for (const name of el.getAttributeNames())\n            script.setAttribute(name, el.getAttribute(name)!)\n          el.replaceWith(script)\n\n          // Complete when script is loaded\n          return new Observable(observer => {\n            script.onload = () => observer.complete()\n          })\n\n        // Complete immediately\n        } else {\n          script.textContent = el.textContent\n          el.replaceWith(script)\n          return EMPTY\n        }\n      }),\n      ignoreElements(),\n      endWith(document)\n    )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up instant navigation\n *\n * This is a heavily orchestrated operation - see inline comments to learn how\n * this works with Material for MkDocs, and how you can hook into it.\n *\n * @param options - Options\n *\n * @returns Document observable\n */\nexport function setupInstantNavigation(\n  { location$, viewport$, progress$ }: SetupOptions\n): Observable<Document> {\n  const config = configuration()\n  if (location.protocol === \"file:\")\n    return EMPTY\n\n  // Load sitemap immediately, so we have it available when the user initiates\n  // the first navigation request without any perceivable delay\n  const sitemap$ = fetchSitemap(config.base)\n\n  // Since we might be on a slow connection, the user might trigger multiple\n  // instant navigation events that overlap. MkDocs produces relative URLs for\n  // all internal links, which becomes a problem in this case, because we need\n  // to change the base URL the moment the user clicks a link that should be\n  // intercepted in order to be consistent with popstate, which means that the\n  // base URL would now be incorrect when resolving another relative link from\n  // the same site. 
For this reason we always resolve all relative links to\n  // absolute links, so we can be sure this never happens.\n  of(document)\n    .subscribe(resolve)\n\n  // --------------------------------------------------------------------------\n  // Navigation interception\n  // --------------------------------------------------------------------------\n\n  // Intercept navigation - to keep the number of event listeners down we use\n  // the fact that uncaptured events bubble up to the body. This has the nice\n  // property that we don't need to detach and then re-attach event listeners\n  // when the document is replaced after a navigation event.\n  const instant$ =\n    fromEvent<MouseEvent>(document.body, \"click\")\n      .pipe(\n        combineLatestWith(sitemap$),\n        switchMap(([ev, sitemap]) => handle(ev, sitemap)),\n        share()\n      )\n\n  // Intercept history change events, e.g. when the user uses the browser's\n  // back or forward buttons, and emit new location for fetching and parsing\n  const history$ =\n    fromEvent<PopStateEvent>(window, \"popstate\")\n      .pipe(\n        map(getLocation),\n        share()\n      )\n\n  // While it would be better UX to defer navigation events until the document\n  // is fully fetched and parsed, we must schedule it here to synchronize with\n  // popstate events, as they are emitted immediately. Moreover we need to\n  // store the current viewport offset for scroll restoration later on.\n  instant$.pipe(withLatestFrom(viewport$))\n    .subscribe(([url, { offset }]) => {\n      history.replaceState(offset, \"\")\n      history.pushState(null, \"\", url)\n    })\n\n  // Emit URLs that should be fetched via instant navigation on location subject\n  // which was passed into this function. The state of instant navigation can be\n  // intercepted by other parts of the application, which can synchronously back\n  // up or restore state before or after instant navigation happens.\n  merge(instant$, history$)\n    .subscribe(location$)\n\n  // --------------------------------------------------------------------------\n  // Fetching and parsing\n  // --------------------------------------------------------------------------\n\n  // Fetch document - we deduplicate requests to the same location, so we don't\n  // end up with multiple requests for the same page. We use `switchMap`, since\n  // we want to cancel the previous request when a new one is triggered, which\n  // is automatically handled by the observable returned by `request`. This is\n  // essential to ensure a good user experience, as we don't want to load pages\n  // that are not needed anymore, e.g., when the user clicks multiple links in\n  // quick succession or on slow connections. 
If the request fails for some\n  // reason, we fall back and use regular navigation, forcing a reload.\n  const document$ =\n    location$.pipe(\n      distinctUntilKeyChanged(\"pathname\"),\n      switchMap(url => requestHTML(url, { progress$ })\n        .pipe(\n          catchError(() => {\n            setLocation(url, true)\n            return EMPTY\n          })\n        )\n      ),\n\n      // The document was successfully fetched and parsed, so we can inject its\n      // contents into the currently active document\n      switchMap(resolve),\n      switchMap(inject),\n      share()\n    )\n\n  // --------------------------------------------------------------------------\n  // Scroll restoration\n  // --------------------------------------------------------------------------\n\n  // Handle scroll restoration - we must restore the viewport offset after the\n  // document has been fetched and injected, and every time the user clicks an\n  // anchor that leads to an element on the same page, which might also happen\n  // when the user uses the back or forward button.\n  merge(\n    document$.pipe(withLatestFrom(location$, (_, url) => url)),\n\n    // Handle instant navigation events that are triggered by the user clicking\n    // on an anchor link with a hash fragment different from the current one, as\n    // well as from popstate events, which are emitted when the user navigates\n    // back and forth between pages. We use a two-layered subscription to scope\n    // the scroll restoration to the current page, as we don't need to restore\n    // the viewport offset when the user navigates to a different page, as this\n    // is already handled by the previous observable.\n    document$.pipe(\n      switchMap(() => location$),\n      distinctUntilKeyChanged(\"pathname\"),\n      switchMap(() => location$),\n      distinctUntilKeyChanged(\"hash\")\n    ),\n\n    // Handle instant navigation events that are triggered by the user clicking\n    // on an anchor link with the same hash fragment as the current one in the\n    // URL. It is essential that we only intercept those from instant navigation\n    // events and not from history change events, or we'll end up in and endless\n    // loop. The top-level history entry must be removed, as it will be replaced\n    // with a new one, which would otherwise lead to a duplicate entry.\n    location$.pipe(\n      distinctUntilChanged((a, b) => (\n        a.pathname === b.pathname &&\n        a.hash     === b.hash\n      )),\n      switchMap(() => instant$),\n      tap(() => history.back())\n    )\n  )\n    .subscribe(url => {\n\n      // Check if the current history entry has a state, which happens when the\n      // user presses the back or forward button to visit a page we've already\n      // seen. If there's no state, it means a new page was visited and we must\n      // scroll to the top, unless an anchor is given.\n      if (history.state !== null || !url.hash) {\n        window.scrollTo(0, history.state?.y ?? 
0)\n      } else {\n        history.scrollRestoration = \"auto\"\n        setLocationHash(url.hash)\n        history.scrollRestoration = \"manual\"\n      }\n    })\n\n  // Disable scroll restoration when an instant navigation event occurs, so the\n  // browser does not immediately set the viewport offset to the prior history\n  // entry, scrolling to the position on the same page, which would look odd.\n  // Instead, we manually restore the position once the page has loaded.\n  location$.subscribe(() => {\n    history.scrollRestoration = \"manual\"\n  })\n\n  // Enable scroll restoration before window unloads - this is essential to\n  // ensure that full reloads (F5) restore the viewport offset correctly. If\n  // only popstate events wouldn't reset the viewport offset prior to their\n  // emission, we could just reset this in popstate. Meh.\n  fromEvent(window, \"beforeunload\")\n    .subscribe(() => {\n      history.scrollRestoration = \"auto\"\n    })\n\n  // Track viewport offset, so we can restore it when the user navigates back\n  // and forth between pages. Note that this must be debounced and cannot be\n  // done in popstate, as popstate has already removed the entry from the\n  // history, which means it is too late.\n  viewport$.pipe(\n    distinctUntilKeyChanged(\"offset\"),\n    debounceTime(100)\n  )\n    .subscribe(({ offset }) => {\n      history.replaceState(offset, \"\")\n    })\n\n  // Return document observable\n  return document$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\n\nimport { SearchConfig } from \"../config\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlight function\n *\n * @param value - Value\n *\n * @returns Highlighted value\n */\nexport type SearchHighlightFn = (value: string) => string\n\n/**\n * Search highlight factory function\n *\n * @param query - Query value\n *\n * @returns Search highlight function\n */\nexport type SearchHighlightFactoryFn = (query: string) => SearchHighlightFn\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a search highlighter\n *\n * @param config - Search configuration\n *\n * @returns Search highlight factory function\n */\nexport function setupSearchHighlighter(\n  config: SearchConfig\n): SearchHighlightFactoryFn {\n  // Hack: temporarily remove pure lookaheads and lookbehinds\n  const regex = config.separator.split(\"|\").map(term => {\n    const temp = term.replace(/(\\(\\?[!=<][^)]+\\))/g, \"\")\n    return temp.length === 0 ? \"\uFFFD\" : term\n  })\n    .join(\"|\")\n\n  const separator = new RegExp(regex, \"img\")\n  const highlight = (_: unknown, data: string, term: string) => {\n    return `${data}<mark data-md-highlight>${term}</mark>`\n  }\n\n  /* Return factory function */\n  return (query: string) => {\n    query = query\n      .replace(/[\\s*+\\-:~^]+/g, \" \")\n      .trim()\n\n    /* Create search term match expression */\n    const match = new RegExp(`(^|${config.separator}|)(${\n      query\n        .replace(/[|\\\\{}()[\\]^$+*?.-]/g, \"\\\\$&\")\n        .replace(separator, \"|\")\n    })`, \"img\")\n\n    /* Highlight string value */\n    return value => escapeHTML(value)\n      .replace(match, highlight)\n      .replace(/<\\/mark>(\\s+)<mark[^>]*>/img, \"$1\")\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A RTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { SearchResult } from \"../../_\"\nimport { SearchIndex } from \"../../config\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search message type\n */\nexport const enum SearchMessageType {\n  SETUP,                               /* Search index setup */\n  READY,                               /* Search index ready */\n  QUERY,                               /* Search query */\n  RESULT                               /* Search results */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Message containing the data necessary to setup the search index\n */\nexport interface SearchSetupMessage {\n  type: SearchMessageType.SETUP        /* Message type */\n  data: SearchIndex                    /* Message data */\n}\n\n/**\n * Message indicating the search index is ready\n */\nexport interface SearchReadyMessage {\n  type: SearchMessageType.READY        /* Message type */\n}\n\n/**\n * Message containing a search query\n */\nexport interface SearchQueryMessage {\n  type: SearchMessageType.QUERY        /* Message type */\n  data: string                         /* Message data */\n}\n\n/**\n * Message containing results for a search query\n */\nexport interface SearchResultMessage {\n  type: SearchMessageType.RESULT       /* Message type */\n  data: SearchResult                   /* Message data */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Message exchanged with the search worker\n */\nexport type SearchMessage =\n  | SearchSetupMessage\n  | SearchReadyMessage\n  | SearchQueryMessage\n  | SearchResultMessage\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Type guard for search ready messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchReadyMessage(\n  message: SearchMessage\n): message is SearchReadyMessage {\n  return message.type === SearchMessageType.READY\n}\n\n/**\n * Type guard for search result messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchResultMessage(\n  message: SearchMessage\n): message is SearchResultMessage {\n  return message.type === SearchMessageType.RESULT\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A RTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  ObservableInput,\n  Subject,\n  first,\n  merge,\n  of,\n  switchMap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { watchToggle, watchWorker } from \"~/browser\"\n\nimport { SearchIndex } from \"../../config\"\nimport {\n  SearchMessage,\n  SearchMessageType\n} from \"../message\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up search worker\n *\n * This function creates and initializes a web worker that is used for search,\n * so that the user interface doesn't freeze. In general, the application does\n * not care how search is implemented, as long as the web worker conforms to\n * the format expected by the application as defined in `SearchMessage`. This\n * allows the author to implement custom search functionality, by providing a\n * custom web worker via configuration.\n *\n * Material for MkDocs' built-in search implementation makes use of Lunr.js, an\n * efficient and fast implementation for client-side search. Leveraging a tiny\n * iframe-based web worker shim, search is even supported for the `file://`\n * protocol, enabling search for local non-hosted builds.\n *\n * If the protocol is `file://`, search initialization is deferred to mitigate\n * freezing, as it's now synchronous by design - see https://bit.ly/3C521EO\n *\n * @see https://bit.ly/3igvtQv - How to implement custom search\n *\n * @param url - Worker URL\n * @param index$ - Search index observable input\n *\n * @returns Search worker\n */\nexport function setupSearchWorker(\n  url: string, index$: ObservableInput<SearchIndex>\n): Subject<SearchMessage> {\n  const worker$ = watchWorker<SearchMessage>(url)\n  merge(\n    of(location.protocol !== \"file:\"),\n    watchToggle(\"search\")\n  )\n    .pipe(\n      first(active => active),\n      switchMap(() => index$)\n    )\n      .subscribe(({ config, docs }) => worker$.next({\n        type: SearchMessageType.SETUP,\n        data: {\n          config,\n          docs,\n          options: {\n            suggest: feature(\"search.suggest\")\n          }\n        }\n      }))\n\n  /* Return search worker */\n  return worker$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A 
PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Subject,\n  catchError,\n  combineLatest,\n  filter,\n  fromEvent,\n  map,\n  of,\n  switchMap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { configuration } from \"~/_\"\nimport {\n  getElement,\n  getLocation,\n  requestJSON,\n  setLocation\n} from \"~/browser\"\nimport { getComponentElements } from \"~/components\"\nimport {\n  Version,\n  renderVersionSelector\n} from \"~/templates\"\n\nimport { fetchSitemap } from \"../sitemap\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n  document$: Subject<Document>         /* Document subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up version selector\n *\n * @param options - Options\n */\nexport function setupVersionSelector(\n  { document$ }: SetupOptions\n): void {\n  const config = configuration()\n  const versions$ = requestJSON<Version[]>(\n    new URL(\"../versions.json\", config.base)\n  )\n    .pipe(\n      catchError(() => EMPTY) // @todo refactor instant loading\n    )\n\n  /* Determine current version */\n  const current$ = versions$\n    .pipe(\n      map(versions => {\n        const [, current] = config.base.match(/([^/]+)\\/?$/)!\n        return versions.find(({ version, aliases }) => (\n          version === current || aliases.includes(current)\n        )) || versions[0]\n      })\n    )\n\n  /* Intercept inter-version navigation */\n  versions$\n    .pipe(\n      map(versions => new Map(versions.map(version => [\n        `${new URL(`../${version.version}/`, config.base)}`,\n        version\n      ]))),\n      switchMap(urls => fromEvent<MouseEvent>(document.body, \"click\")\n        .pipe(\n          filter(ev => !ev.metaKey && !ev.ctrlKey),\n          withLatestFrom(current$),\n          switchMap(([ev, current]) => {\n            if (ev.target instanceof Element) {\n              const el = ev.target.closest(\"a\")\n              if (el && !el.target && urls.has(el.href)) {\n                const url = el.href\n                // This is a temporary hack to detect if a version inside the\n                // version selector or on another part of the site was clicked.\n                // If we're inside the version selector, we definitely want to\n                // find the same page, as we might have different deployments\n                // due to aliases. However, if we're outside the version\n                // selector, we must abort here, because we might otherwise\n                // interfere with instant navigation. 
We need to refactor this\n                // at some point together with instant navigation.\n                //\n                // See https://github.com/squidfunk/mkdocs-material/issues/4012\n                if (!ev.target.closest(\".md-version\")) {\n                  const version = urls.get(url)!\n                  if (version === current)\n                    return EMPTY\n                }\n                ev.preventDefault()\n                return of(url)\n              }\n            }\n            return EMPTY\n          }),\n          switchMap(url => {\n            return fetchSitemap(new URL(url))\n              .pipe(\n                map(sitemap => {\n                  const location = getLocation()\n                  const path = location.href.replace(config.base, url)\n                  return sitemap.has(path.split(\"#\")[0])\n                    ? new URL(path)\n                    : new URL(url)\n                })\n              )\n          })\n        )\n      )\n    )\n      .subscribe(url => setLocation(url, true))\n\n  /* Render version selector and warning */\n  combineLatest([versions$, current$])\n    .subscribe(([versions, current]) => {\n      const topic = getElement(\".md-header__topic\")\n      topic.appendChild(renderVersionSelector(versions, current))\n    })\n\n  /* Integrate outdated version banner with instant navigation */\n  document$.pipe(switchMap(() => current$))\n    .subscribe(current => {\n\n      /* Check if version state was already determined */\n      let outdated = __md_get(\"__outdated\", sessionStorage)\n      if (outdated === null) {\n        outdated = true\n\n        /* Obtain and normalize default versions */\n        let ignored = config.version?.default || \"latest\"\n        if (!Array.isArray(ignored))\n          ignored = [ignored]\n\n        /* Check if version is considered a default */\n        main: for (const ignore of ignored)\n          for (const version of current.aliases.concat(current.version))\n            if (new RegExp(ignore, \"i\").test(version)) {\n              outdated = false\n              break main\n            }\n\n        /* Persist version state in session storage */\n        __md_set(\"__outdated\", outdated, sessionStorage)\n      }\n\n      /* Unhide outdated version banner */\n      if (outdated)\n        for (const warning of getComponentElements(\"outdated\"))\n          warning.hidden = false\n    })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  combineLatest,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  finalize,\n  first,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  shareReplay,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport {\n  getElement,\n  getLocation,\n  setToggle,\n  watchElementFocus,\n  watchToggle\n} from \"~/browser\"\nimport {\n  SearchMessage,\n  SearchMessageType,\n  isSearchReadyMessage\n} from \"~/integrations\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search query\n */\nexport interface SearchQuery {\n  value: string                        /* Query value */\n  focus: boolean                       /* Query focus */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch search query\n *\n * Note that the focus event which triggers re-reading the current query value\n * is delayed by `1ms` so the input's empty state is allowed to propagate.\n *\n * @param el - Search query element\n * @param options - Options\n *\n * @returns Search query observable\n */\nexport function watchSearchQuery(\n  el: HTMLInputElement, { worker$ }: WatchOptions\n): Observable<SearchQuery> {\n\n  /* Support search deep linking */\n  const { searchParams } = getLocation()\n  if (searchParams.has(\"q\")) {\n    setToggle(\"search\", true)\n\n    /* Set query from parameter */\n    el.value = searchParams.get(\"q\")!\n    el.focus()\n\n    /* Remove query parameter on close */\n    watchToggle(\"search\")\n      .pipe(\n        first(active => !active)\n      )\n        .subscribe(() => {\n          const url = getLocation()\n          url.searchParams.delete(\"q\")\n          history.replaceState({}, \"\", `${url}`)\n        })\n  }\n\n  /* Intercept focus and input events */\n  const focus$ = watchElementFocus(el)\n  const value$ = merge(\n    worker$.pipe(first(isSearchReadyMessage)),\n    fromEvent(el, \"keyup\"),\n    focus$\n  )\n    .pipe(\n      map(() => el.value),\n      distinctUntilChanged()\n    )\n\n  /* Combine into single observable */\n  return combineLatest([value$, focus$])\n    .pipe(\n      map(([value, focus]) => ({ value, focus })),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount search query\n *\n * @param el - Search query element\n * @param options - Options\n *\n * @returns Search query component observable\n */\nexport function mountSearchQuery(\n  el: HTMLInputElement, { worker$ }: MountOptions\n): Observable<Component<SearchQuery, HTMLInputElement>> {\n  const push$ = new Subject<SearchQuery>()\n  const done$ = 
push$.pipe(ignoreElements(), endWith(true))\n\n  /* Handle value change */\n  combineLatest([\n    worker$.pipe(first(isSearchReadyMessage)),\n    push$\n  ], (_, query) => query)\n    .pipe(\n      distinctUntilKeyChanged(\"value\")\n    )\n      .subscribe(({ value }) => worker$.next({\n        type: SearchMessageType.QUERY,\n        data: value\n      }))\n\n  /* Handle focus change */\n  push$\n    .pipe(\n      distinctUntilKeyChanged(\"focus\")\n    )\n      .subscribe(({ focus }) => {\n        if (focus)\n          setToggle(\"search\", focus)\n      })\n\n  /* Handle reset */\n  fromEvent(el.form!, \"reset\")\n    .pipe(\n      takeUntil(done$)\n    )\n      .subscribe(() => el.focus())\n\n  // Focus search query on label click - note that this is necessary to bring\n  // up the keyboard on iOS and other mobile platforms, as the search dialog is\n  // not visible at first, and programatically focusing an input element must\n  // be triggered by a user interaction - see https://t.ly/Cb30n\n  const label = getElement(\"header [for=__search]\")\n  fromEvent(label, \"click\")\n    .subscribe(() => el.focus())\n\n  /* Create and return component */\n  return watchSearchQuery(el, { worker$ })\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state })),\n      shareReplay(1)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  bufferCount,\n  filter,\n  finalize,\n  first,\n  fromEvent,\n  map,\n  merge,\n  mergeMap,\n  of,\n  share,\n  skipUntil,\n  switchMap,\n  takeUntil,\n  tap,\n  withLatestFrom,\n  zipWith\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport {\n  getElement,\n  getOptionalElement,\n  watchElementBoundary,\n  watchToggle\n} from \"~/browser\"\nimport {\n  SearchMessage,\n  SearchResult,\n  isSearchReadyMessage,\n  isSearchResultMessage\n} from \"~/integrations\"\nimport { renderSearchResultItem } from \"~/templates\"\nimport { round } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\nimport { SearchQuery } from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  query$: Observable<SearchQuery>      /* Search query observable */\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search result list\n *\n * This function performs a lazy rendering of the search results, depending on\n * the vertical offset of the search result container.\n *\n * @param el - Search result list element\n * @param options - Options\n *\n * @returns Search result list component observable\n */\nexport function mountSearchResult(\n  el: HTMLElement, { worker$, query$ }: MountOptions\n): Observable<Component<SearchResult>> {\n  const push$ = new Subject<SearchResult>()\n  const boundary$ = watchElementBoundary(el.parentElement!)\n    .pipe(\n      filter(Boolean)\n    )\n\n  /* Retrieve container */\n  const container = el.parentElement!\n\n  /* Retrieve nested components */\n  const meta = getElement(\":scope > :first-child\", el)\n  const list = getElement(\":scope > :last-child\", el)\n\n  /* Reveal to accessibility tree \u2013 see https://bit.ly/3iAA7t8 */\n  watchToggle(\"search\")\n    .subscribe(active => list.setAttribute(\n      \"role\", active ? \"list\" : \"presentation\"\n    ))\n\n  /* Update search result metadata */\n  push$\n    .pipe(\n      withLatestFrom(query$),\n      skipUntil(worker$.pipe(first(isSearchReadyMessage)))\n    )\n      .subscribe(([{ items }, { value }]) => {\n        switch (items.length) {\n\n          /* No results */\n          case 0:\n            meta.textContent = value.length\n              ? 
translation(\"search.result.none\")\n              : translation(\"search.result.placeholder\")\n            break\n\n          /* One result */\n          case 1:\n            meta.textContent = translation(\"search.result.one\")\n            break\n\n          /* Multiple result */\n          default:\n            const count = round(items.length)\n            meta.textContent = translation(\"search.result.other\", count)\n        }\n      })\n\n  /* Render search result item */\n  const render$ = push$\n    .pipe(\n      tap(() => list.innerHTML = \"\"),\n      switchMap(({ items }) => merge(\n        of(...items.slice(0, 10)),\n        of(...items.slice(10))\n          .pipe(\n            bufferCount(4),\n            zipWith(boundary$),\n            switchMap(([chunk]) => chunk)\n          )\n      )),\n      map(renderSearchResultItem),\n      share()\n    )\n\n  /* Update search result list */\n  render$.subscribe(item => list.appendChild(item))\n  render$\n    .pipe(\n      mergeMap(item => {\n        const details = getOptionalElement(\"details\", item)\n        if (typeof details === \"undefined\")\n          return EMPTY\n\n        /* Keep position of details element stable */\n        return fromEvent(details, \"toggle\")\n          .pipe(\n            takeUntil(push$),\n            map(() => details)\n          )\n      })\n    )\n      .subscribe(details => {\n        if (\n          details.open === false &&\n          details.offsetTop <= container.scrollTop\n        )\n          container.scrollTo({ top: details.offsetTop })\n      })\n\n  /* Filter search result message */\n  const result$ = worker$\n    .pipe(\n      filter(isSearchResultMessage),\n      map(({ data }) => data)\n    )\n\n  /* Create and return component */\n  return result$\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  endWith,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport { getLocation } from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { SearchQuery } from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search sharing\n */\nexport interface SearchShare {\n  url: URL                             /* Deep link for sharing */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  query$: Observable<SearchQuery>      /* Search query observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  query$: Observable<SearchQuery>      /* Search query observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search sharing\n *\n * @param _el - Search sharing element\n * @param options - Options\n *\n * @returns Search sharing observable\n */\nexport function watchSearchShare(\n  _el: HTMLElement, { query$ }: WatchOptions\n): Observable<SearchShare> {\n  return query$\n    .pipe(\n      map(({ value }) => {\n        const url = getLocation()\n        url.hash = \"\"\n\n        /* Compute readable query strings */\n        value = value\n          .replace(/\\s+/g, \"+\")        /* Collapse whitespace */\n          .replace(/&/g, \"%26\")        /* Escape '&' character */\n          .replace(/=/g, \"%3D\")        /* Escape '=' character */\n\n        /* Replace query string */\n        url.search = `q=${value}`\n        return { url }\n      })\n    )\n}\n\n/**\n * Mount search sharing\n *\n * @param el - Search sharing element\n * @param options - Options\n *\n * @returns Search sharing component observable\n */\nexport function mountSearchShare(\n  el: HTMLAnchorElement, options: MountOptions\n): Observable<Component<SearchShare>> {\n  const push$ = new Subject<SearchShare>()\n  const done$ = push$.pipe(ignoreElements(), endWith(true))\n  push$.subscribe(({ url }) => {\n    el.setAttribute(\"data-clipboard-text\", el.href)\n    el.href = `${url}`\n  })\n\n  /* Prevent following of link */\n  fromEvent(el, \"click\")\n    .pipe(\n      takeUntil(done$)\n    )\n      .subscribe(ev => ev.preventDefault())\n\n  /* Create and return component */\n  return watchSearchShare(el, options)\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell 
copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  asyncScheduler,\n  combineLatestWith,\n  distinctUntilChanged,\n  filter,\n  finalize,\n  fromEvent,\n  map,\n  merge,\n  observeOn,\n  tap\n} from \"rxjs\"\n\nimport { Keyboard } from \"~/browser\"\nimport {\n  SearchMessage,\n  SearchResult,\n  isSearchResultMessage\n} from \"~/integrations\"\n\nimport { Component, getComponentElement } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search suggestions\n */\nexport interface SearchSuggest {}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  keyboard$: Observable<Keyboard>      /* Keyboard observable */\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search suggestions\n *\n * This function will perform a lazy rendering of the search results, depending\n * on the vertical offset of the search result container.\n *\n * @param el - Search result list element\n * @param options - Options\n *\n * @returns Search result list component observable\n */\nexport function mountSearchSuggest(\n  el: HTMLElement, { worker$, keyboard$ }: MountOptions\n): Observable<Component<SearchSuggest>> {\n  const push$ = new Subject<SearchResult>()\n\n  /* Retrieve query component and track all changes */\n  const query  = getComponentElement(\"search-query\")\n  const query$ = merge(\n    fromEvent(query, \"keydown\"),\n    fromEvent(query, \"focus\")\n  )\n    .pipe(\n      observeOn(asyncScheduler),\n      map(() => query.value),\n      distinctUntilChanged(),\n    )\n\n  /* Update search suggestions */\n  push$\n    .pipe(\n      combineLatestWith(query$),\n      map(([{ suggest }, value]) => {\n        const words = value.split(/([\\s-]+)/)\n        if (suggest?.length && words[words.length - 1]) {\n          const last = suggest[suggest.length - 1]\n          if (last.startsWith(words[words.length - 1]))\n            words[words.length - 1] = last\n        } else {\n          words.length = 0\n        }\n        return words\n      })\n    )\n      .subscribe(words => el.innerHTML = words\n        .join(\"\")\n        .replace(/\\s/g, \"&nbsp;\")\n      )\n\n  /* Set up search keyboard handlers */\n  keyboard$\n    .pipe(\n      filter(({ mode }) => mode === \"search\")\n    )\n      .subscribe(key => {\n        
switch (key.type) {\n\n          /* Right arrow: accept current suggestion */\n          case \"ArrowRight\":\n            if (\n              el.innerText.length &&\n              query.selectionStart === query.value.length\n            )\n              query.value = el.innerText\n            break\n        }\n      })\n\n  /* Filter search result message */\n  const result$ = worker$\n    .pipe(\n      filter(isSearchResultMessage),\n      map(({ data }) => data)\n    )\n\n  /* Create and return component */\n  return result$\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(() => ({ ref: el }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  NEVER,\n  Observable,\n  ObservableInput,\n  filter,\n  fromEvent,\n  merge,\n  mergeWith\n} from \"rxjs\"\n\nimport { configuration } from \"~/_\"\nimport {\n  Keyboard,\n  getActiveElement,\n  getElements,\n  setToggle\n} from \"~/browser\"\nimport {\n  SearchIndex,\n  SearchResult,\n  setupSearchWorker\n} from \"~/integrations\"\n\nimport {\n  Component,\n  getComponentElement,\n  getComponentElements\n} from \"../../_\"\nimport {\n  SearchQuery,\n  mountSearchQuery\n} from \"../query\"\nimport { mountSearchResult } from \"../result\"\nimport {\n  SearchShare,\n  mountSearchShare\n} from \"../share\"\nimport {\n  SearchSuggest,\n  mountSearchSuggest\n} from \"../suggest\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search\n */\nexport type Search =\n  | SearchQuery\n  | SearchResult\n  | SearchShare\n  | SearchSuggest\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  index$: ObservableInput<SearchIndex> /* Search index observable */\n  keyboard$: Observable<Keyboard>      /* Keyboard observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search\n *\n * This function sets up the search functionality, including the underlying\n * web worker and all keyboard bindings.\n *\n * 
@param el - Search element\n * @param options - Options\n *\n * @returns Search component observable\n */\nexport function mountSearch(\n  el: HTMLElement, { index$, keyboard$ }: MountOptions\n): Observable<Component<Search>> {\n  const config = configuration()\n  try {\n    const worker$ = setupSearchWorker(config.search, index$)\n\n    /* Retrieve query and result components */\n    const query  = getComponentElement(\"search-query\", el)\n    const result = getComponentElement(\"search-result\", el)\n\n    /* Always close search on result selection */\n    fromEvent<PointerEvent>(el, \"click\")\n      .pipe(\n        filter(({ target }) => (\n          target instanceof Element && !!target.closest(\"a\")\n        ))\n      )\n        .subscribe(() => setToggle(\"search\", false))\n\n    /* Set up search keyboard handlers */\n    keyboard$\n      .pipe(\n        filter(({ mode }) => mode === \"search\")\n      )\n        .subscribe(key => {\n          const active = getActiveElement()\n          switch (key.type) {\n\n            /* Enter: go to first (best) result */\n            case \"Enter\":\n              if (active === query) {\n                const anchors = new Map<HTMLAnchorElement, number>()\n                for (const anchor of getElements<HTMLAnchorElement>(\n                  \":first-child [href]\", result\n                )) {\n                  const article = anchor.firstElementChild!\n                  anchors.set(anchor, parseFloat(\n                    article.getAttribute(\"data-md-score\")!\n                  ))\n                }\n\n                /* Go to result with highest score, if any */\n                if (anchors.size) {\n                  const [[best]] = [...anchors].sort(([, a], [, b]) => b - a)\n                  best.click()\n                }\n\n                /* Otherwise omit form submission */\n                key.claim()\n              }\n              break\n\n            /* Escape or Tab: close search */\n            case \"Escape\":\n            case \"Tab\":\n              setToggle(\"search\", false)\n              query.blur()\n              break\n\n            /* Vertical arrows: select previous or next search result */\n            case \"ArrowUp\":\n            case \"ArrowDown\":\n              if (typeof active === \"undefined\") {\n                query.focus()\n              } else {\n                const els = [query, ...getElements(\n                  \":not(details) > [href], summary, details[open] [href]\",\n                  result\n                )]\n                const i = Math.max(0, (\n                  Math.max(0, els.indexOf(active)) + els.length + (\n                    key.type === \"ArrowUp\" ? 
-1 : +1\n                  )\n                ) % els.length)\n                els[i].focus()\n              }\n\n              /* Prevent scrolling of page */\n              key.claim()\n              break\n\n            /* All other keys: hand to search query */\n            default:\n              if (query !== getActiveElement())\n                query.focus()\n          }\n        })\n\n    /* Set up global keyboard handlers */\n    keyboard$\n      .pipe(\n        filter(({ mode }) => mode === \"global\")\n      )\n        .subscribe(key => {\n          switch (key.type) {\n\n            /* Open search and select query */\n            case \"f\":\n            case \"s\":\n            case \"/\":\n              query.focus()\n              query.select()\n\n              /* Prevent scrolling of page */\n              key.claim()\n              break\n          }\n        })\n\n    /* Create and return component */\n    const query$ = mountSearchQuery(query, { worker$ })\n    return merge(\n      query$,\n      mountSearchResult(result, { worker$, query$ })\n    )\n      .pipe(\n        mergeWith(\n\n          /* Search sharing */\n          ...getComponentElements(\"search-share\", el)\n            .map(child => mountSearchShare(child, { query$ })),\n\n          /* Search suggestions */\n          ...getComponentElements(\"search-suggest\", el)\n            .map(child => mountSearchSuggest(child, { worker$, keyboard$ }))\n        )\n      )\n\n  /* Gracefully handle broken search */\n  } catch (err) {\n    el.hidden = true\n    return NEVER\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  ObservableInput,\n  combineLatest,\n  filter,\n  map,\n  startWith\n} from \"rxjs\"\n\nimport { getLocation } from \"~/browser\"\nimport {\n  SearchIndex,\n  setupSearchHighlighter\n} from \"~/integrations\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlighting\n */\nexport interface SearchHighlight {\n  nodes: Map<ChildNode, string>        /* Map of replacements */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  index$: ObservableInput<SearchIndex> /* Search index observable */\n  location$: Observable<URL>           /* Location observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search highlighting\n *\n * @param el - Content element\n * @param options - Options\n *\n * @returns Search highlighting component observable\n */\nexport function mountSearchHiglight(\n  el: HTMLElement, { index$, location$ }: MountOptions\n): Observable<Component<SearchHighlight>> {\n  return combineLatest([\n    index$,\n    location$\n      .pipe(\n        startWith(getLocation()),\n        filter(url => !!url.searchParams.get(\"h\"))\n      )\n  ])\n    .pipe(\n      map(([index, url]) => setupSearchHighlighter(index.config)(\n        url.searchParams.get(\"h\")!\n      )),\n      map(fn => {\n        const nodes = new Map<ChildNode, string>()\n\n        /* Traverse text nodes and collect matches */\n        const it = document.createNodeIterator(el, NodeFilter.SHOW_TEXT)\n        for (let node = it.nextNode(); node; node = it.nextNode()) {\n          if (node.parentElement?.offsetHeight) {\n            const original = node.textContent!\n            const replaced = fn(original)\n            if (replaced.length > original.length)\n              nodes.set(node as ChildNode, replaced)\n          }\n        }\n\n        /* Replace original nodes with matches */\n        for (const [node, text] of nodes) {\n          const { childNodes } = h(\"span\", null, text)\n          node.replaceWith(...Array.from(childNodes))\n        }\n\n        /* Return component */\n        return { ref: el, nodes }\n      })\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * 
all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  asyncScheduler,\n  auditTime,\n  combineLatest,\n  defer,\n  distinctUntilChanged,\n  endWith,\n  finalize,\n  first,\n  from,\n  fromEvent,\n  ignoreElements,\n  map,\n  mergeMap,\n  observeOn,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  getElement,\n  getElementOffset,\n  getElementSize,\n  getElements\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Sidebar\n */\nexport interface Sidebar {\n  height: number                       /* Sidebar height */\n  locked: boolean                      /* Sidebar is locked */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  main$: Observable<Main>              /* Main area observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch sidebar\n *\n * This function returns an observable that computes the visual parameters of\n * the sidebar which depends on the vertical viewport offset, as well as the\n * height of the main area. 
When the page is scrolled beyond the header, the\n * sidebar is locked and fills the remaining space.\n *\n * @param el - Sidebar element\n * @param options - Options\n *\n * @returns Sidebar observable\n */\nexport function watchSidebar(\n  el: HTMLElement, { viewport$, main$ }: WatchOptions\n): Observable<Sidebar> {\n  const parent = el.closest<HTMLElement>(\".md-grid\")!\n  const adjust =\n    parent.offsetTop -\n    parent.parentElement!.offsetTop\n\n  /* Compute the sidebar's available height and if it should be locked */\n  return combineLatest([main$, viewport$])\n    .pipe(\n      map(([{ offset, height }, { offset: { y } }]) => {\n        height = height\n          + Math.min(adjust, Math.max(0, y - offset))\n          - adjust\n        return {\n          height,\n          locked: y >= offset + adjust\n        }\n      }),\n      distinctUntilChanged((a, b) => (\n        a.height === b.height &&\n        a.locked === b.locked\n      ))\n    )\n}\n\n/**\n * Mount sidebar\n *\n * This function doesn't set the height of the actual sidebar, but of its first\n * child \u2013 the `.md-sidebar__scrollwrap` element in order to mitigiate jittery\n * sidebars when the footer is scrolled into view. At some point we switched\n * from `absolute` / `fixed` positioning to `sticky` positioning, significantly\n * reducing jitter in some browsers (respectively Firefox and Safari) when\n * scrolling from the top. However, top-aligned sticky positioning means that\n * the sidebar snaps to the bottom when the end of the container is reached.\n * This is what leads to the mentioned jitter, as the sidebar's height may be\n * updated too slowly.\n *\n * This behaviour can be mitigiated by setting the height of the sidebar to `0`\n * while preserving the padding, and the height on its first element.\n *\n * @param el - Sidebar element\n * @param options - Options\n *\n * @returns Sidebar component observable\n */\nexport function mountSidebar(\n  el: HTMLElement, { header$, ...options }: MountOptions\n): Observable<Component<Sidebar>> {\n  const inner = getElement(\".md-sidebar__scrollwrap\", el)\n  const { y } = getElementOffset(inner)\n  return defer(() => {\n    const push$ = new Subject<Sidebar>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    const next$ = push$\n      .pipe(\n        auditTime(0, animationFrameScheduler)\n      )\n\n    /* Update sidebar height and offset */\n    next$.pipe(withLatestFrom(header$))\n      .subscribe({\n\n        /* Handle emission */\n        next([{ height }, { height: offset }]) {\n          inner.style.height = `${height - 2 * y}px`\n          el.style.top       = `${offset}px`\n        },\n\n        /* Handle complete */\n        complete() {\n          inner.style.height = \"\"\n          el.style.top       = \"\"\n        }\n      })\n\n    /* Bring active item into view on initial load */\n    next$.pipe(first())\n      .subscribe(() => {\n        for (const item of getElements(\".md-nav__link--active[href]\", el)) {\n          if (!item.clientHeight) // skip invisible toc in left sidebar\n            continue\n          const container = item.closest<HTMLElement>(\".md-sidebar__scrollwrap\")!\n          if (typeof container !== \"undefined\") {\n            const offset = item.offsetTop - container.offsetTop\n            const { height } = getElementSize(container)\n            container.scrollTo({\n              top: offset - height / 2\n            })\n          }\n        }\n      })\n\n    /* Handle accessibility for expandable 
items, see https://bit.ly/3jaod9p */\n    from(getElements<HTMLLabelElement>(\"label[tabindex]\", el))\n      .pipe(\n        mergeMap(label => fromEvent(label, \"click\")\n          .pipe(\n            observeOn(asyncScheduler),\n            map(() => label),\n            takeUntil(done$)\n          )\n        )\n      )\n        .subscribe(label => {\n          const input = getElement<HTMLInputElement>(`[id=\"${label.htmlFor}\"]`)\n          const nav = getElement(`[aria-labelledby=\"${label.id}\"]`)\n          nav.setAttribute(\"aria-expanded\", `${input.checked}`)\n        })\n\n    /* Create and return component */\n    return watchSidebar(el, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Repo, User } from \"github-types\"\nimport {\n  EMPTY,\n  Observable,\n  catchError,\n  defaultIfEmpty,\n  map,\n  zip\n} from \"rxjs\"\n\nimport { requestJSON } from \"~/browser\"\n\nimport { SourceFacts } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * GitHub release (partial)\n */\ninterface Release {\n  tag_name: string                     /* Tag name */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch GitHub repository facts\n *\n * @param user - GitHub user or organization\n * @param repo - GitHub repository\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFactsFromGitHub(\n  user: string, repo?: string\n): Observable<SourceFacts> {\n  if (typeof repo !== \"undefined\") {\n    const url = `https://api.github.com/repos/${user}/${repo}`\n    return zip(\n\n      /* Fetch version */\n      requestJSON<Release>(`${url}/releases/latest`)\n        .pipe(\n          catchError(() => EMPTY), // @todo refactor instant loading\n          map(release => ({\n            version: release.tag_name\n          })),\n          defaultIfEmpty({})\n        ),\n\n      /* Fetch stars and forks */\n      requestJSON<Repo>(url)\n        .pipe(\n          catchError(() => EMPTY), // @todo 
refactor instant loading\n          map(info => ({\n            stars: info.stargazers_count,\n            forks: info.forks_count\n          })),\n          defaultIfEmpty({})\n        )\n    )\n      .pipe(\n        map(([release, info]) => ({ ...release, ...info }))\n      )\n\n  /* User or organization */\n  } else {\n    const url = `https://api.github.com/users/${user}`\n    return requestJSON<User>(url)\n      .pipe(\n        map(info => ({\n          repositories: info.public_repos\n        })),\n        defaultIfEmpty({})\n      )\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ProjectSchema } from \"gitlab\"\nimport {\n  EMPTY,\n  Observable,\n  catchError,\n  defaultIfEmpty,\n  map\n} from \"rxjs\"\n\nimport { requestJSON } from \"~/browser\"\n\nimport { SourceFacts } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch GitLab repository facts\n *\n * @param base - GitLab base\n * @param project - GitLab project\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFactsFromGitLab(\n  base: string, project: string\n): Observable<SourceFacts> {\n  const url = `https://${base}/api/v4/projects/${encodeURIComponent(project)}`\n  return requestJSON<ProjectSchema>(url)\n    .pipe(\n      catchError(() => EMPTY), // @todo refactor instant loading\n      map(({ star_count, forks_count }) => ({\n        stars: star_count,\n        forks: forks_count\n      })),\n      defaultIfEmpty({})\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT 
LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { EMPTY, Observable } from \"rxjs\"\n\nimport { fetchSourceFactsFromGitHub } from \"../github\"\nimport { fetchSourceFactsFromGitLab } from \"../gitlab\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository facts for repositories\n */\nexport interface RepositoryFacts {\n  stars?: number                       /* Number of stars */\n  forks?: number                       /* Number of forks */\n  version?: string                     /* Latest version */\n}\n\n/**\n * Repository facts for organizations\n */\nexport interface OrganizationFacts {\n  repositories?: number                /* Number of repositories */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Repository facts\n */\nexport type SourceFacts =\n  | RepositoryFacts\n  | OrganizationFacts\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch repository facts\n *\n * @param url - Repository URL\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFacts(\n  url: string\n): Observable<SourceFacts> {\n\n  /* Try to match GitHub repository */\n  let match = url.match(/^.+github\\.com\\/([^/]+)\\/?([^/]+)?/i)\n  if (match) {\n    const [, user, repo] = match\n    return fetchSourceFactsFromGitHub(user, repo)\n  }\n\n  /* Try to match GitLab repository */\n  match = url.match(/^.+?([^/]*gitlab[^/]+)\\/(.+?)\\/?$/i)\n  if (match) {\n    const [, base, slug] = match\n    return fetchSourceFactsFromGitLab(base, slug)\n  }\n\n  /* Fallback */\n  return EMPTY\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  catchError,\n  defer,\n  filter,\n  finalize,\n  map,\n  of,\n  shareReplay,\n  tap\n} from \"rxjs\"\n\nimport { getElement } from \"~/browser\"\nimport { ConsentDefaults } from \"~/components/consent\"\nimport { renderSourceFacts } from \"~/templates\"\n\nimport {\n  Component,\n  getComponentElements\n} from \"../../_\"\nimport {\n  SourceFacts,\n  fetchSourceFacts\n} from \"../facts\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository information\n */\nexport interface Source {\n  facts: SourceFacts                   /* Repository facts */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository information observable\n */\nlet fetch$: Observable<Source>\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch repository information\n *\n * This function tries to read the repository facts from session storage, and\n * if unsuccessful, fetches them from the underlying provider.\n *\n * @param el - Repository information element\n *\n * @returns Repository information observable\n */\nexport function watchSource(\n  el: HTMLAnchorElement\n): Observable<Source> {\n  return fetch$ ||= defer(() => {\n    const cached = __md_get<SourceFacts>(\"__source\", sessionStorage)\n    if (cached) {\n      return of(cached)\n    } else {\n\n      /* Check if consent is configured and was given */\n      const els = getComponentElements(\"consent\")\n      if (els.length) {\n        const consent = __md_get<ConsentDefaults>(\"__consent\")\n        if (!(consent && consent.github))\n          return EMPTY\n      }\n\n      /* Fetch repository facts */\n      return fetchSourceFacts(el.href)\n        .pipe(\n          tap(facts => __md_set(\"__source\", facts, sessionStorage))\n        )\n    }\n  })\n    .pipe(\n      catchError(() => EMPTY),\n      filter(facts => Object.keys(facts).length > 0),\n      map(facts => ({ facts })),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount repository information\n *\n * @param el - Repository information element\n *\n * @returns Repository information component observable\n */\nexport function mountSource(\n  el: HTMLAnchorElement\n): Observable<Component<Source>> {\n  const inner = getElement(\":scope > :last-child\", el)\n  return defer(() => {\n    const push$ = new Subject<Source>()\n    push$.subscribe(({ facts }) => {\n      inner.appendChild(renderSourceFacts(facts))\n      inner.classList.add(\"md-source__repository--active\")\n    })\n\n    /* Create and return component */\n    return watchSource(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person 
obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  distinctUntilKeyChanged,\n  finalize,\n  map,\n  of,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  watchElementSize,\n  watchViewportAt\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Navigation tabs\n */\nexport interface Tabs {\n  hidden: boolean                      /* Navigation tabs are hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch navigation tabs\n *\n * @param el - Navigation tabs element\n * @param options - Options\n *\n * @returns Navigation tabs observable\n */\nexport function watchTabs(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<Tabs> {\n  return watchElementSize(document.body)\n    .pipe(\n      switchMap(() => watchViewportAt(el, { header$, viewport$ })),\n      map(({ offset: { y } }) => {\n        return {\n          hidden: y >= 10\n        }\n      }),\n      distinctUntilKeyChanged(\"hidden\")\n    )\n}\n\n/**\n * Mount navigation tabs\n *\n * This function hides the navigation tabs when scrolling past the threshold\n * and makes them reappear in a nice CSS animation when scrolling back up.\n *\n * @param el - Navigation tabs element\n * @param options - Options\n *\n * @returns Navigation tabs component observable\n */\nexport function mountTabs(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Tabs>> {\n  return defer(() => {\n    const push$ = new Subject<Tabs>()\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ hidden }) {\n        el.hidden = 
hidden\n      },\n\n      /* Handle complete */\n      complete() {\n        el.hidden = false\n      }\n    })\n\n    /* Create and return component */\n    return (\n      feature(\"navigation.tabs.sticky\")\n        ? of({ hidden: false })\n        : watchTabs(el, options)\n    )\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  asyncScheduler,\n  bufferCount,\n  combineLatestWith,\n  debounceTime,\n  defer,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  filter,\n  finalize,\n  ignoreElements,\n  map,\n  merge,\n  observeOn,\n  of,\n  repeat,\n  scan,\n  share,\n  skip,\n  startWith,\n  switchMap,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElement,\n  getElementContainer,\n  getElementSize,\n  getElements,\n  getLocation,\n  getOptionalElement,\n  watchElementSize\n} from \"~/browser\"\n\nimport {\n  Component,\n  getComponentElement\n} from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Table of contents\n */\nexport interface TableOfContents {\n  prev: HTMLAnchorElement[][]          /* Anchors (previous) */\n  next: HTMLAnchorElement[][]          /* Anchors (next) */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * 
------------------------------------------------------------------------- */\n\n/**\n * Watch table of contents\n *\n * This is effectively a scroll spy implementation which will account for the\n * fixed header and automatically re-calculate anchor offsets when the viewport\n * is resized. The returned observable will only emit if the table of contents\n * needs to be repainted.\n *\n * This implementation tracks an anchor element's entire path starting from its\n * level up to the top-most anchor element, e.g. `[h3, h2, h1]`. Although the\n * Material theme currently doesn't make use of this information, it enables\n * the styling of the entire hierarchy through customization.\n *\n * Note that the current anchor is the last item of the `prev` anchor list.\n *\n * @param el - Table of contents element\n * @param options - Options\n *\n * @returns Table of contents observable\n */\nexport function watchTableOfContents(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<TableOfContents> {\n  const table = new Map<HTMLAnchorElement, HTMLElement>()\n\n  /* Compute anchor-to-target mapping */\n  const anchors = getElements<HTMLAnchorElement>(\".md-nav__link\", el)\n  for (const anchor of anchors) {\n    const id = decodeURIComponent(anchor.hash.substring(1))\n    const target = getOptionalElement(`[id=\"${id}\"]`)\n    if (typeof target !== \"undefined\")\n      table.set(anchor, target)\n  }\n\n  /* Compute necessary adjustment for header */\n  const adjust$ = header$\n    .pipe(\n      distinctUntilKeyChanged(\"height\"),\n      map(({ height }) => {\n        const main = getComponentElement(\"main\")\n        const grid = getElement(\":scope > :first-child\", main)\n        return height + 0.8 * (\n          grid.offsetTop -\n          main.offsetTop\n        )\n      }),\n      share()\n    )\n\n  /* Compute partition of previous and next anchors */\n  const partition$ = watchElementSize(document.body)\n    .pipe(\n      distinctUntilKeyChanged(\"height\"),\n\n      /* Build index to map anchor paths to vertical offsets */\n      switchMap(body => defer(() => {\n        let path: HTMLAnchorElement[] = []\n        return of([...table].reduce((index, [anchor, target]) => {\n          while (path.length) {\n            const last = table.get(path[path.length - 1])!\n            if (last.tagName >= target.tagName) {\n              path.pop()\n            } else {\n              break\n            }\n          }\n\n          /* If the current anchor is hidden, continue with its parent */\n          let offset = target.offsetTop\n          while (!offset && target.parentElement) {\n            target = target.parentElement\n            offset = target.offsetTop\n          }\n\n          /* Fix anchor offsets in tables - see https://bit.ly/3CUFOcn */\n          let parent = target.offsetParent as HTMLElement\n          for (; parent; parent = parent.offsetParent as HTMLElement)\n            offset += parent.offsetTop\n\n          /* Map reversed anchor path to vertical offset */\n          return index.set(\n            [...path = [...path, anchor]].reverse(),\n            offset\n          )\n        }, new Map<HTMLAnchorElement[], number>()))\n      })\n        .pipe(\n\n          /* Sort index by vertical offset (see https://bit.ly/30z6QSO) */\n          map(index => new Map([...index].sort(([, a], [, b]) => a - b))),\n          combineLatestWith(adjust$),\n\n          /* Re-compute partition when viewport offset changes */\n          switchMap(([index, adjust]) => 
viewport$\n            .pipe(\n              scan(([prev, next], { offset: { y }, size }) => {\n                const last = y + size.height >= Math.floor(body.height)\n\n                /* Look forward */\n                while (next.length) {\n                  const [, offset] = next[0]\n                  if (offset - adjust < y || last) {\n                    prev = [...prev, next.shift()!]\n                  } else {\n                    break\n                  }\n                }\n\n                /* Look backward */\n                while (prev.length) {\n                  const [, offset] = prev[prev.length - 1]\n                  if (offset - adjust >= y && !last) {\n                    next = [prev.pop()!, ...next]\n                  } else {\n                    break\n                  }\n                }\n\n                /* Return partition */\n                return [prev, next]\n              }, [[], [...index]]),\n              distinctUntilChanged((a, b) => (\n                a[0] === b[0] &&\n                a[1] === b[1]\n              ))\n            )\n          )\n        )\n      )\n    )\n\n  /* Compute and return anchor list migrations */\n  return partition$\n    .pipe(\n      map(([prev, next]) => ({\n        prev: prev.map(([path]) => path),\n        next: next.map(([path]) => path)\n      })),\n\n      /* Extract anchor list migrations */\n      startWith({ prev: [], next: [] }),\n      bufferCount(2, 1),\n      map(([a, b]) => {\n\n        /* Moving down */\n        if (a.prev.length < b.prev.length) {\n          return {\n            prev: b.prev.slice(Math.max(0, a.prev.length - 1), b.prev.length),\n            next: []\n          }\n\n        /* Moving up */\n        } else {\n          return {\n            prev: b.prev.slice(-1),\n            next: b.next.slice(0, b.next.length - a.next.length)\n          }\n        }\n      })\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount table of contents\n *\n * @param el - Table of contents element\n * @param options - Options\n *\n * @returns Table of contents component observable\n */\nexport function mountTableOfContents(\n  el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions\n): Observable<Component<TableOfContents>> {\n  return defer(() => {\n    const push$ = new Subject<TableOfContents>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    push$.subscribe(({ prev, next }) => {\n\n      /* Look forward */\n      for (const [anchor] of next) {\n        anchor.classList.remove(\"md-nav__link--passed\")\n        anchor.classList.remove(\"md-nav__link--active\")\n      }\n\n      /* Look backward */\n      for (const [index, [anchor]] of prev.entries()) {\n        anchor.classList.add(\"md-nav__link--passed\")\n        anchor.classList.toggle(\n          \"md-nav__link--active\",\n          index === prev.length - 1\n        )\n      }\n    })\n\n    /* Set up following, if enabled */\n    if (feature(\"toc.follow\")) {\n\n      /* Toggle smooth scrolling only for anchor clicks */\n      const smooth$ = merge(\n        viewport$.pipe(debounceTime(1), map(() => undefined)),\n        viewport$.pipe(debounceTime(250), map(() => \"smooth\" as const))\n      )\n\n      /* Bring active anchor into view */ // @todo: refactor\n      push$\n        .pipe(\n          filter(({ prev }) => prev.length > 0),\n          combineLatestWith(main$.pipe(observeOn(asyncScheduler))),\n          withLatestFrom(smooth$)\n        )\n     
     .subscribe(([[{ prev }], behavior]) => {\n            const [anchor] = prev[prev.length - 1]\n            if (anchor.offsetHeight) {\n\n              /* Retrieve overflowing container and scroll */\n              const container = getElementContainer(anchor)\n              if (typeof container !== \"undefined\") {\n                const offset = anchor.offsetTop - container.offsetTop\n                const { height } = getElementSize(container)\n                container.scrollTo({\n                  top: offset - height / 2,\n                  behavior\n                })\n              }\n            }\n          })\n    }\n\n    /* Set up anchor tracking, if enabled */\n    if (feature(\"navigation.tracking\"))\n      viewport$\n        .pipe(\n          takeUntil(done$),\n          distinctUntilKeyChanged(\"offset\"),\n          debounceTime(250),\n          skip(1),\n          takeUntil(target$.pipe(skip(1))),\n          repeat({ delay: 250 }),\n          withLatestFrom(push$)\n        )\n          .subscribe(([, { prev }]) => {\n            const url = getLocation()\n\n            /* Set hash fragment to active anchor */\n            const anchor = prev[prev.length - 1]\n            if (anchor && anchor.length) {\n              const [active] = anchor\n              const { hash } = new URL(active.href)\n              if (url.hash !== hash) {\n                url.hash = hash\n                history.replaceState({}, \"\", `${url}`)\n              }\n\n            /* Reset anchor when at the top */\n            } else {\n              url.hash = \"\"\n              history.replaceState({}, \"\", `${url}`)\n            }\n          })\n\n    /* Create and return component */\n    return watchTableOfContents(el, { viewport$, header$ })\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  bufferCount,\n  combineLatest,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  repeat,\n  skip,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport { Viewport } from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Back-to-top button\n */\nexport interface BackToTop {\n  hidden: boolean                      /* Back-to-top button is hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  main$: Observable<Main>              /* Main area observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch back-to-top\n *\n * @param _el - Back-to-top element\n * @param options - Options\n *\n * @returns Back-to-top observable\n */\nexport function watchBackToTop(\n  _el: HTMLElement, { viewport$, main$, target$ }: WatchOptions\n): Observable<BackToTop> {\n\n  /* Compute direction */\n  const direction$ = viewport$\n    .pipe(\n      map(({ offset: { y } }) => y),\n      bufferCount(2, 1),\n      map(([a, b]) => a > b && b > 0),\n      distinctUntilChanged()\n    )\n\n  /* Compute whether main area is active */\n  const active$ = main$\n    .pipe(\n      map(({ active }) => active)\n    )\n\n  /* Compute threshold for hiding */\n  return combineLatest([active$, direction$])\n    .pipe(\n      map(([active, direction]) => !(active && direction)),\n      distinctUntilChanged(),\n      takeUntil(target$.pipe(skip(1))),\n      endWith(true),\n      repeat({ delay: 250 }),\n      map(hidden => ({ hidden }))\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount back-to-top\n *\n * @param el - Back-to-top element\n * @param options - Options\n *\n * @returns Back-to-top component observable\n */\nexport function mountBackToTop(\n  el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions\n): Observable<Component<BackToTop>> {\n  const push$ = new Subject<BackToTop>()\n  const done$ = push$.pipe(ignoreElements(), endWith(true))\n  push$.subscribe({\n\n    /* Handle emission */\n    next({ hidden }) {\n      el.hidden = hidden\n      if (hidden) {\n        el.setAttribute(\"tabindex\", \"-1\")\n        
el.blur()\n      } else {\n        el.removeAttribute(\"tabindex\")\n      }\n    },\n\n    /* Handle complete */\n    complete() {\n      el.style.top = \"\"\n      el.hidden = true\n      el.removeAttribute(\"tabindex\")\n    }\n  })\n\n  /* Watch header height */\n  header$\n    .pipe(\n      takeUntil(done$),\n      distinctUntilKeyChanged(\"height\")\n    )\n      .subscribe(({ height }) => {\n        el.style.top = `${height + 16}px`\n      })\n\n  /* Go back to top */\n  fromEvent(el, \"click\")\n    .subscribe(ev => {\n      ev.preventDefault()\n      window.scrollTo({ top: 0 })\n    })\n\n  /* Create and return component */\n  return watchBackToTop(el, { viewport$, main$, target$ })\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  filter,\n  finalize,\n  map,\n  mergeMap,\n  skip,\n  switchMap,\n  take,\n  takeUntil\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElements,\n  watchElementVisibility\n} from \"~/browser\"\nimport { mountInlineTooltip2 } from \"~/components/tooltip2\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  document$: Observable<Document>      /* Document observable */\n  viewport$: Observable<Viewport>      /* Viewport observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch ellipsis\n *\n * This function will fetch all elements that are shortened with ellipsis, and\n * filter those which are visible. Once they become visible, they stay in that\n * state, even though they may be hidden again. 
This optimization is necessary\n * to reduce pressure on the browser, with elements fading in and out of view.\n *\n * @param options - Options\n */\nexport function patchEllipsis(\n  { document$, viewport$ }: PatchOptions\n): void {\n  document$\n    .pipe(\n      switchMap(() => getElements(\".md-ellipsis\")),\n      mergeMap(el => watchElementVisibility(el)\n        .pipe(\n          takeUntil(document$.pipe(skip(1))),\n          filter(visible => visible),\n          map(() => el),\n          take(1)\n        )\n      ),\n      filter(el => el.offsetWidth < el.scrollWidth),\n      mergeMap(el => {\n        const text = el.innerText\n        const host = el.closest(\"a\") || el\n        host.title = text\n\n        // Do not mount improved tooltip if feature is disabled\n        if (!feature(\"content.tooltips\"))\n          return EMPTY\n\n        /* Mount tooltip */\n        return mountInlineTooltip2(host, { viewport$ })\n          .pipe(\n            takeUntil(document$.pipe(skip(1))),\n            finalize(() => host.removeAttribute(\"title\"))\n          )\n      })\n    )\n      .subscribe()\n\n  // @todo move this outside of here and fix memleaks\n  if (feature(\"content.tooltips\"))\n    document$\n      .pipe(\n        switchMap(() => getElements(\".md-status\")),\n        mergeMap(el => mountInlineTooltip2(el, { viewport$ }))\n      )\n        .subscribe()\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  fromEvent,\n  map,\n  mergeMap,\n  switchMap,\n  takeWhile,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  document$: Observable<Document>      /* Document observable */\n  tablet$: Observable<boolean>         /* Media tablet observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch indeterminate checkboxes\n *\n * This function replaces the indeterminate \"pseudo state\" with the actual\n * indeterminate state, which is used to keep navigation always expanded.\n *\n * @param options - Options\n */\nexport function patchIndeterminate(\n  { document$, tablet$ }: PatchOptions\n): void {\n  document$\n    .pipe(\n      switchMap(() => getElements<HTMLInputElement>(\n        \".md-toggle--indeterminate\"\n      )),\n      tap(el => {\n        el.indeterminate = true\n        el.checked = false\n      }),\n      mergeMap(el => fromEvent(el, \"change\")\n        .pipe(\n          takeWhile(() => el.classList.contains(\"md-toggle--indeterminate\")),\n          map(() => el)\n        )\n      ),\n      withLatestFrom(tablet$)\n    )\n      .subscribe(([el, tablet]) => {\n        el.classList.remove(\"md-toggle--indeterminate\")\n        if (tablet)\n          el.checked = false\n      })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  filter,\n  fromEvent,\n  map,\n  mergeMap,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  document$: Observable<Document>      /* Document observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Check whether the given device is an Apple device\n *\n * @returns Test result\n */\nfunction isAppleDevice(): boolean {\n  return /(iPad|iPhone|iPod)/.test(navigator.userAgent)\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch all elements with `data-md-scrollfix` attributes\n *\n * This is a year-old patch which ensures that overflow scrolling works at the\n * top and bottom of containers on iOS by ensuring a `1px` scroll offset upon\n * the start of a touch event.\n *\n * @see https://bit.ly/2SCtAOO - Original source\n *\n * @param options - Options\n */\nexport function patchScrollfix(\n  { document$ }: PatchOptions\n): void {\n  document$\n    .pipe(\n      switchMap(() => getElements(\"[data-md-scrollfix]\")),\n      tap(el => el.removeAttribute(\"data-md-scrollfix\")),\n      filter(isAppleDevice),\n      mergeMap(el => fromEvent(el, \"touchstart\")\n        .pipe(\n          map(() => el)\n        )\n      )\n    )\n      .subscribe(el => {\n        const top = el.scrollTop\n\n        /* We're at the top of the container */\n        if (top === 0) {\n          el.scrollTop = 1\n\n        /* We're at the bottom of the container */\n        } else if (top + el.offsetHeight === el.scrollHeight) {\n          el.scrollTop = top - 1\n        }\n      })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  delay,\n  map,\n  of,\n  switchMap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  watchToggle\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  tablet$: Observable<boolean>         /* Media tablet observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch the document body to lock when search is open\n *\n * For mobile and tablet viewports, the search is rendered full screen, which\n * leads to scroll leaking when at the top or bottom of the search result. This\n * function locks the body when the search is in full screen mode, and restores\n * the scroll position when leaving.\n *\n * @param options - Options\n */\nexport function patchScrolllock(\n  { viewport$, tablet$ }: PatchOptions\n): void {\n  combineLatest([watchToggle(\"search\"), tablet$])\n    .pipe(\n      map(([active, tablet]) => active && !tablet),\n      switchMap(active => of(active)\n        .pipe(\n          delay(active ? 400 : 100)\n        )\n      ),\n      withLatestFrom(viewport$)\n    )\n      .subscribe(([active, { offset: { y }}]) => {\n        if (active) {\n          document.body.setAttribute(\"data-md-scrolllock\", \"\")\n          document.body.style.top = `-${y}px`\n        } else {\n          const value = -1 * parseInt(document.body.style.top, 10)\n          document.body.removeAttribute(\"data-md-scrolllock\")\n          document.body.style.top = \"\"\n          if (value)\n            window.scrollTo(0, value)\n        }\n      })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Polyfills\n * ------------------------------------------------------------------------- */\n\n/* Polyfill `Object.entries` */\nif (!Object.entries)\n  Object.entries = function (obj: object) {\n    const data: [string, string][] = []\n    for (const key of Object.keys(obj))\n      // @ts-expect-error - ignore property access warning\n      data.push([key, obj[key]])\n\n    /* Return entries */\n    return data\n  }\n\n/* Polyfill `Object.values` */\nif (!Object.values)\n  Object.values = function (obj: object) {\n    const data: string[] = []\n    for (const key of Object.keys(obj))\n      // @ts-expect-error - ignore property access warning\n      data.push(obj[key])\n\n    /* Return values */\n    return data\n  }\n\n/* ------------------------------------------------------------------------- */\n\n/* Polyfills for `Element` */\nif (typeof Element !== \"undefined\") {\n\n  /* Polyfill `Element.scrollTo` */\n  if (!Element.prototype.scrollTo)\n    Element.prototype.scrollTo = function (\n      x?: ScrollToOptions | number, y?: number\n    ): void {\n      if (typeof x === \"object\") {\n        this.scrollLeft = x.left!\n        this.scrollTop = x.top!\n      } else {\n        this.scrollLeft = x!\n        this.scrollTop = y!\n      }\n    }\n\n  /* Polyfill `Element.replaceWith` */\n  if (!Element.prototype.replaceWith)\n    Element.prototype.replaceWith = function (\n      ...nodes: Array<string | Node>\n    ): void {\n      const parent = this.parentNode\n      if (parent) {\n        if (nodes.length === 0)\n          parent.removeChild(this)\n\n        /* Replace children and create text nodes */\n        for (let i = nodes.length - 1; i >= 0; i--) {\n          let node = nodes[i]\n          if (typeof node === \"string\")\n            node = document.createTextNode(node)\n          else if (node.parentNode)\n            node.parentNode.removeChild(node)\n\n          /* Replace child or insert before previous sibling */\n          if (!i)\n            parent.replaceChild(node, this)\n          else\n            parent.insertBefore(this.previousSibling!, node)\n        }\n      }\n    }\n}\n"],
-  "mappings": "2rCAAA,IAAAA,GAAAC,GAAA,CAAAC,GAAAC,KAAA,EAAC,SAAUC,EAAQC,EAAS,CAC1B,OAAOH,IAAY,UAAY,OAAOC,IAAW,YAAcE,EAAQ,EACvE,OAAO,QAAW,YAAc,OAAO,IAAM,OAAOA,CAAO,EAC1DA,EAAQ,CACX,GAAEH,GAAO,UAAY,CAAE,aASrB,SAASI,EAA0BC,EAAO,CACxC,IAAIC,EAAmB,GACnBC,EAA0B,GAC1BC,EAAiC,KAEjCC,EAAsB,CACxB,KAAM,GACN,OAAQ,GACR,IAAK,GACL,IAAK,GACL,MAAO,GACP,SAAU,GACV,OAAQ,GACR,KAAM,GACN,MAAO,GACP,KAAM,GACN,KAAM,GACN,SAAU,GACV,iBAAkB,EACpB,EAOA,SAASC,EAAmBC,EAAI,CAC9B,MACE,GAAAA,GACAA,IAAO,UACPA,EAAG,WAAa,QAChBA,EAAG,WAAa,QAChB,cAAeA,GACf,aAAcA,EAAG,UAKrB,CASA,SAASC,EAA8BD,EAAI,CACzC,IAAIE,GAAOF,EAAG,KACVG,GAAUH,EAAG,QAUjB,MARI,GAAAG,KAAY,SAAWL,EAAoBI,EAAI,GAAK,CAACF,EAAG,UAIxDG,KAAY,YAAc,CAACH,EAAG,UAI9BA,EAAG,kBAKT,CAOA,SAASI,EAAqBJ,EAAI,CAC5BA,EAAG,UAAU,SAAS,eAAe,IAGzCA,EAAG,UAAU,IAAI,eAAe,EAChCA,EAAG,aAAa,2BAA4B,EAAE,EAChD,CAOA,SAASK,EAAwBL,EAAI,CAC9BA,EAAG,aAAa,0BAA0B,IAG/CA,EAAG,UAAU,OAAO,eAAe,EACnCA,EAAG,gBAAgB,0BAA0B,EAC/C,CAUA,SAASM,EAAUC,EAAG,CAChBA,EAAE,SAAWA,EAAE,QAAUA,EAAE,UAI3BR,EAAmBL,EAAM,aAAa,GACxCU,EAAqBV,EAAM,aAAa,EAG1CC,EAAmB,GACrB,CAUA,SAASa,EAAcD,EAAG,CACxBZ,EAAmB,EACrB,CASA,SAASc,EAAQF,EAAG,CAEbR,EAAmBQ,EAAE,MAAM,IAI5BZ,GAAoBM,EAA8BM,EAAE,MAAM,IAC5DH,EAAqBG,EAAE,MAAM,CAEjC,CAMA,SAASG,EAAOH,EAAG,CACZR,EAAmBQ,EAAE,MAAM,IAK9BA,EAAE,OAAO,UAAU,SAAS,eAAe,GAC3CA,EAAE,OAAO,aAAa,0BAA0B,KAMhDX,EAA0B,GAC1B,OAAO,aAAaC,CAA8B,EAClDA,EAAiC,OAAO,WAAW,UAAW,CAC5DD,EAA0B,EAC5B,EAAG,GAAG,EACNS,EAAwBE,EAAE,MAAM,EAEpC,CAOA,SAASI,EAAmBJ,EAAG,CACzB,SAAS,kBAAoB,WAK3BX,IACFD,EAAmB,IAErBiB,GAA+B,EAEnC,CAQA,SAASA,IAAiC,CACxC,SAAS,iBAAiB,YAAaC,CAAoB,EAC3D,SAAS,iBAAiB,YAAaA,CAAoB,EAC3D,SAAS,iBAAiB,UAAWA,CAAoB,EACzD,SAAS,iBAAiB,cAAeA,CAAoB,EAC7D,SAAS,iBAAiB,cAAeA,CAAoB,EAC7D,SAAS,iBAAiB,YAAaA,CAAoB,EAC3D,SAAS,iBAAiB,YAAaA,CAAoB,EAC3D,SAAS,iBAAiB,aAAcA,CAAoB,EAC5D,SAAS,iBAAiB,WAAYA,CAAoB,CAC5D,CAEA,SAASC,IAAoC,CAC3C,SAAS,oBAAoB,YAAaD,CAAoB,EAC9D,SAAS,oBAAoB,YAAaA,CAAoB,EAC9D,SAAS,oBAAoB,UAAWA,CAAoB,EAC5D,SAAS,oBAAoB,cAAeA,CAAoB,EAChE,SAAS,oBAAoB,cAAeA,CAAoB,EAChE,SAAS,oBAAoB,YAAaA,CAAoB,EAC9D,SAAS,oBAAoB,YAAaA,CAAoB,EAC9D,SAAS,oBAAoB,aAAcA,CAAoB,EAC/D,SAAS,oBAAoB,WAAYA,CAAoB,CAC/D,CASA,SAASA,EAAqBN,EAAG,CAG3BA,EAAE,OAAO,UAAYA,EAAE,OAAO,SAAS,YAAY,IAAM,SAI7DZ,EAAmB,GACnBmB,GAAkC,EACpC,CAKA,SAAS,iBAAiB,UAAWR,EAAW,EAAI,EACpD,SAAS,iBAAiB,YAAaE,EAAe,EAAI,EAC1D,SAAS,iBAAiB,cAAeA,EAAe,EAAI,EAC5D,SAAS,iBAAiB,aAAcA,EAAe,EAAI,EAC3D,SAAS,iBAAiB,mBAAoBG,EAAoB,EAAI,EAEtEC,GAA+B,EAM/BlB,EAAM,iBAAiB,QAASe,EAAS,EAAI,EAC7Cf,EAAM,iBAAiB,OAAQgB,EAAQ,EAAI,EAOvChB,EAAM,WAAa,KAAK,wBAA0BA,EAAM,KAI1DA,EAAM,KAAK,aAAa,wBAAyB,EAAE,EAC1CA,EAAM,WAAa,KAAK,gBACjC,SAAS,gBAAgB,UAAU,IAAI,kBAAkB,EACzD,SAAS,gBAAgB,aAAa,wBAAyB,EAAE,EAErE,CAKA,GAAI,OAAO,QAAW,aAAe,OAAO,UAAa,YAAa,CAIpE,OAAO,0BAA4BD,EAInC,IAAIsB,EAEJ,GAAI,CACFA,EAAQ,IAAI,YAAY,8BAA8B,CACxD,OAASC,EAAO,CAEdD,EAAQ,SAAS,YAAY,aAAa,EAC1CA,EAAM,gBAAgB,+BAAgC,GAAO,GAAO,CAAC,CAAC,CACxE,CAEA,OAAO,cAAcA,CAAK,CAC5B,CAEI,OAAO,UAAa,aAGtBtB,EAA0B,QAAQ,CAGtC,CAAE,ICvTF,IAAAwB,GAAAC,GAAA,CAAAC,GAAAC,KAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAMC,SAA0CC,EAAMC,EAAS,CACtD,OAAOH,IAAY,UAAY,OAAOC,IAAW,SACnDA,GAAO,QAAUE,EAAQ,EAClB,OAAO,QAAW,YAAc,OAAO,IAC9C,OAAO,CAAC,EAAGA,CAAO,EACX,OAAOH,IAAY,SAC1BA,GAAQ,YAAiBG,EAAQ,EAEjCD,EAAK,YAAiBC,EAAQ,CAChC,GAAGH,GAAM,UAAW,CACpB,OAAiB,UAAW,CAClB,IAAII,EAAuB,CAE/B,IACC,SAASC,EAAyBC,EAAqBC,EAAqB,CAEnF,aAGAA,EAAoB,EAAED,EAAqB,CACzC,QAAW,UAAW,CAAE,OAAqBE,EAAW,CAC1D,CAAC,EAGD,IAAIC,EAAeF,EAAoB,GAAG,EACtCG,EAAoCH,EAAoB,EAAEE,CAAY,EAEtEE,EAASJ,EAAoB,GAAG,EAChCK,EAA8BL,EAAoB,EAAEI,CAAM,EAE1DE,EAAaN,EAAoB,GAAG,EACpCO,EAA8BP,EAAoB,EAAEM,CAAU,EAOlE,SAASE,EAAQC,EAAM,CACrB,GAAI,CACF,OAAO,SAAS,YAAYA,CAAI,C
AClC,OAASC,EAAK,CACZ,MAAO,EACT,CACF,CAUA,IAAIC,EAAqB,SAA4BC,EAAQ,CAC3D,IAAIC,EAAeN,EAAe,EAAEK,CAAM,EAC1C,OAAAJ,EAAQ,KAAK,EACNK,CACT,EAEiCC,EAAeH,EAOhD,SAASI,EAAkBC,EAAO,CAChC,IAAIC,EAAQ,SAAS,gBAAgB,aAAa,KAAK,IAAM,MACzDC,EAAc,SAAS,cAAc,UAAU,EAEnDA,EAAY,MAAM,SAAW,OAE7BA,EAAY,MAAM,OAAS,IAC3BA,EAAY,MAAM,QAAU,IAC5BA,EAAY,MAAM,OAAS,IAE3BA,EAAY,MAAM,SAAW,WAC7BA,EAAY,MAAMD,EAAQ,QAAU,MAAM,EAAI,UAE9C,IAAIE,EAAY,OAAO,aAAe,SAAS,gBAAgB,UAC/D,OAAAD,EAAY,MAAM,IAAM,GAAG,OAAOC,EAAW,IAAI,EACjDD,EAAY,aAAa,WAAY,EAAE,EACvCA,EAAY,MAAQF,EACbE,CACT,CAYA,IAAIE,GAAiB,SAAwBJ,EAAOK,EAAS,CAC3D,IAAIH,EAAcH,EAAkBC,CAAK,EACzCK,EAAQ,UAAU,YAAYH,CAAW,EACzC,IAAIL,EAAeN,EAAe,EAAEW,CAAW,EAC/C,OAAAV,EAAQ,MAAM,EACdU,EAAY,OAAO,EACZL,CACT,EASIS,GAAsB,SAA6BV,EAAQ,CAC7D,IAAIS,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAChF,UAAW,SAAS,IACtB,EACIR,EAAe,GAEnB,OAAI,OAAOD,GAAW,SACpBC,EAAeO,GAAeR,EAAQS,CAAO,EACpCT,aAAkB,kBAAoB,CAAC,CAAC,OAAQ,SAAU,MAAO,MAAO,UAAU,EAAE,SAASA,GAAW,KAA4B,OAASA,EAAO,IAAI,EAEjKC,EAAeO,GAAeR,EAAO,MAAOS,CAAO,GAEnDR,EAAeN,EAAe,EAAEK,CAAM,EACtCJ,EAAQ,MAAM,GAGTK,CACT,EAEiCU,EAAgBD,GAEjD,SAASE,EAAQC,EAAK,CAAE,0BAA2B,OAAI,OAAO,QAAW,YAAc,OAAO,OAAO,UAAa,SAAYD,EAAU,SAAiBC,EAAK,CAAE,OAAO,OAAOA,CAAK,EAAYD,EAAU,SAAiBC,EAAK,CAAE,OAAOA,GAAO,OAAO,QAAW,YAAcA,EAAI,cAAgB,QAAUA,IAAQ,OAAO,UAAY,SAAW,OAAOA,CAAK,EAAYD,EAAQC,CAAG,CAAG,CAUzX,IAAIC,GAAyB,UAAkC,CAC7D,IAAIL,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAAC,EAE/EM,EAAkBN,EAAQ,OAC1BO,EAASD,IAAoB,OAAS,OAASA,EAC/CE,EAAYR,EAAQ,UACpBT,EAASS,EAAQ,OACjBS,GAAOT,EAAQ,KAEnB,GAAIO,IAAW,QAAUA,IAAW,MAClC,MAAM,IAAI,MAAM,oDAAoD,EAItE,GAAIhB,IAAW,OACb,GAAIA,GAAUY,EAAQZ,CAAM,IAAM,UAAYA,EAAO,WAAa,EAAG,CACnE,GAAIgB,IAAW,QAAUhB,EAAO,aAAa,UAAU,EACrD,MAAM,IAAI,MAAM,mFAAmF,EAGrG,GAAIgB,IAAW,QAAUhB,EAAO,aAAa,UAAU,GAAKA,EAAO,aAAa,UAAU,GACxF,MAAM,IAAI,MAAM,uGAAwG,CAE5H,KACE,OAAM,IAAI,MAAM,6CAA6C,EAKjE,GAAIkB,GACF,OAAOP,EAAaO,GAAM,CACxB,UAAWD,CACb,CAAC,EAIH,GAAIjB,EACF,OAAOgB,IAAW,MAAQd,EAAYF,CAAM,EAAIW,EAAaX,EAAQ,CACnE,UAAWiB,CACb,CAAC,CAEL,EAEiCE,GAAmBL,GAEpD,SAASM,GAAiBP,EAAK,CAAE,0BAA2B,OAAI,OAAO,QAAW,YAAc,OAAO,OAAO,UAAa,SAAYO,GAAmB,SAAiBP,EAAK,CAAE,OAAO,OAAOA,CAAK,EAAYO,GAAmB,SAAiBP,EAAK,CAAE,OAAOA,GAAO,OAAO,QAAW,YAAcA,EAAI,cAAgB,QAAUA,IAAQ,OAAO,UAAY,SAAW,OAAOA,CAAK,EAAYO,GAAiBP,CAAG,CAAG,CAE7Z,SAASQ,GAAgBC,EAAUC,EAAa,CAAE,GAAI,EAAED,aAAoBC,GAAgB,MAAM,IAAI,UAAU,mCAAmC,CAAK,CAExJ,SAASC,GAAkBxB,EAAQyB,EAAO,CAAE,QAASC,EAAI,EAAGA,EAAID,EAAM,OAAQC,IAAK,CAAE,IAAIC,EAAaF,EAAMC,CAAC,EAAGC,EAAW,WAAaA,EAAW,YAAc,GAAOA,EAAW,aAAe,GAAU,UAAWA,IAAYA,EAAW,SAAW,IAAM,OAAO,eAAe3B,EAAQ2B,EAAW,IAAKA,CAAU,CAAG,CAAE,CAE5T,SAASC,GAAaL,EAAaM,EAAYC,EAAa,CAAE,OAAID,GAAYL,GAAkBD,EAAY,UAAWM,CAAU,EAAOC,GAAaN,GAAkBD,EAAaO,CAAW,EAAUP,CAAa,CAEtN,SAASQ,GAAUC,EAAUC,EAAY,CAAE,GAAI,OAAOA,GAAe,YAAcA,IAAe,KAAQ,MAAM,IAAI,UAAU,oDAAoD,EAAKD,EAAS,UAAY,OAAO,OAAOC,GAAcA,EAAW,UAAW,CAAE,YAAa,CAAE,MAAOD,EAAU,SAAU,GAAM,aAAc,EAAK,CAAE,CAAC,EAAOC,GAAYC,GAAgBF,EAAUC,CAAU,CAAG,CAEhY,SAASC,GAAgBC,EAAGC,EAAG,CAAE,OAAAF,GAAkB,OAAO,gBAAkB,SAAyBC,EAAGC,EAAG,CAAE,OAAAD,EAAE,UAAYC,EAAUD,CAAG,EAAUD,GAAgBC,EAAGC,CAAC,CAAG,CAEzK,SAASC,GAAaC,EAAS,CAAE,IAAIC,EAA4BC,GAA0B,EAAG,OAAO,UAAgC,CAAE,IAAIC,EAAQC,GAAgBJ,CAAO,EAAGK,EAAQ,GAAIJ,EAA2B,CAAE,IAAIK,EAAYF,GAAgB,IAAI,EAAE,YAAaC,EAAS,QAAQ,UAAUF,EAAO,UAAWG,CAAS,CAAG,MAASD,EAASF,EAAM,MAAM,KAAM,SAAS,EAAK,OAAOI,GAA2B,KAAMF,CAAM,CAAG,CAAG,CAExa,SAASE,GAA2BC,EAAMC,EAAM,CAAE,OAAIA,IAAS3B,GAAiB2B,CAAI,IAAM,UAAY,OAAOA,GAAS,YAAsBA,EAAeC,GAAuBF,CAAI,CAAG,CAEzL,SAASE,GAAuBF,EAAM,CAAE,GAAIA,IAAS,OAAU,MAAM,IAAI,eAAe,2DAA2D,EAAK,OAAOA,CAAM,CAErK,SAASN,IAA4B,CAA0E,GAApE,OAAO,SAAY,aAAe,CAAC,QAAQ,WAA6B,QAAQ,UAA
U,KAAM,MAAO,GAAO,GAAI,OAAO,OAAU,WAAY,MAAO,GAAM,GAAI,CAAE,YAAK,UAAU,SAAS,KAAK,QAAQ,UAAU,KAAM,CAAC,EAAG,UAAY,CAAC,CAAC,CAAC,EAAU,EAAM,OAASS,EAAG,CAAE,MAAO,EAAO,CAAE,CAEnU,SAASP,GAAgBP,EAAG,CAAE,OAAAO,GAAkB,OAAO,eAAiB,OAAO,eAAiB,SAAyBP,EAAG,CAAE,OAAOA,EAAE,WAAa,OAAO,eAAeA,CAAC,CAAG,EAAUO,GAAgBP,CAAC,CAAG,CAa5M,SAASe,GAAkBC,EAAQC,EAAS,CAC1C,IAAIC,EAAY,kBAAkB,OAAOF,CAAM,EAE/C,GAAKC,EAAQ,aAAaC,CAAS,EAInC,OAAOD,EAAQ,aAAaC,CAAS,CACvC,CAOA,IAAIC,GAAyB,SAAUC,EAAU,CAC/CxB,GAAUuB,EAAWC,CAAQ,EAE7B,IAAIC,EAASnB,GAAaiB,CAAS,EAMnC,SAASA,EAAUG,EAAShD,EAAS,CACnC,IAAIiD,EAEJ,OAAArC,GAAgB,KAAMiC,CAAS,EAE/BI,EAAQF,EAAO,KAAK,IAAI,EAExBE,EAAM,eAAejD,CAAO,EAE5BiD,EAAM,YAAYD,CAAO,EAElBC,CACT,CAQA,OAAA9B,GAAa0B,EAAW,CAAC,CACvB,IAAK,iBACL,MAAO,UAA0B,CAC/B,IAAI7C,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAAC,EACnF,KAAK,OAAS,OAAOA,EAAQ,QAAW,WAAaA,EAAQ,OAAS,KAAK,cAC3E,KAAK,OAAS,OAAOA,EAAQ,QAAW,WAAaA,EAAQ,OAAS,KAAK,cAC3E,KAAK,KAAO,OAAOA,EAAQ,MAAS,WAAaA,EAAQ,KAAO,KAAK,YACrE,KAAK,UAAYW,GAAiBX,EAAQ,SAAS,IAAM,SAAWA,EAAQ,UAAY,SAAS,IACnG,CAMF,EAAG,CACD,IAAK,cACL,MAAO,SAAqBgD,EAAS,CACnC,IAAIE,EAAS,KAEb,KAAK,SAAWlE,EAAe,EAAEgE,EAAS,QAAS,SAAUR,GAAG,CAC9D,OAAOU,EAAO,QAAQV,EAAC,CACzB,CAAC,CACH,CAMF,EAAG,CACD,IAAK,UACL,MAAO,SAAiBA,EAAG,CACzB,IAAIQ,EAAUR,EAAE,gBAAkBA,EAAE,cAChCjC,GAAS,KAAK,OAAOyC,CAAO,GAAK,OACjCvC,GAAOC,GAAgB,CACzB,OAAQH,GACR,UAAW,KAAK,UAChB,OAAQ,KAAK,OAAOyC,CAAO,EAC3B,KAAM,KAAK,KAAKA,CAAO,CACzB,CAAC,EAED,KAAK,KAAKvC,GAAO,UAAY,QAAS,CACpC,OAAQF,GACR,KAAME,GACN,QAASuC,EACT,eAAgB,UAA0B,CACpCA,GACFA,EAAQ,MAAM,EAGhB,OAAO,aAAa,EAAE,gBAAgB,CACxC,CACF,CAAC,CACH,CAMF,EAAG,CACD,IAAK,gBACL,MAAO,SAAuBA,EAAS,CACrC,OAAOP,GAAkB,SAAUO,CAAO,CAC5C,CAMF,EAAG,CACD,IAAK,gBACL,MAAO,SAAuBA,EAAS,CACrC,IAAIG,EAAWV,GAAkB,SAAUO,CAAO,EAElD,GAAIG,EACF,OAAO,SAAS,cAAcA,CAAQ,CAE1C,CAQF,EAAG,CACD,IAAK,cAML,MAAO,SAAqBH,EAAS,CACnC,OAAOP,GAAkB,OAAQO,CAAO,CAC1C,CAKF,EAAG,CACD,IAAK,UACL,MAAO,UAAmB,CACxB,KAAK,SAAS,QAAQ,CACxB,CACF,CAAC,EAAG,CAAC,CACH,IAAK,OACL,MAAO,SAAczD,EAAQ,CAC3B,IAAIS,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAChF,UAAW,SAAS,IACtB,EACA,OAAOE,EAAaX,EAAQS,CAAO,CACrC,CAOF,EAAG,CACD,IAAK,MACL,MAAO,SAAaT,EAAQ,CAC1B,OAAOE,EAAYF,CAAM,CAC3B,CAOF,EAAG,CACD,IAAK,cACL,MAAO,UAAuB,CAC5B,IAAIgB,EAAS,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAAC,OAAQ,KAAK,EAC3F6C,EAAU,OAAO7C,GAAW,SAAW,CAACA,CAAM,EAAIA,EAClD8C,GAAU,CAAC,CAAC,SAAS,sBACzB,OAAAD,EAAQ,QAAQ,SAAU7C,GAAQ,CAChC8C,GAAUA,IAAW,CAAC,CAAC,SAAS,sBAAsB9C,EAAM,CAC9D,CAAC,EACM8C,EACT,CACF,CAAC,CAAC,EAEKR,CACT,EAAG/D,EAAqB,CAAE,EAEOF,GAAaiE,EAExC,EAEA,IACC,SAASxE,EAAQ,CAExB,IAAIiF,EAAqB,EAKzB,GAAI,OAAO,SAAY,aAAe,CAAC,QAAQ,UAAU,QAAS,CAC9D,IAAIC,EAAQ,QAAQ,UAEpBA,EAAM,QAAUA,EAAM,iBACNA,EAAM,oBACNA,EAAM,mBACNA,EAAM,kBACNA,EAAM,qBAC1B,CASA,SAASC,EAASb,EAASQ,EAAU,CACjC,KAAOR,GAAWA,EAAQ,WAAaW,GAAoB,CACvD,GAAI,OAAOX,EAAQ,SAAY,YAC3BA,EAAQ,QAAQQ,CAAQ,EAC1B,OAAOR,EAETA,EAAUA,EAAQ,UACtB,CACJ,CAEAtE,EAAO,QAAUmF,CAGX,EAEA,IACC,SAASnF,EAAQoF,EAA0B9E,EAAqB,CAEvE,IAAI6E,EAAU7E,EAAoB,GAAG,EAYrC,SAAS+E,EAAUf,EAASQ,EAAU/D,EAAMuE,EAAUC,EAAY,CAC9D,IAAIC,EAAaC,EAAS,MAAM,KAAM,SAAS,EAE/C,OAAAnB,EAAQ,iBAAiBvD,EAAMyE,EAAYD,CAAU,EAE9C,CACH,QAAS,UAAW,CAChBjB,EAAQ,oBAAoBvD,EAAMyE,EAAYD,CAAU,CAC5D,CACJ,CACJ,CAYA,SAASG,EAASC,EAAUb,EAAU/D,EAAMuE,EAAUC,EAAY,CAE9D,OAAI,OAAOI,EAAS,kBAAqB,WAC9BN,EAAU,MAAM,KAAM,SAAS,EAItC,OAAOtE,GAAS,WAGTsE,EAAU,KAAK,KAAM,QAAQ,EAAE,MAAM,KAAM,SAAS,GAI3D,OAAOM,GAAa,WACpBA,EAAW,SAAS,iBAAiBA,CAAQ,GAI1C,MAAM,UAAU,IAAI,KAAKA,EAAU,SAAUrB,EAAS,CACzD,OAAOe,EAAUf,EAASQ,EAAU/D,EAAMuE,EAAUC,CAAU,CAClE,CAAC,EACL,CAWA,SAASE,EAASnB,EAASQ,EAAU/D,EAAMuE,EAAU,CACjD,OAAO,SAASnB,EAAG,CACfA,
EAAE,eAAiBgB,EAAQhB,EAAE,OAAQW,CAAQ,EAEzCX,EAAE,gBACFmB,EAAS,KAAKhB,EAASH,CAAC,CAEhC,CACJ,CAEAnE,EAAO,QAAU0F,CAGX,EAEA,IACC,SAAStF,EAAyBL,EAAS,CAQlDA,EAAQ,KAAO,SAASuB,EAAO,CAC3B,OAAOA,IAAU,QACVA,aAAiB,aACjBA,EAAM,WAAa,CAC9B,EAQAvB,EAAQ,SAAW,SAASuB,EAAO,CAC/B,IAAIP,EAAO,OAAO,UAAU,SAAS,KAAKO,CAAK,EAE/C,OAAOA,IAAU,SACTP,IAAS,qBAAuBA,IAAS,4BACzC,WAAYO,IACZA,EAAM,SAAW,GAAKvB,EAAQ,KAAKuB,EAAM,CAAC,CAAC,EACvD,EAQAvB,EAAQ,OAAS,SAASuB,EAAO,CAC7B,OAAO,OAAOA,GAAU,UACjBA,aAAiB,MAC5B,EAQAvB,EAAQ,GAAK,SAASuB,EAAO,CACzB,IAAIP,EAAO,OAAO,UAAU,SAAS,KAAKO,CAAK,EAE/C,OAAOP,IAAS,mBACpB,CAGM,EAEA,IACC,SAASf,EAAQoF,EAA0B9E,EAAqB,CAEvE,IAAIsF,EAAKtF,EAAoB,GAAG,EAC5BoF,EAAWpF,EAAoB,GAAG,EAWtC,SAASI,EAAOQ,EAAQH,EAAMuE,EAAU,CACpC,GAAI,CAACpE,GAAU,CAACH,GAAQ,CAACuE,EACrB,MAAM,IAAI,MAAM,4BAA4B,EAGhD,GAAI,CAACM,EAAG,OAAO7E,CAAI,EACf,MAAM,IAAI,UAAU,kCAAkC,EAG1D,GAAI,CAAC6E,EAAG,GAAGN,CAAQ,EACf,MAAM,IAAI,UAAU,mCAAmC,EAG3D,GAAIM,EAAG,KAAK1E,CAAM,EACd,OAAO2E,EAAW3E,EAAQH,EAAMuE,CAAQ,EAEvC,GAAIM,EAAG,SAAS1E,CAAM,EACvB,OAAO4E,EAAe5E,EAAQH,EAAMuE,CAAQ,EAE3C,GAAIM,EAAG,OAAO1E,CAAM,EACrB,OAAO6E,EAAe7E,EAAQH,EAAMuE,CAAQ,EAG5C,MAAM,IAAI,UAAU,2EAA2E,CAEvG,CAWA,SAASO,EAAWG,EAAMjF,EAAMuE,EAAU,CACtC,OAAAU,EAAK,iBAAiBjF,EAAMuE,CAAQ,EAE7B,CACH,QAAS,UAAW,CAChBU,EAAK,oBAAoBjF,EAAMuE,CAAQ,CAC3C,CACJ,CACJ,CAWA,SAASQ,EAAeG,EAAUlF,EAAMuE,EAAU,CAC9C,aAAM,UAAU,QAAQ,KAAKW,EAAU,SAASD,EAAM,CAClDA,EAAK,iBAAiBjF,EAAMuE,CAAQ,CACxC,CAAC,EAEM,CACH,QAAS,UAAW,CAChB,MAAM,UAAU,QAAQ,KAAKW,EAAU,SAASD,EAAM,CAClDA,EAAK,oBAAoBjF,EAAMuE,CAAQ,CAC3C,CAAC,CACL,CACJ,CACJ,CAWA,SAASS,EAAejB,EAAU/D,EAAMuE,EAAU,CAC9C,OAAOI,EAAS,SAAS,KAAMZ,EAAU/D,EAAMuE,CAAQ,CAC3D,CAEAtF,EAAO,QAAUU,CAGX,EAEA,IACC,SAASV,EAAQ,CAExB,SAASkG,EAAO5B,EAAS,CACrB,IAAInD,EAEJ,GAAImD,EAAQ,WAAa,SACrBA,EAAQ,MAAM,EAEdnD,EAAemD,EAAQ,cAElBA,EAAQ,WAAa,SAAWA,EAAQ,WAAa,WAAY,CACtE,IAAI6B,EAAa7B,EAAQ,aAAa,UAAU,EAE3C6B,GACD7B,EAAQ,aAAa,WAAY,EAAE,EAGvCA,EAAQ,OAAO,EACfA,EAAQ,kBAAkB,EAAGA,EAAQ,MAAM,MAAM,EAE5C6B,GACD7B,EAAQ,gBAAgB,UAAU,EAGtCnD,EAAemD,EAAQ,KAC3B,KACK,CACGA,EAAQ,aAAa,iBAAiB,GACtCA,EAAQ,MAAM,EAGlB,IAAI8B,EAAY,OAAO,aAAa,EAChCC,EAAQ,SAAS,YAAY,EAEjCA,EAAM,mBAAmB/B,CAAO,EAChC8B,EAAU,gBAAgB,EAC1BA,EAAU,SAASC,CAAK,EAExBlF,EAAeiF,EAAU,SAAS,CACtC,CAEA,OAAOjF,CACX,CAEAnB,EAAO,QAAUkG,CAGX,EAEA,IACC,SAASlG,EAAQ,CAExB,SAASsG,GAAK,CAGd,CAEAA,EAAE,UAAY,CACZ,GAAI,SAAUC,EAAMjB,EAAUkB,EAAK,CACjC,IAAIrC,EAAI,KAAK,IAAM,KAAK,EAAI,CAAC,GAE7B,OAACA,EAAEoC,CAAI,IAAMpC,EAAEoC,CAAI,EAAI,CAAC,IAAI,KAAK,CAC/B,GAAIjB,EACJ,IAAKkB,CACP,CAAC,EAEM,IACT,EAEA,KAAM,SAAUD,EAAMjB,EAAUkB,EAAK,CACnC,IAAIxC,EAAO,KACX,SAASyB,GAAY,CACnBzB,EAAK,IAAIuC,EAAMd,CAAQ,EACvBH,EAAS,MAAMkB,EAAK,SAAS,CAC/B,CAEA,OAAAf,EAAS,EAAIH,EACN,KAAK,GAAGiB,EAAMd,EAAUe,CAAG,CACpC,EAEA,KAAM,SAAUD,EAAM,CACpB,IAAIE,EAAO,CAAC,EAAE,MAAM,KAAK,UAAW,CAAC,EACjCC,IAAW,KAAK,IAAM,KAAK,EAAI,CAAC,IAAIH,CAAI,GAAK,CAAC,GAAG,MAAM,EACvD3D,EAAI,EACJ+D,EAAMD,EAAO,OAEjB,IAAK9D,EAAGA,EAAI+D,EAAK/D,IACf8D,EAAO9D,CAAC,EAAE,GAAG,MAAM8D,EAAO9D,CAAC,EAAE,IAAK6D,CAAI,EAGxC,OAAO,IACT,EAEA,IAAK,SAAUF,EAAMjB,EAAU,CAC7B,IAAInB,EAAI,KAAK,IAAM,KAAK,EAAI,CAAC,GACzByC,EAAOzC,EAAEoC,CAAI,EACbM,EAAa,CAAC,EAElB,GAAID,GAAQtB,EACV,QAAS1C,EAAI,EAAG+D,EAAMC,EAAK,OAAQhE,EAAI+D,EAAK/D,IACtCgE,EAAKhE,CAAC,EAAE,KAAO0C,GAAYsB,EAAKhE,CAAC,EAAE,GAAG,IAAM0C,GAC9CuB,EAAW,KAAKD,EAAKhE,CAAC,CAAC,EAQ7B,OAACiE,EAAW,OACR1C,EAAEoC,CAAI,EAAIM,EACV,OAAO1C,EAAEoC,CAAI,EAEV,IACT,CACF,EAEAvG,EAAO,QAAUsG,EACjBtG,EAAO,QAAQ,YAAcsG,CAGvB,CAEI,EAGIQ,EAA2B,CAAC,EAGhC,SAASxG,EAAoByG,EAAU,CAEtC,GAAGD,EAAyBC,CAAQ,EACnC,OAAOD,EAAyBC,CAAQ,EAAE,QAG3C,IAAI/G,EAAS8G,EAAyBC,CAAQ,EAAI,CAGjD,QAAS,CAAC,CACX,EAGA,OAAA5G,EAAoB4G,CAAQ,EAAE/G,EAAQ
A,EAAO,QAASM,CAAmB,EAGlEN,EAAO,OACf,CAIA,OAAC,UAAW,CAEXM,EAAoB,EAAI,SAASN,EAAQ,CACxC,IAAIgH,EAAShH,GAAUA,EAAO,WAC7B,UAAW,CAAE,OAAOA,EAAO,OAAY,EACvC,UAAW,CAAE,OAAOA,CAAQ,EAC7B,OAAAM,EAAoB,EAAE0G,EAAQ,CAAE,EAAGA,CAAO,CAAC,EACpCA,CACR,CACD,EAAE,EAGD,UAAW,CAEX1G,EAAoB,EAAI,SAASP,EAASkH,EAAY,CACrD,QAAQC,KAAOD,EACX3G,EAAoB,EAAE2G,EAAYC,CAAG,GAAK,CAAC5G,EAAoB,EAAEP,EAASmH,CAAG,GAC/E,OAAO,eAAenH,EAASmH,EAAK,CAAE,WAAY,GAAM,IAAKD,EAAWC,CAAG,CAAE,CAAC,CAGjF,CACD,EAAE,EAGD,UAAW,CACX5G,EAAoB,EAAI,SAASyB,EAAKoF,EAAM,CAAE,OAAO,OAAO,UAAU,eAAe,KAAKpF,EAAKoF,CAAI,CAAG,CACvG,EAAE,EAMK7G,EAAoB,GAAG,CAC/B,EAAG,EACX,OACD,CAAC,ICz3BD,IAAA8G,GAAAC,GAAA,CAAAC,GAAAC,KAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,GAeA,IAAIC,GAAkB,UAOtBD,GAAO,QAAUE,GAUjB,SAASA,GAAWC,EAAQ,CAC1B,IAAIC,EAAM,GAAKD,EACXE,EAAQJ,GAAgB,KAAKG,CAAG,EAEpC,GAAI,CAACC,EACH,OAAOD,EAGT,IAAIE,EACAC,EAAO,GACPC,EAAQ,EACRC,EAAY,EAEhB,IAAKD,EAAQH,EAAM,MAAOG,EAAQJ,EAAI,OAAQI,IAAS,CACrD,OAAQJ,EAAI,WAAWI,CAAK,EAAG,CAC7B,IAAK,IACHF,EAAS,SACT,MACF,IAAK,IACHA,EAAS,QACT,MACF,IAAK,IACHA,EAAS,QACT,MACF,IAAK,IACHA,EAAS,OACT,MACF,IAAK,IACHA,EAAS,OACT,MACF,QACE,QACJ,CAEIG,IAAcD,IAChBD,GAAQH,EAAI,UAAUK,EAAWD,CAAK,GAGxCC,EAAYD,EAAQ,EACpBD,GAAQD,CACV,CAEA,OAAOG,IAAcD,EACjBD,EAAOH,EAAI,UAAUK,EAAWD,CAAK,EACrCD,CACN,ICvDA,IAAAG,GAAO,SCtBP;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gFAgBA,IAAIC,GAAgB,SAASC,EAAGC,EAAG,CAC/B,OAAAF,GAAgB,OAAO,gBAClB,CAAE,UAAW,CAAC,CAAE,YAAa,OAAS,SAAUC,EAAGC,EAAG,CAAED,EAAE,UAAYC,CAAG,GAC1E,SAAUD,EAAGC,EAAG,CAAE,QAASC,KAAKD,EAAO,OAAO,UAAU,eAAe,KAAKA,EAAGC,CAAC,IAAGF,EAAEE,CAAC,EAAID,EAAEC,CAAC,EAAG,EAC7FH,GAAcC,EAAGC,CAAC,CAC7B,EAEO,SAASE,GAAUH,EAAGC,EAAG,CAC5B,GAAI,OAAOA,GAAM,YAAcA,IAAM,KACjC,MAAM,IAAI,UAAU,uBAAyB,OAAOA,CAAC,EAAI,+BAA+B,EAC5FF,GAAcC,EAAGC,CAAC,EAClB,SAASG,GAAK,CAAE,KAAK,YAAcJ,CAAG,CACtCA,EAAE,UAAYC,IAAM,KAAO,OAAO,OAAOA,CAAC,GAAKG,EAAG,UAAYH,EAAE,UAAW,IAAIG,EACnF,CAwCO,SAASC,GAAUC,EAASC,EAAYC,EAAGC,EAAW,CACzD,SAASC,EAAMC,EAAO,CAAE,OAAOA,aAAiBH,EAAIG,EAAQ,IAAIH,EAAE,SAAUI,EAAS,CAAEA,EAAQD,CAAK,CAAG,CAAC,CAAG,CAC3G,OAAO,IAAKH,IAAMA,EAAI,UAAU,SAAUI,EAASC,EAAQ,CACvD,SAASC,EAAUH,EAAO,CAAE,GAAI,CAAEI,EAAKN,EAAU,KAAKE,CAAK,CAAC,CAAG,OAASK,EAAG,CAAEH,EAAOG,CAAC,CAAG,CAAE,CAC1F,SAASC,EAASN,EAAO,CAAE,GAAI,CAAEI,EAAKN,EAAU,MAASE,CAAK,CAAC,CAAG,OAASK,EAAG,CAAEH,EAAOG,CAAC,CAAG,CAAE,CAC7F,SAASD,EAAKG,EAAQ,CAAEA,EAAO,KAAON,EAAQM,EAAO,KAAK,EAAIR,EAAMQ,EAAO,KAAK,EAAE,KAAKJ,EAAWG,CAAQ,CAAG,CAC7GF,GAAMN,EAAYA,EAAU,MAAMH,EAASC,GAAc,CAAC,CAAC,GAAG,KAAK,CAAC,CACxE,CAAC,CACL,CAEO,SAASY,GAAYb,EAASc,EAAM,CACvC,IAAIC,EAAI,CAAE,MAAO,EAAG,KAAM,UAAW,CAAE,GAAIC,EAAE,CAAC,EAAI,EAAG,MAAMA,EAAE,CAAC,EAAG,OAAOA,EAAE,CAAC,CAAG,EAAG,KAAM,CAAC,EAAG,IAAK,CAAC,CAAE,EAAGC,EAAGC,EAAGF,EAAGG,EAC/G,OAAOA,EAAI,CAAE,KAAMC,EAAK,CAAC,EAAG,MAASA,EAAK,CAAC,EAAG,OAAUA,EAAK,CAAC,CAAE,EAAG,OAAO,QAAW,aAAeD,EAAE,OAAO,QAAQ,EAAI,UAAW,CAAE,OAAO,IAAM,GAAIA,EACvJ,SAASC,EAAKC,EAAG,CAAE,OAAO,SAAUC,EAAG,CAAE,OAAOb,EAAK,CAACY,EAAGC,CAAC,CAAC,CAAG,CAAG,CACjE,SAASb,EAAKc,EAAI,CACd,GAAIN,EAAG,MAAM,IAAI,UAAU,iCAAiC,EAC5D,KAAOF,GAAG,GAAI,CACV,GAAIE,EAAI,EAAGC,IAAMF,EAAIO,EAAG,CAAC,EAAI,EAAIL,EAAE,OAAYK,EAAG,CAAC,EAAIL,EAAE,SAAcF,EAAIE,EAAE,SAAcF,EAAE,KAAKE,CAAC,EAAG,GAAKA,EAAE,OAAS,EAAEF,EAAIA,EAAE,KAAKE,EAAGK,EAAG,CAAC,CAAC,GAAG,KAAM,OAAOP,EAE3J,OADIE,EAAI,EAAGF,IAAGO,EAAK,CAACA,EAAG,CAAC,EAAI,EAAGP,EAAE,KAAK,GAC9BO,EAAG,CAAC,EAAG,CACX,IAAK,GAAG,IAAK,GAAGP,EAAIO,EAAI,MACxB,IAAK,GAAG,OAAAR,EAAE,QAAgB,CAAE,MAAOQ,EAAG,CAAC,EAAG,KAAM,EAAM,EACtD,IAAK,GAAGR,EAAE,QAASG,EAAIK,EAAG,CAAC,EAAGA,EAAK,CAAC,CAAC,EAAG,SACxC,IAAK,GAAGA,EAAKR,EAAE,IAAI,IAAI,EAAGA,EAAE,KAAK,IAAI,EAAG,SACxC,QACI,GAAM
C,EAAID,EAAE,KAAM,EAAAC,EAAIA,EAAE,OAAS,GAAKA,EAAEA,EAAE,OAAS,CAAC,KAAOO,EAAG,CAAC,IAAM,GAAKA,EAAG,CAAC,IAAM,GAAI,CAAER,EAAI,EAAG,QAAU,CAC3G,GAAIQ,EAAG,CAAC,IAAM,IAAM,CAACP,GAAMO,EAAG,CAAC,EAAIP,EAAE,CAAC,GAAKO,EAAG,CAAC,EAAIP,EAAE,CAAC,GAAK,CAAED,EAAE,MAAQQ,EAAG,CAAC,EAAG,KAAO,CACrF,GAAIA,EAAG,CAAC,IAAM,GAAKR,EAAE,MAAQC,EAAE,CAAC,EAAG,CAAED,EAAE,MAAQC,EAAE,CAAC,EAAGA,EAAIO,EAAI,KAAO,CACpE,GAAIP,GAAKD,EAAE,MAAQC,EAAE,CAAC,EAAG,CAAED,EAAE,MAAQC,EAAE,CAAC,EAAGD,EAAE,IAAI,KAAKQ,CAAE,EAAG,KAAO,CAC9DP,EAAE,CAAC,GAAGD,EAAE,IAAI,IAAI,EACpBA,EAAE,KAAK,IAAI,EAAG,QACtB,CACAQ,EAAKT,EAAK,KAAKd,EAASe,CAAC,CAC7B,OAASL,EAAG,CAAEa,EAAK,CAAC,EAAGb,CAAC,EAAGQ,EAAI,CAAG,QAAE,CAAUD,EAAID,EAAI,CAAG,CACzD,GAAIO,EAAG,CAAC,EAAI,EAAG,MAAMA,EAAG,CAAC,EAAG,MAAO,CAAE,MAAOA,EAAG,CAAC,EAAIA,EAAG,CAAC,EAAI,OAAQ,KAAM,EAAK,CACnF,CACJ,CAcO,SAASC,GAASC,EAAG,CACxB,IAAIC,EAAI,OAAO,QAAW,YAAc,OAAO,SAAUC,EAAID,GAAKD,EAAEC,CAAC,EAAGE,EAAI,EAC5E,GAAID,EAAG,OAAOA,EAAE,KAAKF,CAAC,EACtB,GAAIA,GAAK,OAAOA,EAAE,QAAW,SAAU,MAAO,CAC1C,KAAM,UAAY,CACd,OAAIA,GAAKG,GAAKH,EAAE,SAAQA,EAAI,QACrB,CAAE,MAAOA,GAAKA,EAAEG,GAAG,EAAG,KAAM,CAACH,CAAE,CAC1C,CACJ,EACA,MAAM,IAAI,UAAUC,EAAI,0BAA4B,iCAAiC,CACzF,CAEO,SAASG,EAAOJ,EAAGK,EAAG,CACzB,IAAIH,EAAI,OAAO,QAAW,YAAcF,EAAE,OAAO,QAAQ,EACzD,GAAI,CAACE,EAAG,OAAOF,EACf,IAAIG,EAAID,EAAE,KAAKF,CAAC,EAAGM,EAAGC,EAAK,CAAC,EAAGC,EAC/B,GAAI,CACA,MAAQH,IAAM,QAAUA,KAAM,IAAM,EAAEC,EAAIH,EAAE,KAAK,GAAG,MAAMI,EAAG,KAAKD,EAAE,KAAK,CAC7E,OACOG,EAAO,CAAED,EAAI,CAAE,MAAOC,CAAM,CAAG,QACtC,CACI,GAAI,CACIH,GAAK,CAACA,EAAE,OAASJ,EAAIC,EAAE,SAAYD,EAAE,KAAKC,CAAC,CACnD,QACA,CAAU,GAAIK,EAAG,MAAMA,EAAE,KAAO,CACpC,CACA,OAAOD,CACX,CAkBO,SAASG,EAAcC,EAAIC,EAAMC,EAAM,CAC1C,GAAIA,GAAQ,UAAU,SAAW,EAAG,QAASC,EAAI,EAAGC,EAAIH,EAAK,OAAQI,EAAIF,EAAIC,EAAGD,KACxEE,GAAM,EAAEF,KAAKF,MACRI,IAAIA,EAAK,MAAM,UAAU,MAAM,KAAKJ,EAAM,EAAGE,CAAC,GACnDE,EAAGF,CAAC,EAAIF,EAAKE,CAAC,GAGtB,OAAOH,EAAG,OAAOK,GAAM,MAAM,UAAU,MAAM,KAAKJ,CAAI,CAAC,CAC3D,CAEO,SAASK,GAAQC,EAAG,CACvB,OAAO,gBAAgBD,IAAW,KAAK,EAAIC,EAAG,MAAQ,IAAID,GAAQC,CAAC,CACvE,CAEO,SAASC,GAAiBC,EAASC,EAAYC,EAAW,CAC7D,GAAI,CAAC,OAAO,cAAe,MAAM,IAAI,UAAU,sCAAsC,EACrF,IAAIC,EAAID,EAAU,MAAMF,EAASC,GAAc,CAAC,CAAC,EAAGP,EAAGU,EAAI,CAAC,EAC5D,OAAOV,EAAI,CAAC,EAAGW,EAAK,MAAM,EAAGA,EAAK,OAAO,EAAGA,EAAK,QAAQ,EAAGX,EAAE,OAAO,aAAa,EAAI,UAAY,CAAE,OAAO,IAAM,EAAGA,EACpH,SAASW,EAAKC,EAAG,CAAMH,EAAEG,CAAC,IAAGZ,EAAEY,CAAC,EAAI,SAAUR,EAAG,CAAE,OAAO,IAAI,QAAQ,SAAUS,EAAGC,EAAG,CAAEJ,EAAE,KAAK,CAACE,EAAGR,EAAGS,EAAGC,CAAC,CAAC,EAAI,GAAKC,EAAOH,EAAGR,CAAC,CAAG,CAAC,CAAG,EAAG,CACzI,SAASW,EAAOH,EAAGR,EAAG,CAAE,GAAI,CAAEY,EAAKP,EAAEG,CAAC,EAAER,CAAC,CAAC,CAAG,OAASa,EAAG,CAAEC,EAAOR,EAAE,CAAC,EAAE,CAAC,EAAGO,CAAC,CAAG,CAAE,CACjF,SAASD,EAAKG,EAAG,CAAEA,EAAE,iBAAiBhB,GAAU,QAAQ,QAAQgB,EAAE,MAAM,CAAC,EAAE,KAAKC,EAASC,CAAM,EAAIH,EAAOR,EAAE,CAAC,EAAE,CAAC,EAAGS,CAAC,CAAG,CACvH,SAASC,EAAQE,EAAO,CAAEP,EAAO,OAAQO,CAAK,CAAG,CACjD,SAASD,EAAOC,EAAO,CAAEP,EAAO,QAASO,CAAK,CAAG,CACjD,SAASJ,EAAOK,EAAGnB,EAAG,CAAMmB,EAAEnB,CAAC,EAAGM,EAAE,MAAM,EAAGA,EAAE,QAAQK,EAAOL,EAAE,CAAC,EAAE,CAAC,EAAGA,EAAE,CAAC,EAAE,CAAC,CAAC,CAAG,CACrF,CAQO,SAASc,GAAcC,EAAG,CAC7B,GAAI,CAAC,OAAO,cAAe,MAAM,IAAI,UAAU,sCAAsC,EACrF,IAAIC,EAAID,EAAE,OAAO,aAAa,EAAGE,EACjC,OAAOD,EAAIA,EAAE,KAAKD,CAAC,GAAKA,EAAI,OAAOG,IAAa,WAAaA,GAASH,CAAC,EAAIA,EAAE,OAAO,QAAQ,EAAE,EAAGE,EAAI,CAAC,EAAGE,EAAK,MAAM,EAAGA,EAAK,OAAO,EAAGA,EAAK,QAAQ,EAAGF,EAAE,OAAO,aAAa,EAAI,UAAY,CAAE,OAAO,IAAM,EAAGA,GAC9M,SAASE,EAAKC,EAAG,CAAEH,EAAEG,CAAC,EAAIL,EAAEK,CAAC,GAAK,SAAUC,EAAG,CAAE,OAAO,IAAI,QAAQ,SAAUC,EAASC,EAAQ,CAAEF,EAAIN,EAAEK,CAAC,EAAEC,CAAC,EAAGG,EAAOF,EAASC,EAAQF,EAAE,KAAMA,EAAE,KAAK,CAAG,CAAC,CAAG
,CAAG,CAC/J,SAASG,EAAOF,EAASC,EAAQE,EAAGJ,EAAG,CAAE,QAAQ,QAAQA,CAAC,EAAE,KAAK,SAASA,EAAG,CAAEC,EAAQ,CAAE,MAAOD,EAAG,KAAMI,CAAE,CAAC,CAAG,EAAGF,CAAM,CAAG,CAC/H,CCtMM,SAAUG,EAAWC,EAAU,CACnC,OAAO,OAAOA,GAAU,UAC1B,CCGM,SAAUC,GAAoBC,EAAgC,CAClE,IAAMC,EAAS,SAACC,EAAa,CAC3B,MAAM,KAAKA,CAAQ,EACnBA,EAAS,MAAQ,IAAI,MAAK,EAAG,KAC/B,EAEMC,EAAWH,EAAWC,CAAM,EAClC,OAAAE,EAAS,UAAY,OAAO,OAAO,MAAM,SAAS,EAClDA,EAAS,UAAU,YAAcA,EAC1BA,CACT,CCDO,IAAMC,GAA+CC,GAC1D,SAACC,EAAM,CACL,OAAA,SAA4CC,EAA0B,CACpED,EAAO,IAAI,EACX,KAAK,QAAUC,EACRA,EAAO,OAAM;EACxBA,EAAO,IAAI,SAACC,EAAKC,EAAC,CAAK,OAAGA,EAAI,EAAC,KAAKD,EAAI,SAAQ,CAAzB,CAA6B,EAAE,KAAK;GAAM,EACzD,GACJ,KAAK,KAAO,sBACZ,KAAK,OAASD,CAChB,CARA,CAQC,ECvBC,SAAUG,GAAaC,EAA6BC,EAAO,CAC/D,GAAID,EAAK,CACP,IAAME,EAAQF,EAAI,QAAQC,CAAI,EAC9B,GAAKC,GAASF,EAAI,OAAOE,EAAO,CAAC,EAErC,CCOA,IAAAC,GAAA,UAAA,CAyBE,SAAAA,EAAoBC,EAA4B,CAA5B,KAAA,gBAAAA,EAdb,KAAA,OAAS,GAER,KAAA,WAAmD,KAMnD,KAAA,YAAqD,IAMV,CAQnD,OAAAD,EAAA,UAAA,YAAA,UAAA,aACME,EAEJ,GAAI,CAAC,KAAK,OAAQ,CAChB,KAAK,OAAS,GAGN,IAAAC,EAAe,KAAI,WAC3B,GAAIA,EAEF,GADA,KAAK,WAAa,KACd,MAAM,QAAQA,CAAU,MAC1B,QAAqBC,EAAAC,GAAAF,CAAU,EAAAG,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAA5B,IAAMG,EAAMD,EAAA,MACfC,EAAO,OAAO,IAAI,yGAGpBJ,EAAW,OAAO,IAAI,EAIlB,IAAiBK,EAAqB,KAAI,gBAClD,GAAIC,EAAWD,CAAgB,EAC7B,GAAI,CACFA,EAAgB,QACTE,EAAG,CACVR,EAASQ,aAAaC,GAAsBD,EAAE,OAAS,CAACA,CAAC,EAIrD,IAAAE,EAAgB,KAAI,YAC5B,GAAIA,EAAa,CACf,KAAK,YAAc,SACnB,QAAwBC,EAAAR,GAAAO,CAAW,EAAAE,EAAAD,EAAA,KAAA,EAAA,CAAAC,EAAA,KAAAA,EAAAD,EAAA,KAAA,EAAE,CAAhC,IAAME,EAASD,EAAA,MAClB,GAAI,CACFE,GAAcD,CAAS,QAChBE,EAAK,CACZf,EAASA,GAAM,KAANA,EAAU,CAAA,EACfe,aAAeN,GACjBT,EAAMgB,EAAAA,EAAA,CAAA,EAAAC,EAAOjB,CAAM,CAAA,EAAAiB,EAAKF,EAAI,MAAM,CAAA,EAElCf,EAAO,KAAKe,CAAG,sGAMvB,GAAIf,EACF,MAAM,IAAIS,GAAoBT,CAAM,EAG1C,EAoBAF,EAAA,UAAA,IAAA,SAAIoB,EAAuB,OAGzB,GAAIA,GAAYA,IAAa,KAC3B,GAAI,KAAK,OAGPJ,GAAcI,CAAQ,MACjB,CACL,GAAIA,aAAoBpB,EAAc,CAGpC,GAAIoB,EAAS,QAAUA,EAAS,WAAW,IAAI,EAC7C,OAEFA,EAAS,WAAW,IAAI,GAEzB,KAAK,aAAcC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAAA,EAAI,CAAA,GAAI,KAAKD,CAAQ,EAG/D,EAOQpB,EAAA,UAAA,WAAR,SAAmBsB,EAAoB,CAC7B,IAAAnB,EAAe,KAAI,WAC3B,OAAOA,IAAemB,GAAW,MAAM,QAAQnB,CAAU,GAAKA,EAAW,SAASmB,CAAM,CAC1F,EASQtB,EAAA,UAAA,WAAR,SAAmBsB,EAAoB,CAC7B,IAAAnB,EAAe,KAAI,WAC3B,KAAK,WAAa,MAAM,QAAQA,CAAU,GAAKA,EAAW,KAAKmB,CAAM,EAAGnB,GAAcA,EAAa,CAACA,EAAYmB,CAAM,EAAIA,CAC5H,EAMQtB,EAAA,UAAA,cAAR,SAAsBsB,EAAoB,CAChC,IAAAnB,EAAe,KAAI,WACvBA,IAAemB,EACjB,KAAK,WAAa,KACT,MAAM,QAAQnB,CAAU,GACjCoB,GAAUpB,EAAYmB,CAAM,CAEhC,EAgBAtB,EAAA,UAAA,OAAA,SAAOoB,EAAsC,CACnC,IAAAR,EAAgB,KAAI,YAC5BA,GAAeW,GAAUX,EAAaQ,CAAQ,EAE1CA,aAAoBpB,GACtBoB,EAAS,cAAc,IAAI,CAE/B,EAlLcpB,EAAA,MAAS,UAAA,CACrB,IAAMwB,EAAQ,IAAIxB,EAClB,OAAAwB,EAAM,OAAS,GACRA,CACT,EAAE,EA+KJxB,GArLA,EAuLO,IAAMyB,GAAqBC,GAAa,MAEzC,SAAUC,GAAeC,EAAU,CACvC,OACEA,aAAiBF,IAChBE,GAAS,WAAYA,GAASC,EAAWD,EAAM,MAAM,GAAKC,EAAWD,EAAM,GAAG,GAAKC,EAAWD,EAAM,WAAW,CAEpH,CAEA,SAASE,GAAcC,EAAwC,CACzDF,EAAWE,CAAS,EACtBA,EAAS,EAETA,EAAU,YAAW,CAEzB,CChNO,IAAMC,GAAuB,CAClC,iBAAkB,KAClB,sBAAuB,KACvB,QAAS,OACT,sCAAuC,GACvC,yBAA0B,ICGrB,IAAMC,GAAmC,CAG9C,WAAA,SAAWC,EAAqBC,EAAgB,SAAEC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,EAAA,CAAA,EAAA,UAAAA,CAAA,EACxC,IAAAC,EAAaL,GAAe,SACpC,OAAIK,GAAQ,MAARA,EAAU,WACLA,EAAS,WAAU,MAAnBA,EAAQC,EAAA,CAAYL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,EAE/C,WAAU,MAAA,OAAAG,EAAA,CAACL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,CAC7C,EACA,aAAA,SAAaK,EAAM,CACT,IAAAH,EAAaL,GAAe,SACpC,QAAQK,GAAQ,KAAA,OAARA,EAAU,eAAgB,cAAcG,CAAa,CAC/D,EACA,SAAU,QCjBN,SAAUC,GAAqBC,EAAQ,CAC3CC,GAAgB,WAAW,UAAA,CACjB,IAAAC,EAAqBC,GAAM,iBACnC,GA
AID,EAEFA,EAAiBF,CAAG,MAGpB,OAAMA,CAEV,CAAC,CACH,CCtBM,SAAUI,IAAI,CAAK,CCMlB,IAAMC,GAAyB,UAAA,CAAM,OAAAC,GAAmB,IAAK,OAAW,MAAS,CAA5C,EAAsE,EAO5G,SAAUC,GAAkBC,EAAU,CAC1C,OAAOF,GAAmB,IAAK,OAAWE,CAAK,CACjD,CAOM,SAAUC,GAAoBC,EAAQ,CAC1C,OAAOJ,GAAmB,IAAKI,EAAO,MAAS,CACjD,CAQM,SAAUJ,GAAmBK,EAAuBD,EAAYF,EAAU,CAC9E,MAAO,CACL,KAAIG,EACJ,MAAKD,EACL,MAAKF,EAET,CCrCA,IAAII,GAAuD,KASrD,SAAUC,GAAaC,EAAc,CACzC,GAAIC,GAAO,sCAAuC,CAChD,IAAMC,EAAS,CAACJ,GAKhB,GAJII,IACFJ,GAAU,CAAE,YAAa,GAAO,MAAO,IAAI,GAE7CE,EAAE,EACEE,EAAQ,CACJ,IAAAC,EAAyBL,GAAvBM,EAAWD,EAAA,YAAEE,EAAKF,EAAA,MAE1B,GADAL,GAAU,KACNM,EACF,MAAMC,QAMVL,EAAE,CAEN,CAMM,SAAUM,GAAaC,EAAQ,CAC/BN,GAAO,uCAAyCH,KAClDA,GAAQ,YAAc,GACtBA,GAAQ,MAAQS,EAEpB,CCrBA,IAAAC,GAAA,SAAAC,EAAA,CAAmCC,GAAAF,EAAAC,CAAA,EA6BjC,SAAAD,EAAYG,EAA6C,CAAzD,IAAAC,EACEH,EAAA,KAAA,IAAA,GAAO,KATC,OAAAG,EAAA,UAAqB,GAUzBD,GACFC,EAAK,YAAcD,EAGfE,GAAeF,CAAW,GAC5BA,EAAY,IAAIC,CAAI,GAGtBA,EAAK,YAAcE,IAEvB,CAzBO,OAAAN,EAAA,OAAP,SAAiBO,EAAwBC,EAA2BC,EAAqB,CACvF,OAAO,IAAIC,GAAeH,EAAMC,EAAOC,CAAQ,CACjD,EAgCAT,EAAA,UAAA,KAAA,SAAKW,EAAS,CACR,KAAK,UACPC,GAA0BC,GAAiBF,CAAK,EAAG,IAAI,EAEvD,KAAK,MAAMA,CAAM,CAErB,EASAX,EAAA,UAAA,MAAA,SAAMc,EAAS,CACT,KAAK,UACPF,GAA0BG,GAAkBD,CAAG,EAAG,IAAI,GAEtD,KAAK,UAAY,GACjB,KAAK,OAAOA,CAAG,EAEnB,EAQAd,EAAA,UAAA,SAAA,UAAA,CACM,KAAK,UACPY,GAA0BI,GAAuB,IAAI,GAErD,KAAK,UAAY,GACjB,KAAK,UAAS,EAElB,EAEAhB,EAAA,UAAA,YAAA,UAAA,CACO,KAAK,SACR,KAAK,UAAY,GACjBC,EAAA,UAAM,YAAW,KAAA,IAAA,EACjB,KAAK,YAAc,KAEvB,EAEUD,EAAA,UAAA,MAAV,SAAgBW,EAAQ,CACtB,KAAK,YAAY,KAAKA,CAAK,CAC7B,EAEUX,EAAA,UAAA,OAAV,SAAiBc,EAAQ,CACvB,GAAI,CACF,KAAK,YAAY,MAAMA,CAAG,UAE1B,KAAK,YAAW,EAEpB,EAEUd,EAAA,UAAA,UAAV,UAAA,CACE,GAAI,CACF,KAAK,YAAY,SAAQ,UAEzB,KAAK,YAAW,EAEpB,EACFA,CAAA,EApHmCiB,EAAY,EA2H/C,IAAMC,GAAQ,SAAS,UAAU,KAEjC,SAASC,GAAyCC,EAAQC,EAAY,CACpE,OAAOH,GAAM,KAAKE,EAAIC,CAAO,CAC/B,CAMA,IAAAC,GAAA,UAAA,CACE,SAAAA,EAAoBC,EAAqC,CAArC,KAAA,gBAAAA,CAAwC,CAE5D,OAAAD,EAAA,UAAA,KAAA,SAAKE,EAAQ,CACH,IAAAD,EAAoB,KAAI,gBAChC,GAAIA,EAAgB,KAClB,GAAI,CACFA,EAAgB,KAAKC,CAAK,QACnBC,EAAO,CACdC,GAAqBD,CAAK,EAGhC,EAEAH,EAAA,UAAA,MAAA,SAAMK,EAAQ,CACJ,IAAAJ,EAAoB,KAAI,gBAChC,GAAIA,EAAgB,MAClB,GAAI,CACFA,EAAgB,MAAMI,CAAG,QAClBF,EAAO,CACdC,GAAqBD,CAAK,OAG5BC,GAAqBC,CAAG,CAE5B,EAEAL,EAAA,UAAA,SAAA,UAAA,CACU,IAAAC,EAAoB,KAAI,gBAChC,GAAIA,EAAgB,SAClB,GAAI,CACFA,EAAgB,SAAQ,QACjBE,EAAO,CACdC,GAAqBD,CAAK,EAGhC,EACFH,CAAA,EArCA,EAuCAM,GAAA,SAAAC,EAAA,CAAuCC,GAAAF,EAAAC,CAAA,EACrC,SAAAD,EACEG,EACAN,EACAO,EAA8B,CAHhC,IAAAC,EAKEJ,EAAA,KAAA,IAAA,GAAO,KAEHN,EACJ,GAAIW,EAAWH,CAAc,GAAK,CAACA,EAGjCR,EAAkB,CAChB,KAAOQ,GAAc,KAAdA,EAAkB,OACzB,MAAON,GAAK,KAALA,EAAS,OAChB,SAAUO,GAAQ,KAARA,EAAY,YAEnB,CAEL,IAAIG,EACAF,GAAQG,GAAO,0BAIjBD,EAAU,OAAO,OAAOJ,CAAc,EACtCI,EAAQ,YAAc,UAAA,CAAM,OAAAF,EAAK,YAAW,CAAhB,EAC5BV,EAAkB,CAChB,KAAMQ,EAAe,MAAQZ,GAAKY,EAAe,KAAMI,CAAO,EAC9D,MAAOJ,EAAe,OAASZ,GAAKY,EAAe,MAAOI,CAAO,EACjE,SAAUJ,EAAe,UAAYZ,GAAKY,EAAe,SAAUI,CAAO,IAI5EZ,EAAkBQ,EAMtB,OAAAE,EAAK,YAAc,IAAIX,GAAiBC,CAAe,GACzD,CACF,OAAAK,CAAA,EAzCuCS,EAAU,EA2CjD,SAASC,GAAqBC,EAAU,CAClCC,GAAO,sCACTC,GAAaF,CAAK,EAIlBG,GAAqBH,CAAK,CAE9B,CAQA,SAASI,GAAoBC,EAAQ,CACnC,MAAMA,CACR,CAOA,SAASC,GAA0BC,EAA2CC,EAA2B,CAC/F,IAAAC,EAA0BR,GAAM,sBACxCQ,GAAyBC,GAAgB,WAAW,UAAA,CAAM,OAAAD,EAAsBF,EAAcC,CAAU,CAA9C,CAA+C,CAC3G,CAOO,IAAMG,GAA6D,CACxE,OAAQ,GACR,KAAMC,GACN,MAAOR,GACP,SAAUQ,IC5QL,IAAMC,GAA+B,UAAA,CAAM,OAAC,OAAO,QAAW,YAAc,OAAO,YAAe,cAAvD,EAAsE,ECoClH,SAAUC,GAAYC,EAAI,CAC9B,OAAOA,CACT,CCiCM,SAAUC,IAAI,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACnB,OAAOC,GAAcF,CAAG,CAC1B,CAGM,SAAUE,GAAoBF,EAA+B,CACjE,OAAIA,EAAI,SAAW,EACVG,GAGLH,EAAI
,SAAW,EACVA,EAAI,CAAC,EAGP,SAAeI,EAAQ,CAC5B,OAAOJ,EAAI,OAAO,SAACK,EAAWC,EAAuB,CAAK,OAAAA,EAAGD,CAAI,CAAP,EAAUD,CAAY,CAClF,CACF,CC9EA,IAAAG,EAAA,UAAA,CAkBE,SAAAA,EAAYC,EAA6E,CACnFA,IACF,KAAK,WAAaA,EAEtB,CA4BA,OAAAD,EAAA,UAAA,KAAA,SAAQE,EAAyB,CAC/B,IAAMC,EAAa,IAAIH,EACvB,OAAAG,EAAW,OAAS,KACpBA,EAAW,SAAWD,EACfC,CACT,EA6IAH,EAAA,UAAA,UAAA,SACEI,EACAC,EACAC,EAA8B,CAHhC,IAAAC,EAAA,KAKQC,EAAaC,GAAaL,CAAc,EAAIA,EAAiB,IAAIM,GAAeN,EAAgBC,EAAOC,CAAQ,EAErH,OAAAK,GAAa,UAAA,CACL,IAAAC,EAAuBL,EAArBL,EAAQU,EAAA,SAAEC,EAAMD,EAAA,OACxBJ,EAAW,IACTN,EAGIA,EAAS,KAAKM,EAAYK,CAAM,EAChCA,EAIAN,EAAK,WAAWC,CAAU,EAG1BD,EAAK,cAAcC,CAAU,CAAC,CAEtC,CAAC,EAEMA,CACT,EAGUR,EAAA,UAAA,cAAV,SAAwBc,EAAmB,CACzC,GAAI,CACF,OAAO,KAAK,WAAWA,CAAI,QACpBC,EAAK,CAIZD,EAAK,MAAMC,CAAG,EAElB,EA6DAf,EAAA,UAAA,QAAA,SAAQgB,EAA0BC,EAAoC,CAAtE,IAAAV,EAAA,KACE,OAAAU,EAAcC,GAAeD,CAAW,EAEjC,IAAIA,EAAkB,SAACE,EAASC,EAAM,CAC3C,IAAMZ,EAAa,IAAIE,GAAkB,CACvC,KAAM,SAACW,EAAK,CACV,GAAI,CACFL,EAAKK,CAAK,QACHN,EAAK,CACZK,EAAOL,CAAG,EACVP,EAAW,YAAW,EAE1B,EACA,MAAOY,EACP,SAAUD,EACX,EACDZ,EAAK,UAAUC,CAAU,CAC3B,CAAC,CACH,EAGUR,EAAA,UAAA,WAAV,SAAqBQ,EAA2B,OAC9C,OAAOI,EAAA,KAAK,UAAM,MAAAA,IAAA,OAAA,OAAAA,EAAE,UAAUJ,CAAU,CAC1C,EAOAR,EAAA,UAACG,EAAiB,EAAlB,UAAA,CACE,OAAO,IACT,EA4FAH,EAAA,UAAA,KAAA,UAAA,SAAKsB,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACH,OAAOC,GAAcF,CAAU,EAAE,IAAI,CACvC,EA6BAtB,EAAA,UAAA,UAAA,SAAUiB,EAAoC,CAA9C,IAAAV,EAAA,KACE,OAAAU,EAAcC,GAAeD,CAAW,EAEjC,IAAIA,EAAY,SAACE,EAASC,EAAM,CACrC,IAAIC,EACJd,EAAK,UACH,SAACkB,EAAI,CAAK,OAACJ,EAAQI,CAAT,EACV,SAACV,EAAQ,CAAK,OAAAK,EAAOL,CAAG,CAAV,EACd,UAAA,CAAM,OAAAI,EAAQE,CAAK,CAAb,CAAc,CAExB,CAAC,CACH,EA1aOrB,EAAA,OAAkC,SAAIC,EAAwD,CACnG,OAAO,IAAID,EAAcC,CAAS,CACpC,EAyaFD,GA9cA,EAudA,SAAS0B,GAAeC,EAA+C,OACrE,OAAOC,EAAAD,GAAW,KAAXA,EAAeE,GAAO,WAAO,MAAAD,IAAA,OAAAA,EAAI,OAC1C,CAEA,SAASE,GAAcC,EAAU,CAC/B,OAAOA,GAASC,EAAWD,EAAM,IAAI,GAAKC,EAAWD,EAAM,KAAK,GAAKC,EAAWD,EAAM,QAAQ,CAChG,CAEA,SAASE,GAAgBF,EAAU,CACjC,OAAQA,GAASA,aAAiBG,IAAgBJ,GAAWC,CAAK,GAAKI,GAAeJ,CAAK,CAC7F,CCzeM,SAAUK,GAAQC,EAAW,CACjC,OAAOC,EAAWD,GAAM,KAAA,OAANA,EAAQ,IAAI,CAChC,CAMM,SAAUE,EACdC,EAAqF,CAErF,OAAO,SAACH,EAAqB,CAC3B,GAAID,GAAQC,CAAM,EAChB,OAAOA,EAAO,KAAK,SAA+BI,EAA2B,CAC3E,GAAI,CACF,OAAOD,EAAKC,EAAc,IAAI,QACvBC,EAAK,CACZ,KAAK,MAAMA,CAAG,EAElB,CAAC,EAEH,MAAM,IAAI,UAAU,wCAAwC,CAC9D,CACF,CCjBM,SAAUC,EACdC,EACAC,EACAC,EACAC,EACAC,EAAuB,CAEvB,OAAO,IAAIC,GAAmBL,EAAaC,EAAQC,EAAYC,EAASC,CAAU,CACpF,CAMA,IAAAC,GAAA,SAAAC,EAAA,CAA2CC,GAAAF,EAAAC,CAAA,EAiBzC,SAAAD,EACEL,EACAC,EACAC,EACAC,EACQC,EACAI,EAAiC,CAN3C,IAAAC,EAoBEH,EAAA,KAAA,KAAMN,CAAW,GAAC,KAfV,OAAAS,EAAA,WAAAL,EACAK,EAAA,kBAAAD,EAeRC,EAAK,MAAQR,EACT,SAAuCS,EAAQ,CAC7C,GAAI,CACFT,EAAOS,CAAK,QACLC,EAAK,CACZX,EAAY,MAAMW,CAAG,EAEzB,EACAL,EAAA,UAAM,MACVG,EAAK,OAASN,EACV,SAAuCQ,EAAQ,CAC7C,GAAI,CACFR,EAAQQ,CAAG,QACJA,EAAK,CAEZX,EAAY,MAAMW,CAAG,UAGrB,KAAK,YAAW,EAEpB,EACAL,EAAA,UAAM,OACVG,EAAK,UAAYP,EACb,UAAA,CACE,GAAI,CACFA,EAAU,QACHS,EAAK,CAEZX,EAAY,MAAMW,CAAG,UAGrB,KAAK,YAAW,EAEpB,EACAL,EAAA,UAAM,WACZ,CAEA,OAAAD,EAAA,UAAA,YAAA,UAAA,OACE,GAAI,CAAC,KAAK,mBAAqB,KAAK,kBAAiB,EAAI,CAC/C,IAAAO,EAAW,KAAI,OACvBN,EAAA,UAAM,YAAW,KAAA,IAAA,EAEjB,CAACM,KAAUC,EAAA,KAAK,cAAU,MAAAA,IAAA,QAAAA,EAAA,KAAf,IAAI,GAEnB,EACFR,CAAA,EAnF2CS,EAAU,ECd9C,IAAMC,GAAiD,CAG5D,SAAA,SAASC,EAAQ,CACf,IAAIC,EAAU,sBACVC,EAAkD,qBAC9CC,EAAaJ,GAAsB,SACvCI,IACFF,EAAUE,EAAS,sBACnBD,EAASC,EAAS,sBAEpB,IAAMC,EAASH,EAAQ,SAACI,EAAS,CAI/BH,EAAS,OACTF,EAASK,CAAS,CACpB,CAAC,EACD,OAAO,IAAIC,GAAa,UAAA,CAAM,OAAAJ,GAAM,KAAA,OAANA,EAASE,CAAM,CAAf,CAAgB,CAChD,EACA,sBAAqB,UAAA,SAACG,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA
,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACZ,IAAAL,EAAaJ,GAAsB,SAC3C,QAAQI,GAAQ,KAAA,OAARA,EAAU,wBAAyB,uBAAsB,MAAA,OAAAM,EAAA,CAAA,EAAAC,EAAIH,CAAI,CAAA,CAAA,CAC3E,EACA,qBAAoB,UAAA,SAACA,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACX,IAAAL,EAAaJ,GAAsB,SAC3C,QAAQI,GAAQ,KAAA,OAARA,EAAU,uBAAwB,sBAAqB,MAAA,OAAAM,EAAA,CAAA,EAAAC,EAAIH,CAAI,CAAA,CAAA,CACzE,EACA,SAAU,QCrBL,IAAMI,GAAuDC,GAClE,SAACC,EAAM,CACL,OAAA,UAAoC,CAClCA,EAAO,IAAI,EACX,KAAK,KAAO,0BACZ,KAAK,QAAU,qBACjB,CAJA,CAIC,ECXL,IAAAC,EAAA,SAAAC,EAAA,CAAgCC,GAAAF,EAAAC,CAAA,EAwB9B,SAAAD,GAAA,CAAA,IAAAG,EAEEF,EAAA,KAAA,IAAA,GAAO,KAzBT,OAAAE,EAAA,OAAS,GAEDA,EAAA,iBAAyC,KAGjDA,EAAA,UAA2B,CAAA,EAE3BA,EAAA,UAAY,GAEZA,EAAA,SAAW,GAEXA,EAAA,YAAmB,MAenB,CAGA,OAAAH,EAAA,UAAA,KAAA,SAAQI,EAAwB,CAC9B,IAAMC,EAAU,IAAIC,GAAiB,KAAM,IAAI,EAC/C,OAAAD,EAAQ,SAAWD,EACZC,CACT,EAGUL,EAAA,UAAA,eAAV,UAAA,CACE,GAAI,KAAK,OACP,MAAM,IAAIO,EAEd,EAEAP,EAAA,UAAA,KAAA,SAAKQ,EAAQ,CAAb,IAAAL,EAAA,KACEM,GAAa,UAAA,SAEX,GADAN,EAAK,eAAc,EACf,CAACA,EAAK,UAAW,CACdA,EAAK,mBACRA,EAAK,iBAAmB,MAAM,KAAKA,EAAK,SAAS,OAEnD,QAAuBO,EAAAC,GAAAR,EAAK,gBAAgB,EAAAS,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAzC,IAAMG,EAAQD,EAAA,MACjBC,EAAS,KAAKL,CAAK,qGAGzB,CAAC,CACH,EAEAR,EAAA,UAAA,MAAA,SAAMc,EAAQ,CAAd,IAAAX,EAAA,KACEM,GAAa,UAAA,CAEX,GADAN,EAAK,eAAc,EACf,CAACA,EAAK,UAAW,CACnBA,EAAK,SAAWA,EAAK,UAAY,GACjCA,EAAK,YAAcW,EAEnB,QADQC,EAAcZ,EAAI,UACnBY,EAAU,QACfA,EAAU,MAAK,EAAI,MAAMD,CAAG,EAGlC,CAAC,CACH,EAEAd,EAAA,UAAA,SAAA,UAAA,CAAA,IAAAG,EAAA,KACEM,GAAa,UAAA,CAEX,GADAN,EAAK,eAAc,EACf,CAACA,EAAK,UAAW,CACnBA,EAAK,UAAY,GAEjB,QADQY,EAAcZ,EAAI,UACnBY,EAAU,QACfA,EAAU,MAAK,EAAI,SAAQ,EAGjC,CAAC,CACH,EAEAf,EAAA,UAAA,YAAA,UAAA,CACE,KAAK,UAAY,KAAK,OAAS,GAC/B,KAAK,UAAY,KAAK,iBAAmB,IAC3C,EAEA,OAAA,eAAIA,EAAA,UAAA,WAAQ,KAAZ,UAAA,OACE,QAAOgB,EAAA,KAAK,aAAS,MAAAA,IAAA,OAAA,OAAAA,EAAE,QAAS,CAClC,kCAGUhB,EAAA,UAAA,cAAV,SAAwBiB,EAAyB,CAC/C,YAAK,eAAc,EACZhB,EAAA,UAAM,cAAa,KAAA,KAACgB,CAAU,CACvC,EAGUjB,EAAA,UAAA,WAAV,SAAqBiB,EAAyB,CAC5C,YAAK,eAAc,EACnB,KAAK,wBAAwBA,CAAU,EAChC,KAAK,gBAAgBA,CAAU,CACxC,EAGUjB,EAAA,UAAA,gBAAV,SAA0BiB,EAA2B,CAArD,IAAAd,EAAA,KACQa,EAAqC,KAAnCE,EAAQF,EAAA,SAAEG,EAASH,EAAA,UAAED,EAASC,EAAA,UACtC,OAAIE,GAAYC,EACPC,IAET,KAAK,iBAAmB,KACxBL,EAAU,KAAKE,CAAU,EAClB,IAAII,GAAa,UAAA,CACtBlB,EAAK,iBAAmB,KACxBmB,GAAUP,EAAWE,CAAU,CACjC,CAAC,EACH,EAGUjB,EAAA,UAAA,wBAAV,SAAkCiB,EAA2B,CACrD,IAAAD,EAAuC,KAArCE,EAAQF,EAAA,SAAEO,EAAWP,EAAA,YAAEG,EAASH,EAAA,UACpCE,EACFD,EAAW,MAAMM,CAAW,EACnBJ,GACTF,EAAW,SAAQ,CAEvB,EAQAjB,EAAA,UAAA,aAAA,UAAA,CACE,IAAMwB,EAAkB,IAAIC,EAC5B,OAAAD,EAAW,OAAS,KACbA,CACT,EAxHOxB,EAAA,OAAkC,SAAI0B,EAA0BC,EAAqB,CAC1F,OAAO,IAAIrB,GAAoBoB,EAAaC,CAAM,CACpD,EAuHF3B,GA7IgCyB,CAAU,EAkJ1C,IAAAG,GAAA,SAAAC,EAAA,CAAyCC,GAAAF,EAAAC,CAAA,EACvC,SAAAD,EAESG,EACPC,EAAsB,CAHxB,IAAAC,EAKEJ,EAAA,KAAA,IAAA,GAAO,KAHA,OAAAI,EAAA,YAAAF,EAIPE,EAAK,OAASD,GAChB,CAEA,OAAAJ,EAAA,UAAA,KAAA,SAAKM,EAAQ,UACXC,GAAAC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAA,OAAAA,EAAE,QAAI,MAAAD,IAAA,QAAAA,EAAA,KAAAC,EAAGF,CAAK,CAChC,EAEAN,EAAA,UAAA,MAAA,SAAMS,EAAQ,UACZF,GAAAC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAA,OAAAA,EAAE,SAAK,MAAAD,IAAA,QAAAA,EAAA,KAAAC,EAAGC,CAAG,CAC/B,EAEAT,EAAA,UAAA,SAAA,UAAA,UACEO,GAAAC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAA,OAAAA,EAAE,YAAQ,MAAAD,IAAA,QAAAA,EAAA,KAAAC,CAAA,CAC5B,EAGUR,EAAA,UAAA,WAAV,SAAqBU,EAAyB,SAC5C,OAAOH,GAAAC,EAAA,KAAK,UAAM,MAAAA,IAAA,OAAA,OAAAA,EAAE,UAAUE,CAAU,KAAC,MAAAH,IAAA,OAAAA,EAAII,EAC/C,EACFX,CAAA,EA1ByCY,CAAO,ECxJhD,IAAAC,GAAA,SAAAC,EAAA,CAAwCC,GAAAF,EAAAC,CAAA,EACtC,SAAAD,EAAoBG,EAAS,CAA7B,IAAAC,EACEH,EAAA,KAAA,IAAA,GAAO,KADW,OAAAG,EAAA,OAAAD,GAEpB,C
AEA,cAAA,eAAIH,EAAA,UAAA,QAAK,KAAT,UAAA,CACE,OAAO,KAAK,SAAQ,CACtB,kCAGUA,EAAA,UAAA,WAAV,SAAqBK,EAAyB,CAC5C,IAAMC,EAAeL,EAAA,UAAM,WAAU,KAAA,KAACI,CAAU,EAChD,OAACC,EAAa,QAAUD,EAAW,KAAK,KAAK,MAAM,EAC5CC,CACT,EAEAN,EAAA,UAAA,SAAA,UAAA,CACQ,IAAAO,EAAoC,KAAlCC,EAAQD,EAAA,SAAEE,EAAWF,EAAA,YAAEJ,EAAMI,EAAA,OACrC,GAAIC,EACF,MAAMC,EAER,YAAK,eAAc,EACZN,CACT,EAEAH,EAAA,UAAA,KAAA,SAAKU,EAAQ,CACXT,EAAA,UAAM,KAAI,KAAA,KAAE,KAAK,OAASS,CAAM,CAClC,EACFV,CAAA,EA5BwCW,CAAO,ECJxC,IAAMC,GAA+C,CAC1D,IAAG,UAAA,CAGD,OAAQA,GAAsB,UAAY,MAAM,IAAG,CACrD,EACA,SAAU,QCwBZ,IAAAC,GAAA,SAAAC,EAAA,CAAsCC,GAAAF,EAAAC,CAAA,EAUpC,SAAAD,EACUG,EACAC,EACAC,EAA6D,CAF7DF,IAAA,SAAAA,EAAA,KACAC,IAAA,SAAAA,EAAA,KACAC,IAAA,SAAAA,EAAAC,IAHV,IAAAC,EAKEN,EAAA,KAAA,IAAA,GAAO,KAJC,OAAAM,EAAA,YAAAJ,EACAI,EAAA,YAAAH,EACAG,EAAA,mBAAAF,EAZFE,EAAA,QAA0B,CAAA,EAC1BA,EAAA,oBAAsB,GAc5BA,EAAK,oBAAsBH,IAAgB,IAC3CG,EAAK,YAAc,KAAK,IAAI,EAAGJ,CAAW,EAC1CI,EAAK,YAAc,KAAK,IAAI,EAAGH,CAAW,GAC5C,CAEA,OAAAJ,EAAA,UAAA,KAAA,SAAKQ,EAAQ,CACL,IAAAC,EAA+E,KAA7EC,EAASD,EAAA,UAAEE,EAAOF,EAAA,QAAEG,EAAmBH,EAAA,oBAAEJ,EAAkBI,EAAA,mBAAEL,EAAWK,EAAA,YAC3EC,IACHC,EAAQ,KAAKH,CAAK,EAClB,CAACI,GAAuBD,EAAQ,KAAKN,EAAmB,IAAG,EAAKD,CAAW,GAE7E,KAAK,YAAW,EAChBH,EAAA,UAAM,KAAI,KAAA,KAACO,CAAK,CAClB,EAGUR,EAAA,UAAA,WAAV,SAAqBa,EAAyB,CAC5C,KAAK,eAAc,EACnB,KAAK,YAAW,EAQhB,QANMC,EAAe,KAAK,gBAAgBD,CAAU,EAE9CJ,EAAmC,KAAjCG,EAAmBH,EAAA,oBAAEE,EAAOF,EAAA,QAG9BM,EAAOJ,EAAQ,MAAK,EACjBK,EAAI,EAAGA,EAAID,EAAK,QAAU,CAACF,EAAW,OAAQG,GAAKJ,EAAsB,EAAI,EACpFC,EAAW,KAAKE,EAAKC,CAAC,CAAM,EAG9B,YAAK,wBAAwBH,CAAU,EAEhCC,CACT,EAEQd,EAAA,UAAA,YAAR,UAAA,CACQ,IAAAS,EAAoE,KAAlEN,EAAWM,EAAA,YAAEJ,EAAkBI,EAAA,mBAAEE,EAAOF,EAAA,QAAEG,EAAmBH,EAAA,oBAK/DQ,GAAsBL,EAAsB,EAAI,GAAKT,EAK3D,GAJAA,EAAc,KAAYc,EAAqBN,EAAQ,QAAUA,EAAQ,OAAO,EAAGA,EAAQ,OAASM,CAAkB,EAIlH,CAACL,EAAqB,CAKxB,QAJMM,EAAMb,EAAmB,IAAG,EAC9Bc,EAAO,EAGFH,EAAI,EAAGA,EAAIL,EAAQ,QAAWA,EAAQK,CAAC,GAAgBE,EAAKF,GAAK,EACxEG,EAAOH,EAETG,GAAQR,EAAQ,OAAO,EAAGQ,EAAO,CAAC,EAEtC,EACFnB,CAAA,EAzEsCoB,CAAO,EClB7C,IAAAC,GAAA,SAAAC,EAAA,CAA+BC,GAAAF,EAAAC,CAAA,EAC7B,SAAAD,EAAYG,EAAsBC,EAAmD,QACnFH,EAAA,KAAA,IAAA,GAAO,IACT,CAWO,OAAAD,EAAA,UAAA,SAAP,SAAgBK,EAAWC,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,GAClB,IACT,EACFN,CAAA,EAjB+BO,EAAY,ECHpC,IAAMC,GAAqC,CAGhD,YAAA,SAAYC,EAAqBC,EAAgB,SAAEC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,EAAA,CAAA,EAAA,UAAAA,CAAA,EACzC,IAAAC,EAAaL,GAAgB,SACrC,OAAIK,GAAQ,MAARA,EAAU,YACLA,EAAS,YAAW,MAApBA,EAAQC,EAAA,CAAaL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,EAEhD,YAAW,MAAA,OAAAG,EAAA,CAACL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,CAC9C,EACA,cAAA,SAAcK,EAAM,CACV,IAAAH,EAAaL,GAAgB,SACrC,QAAQK,GAAQ,KAAA,OAARA,EAAU,gBAAiB,eAAeG,CAAa,CACjE,EACA,SAAU,QCrBZ,IAAAC,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAOlC,SAAAD,EAAsBG,EAAqCC,EAAmD,CAA9G,IAAAC,EACEJ,EAAA,KAAA,KAAME,EAAWC,CAAI,GAAC,KADF,OAAAC,EAAA,UAAAF,EAAqCE,EAAA,KAAAD,EAFjDC,EAAA,QAAmB,IAI7B,CAEO,OAAAL,EAAA,UAAA,SAAP,SAAgBM,EAAWC,EAAiB,OAC1C,GADyBA,IAAA,SAAAA,EAAA,GACrB,KAAK,OACP,OAAO,KAIT,KAAK,MAAQD,EAEb,IAAME,EAAK,KAAK,GACVL,EAAY,KAAK,UAuBvB,OAAIK,GAAM,OACR,KAAK,GAAK,KAAK,eAAeL,EAAWK,EAAID,CAAK,GAKpD,KAAK,QAAU,GAEf,KAAK,MAAQA,EAEb,KAAK,IAAKE,EAAA,KAAK,MAAE,MAAAA,IAAA,OAAAA,EAAI,KAAK,eAAeN,EAAW,KAAK,GAAII,CAAK,EAE3D,IACT,EAEUP,EAAA,UAAA,eAAV,SAAyBG,EAA2BO,EAAmBH,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,GAC9DI,GAAiB,YAAYR,EAAU,MAAM,KAAKA,EAAW,IAAI,EAAGI,CAAK,CAClF,EAEUP,EAAA,UAAA,eAAV,SAAyBY,EAA4BJ,EAAkBD,EAAwB,CAE7F,GAFqEA,IAAA,SAAAA,EAAA,GAEjEA,GAAS,MAAQ,KAAK,QAAUA,GAAS,KAAK,UAAY,GAC5D,OAAOC,EAILA,GAAM,MACRG,GAAiB,cAAcH,CAAE,CAIrC,EAMOR,EAAA,UAAA,QAAP,SAAeM,EAAUC,EAAa,CACpC,GAAI,KAAK,OACP,OAAO,IAAI,MA
AM,8BAA8B,EAGjD,KAAK,QAAU,GACf,IAAMM,EAAQ,KAAK,SAASP,EAAOC,CAAK,EACxC,GAAIM,EACF,OAAOA,EACE,KAAK,UAAY,IAAS,KAAK,IAAM,OAc9C,KAAK,GAAK,KAAK,eAAe,KAAK,UAAW,KAAK,GAAI,IAAI,EAE/D,EAEUb,EAAA,UAAA,SAAV,SAAmBM,EAAUQ,EAAc,CACzC,IAAIC,EAAmB,GACnBC,EACJ,GAAI,CACF,KAAK,KAAKV,CAAK,QACRW,EAAG,CACVF,EAAU,GAIVC,EAAaC,GAAQ,IAAI,MAAM,oCAAoC,EAErE,GAAIF,EACF,YAAK,YAAW,EACTC,CAEX,EAEAhB,EAAA,UAAA,YAAA,UAAA,CACE,GAAI,CAAC,KAAK,OAAQ,CACV,IAAAS,EAAoB,KAAlBD,EAAEC,EAAA,GAAEN,EAASM,EAAA,UACbS,EAAYf,EAAS,QAE7B,KAAK,KAAO,KAAK,MAAQ,KAAK,UAAY,KAC1C,KAAK,QAAU,GAEfgB,GAAUD,EAAS,IAAI,EACnBV,GAAM,OACR,KAAK,GAAK,KAAK,eAAeL,EAAWK,EAAI,IAAI,GAGnD,KAAK,MAAQ,KACbP,EAAA,UAAM,YAAW,KAAA,IAAA,EAErB,EACFD,CAAA,EA9IoCoB,EAAM,ECgB1C,IAAAC,GAAA,UAAA,CAGE,SAAAA,EAAoBC,EAAoCC,EAAiC,CAAjCA,IAAA,SAAAA,EAAoBF,EAAU,KAAlE,KAAA,oBAAAC,EAClB,KAAK,IAAMC,CACb,CA6BO,OAAAF,EAAA,UAAA,SAAP,SAAmBG,EAAqDC,EAAmBC,EAAS,CAA5B,OAAAD,IAAA,SAAAA,EAAA,GAC/D,IAAI,KAAK,oBAAuB,KAAMD,CAAI,EAAE,SAASE,EAAOD,CAAK,CAC1E,EAnCcJ,EAAA,IAAoBM,GAAsB,IAoC1DN,GArCA,ECnBA,IAAAO,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAkBlC,SAAAD,EAAYG,EAAgCC,EAAiC,CAAjCA,IAAA,SAAAA,EAAoBC,GAAU,KAA1E,IAAAC,EACEL,EAAA,KAAA,KAAME,EAAiBC,CAAG,GAAC,KAlBtB,OAAAE,EAAA,QAAmC,CAAA,EAOnCA,EAAA,QAAmB,IAY1B,CAEO,OAAAN,EAAA,UAAA,MAAP,SAAaO,EAAwB,CAC3B,IAAAC,EAAY,KAAI,QAExB,GAAI,KAAK,QAAS,CAChBA,EAAQ,KAAKD,CAAM,EACnB,OAGF,IAAIE,EACJ,KAAK,QAAU,GAEf,EACE,IAAKA,EAAQF,EAAO,QAAQA,EAAO,MAAOA,EAAO,KAAK,EACpD,YAEMA,EAASC,EAAQ,MAAK,GAIhC,GAFA,KAAK,QAAU,GAEXC,EAAO,CACT,KAAQF,EAASC,EAAQ,MAAK,GAC5BD,EAAO,YAAW,EAEpB,MAAME,EAEV,EACFT,CAAA,EAhDoCK,EAAS,EC6CtC,IAAMK,GAAiB,IAAIC,GAAeC,EAAW,EAK/CC,GAAQH,GCjDrB,IAAAI,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAClC,SAAAD,EAAsBG,EAAqCC,EAAmD,CAA9G,IAAAC,EACEJ,EAAA,KAAA,KAAME,EAAWC,CAAI,GAAC,KADF,OAAAC,EAAA,UAAAF,EAAqCE,EAAA,KAAAD,GAE3D,CAEO,OAAAJ,EAAA,UAAA,SAAP,SAAgBM,EAAWC,EAAiB,CAC1C,OADyBA,IAAA,SAAAA,EAAA,GACrBA,EAAQ,EACHN,EAAA,UAAM,SAAQ,KAAA,KAACK,EAAOC,CAAK,GAEpC,KAAK,MAAQA,EACb,KAAK,MAAQD,EACb,KAAK,UAAU,MAAM,IAAI,EAClB,KACT,EAEON,EAAA,UAAA,QAAP,SAAeM,EAAUC,EAAa,CACpC,OAAOA,EAAQ,GAAK,KAAK,OAASN,EAAA,UAAM,QAAO,KAAA,KAACK,EAAOC,CAAK,EAAI,KAAK,SAASD,EAAOC,CAAK,CAC5F,EAEUP,EAAA,UAAA,eAAV,SAAyBG,EAA2BK,EAAkBD,EAAiB,CAKrF,OALoEA,IAAA,SAAAA,EAAA,GAK/DA,GAAS,MAAQA,EAAQ,GAAOA,GAAS,MAAQ,KAAK,MAAQ,EAC1DN,EAAA,UAAM,eAAc,KAAA,KAACE,EAAWK,EAAID,CAAK,GAIlDJ,EAAU,MAAM,IAAI,EAMb,EACT,EACFH,CAAA,EArCoCS,EAAW,ECJ/C,IAAAC,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAApC,SAAAD,GAAA,+CACA,CAAA,OAAAA,CAAA,EADoCG,EAAc,ECgE3C,IAAMC,GAAiB,IAAIC,GAAeC,EAAW,EC5D5D,IAAAC,GAAA,SAAAC,EAAA,CAA6CC,GAAAF,EAAAC,CAAA,EAC3C,SAAAD,EAAsBG,EAA8CC,EAAmD,CAAvH,IAAAC,EACEJ,EAAA,KAAA,KAAME,EAAWC,CAAI,GAAC,KADF,OAAAC,EAAA,UAAAF,EAA8CE,EAAA,KAAAD,GAEpE,CAEU,OAAAJ,EAAA,UAAA,eAAV,SAAyBG,EAAoCG,EAAkBC,EAAiB,CAE9F,OAF6EA,IAAA,SAAAA,EAAA,GAEzEA,IAAU,MAAQA,EAAQ,EACrBN,EAAA,UAAM,eAAc,KAAA,KAACE,EAAWG,EAAIC,CAAK,GAGlDJ,EAAU,QAAQ,KAAK,IAAI,EAIpBA,EAAU,aAAeA,EAAU,WAAaK,GAAuB,sBAAsB,UAAA,CAAM,OAAAL,EAAU,MAAM,MAAS,CAAzB,CAA0B,GACtI,EAEUH,EAAA,UAAA,eAAV,SAAyBG,EAAoCG,EAAkBC,EAAiB,OAI9F,GAJ6EA,IAAA,SAAAA,EAAA,GAIzEA,GAAS,KAAOA,EAAQ,EAAI,KAAK,MAAQ,EAC3C,OAAON,EAAA,UAAM,eAAc,KAAA,KAACE,EAAWG,EAAIC,CAAK,EAK1C,IAAAE,EAAYN,EAAS,QACzBG,GAAM,QAAQI,EAAAD,EAAQA,EAAQ,OAAS,CAAC,KAAC,MAAAC,IAAA,OAAA,OAAAA,EAAE,MAAOJ,IACpDE,GAAuB,qBAAqBF,CAAY,EACxDH,EAAU,WAAa,OAI3B,EACFH,CAAA,EApC6CW,EAAW,ECHxD,IAAAC,GAAA,SAAAC,EAAA,CAA6CC,GAAAF,EAAAC,CAAA,EAA7C,SAAAD,GAAA,+CAkCA,CAjCS,OAAAA,EAAA,UAAA,MAAP,SAAaG,EAAyB,CACpC,KAAK,QAAU,GAUf,IAAMC,EAAU,KAAK,WACrB,KAAK,WAAa,OAEV,IAAAC,EAAY,KAAI,QACpBC,EACJH,EAASA,GAAUE,EAAQ,MAAK,EAEhC,EACE,IAAKC,EAAQH,EAAO,QAAQA,EAAO
,MAAOA,EAAO,KAAK,EACpD,aAEMA,EAASE,EAAQ,CAAC,IAAMF,EAAO,KAAOC,GAAWC,EAAQ,MAAK,GAIxE,GAFA,KAAK,QAAU,GAEXC,EAAO,CACT,MAAQH,EAASE,EAAQ,CAAC,IAAMF,EAAO,KAAOC,GAAWC,EAAQ,MAAK,GACpEF,EAAO,YAAW,EAEpB,MAAMG,EAEV,EACFN,CAAA,EAlC6CO,EAAc,ECgCpD,IAAMC,GAA0B,IAAIC,GAAwBC,EAAoB,EC8BhF,IAAMC,EAAQ,IAAIC,EAAkB,SAACC,EAAU,CAAK,OAAAA,EAAW,SAAQ,CAAnB,CAAqB,EC9D1E,SAAUC,GAAYC,EAAU,CACpC,OAAOA,GAASC,EAAWD,EAAM,QAAQ,CAC3C,CCDA,SAASE,GAAQC,EAAQ,CACvB,OAAOA,EAAIA,EAAI,OAAS,CAAC,CAC3B,CAEM,SAAUC,GAAkBC,EAAW,CAC3C,OAAOC,EAAWJ,GAAKG,CAAI,CAAC,EAAIA,EAAK,IAAG,EAAK,MAC/C,CAEM,SAAUE,GAAaF,EAAW,CACtC,OAAOG,GAAYN,GAAKG,CAAI,CAAC,EAAIA,EAAK,IAAG,EAAK,MAChD,CAEM,SAAUI,GAAUJ,EAAaK,EAAoB,CACzD,OAAO,OAAOR,GAAKG,CAAI,GAAM,SAAWA,EAAK,IAAG,EAAMK,CACxD,CClBO,IAAMC,GAAe,SAAIC,EAAM,CAAwB,OAAAA,GAAK,OAAOA,EAAE,QAAW,UAAY,OAAOA,GAAM,UAAlD,ECMxD,SAAUC,GAAUC,EAAU,CAClC,OAAOC,EAAWD,GAAK,KAAA,OAALA,EAAO,IAAI,CAC/B,CCHM,SAAUE,GAAoBC,EAAU,CAC5C,OAAOC,EAAWD,EAAME,EAAiB,CAAC,CAC5C,CCLM,SAAUC,GAAmBC,EAAQ,CACzC,OAAO,OAAO,eAAiBC,EAAWD,GAAG,KAAA,OAAHA,EAAM,OAAO,aAAa,CAAC,CACvE,CCAM,SAAUE,GAAiCC,EAAU,CAEzD,OAAO,IAAI,UACT,iBACEA,IAAU,MAAQ,OAAOA,GAAU,SAAW,oBAAsB,IAAIA,EAAK,KAAG,0HACwC,CAE9H,CCXM,SAAUC,IAAiB,CAC/B,OAAI,OAAO,QAAW,YAAc,CAAC,OAAO,SACnC,aAGF,OAAO,QAChB,CAEO,IAAMC,GAAWD,GAAiB,ECJnC,SAAUE,GAAWC,EAAU,CACnC,OAAOC,EAAWD,GAAK,KAAA,OAALA,EAAQE,EAAe,CAAC,CAC5C,CCHM,SAAiBC,GAAsCC,EAAqC,mGAC1FC,EAASD,EAAe,UAAS,2DAGX,MAAA,CAAA,EAAAE,GAAMD,EAAO,KAAI,CAAE,CAAA,gBAArCE,EAAkBC,EAAA,KAAA,EAAhBC,EAAKF,EAAA,MAAEG,EAAIH,EAAA,KACfG,iBAAA,CAAA,EAAA,CAAA,SACF,MAAA,CAAA,EAAAF,EAAA,KAAA,CAAA,qBAEIC,CAAM,CAAA,SAAZ,MAAA,CAAA,EAAAD,EAAA,KAAA,CAAA,SAAA,OAAAA,EAAA,KAAA,mCAGF,OAAAH,EAAO,YAAW,6BAIhB,SAAUM,GAAwBC,EAAQ,CAG9C,OAAOC,EAAWD,GAAG,KAAA,OAAHA,EAAK,SAAS,CAClC,CCPM,SAAUE,EAAaC,EAAyB,CACpD,GAAIA,aAAiBC,EACnB,OAAOD,EAET,GAAIA,GAAS,KAAM,CACjB,GAAIE,GAAoBF,CAAK,EAC3B,OAAOG,GAAsBH,CAAK,EAEpC,GAAII,GAAYJ,CAAK,EACnB,OAAOK,GAAcL,CAAK,EAE5B,GAAIM,GAAUN,CAAK,EACjB,OAAOO,GAAYP,CAAK,EAE1B,GAAIQ,GAAgBR,CAAK,EACvB,OAAOS,GAAkBT,CAAK,EAEhC,GAAIU,GAAWV,CAAK,EAClB,OAAOW,GAAaX,CAAK,EAE3B,GAAIY,GAAqBZ,CAAK,EAC5B,OAAOa,GAAuBb,CAAK,EAIvC,MAAMc,GAAiCd,CAAK,CAC9C,CAMM,SAAUG,GAAyBY,EAAQ,CAC/C,OAAO,IAAId,EAAW,SAACe,EAAyB,CAC9C,IAAMC,EAAMF,EAAIG,EAAiB,EAAC,EAClC,GAAIC,EAAWF,EAAI,SAAS,EAC1B,OAAOA,EAAI,UAAUD,CAAU,EAGjC,MAAM,IAAI,UAAU,gEAAgE,CACtF,CAAC,CACH,CASM,SAAUX,GAAiBe,EAAmB,CAClD,OAAO,IAAInB,EAAW,SAACe,EAAyB,CAU9C,QAASK,EAAI,EAAGA,EAAID,EAAM,QAAU,CAACJ,EAAW,OAAQK,IACtDL,EAAW,KAAKI,EAAMC,CAAC,CAAC,EAE1BL,EAAW,SAAQ,CACrB,CAAC,CACH,CAEM,SAAUT,GAAee,EAAuB,CACpD,OAAO,IAAIrB,EAAW,SAACe,EAAyB,CAC9CM,EACG,KACC,SAACC,EAAK,CACCP,EAAW,SACdA,EAAW,KAAKO,CAAK,EACrBP,EAAW,SAAQ,EAEvB,EACA,SAACQ,EAAQ,CAAK,OAAAR,EAAW,MAAMQ,CAAG,CAApB,CAAqB,EAEpC,KAAK,KAAMC,EAAoB,CACpC,CAAC,CACH,CAEM,SAAUd,GAAgBe,EAAqB,CACnD,OAAO,IAAIzB,EAAW,SAACe,EAAyB,aAC9C,QAAoBW,EAAAC,GAAAF,CAAQ,EAAAG,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAzB,IAAMJ,EAAKM,EAAA,MAEd,GADAb,EAAW,KAAKO,CAAK,EACjBP,EAAW,OACb,yGAGJA,EAAW,SAAQ,CACrB,CAAC,CACH,CAEM,SAAUP,GAAqBqB,EAA+B,CAClE,OAAO,IAAI7B,EAAW,SAACe,EAAyB,CAC9Ce,GAAQD,EAAed,CAAU,EAAE,MAAM,SAACQ,EAAG,CAAK,OAAAR,EAAW,MAAMQ,CAAG,CAApB,CAAqB,CACzE,CAAC,CACH,CAEM,SAAUX,GAA0BmB,EAAqC,CAC7E,OAAOvB,GAAkBwB,GAAmCD,CAAc,CAAC,CAC7E,CAEA,SAAeD,GAAWD,EAAiCd,EAAyB,uIACxDkB,EAAAC,GAAAL,CAAa,gFAIrC,GAJeP,EAAKa,EAAA,MACpBpB,EAAW,KAAKO,CAAK,EAGjBP,EAAW,OACb,MAAA,CAAA,CAAA,6RAGJ,OAAAA,EAAW,SAAQ,WChHf,SAAUqB,GACdC,EACAC,EACAC,EACAC,EACAC,EAAc,CADdD,IAAA,SAAAA,EAAA,GACAC,IAAA,SAAAA,EAAA,IAEA,IAAMC,EAAuBJ,EAAU,SAAS,UAAA,CAC9CC,EAAI,EACAE,EACFJ,EAAmB,IAAI,KAAK,SAAS,KAAMG,CAAK,CAAC,EAEjD,KAAK,YAA
W,CAEpB,EAAGA,CAAK,EAIR,GAFAH,EAAmB,IAAIK,CAAoB,EAEvC,CAACD,EAKH,OAAOC,CAEX,CCeM,SAAUC,GAAaC,EAA0BC,EAAS,CAAT,OAAAA,IAAA,SAAAA,EAAA,GAC9CC,EAAQ,SAACC,EAAQC,EAAU,CAChCD,EAAO,UACLE,EACED,EACA,SAACE,EAAK,CAAK,OAAAC,GAAgBH,EAAYJ,EAAW,UAAA,CAAM,OAAAI,EAAW,KAAKE,CAAK,CAArB,EAAwBL,CAAK,CAA1E,EACX,UAAA,CAAM,OAAAM,GAAgBH,EAAYJ,EAAW,UAAA,CAAM,OAAAI,EAAW,SAAQ,CAAnB,EAAuBH,CAAK,CAAzE,EACN,SAACO,EAAG,CAAK,OAAAD,GAAgBH,EAAYJ,EAAW,UAAA,CAAM,OAAAI,EAAW,MAAMI,CAAG,CAApB,EAAuBP,CAAK,CAAzE,CAA0E,CACpF,CAEL,CAAC,CACH,CCPM,SAAUQ,GAAeC,EAA0BC,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,GAChDC,EAAQ,SAACC,EAAQC,EAAU,CAChCA,EAAW,IAAIJ,EAAU,SAAS,UAAA,CAAM,OAAAG,EAAO,UAAUC,CAAU,CAA3B,EAA8BH,CAAK,CAAC,CAC9E,CAAC,CACH,CC7DM,SAAUI,GAAsBC,EAA6BC,EAAwB,CACzF,OAAOC,EAAUF,CAAK,EAAE,KAAKG,GAAYF,CAAS,EAAGG,GAAUH,CAAS,CAAC,CAC3E,CCFM,SAAUI,GAAmBC,EAAuBC,EAAwB,CAChF,OAAOC,EAAUF,CAAK,EAAE,KAAKG,GAAYF,CAAS,EAAGG,GAAUH,CAAS,CAAC,CAC3E,CCJM,SAAUI,GAAiBC,EAAqBC,EAAwB,CAC5E,OAAO,IAAIC,EAAc,SAACC,EAAU,CAElC,IAAIC,EAAI,EAER,OAAOH,EAAU,SAAS,UAAA,CACpBG,IAAMJ,EAAM,OAGdG,EAAW,SAAQ,GAInBA,EAAW,KAAKH,EAAMI,GAAG,CAAC,EAIrBD,EAAW,QACd,KAAK,SAAQ,EAGnB,CAAC,CACH,CAAC,CACH,CCfM,SAAUE,GAAoBC,EAAoBC,EAAwB,CAC9E,OAAO,IAAIC,EAAc,SAACC,EAAU,CAClC,IAAIC,EAKJ,OAAAC,GAAgBF,EAAYF,EAAW,UAAA,CAErCG,EAAYJ,EAAcI,EAAe,EAAC,EAE1CC,GACEF,EACAF,EACA,UAAA,OACMK,EACAC,EACJ,GAAI,CAEDC,EAAkBJ,EAAS,KAAI,EAA7BE,EAAKE,EAAA,MAAED,EAAIC,EAAA,WACPC,EAAK,CAEZN,EAAW,MAAMM,CAAG,EACpB,OAGEF,EAKFJ,EAAW,SAAQ,EAGnBA,EAAW,KAAKG,CAAK,CAEzB,EACA,EACA,EAAI,CAER,CAAC,EAMM,UAAA,CAAM,OAAAI,EAAWN,GAAQ,KAAA,OAARA,EAAU,MAAM,GAAKA,EAAS,OAAM,CAA/C,CACf,CAAC,CACH,CCvDM,SAAUO,GAAyBC,EAAyBC,EAAwB,CACxF,GAAI,CAACD,EACH,MAAM,IAAI,MAAM,yBAAyB,EAE3C,OAAO,IAAIE,EAAc,SAACC,EAAU,CAClCC,GAAgBD,EAAYF,EAAW,UAAA,CACrC,IAAMI,EAAWL,EAAM,OAAO,aAAa,EAAC,EAC5CI,GACED,EACAF,EACA,UAAA,CACEI,EAAS,KAAI,EAAG,KAAK,SAACC,EAAM,CACtBA,EAAO,KAGTH,EAAW,SAAQ,EAEnBA,EAAW,KAAKG,EAAO,KAAK,CAEhC,CAAC,CACH,EACA,EACA,EAAI,CAER,CAAC,CACH,CAAC,CACH,CCzBM,SAAUC,GAA8BC,EAA8BC,EAAwB,CAClG,OAAOC,GAAsBC,GAAmCH,CAAK,EAAGC,CAAS,CACnF,CCoBM,SAAUG,GAAaC,EAA2BC,EAAwB,CAC9E,GAAID,GAAS,KAAM,CACjB,GAAIE,GAAoBF,CAAK,EAC3B,OAAOG,GAAmBH,EAAOC,CAAS,EAE5C,GAAIG,GAAYJ,CAAK,EACnB,OAAOK,GAAcL,EAAOC,CAAS,EAEvC,GAAIK,GAAUN,CAAK,EACjB,OAAOO,GAAgBP,EAAOC,CAAS,EAEzC,GAAIO,GAAgBR,CAAK,EACvB,OAAOS,GAAsBT,EAAOC,CAAS,EAE/C,GAAIS,GAAWV,CAAK,EAClB,OAAOW,GAAiBX,EAAOC,CAAS,EAE1C,GAAIW,GAAqBZ,CAAK,EAC5B,OAAOa,GAA2Bb,EAAOC,CAAS,EAGtD,MAAMa,GAAiCd,CAAK,CAC9C,CCoDM,SAAUe,GAAQC,EAA2BC,EAAyB,CAC1E,OAAOA,EAAYC,GAAUF,EAAOC,CAAS,EAAIE,EAAUH,CAAK,CAClE,CCxBM,SAAUI,GAAE,SAAIC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACpB,IAAMC,EAAYC,GAAaH,CAAI,EACnC,OAAOI,GAAKJ,EAAaE,CAAS,CACpC,CCsCM,SAAUG,GAAWC,EAA0BC,EAAyB,CAC5E,IAAMC,EAAeC,EAAWH,CAAmB,EAAIA,EAAsB,UAAA,CAAM,OAAAA,CAAA,EAC7EI,EAAO,SAACC,EAA6B,CAAK,OAAAA,EAAW,MAAMH,EAAY,CAAE,CAA/B,EAChD,OAAO,IAAII,EAAWL,EAAY,SAACI,EAAU,CAAK,OAAAJ,EAAU,SAASG,EAAa,EAAGC,CAAU,CAA7C,EAAiDD,CAAI,CACzG,CCpGO,IAAMG,GAA6BC,GAAiB,SAACC,EAAM,CAAK,OAAA,UAAuB,CAC5FA,EAAO,IAAI,EACX,KAAK,KAAO,aACZ,KAAK,QAAU,yBACjB,CAJuE,CAItE,ECrBK,SAAUC,GAAYC,EAAU,CACpC,OAAOA,aAAiB,MAAQ,CAAC,MAAMA,CAAY,CACrD,CCsCM,SAAUC,EAAUC,EAAyCC,EAAa,CAC9E,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAEhC,IAAIC,EAAQ,EAGZF,EAAO,UACLG,EAAyBF,EAAY,SAACG,EAAQ,CAG5CH,EAAW,KAAKJ,EAAQ,KAAKC,EAASM,EAAOF,GAAO,CAAC,CACvD,CAAC,CAAC,CAEN,CAAC,CACH,CC1DQ,IAAAG,GAAY,MAAK,QAEzB,SAASC,GAAkBC,EAA6BC,EAAW,CAC/D,OAAOH,GAAQG,CAAI,EAAID,EAAE,MAAA,OAAAE,EAAA,CAAA,EAAAC,EAAIF,CAAI,CAAA,CAAA,EAAID,EAAGC,CAAI,CAChD,CAMM,SAAUG,GAAuBJ,EAA2B,CAC9D,OAAOK,EAAI,SAAAJ,EAAI,CAAI,OAAAF,GAAYC,EAAIC,CAAI,CA
ApB,CAAqB,CAC5C,CCfQ,IAAAK,GAAY,MAAK,QACjBC,GAA0D,OAAM,eAArCC,GAA+B,OAAM,UAAlBC,GAAY,OAAM,KAQlE,SAAUC,GAAqDC,EAAuB,CAC1F,GAAIA,EAAK,SAAW,EAAG,CACrB,IAAMC,EAAQD,EAAK,CAAC,EACpB,GAAIL,GAAQM,CAAK,EACf,MAAO,CAAE,KAAMA,EAAO,KAAM,IAAI,EAElC,GAAIC,GAAOD,CAAK,EAAG,CACjB,IAAME,EAAOL,GAAQG,CAAK,EAC1B,MAAO,CACL,KAAME,EAAK,IAAI,SAACC,EAAG,CAAK,OAAAH,EAAMG,CAAG,CAAT,CAAU,EAClC,KAAID,IAKV,MAAO,CAAE,KAAMH,EAAa,KAAM,IAAI,CACxC,CAEA,SAASE,GAAOG,EAAQ,CACtB,OAAOA,GAAO,OAAOA,GAAQ,UAAYT,GAAeS,CAAG,IAAMR,EACnE,CC7BM,SAAUS,GAAaC,EAAgBC,EAAa,CACxD,OAAOD,EAAK,OAAO,SAACE,EAAQC,EAAKC,EAAC,CAAK,OAAEF,EAAOC,CAAG,EAAIF,EAAOG,CAAC,EAAIF,CAA5B,EAAqC,CAAA,CAAS,CACvF,CCsMM,SAAUG,GAAa,SAAoCC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAC/D,IAAMC,EAAYC,GAAaH,CAAI,EAC7BI,EAAiBC,GAAkBL,CAAI,EAEvCM,EAA8BC,GAAqBP,CAAI,EAA/CQ,EAAWF,EAAA,KAAEG,EAAIH,EAAA,KAE/B,GAAIE,EAAY,SAAW,EAIzB,OAAOE,GAAK,CAAA,EAAIR,CAAgB,EAGlC,IAAMS,EAAS,IAAIC,EACjBC,GACEL,EACAN,EACAO,EAEI,SAACK,EAAM,CAAK,OAAAC,GAAaN,EAAMK,CAAM,CAAzB,EAEZE,EAAQ,CACb,EAGH,OAAOZ,EAAkBO,EAAO,KAAKM,GAAiBb,CAAc,CAAC,EAAsBO,CAC7F,CAEM,SAAUE,GACdL,EACAN,EACAgB,EAAiD,CAAjD,OAAAA,IAAA,SAAAA,EAAAF,IAEO,SAACG,EAA2B,CAGjCC,GACElB,EACA,UAAA,CAaE,QAZQmB,EAAWb,EAAW,OAExBM,EAAS,IAAI,MAAMO,CAAM,EAG3BC,EAASD,EAITE,EAAuBF,aAGlBG,EAAC,CACRJ,GACElB,EACA,UAAA,CACE,IAAMuB,EAASf,GAAKF,EAAYgB,CAAC,EAAGtB,CAAgB,EAChDwB,EAAgB,GACpBD,EAAO,UACLE,EACER,EACA,SAACS,EAAK,CAEJd,EAAOU,CAAC,EAAII,EACPF,IAEHA,EAAgB,GAChBH,KAEGA,GAGHJ,EAAW,KAAKD,EAAeJ,EAAO,MAAK,CAAE,CAAC,CAElD,EACA,UAAA,CACO,EAAEQ,GAGLH,EAAW,SAAQ,CAEvB,CAAC,CACF,CAEL,EACAA,CAAU,GAjCLK,EAAI,EAAGA,EAAIH,EAAQG,MAAnBA,CAAC,CAoCZ,EACAL,CAAU,CAEd,CACF,CAMA,SAASC,GAAclB,EAAsC2B,EAAqBC,EAA0B,CACtG5B,EACF6B,GAAgBD,EAAc5B,EAAW2B,CAAO,EAEhDA,EAAO,CAEX,CC3RM,SAAUG,GACdC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EAAgC,CAGhC,IAAMC,EAAc,CAAA,EAEhBC,EAAS,EAETC,EAAQ,EAERC,EAAa,GAKXC,EAAgB,UAAA,CAIhBD,GAAc,CAACH,EAAO,QAAU,CAACC,GACnCR,EAAW,SAAQ,CAEvB,EAGMY,EAAY,SAACC,EAAQ,CAAK,OAACL,EAASN,EAAaY,EAAWD,CAAK,EAAIN,EAAO,KAAKM,CAAK,CAA5D,EAE1BC,EAAa,SAACD,EAAQ,CAI1BT,GAAUJ,EAAW,KAAKa,CAAY,EAItCL,IAKA,IAAIO,GAAgB,GAGpBC,EAAUf,EAAQY,EAAOJ,GAAO,CAAC,EAAE,UACjCQ,EACEjB,EACA,SAACkB,GAAU,CAGTf,GAAY,MAAZA,EAAee,EAAU,EAErBd,EAGFQ,EAAUM,EAAiB,EAG3BlB,EAAW,KAAKkB,EAAU,CAE9B,EACA,UAAA,CAGEH,GAAgB,EAClB,EAEA,OACA,UAAA,CAIE,GAAIA,GAKF,GAAI,CAIFP,IAKA,sBACE,IAAMW,EAAgBZ,EAAO,MAAK,EAI9BF,EACFe,GAAgBpB,EAAYK,EAAmB,UAAA,CAAM,OAAAS,EAAWK,CAAa,CAAxB,CAAyB,EAE9EL,EAAWK,CAAa,GARrBZ,EAAO,QAAUC,EAASN,QAYjCS,EAAa,QACNU,EAAK,CACZrB,EAAW,MAAMqB,CAAG,EAG1B,CAAC,CACF,CAEL,EAGA,OAAAtB,EAAO,UACLkB,EAAyBjB,EAAYY,EAAW,UAAA,CAE9CF,EAAa,GACbC,EAAa,CACf,CAAC,CAAC,EAKG,UAAA,CACLL,GAAmB,MAAnBA,EAAmB,CACrB,CACF,CClEM,SAAUgB,GACdC,EACAC,EACAC,EAA6B,CAE7B,OAFAA,IAAA,SAAAA,EAAA,KAEIC,EAAWF,CAAc,EAEpBF,GAAS,SAACK,EAAGC,EAAC,CAAK,OAAAC,EAAI,SAACC,EAAQC,EAAU,CAAK,OAAAP,EAAeG,EAAGG,EAAGF,EAAGG,CAAE,CAA1B,CAA2B,EAAEC,EAAUT,EAAQI,EAAGC,CAAC,CAAC,CAAC,CAAjF,EAAoFH,CAAU,GAC/G,OAAOD,GAAmB,WACnCC,EAAaD,GAGRS,EAAQ,SAACC,EAAQC,EAAU,CAAK,OAAAC,GAAeF,EAAQC,EAAYZ,EAASE,CAAU,CAAtD,CAAuD,EAChG,CChCM,SAAUY,GAAyCC,EAA6B,CAA7B,OAAAA,IAAA,SAAAA,EAAA,KAChDC,GAASC,GAAUF,CAAU,CACtC,CCNM,SAAUG,IAAS,CACvB,OAAOC,GAAS,CAAC,CACnB,CCmDM,SAAUC,IAAM,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACrB,OAAOC,GAAS,EAAGC,GAAKH,EAAMI,GAAaJ,CAAI,CAAC,CAAC,CACnD,CC9DM,SAAUK,EAAsCC,EAA0B,CAC9E,OAAO,IAAIC,EAA+B,SAACC,EAAU,CACnDC,EAAUH,EAAiB,CAAE,EAAE,UAAUE,CAAU,CACrD,CAAC,CACH,CChDA,IAAME,GAA0B,CAAC,cAAe,gBAAgB,EAC1DC,GAAqB,CAAC,mBAAoB,qBAAqB,EAC/DC,GAAgB,CAAC,KAAM,KAAK,EAkO5B,SAAUC
,EACdC,EACAC,EACAC,EACAC,EAAsC,CAMtC,GAJIC,EAAWF,CAAO,IACpBC,EAAiBD,EACjBA,EAAU,QAERC,EACF,OAAOJ,EAAaC,EAAQC,EAAWC,CAA+B,EAAE,KAAKG,GAAiBF,CAAc,CAAC,EAUzG,IAAAG,EAAAC,EAEJC,GAAcR,CAAM,EAChBH,GAAmB,IAAI,SAACY,EAAU,CAAK,OAAA,SAACC,EAAY,CAAK,OAAAV,EAAOS,CAAU,EAAER,EAAWS,EAASR,CAA+B,CAAtE,CAAlB,CAAyF,EAElIS,GAAwBX,CAAM,EAC5BJ,GAAwB,IAAIgB,GAAwBZ,EAAQC,CAAS,CAAC,EACtEY,GAA0Bb,CAAM,EAChCF,GAAc,IAAIc,GAAwBZ,EAAQC,CAAS,CAAC,EAC5D,CAAA,EAAE,CAAA,EATDa,EAAGR,EAAA,CAAA,EAAES,EAAMT,EAAA,CAAA,EAgBlB,GAAI,CAACQ,GACCE,GAAYhB,CAAM,EACpB,OAAOiB,GAAS,SAACC,EAAc,CAAK,OAAAnB,EAAUmB,EAAWjB,EAAWC,CAA+B,CAA/D,CAAgE,EAClGiB,EAAUnB,CAAM,CAAC,EAOvB,GAAI,CAACc,EACH,MAAM,IAAI,UAAU,sBAAsB,EAG5C,OAAO,IAAIM,EAAc,SAACC,EAAU,CAIlC,IAAMX,EAAU,UAAA,SAACY,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAAmB,OAAAF,EAAW,KAAK,EAAIC,EAAK,OAASA,EAAOA,EAAK,CAAC,CAAC,CAAhD,EAEpC,OAAAR,EAAIJ,CAAO,EAEJ,UAAA,CAAM,OAAAK,EAAQL,CAAO,CAAf,CACf,CAAC,CACH,CASA,SAASE,GAAwBZ,EAAaC,EAAiB,CAC7D,OAAO,SAACQ,EAAkB,CAAK,OAAA,SAACC,EAAY,CAAK,OAAAV,EAAOS,CAAU,EAAER,EAAWS,CAAO,CAArC,CAAlB,CACjC,CAOA,SAASC,GAAwBX,EAAW,CAC1C,OAAOI,EAAWJ,EAAO,WAAW,GAAKI,EAAWJ,EAAO,cAAc,CAC3E,CAOA,SAASa,GAA0Bb,EAAW,CAC5C,OAAOI,EAAWJ,EAAO,EAAE,GAAKI,EAAWJ,EAAO,GAAG,CACvD,CAOA,SAASQ,GAAcR,EAAW,CAChC,OAAOI,EAAWJ,EAAO,gBAAgB,GAAKI,EAAWJ,EAAO,mBAAmB,CACrF,CCnMM,SAAUwB,GACdC,EACAC,EACAC,EAAsC,CAEtC,OAAIA,EACKH,GAAoBC,EAAYC,CAAa,EAAE,KAAKE,GAAiBD,CAAc,CAAC,EAGtF,IAAIE,EAAoB,SAACC,EAAU,CACxC,IAAMC,EAAU,UAAA,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAAc,OAAAH,EAAW,KAAKE,EAAE,SAAW,EAAIA,EAAE,CAAC,EAAIA,CAAC,CAAzC,EACzBE,EAAWT,EAAWM,CAAO,EACnC,OAAOI,EAAWT,CAAa,EAAI,UAAA,CAAM,OAAAA,EAAcK,EAASG,CAAQ,CAA/B,EAAmC,MAC9E,CAAC,CACH,CCtBM,SAAUE,GACdC,EACAC,EACAC,EAAyC,CAFzCF,IAAA,SAAAA,EAAA,GAEAE,IAAA,SAAAA,EAAAC,IAIA,IAAIC,EAAmB,GAEvB,OAAIH,GAAuB,OAIrBI,GAAYJ,CAAmB,EACjCC,EAAYD,EAIZG,EAAmBH,GAIhB,IAAIK,EAAW,SAACC,EAAU,CAI/B,IAAIC,EAAMC,GAAYT,CAAO,EAAI,CAACA,EAAUE,EAAW,IAAG,EAAKF,EAE3DQ,EAAM,IAERA,EAAM,GAIR,IAAIE,EAAI,EAGR,OAAOR,EAAU,SAAS,UAAA,CACnBK,EAAW,SAEdA,EAAW,KAAKG,GAAG,EAEf,GAAKN,EAGP,KAAK,SAAS,OAAWA,CAAgB,EAGzCG,EAAW,SAAQ,EAGzB,EAAGC,CAAG,CACR,CAAC,CACH,CChGM,SAAUG,GAAK,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACpB,IAAMC,EAAYC,GAAaH,CAAI,EAC7BI,EAAaC,GAAUL,EAAM,GAAQ,EACrCM,EAAUN,EAChB,OAAQM,EAAQ,OAGZA,EAAQ,SAAW,EAEnBC,EAAUD,EAAQ,CAAC,CAAC,EAEpBE,GAASJ,CAAU,EAAEK,GAAKH,EAASJ,CAAS,CAAC,EAL7CQ,CAMN,CCjEO,IAAMC,GAAQ,IAAIC,EAAkBC,EAAI,ECpCvC,IAAAC,GAAY,MAAK,QAMnB,SAAUC,GAAkBC,EAAiB,CACjD,OAAOA,EAAK,SAAW,GAAKF,GAAQE,EAAK,CAAC,CAAC,EAAIA,EAAK,CAAC,EAAKA,CAC5D,CCoDM,SAAUC,EAAUC,EAAiDC,EAAa,CACtF,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAEhC,IAAIC,EAAQ,EAIZF,EAAO,UAILG,EAAyBF,EAAY,SAACG,EAAK,CAAK,OAAAP,EAAU,KAAKC,EAASM,EAAOF,GAAO,GAAKD,EAAW,KAAKG,CAAK,CAAhE,CAAiE,CAAC,CAEtH,CAAC,CACH,CCxBM,SAAUC,IAAG,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAClB,IAAMC,EAAiBC,GAAkBH,CAAI,EAEvCI,EAAUC,GAAeL,CAAI,EAEnC,OAAOI,EAAQ,OACX,IAAIE,EAAsB,SAACC,EAAU,CAGnC,IAAIC,EAAuBJ,EAAQ,IAAI,UAAA,CAAM,MAAA,CAAA,CAAA,CAAE,EAK3CK,EAAYL,EAAQ,IAAI,UAAA,CAAM,MAAA,EAAA,CAAK,EAGvCG,EAAW,IAAI,UAAA,CACbC,EAAUC,EAAY,IACxB,CAAC,EAKD,mBAASC,EAAW,CAClBC,EAAUP,EAAQM,CAAW,CAAC,EAAE,UAC9BE,EACEL,EACA,SAACM,EAAK,CAKJ,GAJAL,EAAQE,CAAW,EAAE,KAAKG,CAAK,EAI3BL,EAAQ,MAAM,SAACM,EAAM,CAAK,OAAAA,EAAO,MAAP,CAAa,EAAG,CAC5C,IAAMC,EAAcP,EAAQ,IAAI,SAACM,EAAM,CAAK,OAAAA,EAAO,MAAK,CAAZ,CAAe,EAE3DP,EAAW,KAAKL,EAAiBA,EAAc,MAAA,OAAAc,EAAA,CAAA,EAAAC,EAAIF,CAAM,CAAA,CAAA,EAAIA,CAAM,EAI/DP,EAAQ,KAAK,SAACM,EAAQI,EA
AC,CAAK,MAAA,CAACJ,EAAO,QAAUL,EAAUS,CAAC,CAA7B,CAA8B,GAC5DX,EAAW,SAAQ,EAGzB,EACA,UAAA,CAGEE,EAAUC,CAAW,EAAI,GAIzB,CAACF,EAAQE,CAAW,EAAE,QAAUH,EAAW,SAAQ,CACrD,CAAC,CACF,GA9BIG,EAAc,EAAG,CAACH,EAAW,QAAUG,EAAcN,EAAQ,OAAQM,MAArEA,CAAW,EAmCpB,OAAO,UAAA,CACLF,EAAUC,EAAY,IACxB,CACF,CAAC,EACDU,CACN,CC9DM,SAAUC,GAASC,EAAoD,CAC3E,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACXC,EAAsB,KACtBC,EAA6C,KAC7CC,EAAa,GAEXC,EAAc,UAAA,CAGlB,GAFAF,GAAkB,MAAlBA,EAAoB,YAAW,EAC/BA,EAAqB,KACjBF,EAAU,CACZA,EAAW,GACX,IAAMK,EAAQJ,EACdA,EAAY,KACZF,EAAW,KAAKM,CAAK,EAEvBF,GAAcJ,EAAW,SAAQ,CACnC,EAEMO,EAAkB,UAAA,CACtBJ,EAAqB,KACrBC,GAAcJ,EAAW,SAAQ,CACnC,EAEAD,EAAO,UACLS,EACER,EACA,SAACM,EAAK,CACJL,EAAW,GACXC,EAAYI,EACPH,GACHM,EAAUZ,EAAiBS,CAAK,CAAC,EAAE,UAChCH,EAAqBK,EAAyBR,EAAYK,EAAaE,CAAe,CAAE,CAG/F,EACA,UAAA,CACEH,EAAa,IACZ,CAACH,GAAY,CAACE,GAAsBA,EAAmB,SAAWH,EAAW,SAAQ,CACxF,CAAC,CACF,CAEL,CAAC,CACH,CC3CM,SAAUU,GAAaC,EAAkBC,EAAyC,CAAzC,OAAAA,IAAA,SAAAA,EAAAC,IACtCC,GAAM,UAAA,CAAM,OAAAC,GAAMJ,EAAUC,CAAS,CAAzB,CAA0B,CAC/C,CCEM,SAAUI,GAAeC,EAAoBC,EAAsC,CAAtC,OAAAA,IAAA,SAAAA,EAAA,MAGjDA,EAAmBA,GAAgB,KAAhBA,EAAoBD,EAEhCE,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAiB,CAAA,EACjBC,EAAQ,EAEZH,EAAO,UACLI,EACEH,EACA,SAACI,EAAK,aACAC,EAAuB,KAKvBH,IAAUL,IAAsB,GAClCI,EAAQ,KAAK,CAAA,CAAE,MAIjB,QAAqBK,EAAAC,GAAAN,CAAO,EAAAO,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAzB,IAAMG,EAAMD,EAAA,MACfC,EAAO,KAAKL,CAAK,EAMbR,GAAca,EAAO,SACvBJ,EAASA,GAAM,KAANA,EAAU,CAAA,EACnBA,EAAO,KAAKI,CAAM,uGAItB,GAAIJ,MAIF,QAAqBK,EAAAH,GAAAF,CAAM,EAAAM,GAAAD,EAAA,KAAA,EAAA,CAAAC,GAAA,KAAAA,GAAAD,EAAA,KAAA,EAAE,CAAxB,IAAMD,EAAME,GAAA,MACfC,GAAUX,EAASQ,CAAM,EACzBT,EAAW,KAAKS,CAAM,wGAG5B,EACA,UAAA,aAGE,QAAqBI,EAAAN,GAAAN,CAAO,EAAAa,EAAAD,EAAA,KAAA,EAAA,CAAAC,EAAA,KAAAA,EAAAD,EAAA,KAAA,EAAE,CAAzB,IAAMJ,EAAMK,EAAA,MACfd,EAAW,KAAKS,CAAM,oGAExBT,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEC,EAAU,IACZ,CAAC,CACF,CAEL,CAAC,CACH,CCbM,SAAUc,GACdC,EAAgD,CAEhD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAgC,KAChCC,EAAY,GACZC,EAEJF,EAAWF,EAAO,UAChBK,EAAyBJ,EAAY,OAAW,OAAW,SAACK,EAAG,CAC7DF,EAAgBG,EAAUT,EAASQ,EAAKT,GAAWC,CAAQ,EAAEE,CAAM,CAAC,CAAC,EACjEE,GACFA,EAAS,YAAW,EACpBA,EAAW,KACXE,EAAc,UAAUH,CAAU,GAIlCE,EAAY,EAEhB,CAAC,CAAC,EAGAA,IAMFD,EAAS,YAAW,EACpBA,EAAW,KACXE,EAAe,UAAUH,CAAU,EAEvC,CAAC,CACH,CC/HM,SAAUO,GACdC,EACAC,EACAC,EACAC,EACAC,EAAqC,CAErC,OAAO,SAACC,EAAuBC,EAA2B,CAIxD,IAAIC,EAAWL,EAIXM,EAAaP,EAEbQ,EAAQ,EAGZJ,EAAO,UACLK,EACEJ,EACA,SAACK,EAAK,CAEJ,IAAMC,EAAIH,IAEVD,EAAQD,EAEJP,EAAYQ,EAAOG,EAAOC,CAAC,GAIzBL,EAAW,GAAOI,GAGxBR,GAAcG,EAAW,KAAKE,CAAK,CACrC,EAGAJ,GACG,UAAA,CACCG,GAAYD,EAAW,KAAKE,CAAK,EACjCF,EAAW,SAAQ,CACrB,CAAE,CACL,CAEL,CACF,CCnCM,SAAUO,IAAa,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAClC,IAAMC,EAAiBC,GAAkBH,CAAI,EAC7C,OAAOE,EACHE,GAAKL,GAAa,MAAA,OAAAM,EAAA,CAAA,EAAAC,EAAKN,CAAoC,CAAA,CAAA,EAAGO,GAAiBL,CAAc,CAAC,EAC9FM,EAAQ,SAACC,EAAQC,EAAU,CACzBC,GAAiBN,EAAA,CAAEI,CAAM,EAAAH,EAAKM,GAAeZ,CAAI,CAAC,CAAA,CAAA,EAAGU,CAAU,CACjE,CAAC,CACP,CCUM,SAAUG,IAAiB,SAC/BC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAEA,OAAOC,GAAa,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIJ,CAAY,CAAA,CAAA,CACtC,CCkBM,SAAUK,GAAYC,EAAoD,CAC9E,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACXC,EAAsB,KAEtBC,EAA6C,KAE3CC,EAAO,UAAA,CAMX,GAFAD,GAAkB,MAAlBA,EAAoB,YAAW,EAC/BA,EAAqB,KACjBF,EAAU,CAEZA,EAAW,GACX,IAAMI,EAAQH,EACdA,EAAY,KACZF,EAAW,KAAKK,CAAK,EAEzB,EAEAN,EAAO,UACLO,EACEN,EACA,SAACK,EAAQ,CAIPF,GAAkB,MAAlBA,EAAoB,YAAW,EAC/BF,EAAW,GACXC,EAAYG,EAGZF,EAAqBG,EAAyBN,EAAYI,EAAMG,EAAI,EAEpEC,EAAUX,EAAiBQ,CAAK,CAAC,EAAE,UAAUF,CA
AkB,CACjE,EACA,UAAA,CAGEC,EAAI,EACJJ,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEE,EAAYC,EAAqB,IACnC,CAAC,CACF,CAEL,CAAC,CACH,CCvDM,SAAUM,GAAgBC,EAAiBC,EAAyC,CAAzC,OAAAA,IAAA,SAAAA,EAAAC,IACxCC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAkC,KAClCC,EAAsB,KACtBC,EAA0B,KAExBC,EAAO,UAAA,CACX,GAAIH,EAAY,CAEdA,EAAW,YAAW,EACtBA,EAAa,KACb,IAAMI,EAAQH,EACdA,EAAY,KACZF,EAAW,KAAKK,CAAK,EAEzB,EACA,SAASC,GAAY,CAInB,IAAMC,EAAaJ,EAAYR,EACzBa,EAAMZ,EAAU,IAAG,EACzB,GAAIY,EAAMD,EAAY,CAEpBN,EAAa,KAAK,SAAS,OAAWM,EAAaC,CAAG,EACtDR,EAAW,IAAIC,CAAU,EACzB,OAGFG,EAAI,CACN,CAEAL,EAAO,UACLU,EACET,EACA,SAACK,EAAQ,CACPH,EAAYG,EACZF,EAAWP,EAAU,IAAG,EAGnBK,IACHA,EAAaL,EAAU,SAASU,EAAcX,CAAO,EACrDK,EAAW,IAAIC,CAAU,EAE7B,EACA,UAAA,CAGEG,EAAI,EACJJ,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEE,EAAYD,EAAa,IAC3B,CAAC,CACF,CAEL,CAAC,CACH,CCpFM,SAAUS,GAAqBC,EAAe,CAClD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACfF,EAAO,UACLG,EACEF,EACA,SAACG,EAAK,CACJF,EAAW,GACXD,EAAW,KAAKG,CAAK,CACvB,EACA,UAAA,CACOF,GACHD,EAAW,KAAKH,CAAa,EAE/BG,EAAW,SAAQ,CACrB,CAAC,CACF,CAEL,CAAC,CACH,CCXM,SAAUI,GAAQC,EAAa,CACnC,OAAOA,GAAS,EAEZ,UAAA,CAAM,OAAAC,CAAA,EACNC,EAAQ,SAACC,EAAQC,EAAU,CACzB,IAAIC,EAAO,EACXF,EAAO,UACLG,EAAyBF,EAAY,SAACG,EAAK,CAIrC,EAAEF,GAAQL,IACZI,EAAW,KAAKG,CAAK,EAIjBP,GAASK,GACXD,EAAW,SAAQ,EAGzB,CAAC,CAAC,CAEN,CAAC,CACP,CC9BM,SAAUI,GAAc,CAC5B,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChCD,EAAO,UAAUE,EAAyBD,EAAYE,EAAI,CAAC,CAC7D,CAAC,CACH,CCCM,SAAUC,GAASC,EAAQ,CAC/B,OAAOC,EAAI,UAAA,CAAM,OAAAD,CAAA,CAAK,CACxB,CC4CM,SAAUE,GACdC,EACAC,EAAmC,CAEnC,OAAIA,EAEK,SAACC,EAAqB,CAC3B,OAAAC,GAAOF,EAAkB,KAAKG,GAAK,CAAC,EAAGC,EAAc,CAAE,EAAGH,EAAO,KAAKH,GAAUC,CAAqB,CAAC,CAAC,CAAvG,EAGGM,GAAS,SAACC,EAAOC,EAAK,CAAK,OAAAC,EAAUT,EAAsBO,EAAOC,CAAK,CAAC,EAAE,KAAKJ,GAAK,CAAC,EAAGM,GAAMH,CAAK,CAAC,CAAzE,CAA0E,CAC9G,CCzCM,SAAUI,GAASC,EAAoBC,EAAyC,CAAzCA,IAAA,SAAAA,EAAAC,IAC3C,IAAMC,EAAWC,GAAMJ,EAAKC,CAAS,EACrC,OAAOI,GAAU,UAAA,CAAM,OAAAF,CAAA,CAAQ,CACjC,CC0EM,SAAUG,EACdC,EACAC,EAA0D,CAA1D,OAAAA,IAAA,SAAAA,EAA+BC,IAK/BF,EAAaA,GAAU,KAAVA,EAAcG,GAEpBC,EAAQ,SAACC,EAAQC,EAAU,CAGhC,IAAIC,EAEAC,EAAQ,GAEZH,EAAO,UACLI,EAAyBH,EAAY,SAACI,EAAK,CAEzC,IAAMC,EAAaV,EAAYS,CAAK,GAKhCF,GAAS,CAACR,EAAYO,EAAaI,CAAU,KAM/CH,EAAQ,GACRD,EAAcI,EAGdL,EAAW,KAAKI,CAAK,EAEzB,CAAC,CAAC,CAEN,CAAC,CACH,CAEA,SAASP,GAAeS,EAAQC,EAAM,CACpC,OAAOD,IAAMC,CACf,CCjHM,SAAUC,EAA8CC,EAAQC,EAAuC,CAC3G,OAAOC,EAAqB,SAACC,EAAMC,EAAI,CAAK,OAAAH,EAAUA,EAAQE,EAAEH,CAAG,EAAGI,EAAEJ,CAAG,CAAC,EAAIG,EAAEH,CAAG,IAAMI,EAAEJ,CAAG,CAApD,CAAqD,CACnG,CC7BM,SAAUK,GAAgBC,EAA6C,CAA7C,OAAAA,IAAA,SAAAA,EAAAC,IACvBC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACfF,EAAO,UACLG,EACEF,EACA,SAACG,EAAK,CACJF,EAAW,GACXD,EAAW,KAAKG,CAAK,CACvB,EACA,UAAA,CAAM,OAACF,EAAWD,EAAW,SAAQ,EAAKA,EAAW,MAAMJ,EAAY,CAAE,CAAnE,CAAqE,CAC5E,CAEL,CAAC,CACH,CAEA,SAASC,IAAmB,CAC1B,OAAO,IAAIO,EACb,CCMM,SAAUC,IAAO,SAAIC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACzB,OAAO,SAACC,EAAqB,CAAK,OAAAC,GAAOD,EAAQE,EAAE,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIN,CAAM,CAAA,CAAA,CAAA,CAA3B,CACpC,CCHM,SAAUO,EAAYC,EAAoB,CAC9C,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAGhC,GAAI,CACFD,EAAO,UAAUC,CAAU,UAE3BA,EAAW,IAAIH,CAAQ,EAE3B,CAAC,CACH,CCMM,SAAUI,GACdC,EACAC,EAAgB,CAEhB,IAAMC,EAAkB,UAAU,QAAU,EAC5C,OAAO,SAACC,EAAqB,CAC3B,OAAAA,EAAO,KACLH,EAAYI,EAAO,SAACC,EAAG,EAAC,CAAK,OAAAL,EAAUK,EAAG,EAAGF,CAAM,CAAtB,CAAuB,EAAIG,GACxDC,GAAK,CAAC,EACNL,EAAkBM,GAAeP,CAAa,EAAIQ,GAAa,UAAA,CAAM,OAAA,IAAIC,EAAJ,CAAgB,CAAC,CAHxF,CAKJ,CC/CM,SAAUC,GAAYC,EAAa,CACvC,OAAOA,GAAS,EACZ,UAAA,CAAM,OAAAC,CAAA,EACNC,EAAQ,SAACC,EAAQC,EAAU,CAKzB,IAAIC,EAAc,CAAA,EAClBF,EAAO,UACLG,EACEF,EACA,SAACG,EAAK,CAEJF,EAAO,KAAKE,CAAK,EAGjBP,E
AAQK,EAAO,QAAUA,EAAO,MAAK,CACvC,EACA,UAAA,aAGE,QAAoBG,EAAAC,GAAAJ,CAAM,EAAAK,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAvB,IAAMD,EAAKG,EAAA,MACdN,EAAW,KAAKG,CAAK,oGAEvBH,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEC,EAAS,IACX,CAAC,CACF,CAEL,CAAC,CACP,CC1DM,SAAUM,IAAK,SAAIC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACvB,IAAMC,EAAYC,GAAaH,CAAI,EAC7BI,EAAaC,GAAUL,EAAM,GAAQ,EAC3C,OAAAA,EAAOM,GAAeN,CAAI,EAEnBO,EAAQ,SAACC,EAAQC,EAAU,CAChCC,GAASN,CAAU,EAAEO,GAAIC,EAAA,CAAEJ,CAAM,EAAAK,EAAMb,CAA6B,CAAA,EAAGE,CAAS,CAAC,EAAE,UAAUO,CAAU,CACzG,CAAC,CACH,CCcM,SAAUK,IAAS,SACvBC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAEA,OAAOC,GAAK,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIJ,CAAY,CAAA,CAAA,CAC9B,CCmEM,SAAUK,GAAUC,EAAqC,OACzDC,EAAQ,IACRC,EAEJ,OAAIF,GAAiB,OACf,OAAOA,GAAkB,UACxBG,EAA4BH,EAAa,MAAzCC,EAAKE,IAAA,OAAG,IAAQA,EAAED,EAAUF,EAAa,OAE5CC,EAAQD,GAILC,GAAS,EACZ,UAAA,CAAM,OAAAG,CAAA,EACNC,EAAQ,SAACC,EAAQC,EAAU,CACzB,IAAIC,EAAQ,EACRC,EAEEC,EAAc,UAAA,CAGlB,GAFAD,GAAS,MAATA,EAAW,YAAW,EACtBA,EAAY,KACRP,GAAS,KAAM,CACjB,IAAMS,EAAW,OAAOT,GAAU,SAAWU,GAAMV,CAAK,EAAIW,EAAUX,EAAMM,CAAK,CAAC,EAC5EM,EAAqBC,EAAyBR,EAAY,UAAA,CAC9DO,EAAmB,YAAW,EAC9BE,EAAiB,CACnB,CAAC,EACDL,EAAS,UAAUG,CAAkB,OAErCE,EAAiB,CAErB,EAEMA,EAAoB,UAAA,CACxB,IAAIC,EAAY,GAChBR,EAAYH,EAAO,UACjBS,EAAyBR,EAAY,OAAW,UAAA,CAC1C,EAAEC,EAAQP,EACRQ,EACFC,EAAW,EAEXO,EAAY,GAGdV,EAAW,SAAQ,CAEvB,CAAC,CAAC,EAGAU,GACFP,EAAW,CAEf,EAEAM,EAAiB,CACnB,CAAC,CACP,CCpFM,SAAUE,GAAcC,EAA6DC,EAAQ,CAMjG,OAAOC,EAAQC,GAAcH,EAAaC,EAAW,UAAU,QAAU,EAAG,EAAI,CAAC,CACnF,CC+CM,SAAUG,GAASC,EAA4B,CAA5BA,IAAA,SAAAA,EAAA,CAAA,GACf,IAAAC,EAAgHD,EAAO,UAAvHE,EAASD,IAAA,OAAG,UAAA,CAAM,OAAA,IAAIE,CAAJ,EAAgBF,EAAEG,EAA4EJ,EAAO,aAAnFK,EAAYD,IAAA,OAAG,GAAIA,EAAEE,EAAuDN,EAAO,gBAA9DO,EAAeD,IAAA,OAAG,GAAIA,EAAEE,EAA+BR,EAAO,oBAAtCS,EAAmBD,IAAA,OAAG,GAAIA,EAUnH,OAAO,SAACE,EAAa,CACnB,IAAIC,EACAC,EACAC,EACAC,EAAW,EACXC,EAAe,GACfC,EAAa,GAEXC,GAAc,UAAA,CAClBL,GAAe,MAAfA,EAAiB,YAAW,EAC5BA,EAAkB,MACpB,EAGMM,GAAQ,UAAA,CACZD,GAAW,EACXN,EAAaE,EAAU,OACvBE,EAAeC,EAAa,EAC9B,EACMG,EAAsB,UAAA,CAG1B,IAAMC,EAAOT,EACbO,GAAK,EACLE,GAAI,MAAJA,EAAM,YAAW,CACnB,EAEA,OAAOC,EAAc,SAACC,EAAQC,GAAU,CACtCT,IACI,CAACE,GAAc,CAACD,GAClBE,GAAW,EAOb,IAAMO,GAAQX,EAAUA,GAAO,KAAPA,EAAWX,EAAS,EAO5CqB,GAAW,IAAI,UAAA,CACbT,IAKIA,IAAa,GAAK,CAACE,GAAc,CAACD,IACpCH,EAAkBa,GAAYN,EAAqBV,CAAmB,EAE1E,CAAC,EAIDe,GAAK,UAAUD,EAAU,EAGvB,CAACZ,GAIDG,EAAW,IAOXH,EAAa,IAAIe,GAAe,CAC9B,KAAM,SAACC,GAAK,CAAK,OAAAH,GAAK,KAAKG,EAAK,CAAf,EACjB,MAAO,SAACC,GAAG,CACTZ,EAAa,GACbC,GAAW,EACXL,EAAkBa,GAAYP,GAAOb,EAAcuB,EAAG,EACtDJ,GAAK,MAAMI,EAAG,CAChB,EACA,SAAU,UAAA,CACRb,EAAe,GACfE,GAAW,EACXL,EAAkBa,GAAYP,GAAOX,CAAe,EACpDiB,GAAK,SAAQ,CACf,EACD,EACDK,EAAUP,CAAM,EAAE,UAAUX,CAAU,EAE1C,CAAC,EAAED,CAAa,CAClB,CACF,CAEA,SAASe,GACPP,EACAY,EAAoD,SACpDC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,EAAA,CAAA,EAAA,UAAAA,CAAA,EAEA,GAAIF,IAAO,GAAM,CACfZ,EAAK,EACL,OAGF,GAAIY,IAAO,GAIX,KAAMG,EAAe,IAAIP,GAAe,CACtC,KAAM,UAAA,CACJO,EAAa,YAAW,EACxBf,EAAK,CACP,EACD,EAED,OAAOW,EAAUC,EAAE,MAAA,OAAAI,EAAA,CAAA,EAAAC,EAAIJ,CAAI,CAAA,CAAA,CAAA,EAAG,UAAUE,CAAY,EACtD,CChHM,SAAUG,EACdC,EACAC,EACAC,EAAyB,WAErBC,EACAC,EAAW,GACf,OAAIJ,GAAsB,OAAOA,GAAuB,UACnDK,EAA8EL,EAAkB,WAAhGG,EAAUE,IAAA,OAAG,IAAQA,EAAEC,EAAuDN,EAAkB,WAAzEC,EAAUK,IAAA,OAAG,IAAQA,EAAEC,EAAgCP,EAAkB,SAAlDI,EAAQG,IAAA,OAAG,GAAKA,EAAEL,EAAcF,EAAkB,WAEnGG,EAAcH,GAAkB,KAAlBA,EAAsB,IAE/BQ,GAAS,CACd,UAAW,UAAA,CAAM,OAAA,IAAIC,GAAcN,EAAYF,EAAYC,CAAS,CAAnD,EACjB,aAAc,GACd,gBAAiB,GACjB,oBAAqBE,EACtB,CACH,CCxIM,SAAUM,GAAQC,EAAa,CACnC,OAAOC,EAAO,SAACC,EAAGC,E
AAK,CAAK,OAAAH,GAASG,CAAT,CAAc,CAC5C,CCaM,SAAUC,GAAaC,EAA8B,CACzD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAS,GAEPC,EAAiBC,EACrBH,EACA,UAAA,CACEE,GAAc,MAAdA,EAAgB,YAAW,EAC3BD,EAAS,EACX,EACAG,EAAI,EAGNC,EAAUR,CAAQ,EAAE,UAAUK,CAAc,EAE5CH,EAAO,UAAUI,EAAyBH,EAAY,SAACM,EAAK,CAAK,OAAAL,GAAUD,EAAW,KAAKM,CAAK,CAA/B,CAAgC,CAAC,CACpG,CAAC,CACH,CCVM,SAAUC,GAAS,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAC9B,IAAMC,EAAYC,GAAaH,CAAM,EACrC,OAAOI,EAAQ,SAACC,EAAQC,EAAU,EAI/BJ,EAAYK,GAAOP,EAAQK,EAAQH,CAAS,EAAIK,GAAOP,EAAQK,CAAM,GAAG,UAAUC,CAAU,CAC/F,CAAC,CACH,CCmBM,SAAUE,EACdC,EACAC,EAA6G,CAE7G,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAyD,KACzDC,EAAQ,EAERC,EAAa,GAIXC,EAAgB,UAAA,CAAM,OAAAD,GAAc,CAACF,GAAmBD,EAAW,SAAQ,CAArD,EAE5BD,EAAO,UACLM,EACEL,EACA,SAACM,EAAK,CAEJL,GAAe,MAAfA,EAAiB,YAAW,EAC5B,IAAIM,EAAa,EACXC,EAAaN,IAEnBO,EAAUb,EAAQU,EAAOE,CAAU,CAAC,EAAE,UACnCP,EAAkBI,EACjBL,EAIA,SAACU,EAAU,CAAK,OAAAV,EAAW,KAAKH,EAAiBA,EAAeS,EAAOI,EAAYF,EAAYD,GAAY,EAAIG,CAAU,CAAzG,EAChB,UAAA,CAIET,EAAkB,KAClBG,EAAa,CACf,CAAC,CACD,CAEN,EACA,UAAA,CACED,EAAa,GACbC,EAAa,CACf,CAAC,CACF,CAEL,CAAC,CACH,CCvFM,SAAUO,EAAaC,EAA8B,CACzD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChCC,EAAUJ,CAAQ,EAAE,UAAUK,EAAyBF,EAAY,UAAA,CAAM,OAAAA,EAAW,SAAQ,CAAnB,EAAuBG,EAAI,CAAC,EACrG,CAACH,EAAW,QAAUD,EAAO,UAAUC,CAAU,CACnD,CAAC,CACH,CCIM,SAAUI,GAAaC,EAAiDC,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,IACrEC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAQ,EACZF,EAAO,UACLG,EAAyBF,EAAY,SAACG,EAAK,CACzC,IAAMC,EAASR,EAAUO,EAAOF,GAAO,GACtCG,GAAUP,IAAcG,EAAW,KAAKG,CAAK,EAC9C,CAACC,GAAUJ,EAAW,SAAQ,CAChC,CAAC,CAAC,CAEN,CAAC,CACH,CCqGM,SAAUK,EACdC,EACAC,EACAC,EAA8B,CAK9B,IAAMC,EACJC,EAAWJ,CAAc,GAAKC,GAASC,EAElC,CAAE,KAAMF,EAA2E,MAAKC,EAAE,SAAQC,CAAA,EACnGF,EAEN,OAAOG,EACHE,EAAQ,SAACC,EAAQC,EAAU,QACzBC,EAAAL,EAAY,aAAS,MAAAK,IAAA,QAAAA,EAAA,KAArBL,CAAW,EACX,IAAIM,EAAU,GACdH,EAAO,UACLI,EACEH,EACA,SAACI,EAAK,QACJH,EAAAL,EAAY,QAAI,MAAAK,IAAA,QAAAA,EAAA,KAAhBL,EAAmBQ,CAAK,EACxBJ,EAAW,KAAKI,CAAK,CACvB,EACA,UAAA,OACEF,EAAU,IACVD,EAAAL,EAAY,YAAQ,MAAAK,IAAA,QAAAA,EAAA,KAApBL,CAAW,EACXI,EAAW,SAAQ,CACrB,EACA,SAACK,EAAG,OACFH,EAAU,IACVD,EAAAL,EAAY,SAAK,MAAAK,IAAA,QAAAA,EAAA,KAAjBL,EAAoBS,CAAG,EACvBL,EAAW,MAAMK,CAAG,CACtB,EACA,UAAA,SACMH,KACFD,EAAAL,EAAY,eAAW,MAAAK,IAAA,QAAAA,EAAA,KAAvBL,CAAW,IAEbU,EAAAV,EAAY,YAAQ,MAAAU,IAAA,QAAAA,EAAA,KAApBV,CAAW,CACb,CAAC,CACF,CAEL,CAAC,EAIDW,EACN,CCnIM,SAAUC,GAAYC,EAAsDC,EAAuB,CACvG,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAC1B,IAAAC,EAAuCJ,GAAM,KAANA,EAAU,CAAA,EAA/CK,EAAAD,EAAA,QAAAE,EAAOD,IAAA,OAAG,GAAIA,EAAEE,EAAAH,EAAA,SAAAI,EAAQD,IAAA,OAAG,GAAKA,EACpCE,EAAW,GACXC,EAAsB,KACtBC,EAAiC,KACjCC,EAAa,GAEXC,EAAgB,UAAA,CACpBF,GAAS,MAATA,EAAW,YAAW,EACtBA,EAAY,KACRH,IACFM,GAAI,EACJF,GAAcT,EAAW,SAAQ,EAErC,EAEMY,EAAoB,UAAA,CACxBJ,EAAY,KACZC,GAAcT,EAAW,SAAQ,CACnC,EAEMa,EAAgB,SAACC,GAAQ,CAC7B,OAACN,EAAYO,EAAUnB,EAAiBkB,EAAK,CAAC,EAAE,UAAUE,EAAyBhB,EAAYU,EAAeE,CAAiB,CAAC,CAAhI,EAEID,GAAO,UAAA,CACX,GAAIL,EAAU,CAIZA,EAAW,GACX,IAAMQ,GAAQP,EACdA,EAAY,KAEZP,EAAW,KAAKc,EAAK,EACrB,CAACL,GAAcI,EAAcC,EAAK,EAEtC,EAEAf,EAAO,UACLiB,EACEhB,EAMA,SAACc,GAAK,CACJR,EAAW,GACXC,EAAYO,GACZ,EAAEN,GAAa,CAACA,EAAU,UAAYL,EAAUQ,GAAI,EAAKE,EAAcC,EAAK,EAC9E,EACA,UAAA,CACEL,EAAa,GACb,EAAEJ,GAAYC,GAAYE,GAAa,CAACA,EAAU,SAAWR,EAAW,SAAQ,CAClF,CAAC,CACF,CAEL,CAAC,CACH,CCxFM,SAAUiB,GACdC,EACAC,EACAC,EAAuB,CADvBD,IAAA,SAAAA,EAAAE,IAGA,IAAMC,EAAYC,GAAML,EAAUC,CAAS,EAC3C,OAAOK,GAAS,UAAA,CAAM,OAAAF,CAAA,EAAWF,CAAM,CACzC,CCJM,SAAUK,IAAc,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACnC,IAAMC,EAAUC,GAAkBH,CAAM,EAExC,OAAOI,EAAQ,SAACC,EAAQC,EAAU,CAehC,Q
AdMC,EAAMP,EAAO,OACbQ,EAAc,IAAI,MAAMD,CAAG,EAI7BE,EAAWT,EAAO,IAAI,UAAA,CAAM,MAAA,EAAA,CAAK,EAGjCU,EAAQ,cAMHC,EAAC,CACRC,EAAUZ,EAAOW,CAAC,CAAC,EAAE,UACnBE,EACEP,EACA,SAACQ,EAAK,CACJN,EAAYG,CAAC,EAAIG,EACb,CAACJ,GAAS,CAACD,EAASE,CAAC,IAEvBF,EAASE,CAAC,EAAI,IAKbD,EAAQD,EAAS,MAAMM,EAAQ,KAAON,EAAW,MAEtD,EAGAO,EAAI,CACL,GAnBIL,EAAI,EAAGA,EAAIJ,EAAKI,MAAhBA,CAAC,EAwBVN,EAAO,UACLQ,EAAyBP,EAAY,SAACQ,EAAK,CACzC,GAAIJ,EAAO,CAET,IAAMO,EAAMC,EAAA,CAAIJ,CAAK,EAAAK,EAAKX,CAAW,CAAA,EACrCF,EAAW,KAAKJ,EAAUA,EAAO,MAAA,OAAAgB,EAAA,CAAA,EAAAC,EAAIF,CAAM,CAAA,CAAA,EAAIA,CAAM,EAEzD,CAAC,CAAC,CAEN,CAAC,CACH,CCxFM,SAAUG,IAAG,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACxB,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChCL,GAAS,MAAA,OAAAM,EAAA,CAACF,CAA8B,EAAAG,EAAMN,CAAuC,CAAA,CAAA,EAAE,UAAUI,CAAU,CAC7G,CAAC,CACH,CCCM,SAAUG,IAAO,SAAkCC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACvD,OAAOC,GAAG,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIJ,CAAW,CAAA,CAAA,CAC3B,CCYO,SAASK,IAAmC,CACjD,IAAMC,EAAY,IAAIC,GAAwB,CAAC,EAC/C,OAAAC,EAAU,SAAU,mBAAoB,CAAE,KAAM,EAAK,CAAC,EACnD,UAAU,IAAMF,EAAU,KAAK,QAAQ,CAAC,EAGpCA,CACT,CCHO,SAASG,EACdC,EAAkBC,EAAmB,SAChC,CACL,OAAO,MAAM,KAAKA,EAAK,iBAAoBD,CAAQ,CAAC,CACtD,CAuBO,SAASE,EACdF,EAAkBC,EAAmB,SAClC,CACH,IAAME,EAAKC,GAAsBJ,EAAUC,CAAI,EAC/C,GAAI,OAAOE,GAAO,YAChB,MAAM,IAAI,eACR,8BAA8BH,CAAQ,iBACxC,EAGF,OAAOG,CACT,CAsBO,SAASC,GACdJ,EAAkBC,EAAmB,SACtB,CACf,OAAOA,EAAK,cAAiBD,CAAQ,GAAK,MAC5C,CAOO,SAASK,IAA4C,CAnH5D,IAAAC,EAAAC,EAAAC,EAAAC,EAoHE,OACEA,GAAAD,GAAAD,GAAAD,EAAA,SAAS,gBAAT,YAAAA,EAAwB,aAAxB,YAAAC,EAAoC,gBAApC,KAAAC,EACA,SAAS,gBADT,KAAAC,EAEA,MAEJ,CCvEA,IAAMC,GAAYC,EAChBC,EAAU,SAAS,KAAM,SAAS,EAClCA,EAAU,SAAS,KAAM,UAAU,CACrC,EACG,KACCC,GAAa,CAAC,EACdC,EAAU,MAAS,EACnBC,EAAI,IAAMC,GAAiB,GAAK,SAAS,IAAI,EAC7CC,EAAY,CAAC,CACf,EAaK,SAASC,GACdC,EACqB,CACrB,OAAOT,GACJ,KACCK,EAAIK,GAAUD,EAAG,SAASC,CAAM,CAAC,EACjCC,EAAqB,CACvB,CACJ,CC7BO,SAASC,GACdC,EAAiBC,EACI,CACrB,OAAOC,EAAM,IAAMC,EACjBC,EAAUJ,EAAI,YAAY,EAAE,KAAKK,EAAI,IAAM,EAAI,CAAC,EAChDD,EAAUJ,EAAI,YAAY,EAAE,KAAKK,EAAI,IAAM,EAAK,CAAC,CACnD,EACG,KACCJ,EAAUK,GAASC,GAAUC,GAAM,CAAC,CAACD,EAASN,CAAO,CAAC,EAAIQ,GAC1DC,EAAUV,EAAG,QAAQ,QAAQ,CAAC,CAChC,CACF,CACF,CCPA,SAASW,GAAYC,EAAiBC,EAA8B,CAGlE,GAAI,OAAOA,GAAU,UAAY,OAAOA,GAAU,SAChDD,EAAG,WAAaC,EAAM,SAAS,UAGtBA,aAAiB,KAC1BD,EAAG,YAAYC,CAAK,UAGX,MAAM,QAAQA,CAAK,EAC5B,QAAWC,KAAQD,EACjBF,GAAYC,EAAIE,CAAI,CAE1B,CAyBO,SAASC,EACdC,EAAaC,KAAmCC,EAC7C,CACH,IAAMN,EAAK,SAAS,cAAcI,CAAG,EAGrC,GAAIC,EACF,QAAWE,KAAQ,OAAO,KAAKF,CAAU,EACnC,OAAOA,EAAWE,CAAI,GAAM,cAI5B,OAAOF,EAAWE,CAAI,GAAM,UAC9BP,EAAG,aAAaO,EAAMF,EAAWE,CAAI,CAAC,EAEtCP,EAAG,aAAaO,EAAM,EAAE,GAI9B,QAAWN,KAASK,EAClBP,GAAYC,EAAIC,CAAK,EAGvB,OAAOD,CACT,CC9EO,SAASQ,GAAMC,EAAuB,CAC3C,GAAIA,EAAQ,IAAK,CACf,IAAMC,EAAS,GAAGD,EAAQ,KAAO,IAAO,IACxC,MAAO,KAAKA,EAAQ,MAAY,KAAM,QAAQC,CAAM,CAAC,GACvD,KACE,QAAOD,EAAM,SAAS,CAE1B,CCCO,SAASE,GAAYC,EAA+B,CACzD,IAAMC,EAASC,EAAE,SAAU,CAAE,IAAAF,CAAI,CAAC,EAClC,OAAOG,EAAM,KACX,SAAS,KAAK,YAAYF,CAAM,EACzBG,EACLC,EAAUJ,EAAQ,MAAM,EACxBI,EAAUJ,EAAQ,OAAO,EACtB,KACCK,EAAU,IACRC,GAAW,IAAM,IAAI,eAAe,mBAAmBP,CAAG,EAAE,CAAC,CAC9D,CACH,CACJ,EACG,KACCQ,EAAI,IAAG,EAAY,EACnBC,EAAS,IAAM,SAAS,KAAK,YAAYR,CAAM,CAAC,EAChDS,GAAK,CAAC,CACR,EACH,CACH,CCVA,IAAMC,GAAS,IAAIC,EAiBbC,GAAYC,EAAM,IACtB,OAAO,gBAAmB,YACtBC,GAAY,4CAA4C,EACxDC,EAAG,MAAS,CACjB,EACE,KACCC,EAAI,IAAM,IAAI,eAAeC,GAC3BA,EAAQ,QAAQC,GAASR,GAAO,KAAKQ,CAAK,CAAC,CAC5C,CAAC,EACFC,EAAUC,GAAYC,EAAMC,GAAOP,EAAGK,CAAQ,CAAC,EAAE,KAC/CG,EAAS,IAAMH,EAAS,WAAW,CAAC,CACtC,CAAC,EACDI,EAAY,CAAC,CACf,EAaK,SAASC,GACdC,EACa,CACb,MAAO,CACL,MAAQA,EAAG,YACX,
OAAQA,EAAG,YACb,CACF,CAuBO,SAASC,GACdD,EACyB,CAMzB,IAAIE,EAASF,EACb,KAAOE,EAAO,cAAgB,GACxBA,EAAO,eACTA,EAASA,EAAO,cAMpB,OAAOhB,GAAU,KACfiB,EAAIT,GAAYA,EAAS,QAAQQ,CAAM,CAAC,EACxCT,EAAUC,GAAYV,GAAO,KAC3BoB,EAAOZ,GAASA,EAAM,SAAWU,CAAM,EACvCL,EAAS,IAAMH,EAAS,UAAUQ,CAAM,CAAC,CAC3C,CAAC,EACDZ,EAAI,IAAMS,GAAeC,CAAE,CAAC,EAC5BK,EAAUN,GAAeC,CAAE,CAAC,CAC9B,CACF,CC3HO,SAASM,GACdC,EACa,CACb,MAAO,CACL,MAAQA,EAAG,YACX,OAAQA,EAAG,YACb,CACF,CASO,SAASC,GACdD,EACyB,CACzB,IAAIE,EAASF,EAAG,cAChB,KAAOE,IAEHF,EAAG,aAAgBE,EAAO,aAC1BF,EAAG,cAAgBE,EAAO,eAE1BA,GAAUF,EAAKE,GAAQ,cAK3B,OAAOA,EAASF,EAAK,MACvB,CAYO,SAASG,GACdH,EACe,CACf,IAAMI,EAA4B,CAAC,EAG/BF,EAASF,EAAG,cAChB,KAAOE,IAEHF,EAAG,YAAeE,EAAO,aACzBF,EAAG,aAAeE,EAAO,eAEzBE,EAAW,KAAKF,CAAM,EAGxBA,GAAUF,EAAKE,GAAQ,cAKzB,OAAIE,EAAW,SAAW,GACxBA,EAAW,KAAK,SAAS,eAAe,EAGnCA,CACT,CC9CO,SAASC,GACdC,EACe,CACf,MAAO,CACL,EAAGA,EAAG,WACN,EAAGA,EAAG,SACR,CACF,CASO,SAASC,GACdD,EACe,CACf,IAAME,EAAOF,EAAG,sBAAsB,EACtC,MAAO,CACL,EAAGE,EAAK,EAAI,OAAO,QACnB,EAAGA,EAAK,EAAI,OAAO,OACrB,CACF,CAWO,SAASC,GACdH,EAC2B,CAC3B,OAAOI,EACLC,EAAU,OAAQ,MAAM,EACxBA,EAAU,OAAQ,QAAQ,CAC5B,EACG,KACCC,GAAU,EAAGC,EAAuB,EACpCC,EAAI,IAAMT,GAAiBC,CAAE,CAAC,EAC9BS,EAAUV,GAAiBC,CAAE,CAAC,CAChC,CACJ,CC3DO,SAASU,GACdC,EACe,CACf,MAAO,CACL,EAAGA,EAAG,WACN,EAAGA,EAAG,SACR,CACF,CAWO,SAASC,GACdD,EAC2B,CAC3B,OAAOE,EACLC,EAAUH,EAAI,QAAQ,EACtBG,EAAU,OAAQ,QAAQ,EAC1BA,EAAU,OAAQ,QAAQ,CAC5B,EACG,KACCC,GAAU,EAAGC,EAAuB,EACpCC,EAAI,IAAMP,GAAwBC,CAAE,CAAC,EACrCO,EAAUR,GAAwBC,CAAE,CAAC,CACvC,CACJ,CCzBA,IAAMQ,GAAS,IAAIC,EAUbC,GAAYC,EAAM,IAAMC,EAC5B,IAAI,qBAAqBC,GAAW,CAClC,QAAWC,KAASD,EAClBL,GAAO,KAAKM,CAAK,CACrB,EAAG,CACD,UAAW,CACb,CAAC,CACH,CAAC,EACE,KACCC,EAAUC,GAAYC,EAAMC,GAAON,EAAGI,CAAQ,CAAC,EAC5C,KACCG,EAAS,IAAMH,EAAS,WAAW,CAAC,CACtC,CACF,EACAI,EAAY,CAAC,CACf,EAaK,SAASC,GACdC,EACqB,CACrB,OAAOZ,GACJ,KACCa,EAAIP,GAAYA,EAAS,QAAQM,CAAE,CAAC,EACpCP,EAAUC,GAAYR,GACnB,KACCgB,EAAO,CAAC,CAAE,OAAAC,CAAO,IAAMA,IAAWH,CAAE,EACpCH,EAAS,IAAMH,EAAS,UAAUM,CAAE,CAAC,EACrCI,EAAI,CAAC,CAAE,eAAAC,CAAe,IAAMA,CAAc,CAC5C,CACF,CACF,CACJ,CAaO,SAASC,GACdN,EAAiBO,EAAY,GACR,CACrB,OAAOC,GAA0BR,CAAE,EAChC,KACCI,EAAI,CAAC,CAAE,EAAAK,CAAE,IAAM,CACb,IAAMC,EAAUC,GAAeX,CAAE,EAC3BY,EAAUC,GAAsBb,CAAE,EACxC,OAAOS,GACLG,EAAQ,OAASF,EAAQ,OAASH,CAEtC,CAAC,EACDO,EAAqB,CACvB,CACJ,CCjFA,IAAMC,GAA4C,CAChD,OAAQC,EAAW,yBAAyB,EAC5C,OAAQA,EAAW,yBAAyB,CAC9C,EAaO,SAASC,GAAUC,EAAuB,CAC/C,OAAOH,GAAQG,CAAI,EAAE,OACvB,CAaO,SAASC,GAAUD,EAAcE,EAAsB,CACxDL,GAAQG,CAAI,EAAE,UAAYE,GAC5BL,GAAQG,CAAI,EAAE,MAAM,CACxB,CAWO,SAASG,GAAYH,EAAmC,CAC7D,IAAMI,EAAKP,GAAQG,CAAI,EACvB,OAAOK,EAAUD,EAAI,QAAQ,EAC1B,KACCE,EAAI,IAAMF,EAAG,OAAO,EACpBG,EAAUH,EAAG,OAAO,CACtB,CACJ,CC9BA,SAASI,GACPC,EAAiBC,EACR,CACT,OAAQD,EAAG,YAAa,CAGtB,KAAK,iBAEH,OAAIA,EAAG,OAAS,QACP,SAAS,KAAKC,CAAI,EAElB,GAGX,KAAK,kBACL,KAAK,oBACH,MAAO,GAGT,QACE,OAAOD,EAAG,iBACd,CACF,CAWO,SAASE,IAAwC,CACtD,OAAOC,EACLC,EAAU,OAAQ,kBAAkB,EAAE,KAAKC,EAAI,IAAM,EAAI,CAAC,EAC1DD,EAAU,OAAQ,gBAAgB,EAAE,KAAKC,EAAI,IAAM,EAAK,CAAC,CAC3D,EACG,KACCC,EAAU,EAAK,CACjB,CACJ,CAOO,SAASC,IAAsC,CACpD,IAAMC,EAAYJ,EAAyB,OAAQ,SAAS,EACzD,KACCK,EAAOC,GAAM,EAAEA,EAAG,SAAWA,EAAG,QAAQ,EACxCL,EAAIK,IAAO,CACT,KAAMC,GAAU,QAAQ,EAAI,SAAW,SACvC,KAAMD,EAAG,IACT,OAAQ,CACNA,EAAG,eAAe,EAClBA,EAAG,gBAAgB,CACrB,CACF,EAAc,EACdD,EAAO,CAAC,CAAE,KAAAG,EAAM,KAAAX,CAAK,IAAM,CACzB,GAAIW,IAAS,SAAU,CACrB,IAAMC,EAASC,GAAiB,EAChC,GAAI,OAAOD,GAAW,YACpB,MAAO,CAACd,GAAwBc,EAAQZ,CAAI,CAChD,CACA,MAAO,EACT,CAAC,EACDc,GAAM,CACR,EAGF,OAAOb,GAAiB,EACrB,KACCc,EAAUH,GAAWA,EAAqBI,EAAZT,CAAiB,CACjD,CACJ,CC1GO,SAASU,IAAmB,CACjC,OAAO,IAAI,IAAI,SAAS,IAAI,CAC9B,CAgBO,SAASC,GACdC,EAA4BC,EAAW,GACjC,CACN
,GAAIC,EAAQ,oBAAoB,GAAK,CAACD,EAAU,CAC9C,IAAME,EAAKC,EAAE,IAAK,CAAE,KAAMJ,EAAI,IAAK,CAAC,EACpC,SAAS,KAAK,YAAYG,CAAE,EAC5BA,EAAG,MAAM,EACTA,EAAG,OAAO,CAIZ,MACE,SAAS,KAAOH,EAAI,IAExB,CASO,SAASK,IAA8B,CAC5C,OAAO,IAAIC,CACb,CCxCO,SAASC,IAA0B,CACxC,OAAO,SAAS,KAAK,MAAM,CAAC,CAC9B,CAYO,SAASC,GAAgBC,EAAoB,CAClD,IAAMC,EAAKC,EAAE,IAAK,CAAE,KAAMF,CAAK,CAAC,EAChCC,EAAG,iBAAiB,QAASE,GAAMA,EAAG,gBAAgB,CAAC,EACvDF,EAAG,MAAM,CACX,CAWO,SAASG,GACdC,EACoB,CACpB,OAAOC,EACLC,EAA2B,OAAQ,YAAY,EAC/CF,CACF,EACG,KACCG,EAAIV,EAAe,EACnBW,EAAUX,GAAgB,CAAC,EAC3BY,EAAOV,GAAQA,EAAK,OAAS,CAAC,EAC9BW,EAAY,CAAC,CACf,CACJ,CASO,SAASC,GACdP,EACyB,CACzB,OAAOD,GAAkBC,CAAS,EAC/B,KACCG,EAAIK,GAAMC,GAAmB,QAAQD,CAAE,IAAI,CAAE,EAC7CH,EAAOT,GAAM,OAAOA,GAAO,WAAW,CACxC,CACJ,CCtDO,SAASc,GAAWC,EAAoC,CAC7D,IAAMC,EAAQ,WAAWD,CAAK,EAC9B,OAAOE,GAA0BC,GAC/BF,EAAM,YAAY,IAAME,EAAKF,EAAM,OAAO,CAAC,CAC5C,EACE,KACCG,EAAUH,EAAM,OAAO,CACzB,CACJ,CAOO,SAASI,IAAkC,CAChD,IAAMJ,EAAQ,WAAW,OAAO,EAChC,OAAOK,EACLC,EAAU,OAAQ,aAAa,EAAE,KAAKC,EAAI,IAAM,EAAI,CAAC,EACrDD,EAAU,OAAQ,YAAY,EAAE,KAAKC,EAAI,IAAM,EAAK,CAAC,CACvD,EACG,KACCJ,EAAUH,EAAM,OAAO,CACzB,CACJ,CAcO,SAASQ,GACdC,EAA6BC,EACd,CACf,OAAOD,EACJ,KACCE,EAAUC,GAAUA,EAASF,EAAQ,EAAIG,CAAK,CAChD,CACJ,CC/BO,SAASC,GACdC,EAAmBC,EACD,CAClB,OAAO,IAAIC,EAAiBC,GAAY,CACtC,IAAMC,EAAM,IAAI,eAChB,OAAAA,EAAI,KAAK,MAAO,GAAGJ,CAAG,EAAE,EACxBI,EAAI,aAAe,OAGnBA,EAAI,iBAAiB,OAAQ,IAAM,CAC7BA,EAAI,QAAU,KAAOA,EAAI,OAAS,KACpCD,EAAS,KAAKC,EAAI,QAAQ,EAC1BD,EAAS,SAAS,GAIlBA,EAAS,MAAM,IAAI,MAAMC,EAAI,UAAU,CAAC,CAE5C,CAAC,EAGDA,EAAI,iBAAiB,QAAS,IAAM,CAClCD,EAAS,MAAM,IAAI,MAAM,eAAe,CAAC,CAC3C,CAAC,EAGDC,EAAI,iBAAiB,QAAS,IAAM,CAClCD,EAAS,SAAS,CACpB,CAAC,EAGG,OAAOF,GAAA,YAAAA,EAAS,YAAc,cAChCG,EAAI,iBAAiB,WAAYC,GAAS,CA/FhD,IAAAC,EAgGQ,GAAID,EAAM,iBACRJ,EAAQ,UAAW,KAAMI,EAAM,OAASA,EAAM,MAAS,GAAG,MAIrD,CACL,IAAME,GAASD,EAAAF,EAAI,kBAAkB,gBAAgB,IAAtC,KAAAE,EAA2C,EAC1DL,EAAQ,UAAW,KAAMI,EAAM,OAAS,CAACE,EAAU,GAAG,CACxD,CACF,CAAC,EAGDN,EAAQ,UAAU,KAAK,CAAC,GAI1BG,EAAI,KAAK,EACF,IAAMA,EAAI,MAAM,CACzB,CAAC,CACH,CAcO,SAASI,GACdR,EAAmBC,EACJ,CACf,OAAOF,GAAQC,EAAKC,CAAO,EACxB,KACCQ,EAAUC,GAAOA,EAAI,KAAK,CAAC,EAC3BC,EAAIC,GAAQ,KAAK,MAAMA,CAAI,CAAM,EACjCC,EAAY,CAAC,CACf,CACJ,CAUO,SAASC,GACdd,EAAmBC,EACG,CACtB,IAAMc,EAAM,IAAI,UAChB,OAAOhB,GAAQC,EAAKC,CAAO,EACxB,KACCQ,EAAUC,GAAOA,EAAI,KAAK,CAAC,EAC3BC,EAAID,GAAOK,EAAI,gBAAgBL,EAAK,WAAW,CAAC,EAChDG,EAAY,CAAC,CACf,CACJ,CAUO,SAASG,GACdhB,EAAmBC,EACG,CACtB,IAAMc,EAAM,IAAI,UAChB,OAAOhB,GAAQC,EAAKC,CAAO,EACxB,KACCQ,EAAUC,GAAOA,EAAI,KAAK,CAAC,EAC3BC,EAAID,GAAOK,EAAI,gBAAgBL,EAAK,UAAU,CAAC,EAC/CG,EAAY,CAAC,CACf,CACJ,CC5HO,SAASI,IAAoC,CAClD,MAAO,CACL,EAAG,KAAK,IAAI,EAAG,OAAO,EACtB,EAAG,KAAK,IAAI,EAAG,OAAO,CACxB,CACF,CASO,SAASC,IAAkD,CAChE,OAAOC,EACLC,EAAU,OAAQ,SAAU,CAAE,QAAS,EAAK,CAAC,EAC7CA,EAAU,OAAQ,SAAU,CAAE,QAAS,EAAK,CAAC,CAC/C,EACG,KACCC,EAAIJ,EAAiB,EACrBK,EAAUL,GAAkB,CAAC,CAC/B,CACJ,CC3BO,SAASM,IAAgC,CAC9C,MAAO,CACL,MAAQ,WACR,OAAQ,WACV,CACF,CASO,SAASC,IAA8C,CAC5D,OAAOC,EAAU,OAAQ,SAAU,CAAE,QAAS,EAAK,CAAC,EACjD,KACCC,EAAIH,EAAe,EACnBI,EAAUJ,GAAgB,CAAC,CAC7B,CACJ,CCXO,SAASK,IAAsC,CACpD,OAAOC,EAAc,CACnBC,GAAoB,EACpBC,GAAkB,CACpB,CAAC,EACE,KACCC,EAAI,CAAC,CAACC,EAAQC,CAAI,KAAO,CAAE,OAAAD,EAAQ,KAAAC,CAAK,EAAE,EAC1CC,EAAY,CAAC,CACf,CACJ,CCVO,SAASC,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EAChB,CACtB,IAAMC,EAAQF,EACX,KACCG,EAAwB,MAAM,CAChC,EAGIC,EAAUC,EAAc,CAACH,EAAOD,CAAO,CAAC,EAC3C,KACCK,EAAI,IAAMC,GAAiBR,CAAE,CAAC,CAChC,EAGF,OAAOM,EAAc,CAACJ,EAASD,EAAWI,CAAO,CAAC,EAC/C,KACCE,EAAI,CAAC,CAAC,CAAE,OAAAE,CAAO,EAAG,CAAE,OAAAC,EAAQ,KAAAC,CAAK,EAAG,CAAE,EAAAC,EAAG,EAAAC,CAAE,CAAC,KAAO,CACjD,OAAQ,CACN,EAAGH,EAAO,EAAIE,EACd,EAAG
F,EAAO,EAAIG,EAAIJ,CACpB,EACA,KAAAE,CACF,EAAE,CACJ,CACJ,CCzBA,SAASG,GAAQC,EAA+B,CAC9C,OAAOC,EAA8BD,EAAQ,UAAWE,GAAMA,EAAG,IAAI,CACvE,CAWA,SAASC,GAAQH,EAA4B,CAC3C,IAAMI,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAUE,GAAQN,EAAO,YAAYM,CAAI,CAAC,EAGzCF,CACT,CAgBO,SAASG,GACdC,EAAaR,EAAS,IAAI,OAAOQ,CAAG,EACxB,CACZ,IAAMC,EAAQV,GAAQC,CAAM,EACtBI,EAAQD,GAAQH,CAAM,EAGtBU,EAAU,IAAIL,EACpBK,EAAQ,UAAUN,CAAK,EAGvB,IAAMO,EAAQP,EAAM,KAAKQ,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxD,OAAOH,EACJ,KACCE,EAAe,EACfE,GAAUL,EAAM,KAAKM,EAAUJ,CAAK,CAAC,CAAC,EACtCK,GAAM,CACR,CACJ,CCJA,IAAMC,GAASC,EAAW,WAAW,EAC/BC,GAAiB,KAAK,MAAMF,GAAO,WAAY,EACrDE,GAAO,KAAO,GAAG,IAAI,IAAIA,GAAO,KAAMC,GAAY,CAAC,CAAC,GAW7C,SAASC,IAAwB,CACtC,OAAOF,EACT,CASO,SAASG,EAAQC,EAAqB,CAC3C,OAAOJ,GAAO,SAAS,SAASI,CAAI,CACtC,CAUO,SAASC,GACdC,EAAkBC,EACV,CACR,OAAO,OAAOA,GAAU,YACpBP,GAAO,aAAaM,CAAG,EAAE,QAAQ,IAAKC,EAAM,SAAS,CAAC,EACtDP,GAAO,aAAaM,CAAG,CAC7B,CChCO,SAASE,GACdC,EAASC,EAAmB,SACP,CACrB,OAAOC,EAAW,sBAAsBF,CAAI,IAAKC,CAAI,CACvD,CAYO,SAASE,GACdH,EAASC,EAAmB,SACL,CACvB,OAAOG,EAAY,sBAAsBJ,CAAI,IAAKC,CAAI,CACxD,CC7EO,SAASI,GACdC,EACsB,CACtB,IAAMC,EAASC,EAAW,6BAA8BF,CAAE,EAC1D,OAAOG,EAAUF,EAAQ,QAAS,CAAE,KAAM,EAAK,CAAC,EAC7C,KACCG,EAAI,IAAMF,EAAW,cAAeF,CAAE,CAAC,EACvCI,EAAIC,IAAY,CAAE,KAAM,UAAUA,EAAQ,SAAS,CAAE,EAAE,CACzD,CACJ,CASO,SAASC,GACdN,EACiC,CACjC,GAAI,CAACO,EAAQ,kBAAkB,GAAK,CAACP,EAAG,kBACtC,OAAOQ,EAGT,GAAI,CAACR,EAAG,OAAQ,CACd,IAAMK,EAAUH,EAAW,cAAeF,CAAE,EACxC,UAAUK,EAAQ,SAAS,IAAM,SAAS,YAAY,IACxDL,EAAG,OAAS,GAChB,CAGA,OAAOS,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,KAAAE,CAAK,IAAM,CAC5BZ,EAAG,OAAS,GAGZ,SAAiB,aAAcY,CAAI,CACrC,CAAC,EAGMb,GAAcC,CAAE,EACpB,KACCa,EAAIC,GAASJ,EAAM,KAAKI,CAAK,CAAC,EAC9BC,EAAS,IAAML,EAAM,SAAS,CAAC,EAC/BN,EAAIU,GAAUE,EAAA,CAAE,IAAKhB,GAAOc,EAAQ,CACtC,CACJ,CAAC,CACH,CC5BO,SAASG,GACdC,EAAiB,CAAE,QAAAC,CAAQ,EACN,CACrB,OAAOA,EACJ,KACCC,EAAIC,IAAW,CAAE,OAAQA,IAAWH,CAAG,EAAE,CAC3C,CACJ,CAYO,SAASI,GACdJ,EAAiBK,EACe,CAChC,IAAMC,EAAY,IAAIC,EACtB,OAAAD,EAAU,UAAU,CAAC,CAAE,OAAAE,CAAO,IAAM,CAClCR,EAAG,OAASQ,CACd,CAAC,EAGMT,GAAaC,EAAIK,CAAO,EAC5B,KACCI,EAAIC,GAASJ,EAAU,KAAKI,CAAK,CAAC,EAClCC,EAAS,IAAML,EAAU,SAAS,CAAC,EACnCJ,EAAIQ,GAAUE,EAAA,CAAE,IAAKZ,GAAOU,EAAQ,CACtC,CACJ,CCnEO,SAASG,GACdC,EAAaC,EACA,CACb,OAAIA,IAAU,SAEVC,EAAC,OAAI,MAAM,gCAAgC,GAAIF,EAAI,KAAK,WACtDE,EAAC,OAAI,MAAM,+BAA+B,CAC5C,EAIAA,EAAC,OAAI,MAAM,aAAa,GAAIF,EAAI,KAAK,WACnCE,EAAC,OAAI,MAAM,+BAA+B,CAC5C,CAGN,CAGO,SAASC,MACXC,EACU,CACb,OACEF,EAAC,OAAI,MAAM,cAAc,KAAK,WAC5BA,EAAC,OAAI,MAAM,iCACRE,CACH,CACF,CAEJ,CCvCO,SAASC,GACdC,EAAqBC,EACR,CAIb,GAHAA,EAASA,EAAS,GAAGA,CAAM,eAAeD,CAAE,GAAK,OAG7CC,EAAQ,CACV,IAAMC,EAASD,EAAS,IAAIA,CAAM,GAAK,OACvC,OACEE,EAAC,SAAM,MAAM,gBAAgB,SAAU,GACpCC,GAAcH,CAAM,EACrBE,EAAC,KAAE,KAAMD,EAAQ,MAAM,uBAAuB,SAAU,IACtDC,EAAC,QAAK,wBAAuBH,EAAI,CACnC,CACF,CAEJ,KACE,QACEG,EAAC,SAAM,MAAM,gBAAgB,SAAU,GACpCC,GAAcH,CAAM,EACrBE,EAAC,QAAK,MAAM,uBAAuB,SAAU,IAC3CA,EAAC,QAAK,wBAAuBH,EAAI,CACnC,CACF,CAGN,CC5BO,SAASK,GAAsBC,EAAyB,CAC7D,OACEC,EAAC,UACC,MAAM,uBACN,MAAOC,GAAY,gBAAgB,EACnC,wBAAuB,IAAIF,CAAE,UAC9B,CAEL,CCQA,SAASG,GACPC,EAAsBC,EACT,CACb,IAAMC,EAASD,EAAO,EAChBE,EAASF,EAAO,EAGhBG,EAAU,OAAO,KAAKJ,EAAS,KAAK,EACvC,OAAOK,GAAO,CAACL,EAAS,MAAMK,CAAG,CAAC,EAClC,OAAyB,CAACC,EAAMD,IAAQ,CACvC,GAAGC,EAAMC,EAAC,WAAKF,CAAI,EAAQ,GAC7B,EAAG,CAAC,CAAC,EACJ,MAAM,EAAG,EAAE,EAGRG,EAASC,GAAc,EACvBC,EAAM,IAAI,IAAIV,EAAS,SAAUQ,EAAO,IAAI,EAC9CG,EAAQ,kBAAkB,GAC5BD,EAAI,aAAa,IAAI,IAAK,OAAO,QAAQV,EAAS,KAAK,EACpD,OAAO,CAAC,CAAC,CAAEY,CAAK,IAAMA,CAAK,EAC3B,OAAO,CAACC,EAAW,CAACC,CAAK,IAAM,GAAGD,CAAS,IAAIC,CAAK,GAAG,KAAK,EAAG,EAAE,CACpE,EAGF,GAAM,CAAE,KAAAC,CAAK,EAAIN,GAAc,EAC
/B,OACEF,EAAC,KAAE,KAAM,GAAGG,CAAG,GAAI,MAAM,yBAAyB,SAAU,IAC1DH,EAAC,WACC,MAAM,uCACN,gBAAeP,EAAS,MAAM,QAAQ,CAAC,GAEtCE,EAAS,GAAKK,EAAC,OAAI,MAAM,iCAAiC,EAC1DL,EAAS,GAAKK,EAAC,UAAIP,EAAS,KAAM,EAClCE,GAAU,GAAKK,EAAC,UAAIP,EAAS,KAAM,EACnCG,EAAS,GAAKH,EAAS,KAAK,OAAS,GACpCA,EAAS,KAEVA,EAAS,MAAQA,EAAS,KAAK,IAAIgB,GAAO,CACzC,IAAMC,EAAOF,EACTC,KAAOD,EACL,uBAAuBA,EAAKC,CAAG,CAAC,GAChC,cACF,GACJ,OACET,EAAC,QAAK,MAAO,UAAUU,CAAI,IAAKD,CAAI,CAExC,CAAC,EACAb,EAAS,GAAKC,EAAQ,OAAS,GAC9BG,EAAC,KAAE,MAAM,2BACNW,GAAY,4BAA4B,EAAE,KAAG,GAAGd,CACnD,CAEJ,CACF,CAEJ,CAaO,SAASe,GACdC,EACa,CACb,IAAMC,EAAYD,EAAO,CAAC,EAAE,MACtBE,EAAO,CAAC,GAAGF,CAAM,EAEjBZ,EAASC,GAAc,EAGvBP,EAASoB,EAAK,UAAUC,GAErB,CADG,GAAG,IAAI,IAAIA,EAAI,SAAUf,EAAO,IAAI,CAAC,GACrC,SAAS,GAAG,CACvB,EACK,CAACgB,CAAO,EAAIF,EAAK,OAAOpB,EAAQ,CAAC,EAGnCuB,EAAQH,EAAK,UAAUC,GAAOA,EAAI,MAAQF,CAAS,EACnDI,IAAU,KACZA,EAAQH,EAAK,QAGf,IAAMI,EAAOJ,EAAK,MAAM,EAAGG,CAAK,EAC1BE,EAAOL,EAAK,MAAMG,CAAK,EAGvBG,EAAW,CACf7B,GAAqByB,EAAS,EAAc,EAAE,CAACtB,GAAUuB,IAAU,EAAE,EACrE,GAAGC,EAAK,IAAIG,GAAW9B,GAAqB8B,EAAS,CAAW,CAAC,EACjE,GAAGF,EAAK,OAAS,CACfpB,EAAC,WAAQ,MAAM,0BACbA,EAAC,WAAQ,SAAU,IACjBA,EAAC,WACEoB,EAAK,OAAS,GAAKA,EAAK,SAAW,EAChCT,GAAY,wBAAwB,EACpCA,GAAY,2BAA4BS,EAAK,MAAM,CAEzD,CACF,EACC,GAAGA,EAAK,IAAIE,GAAW9B,GAAqB8B,EAAS,CAAW,CAAC,CACpE,CACF,EAAI,CAAC,CACP,EAGA,OACEtB,EAAC,MAAG,MAAM,0BACPqB,CACH,CAEJ,CCrIO,SAASE,GAAkBC,EAAiC,CACjE,OACEC,EAAC,MAAG,MAAM,oBACP,OAAO,QAAQD,CAAK,EAAE,IAAI,CAAC,CAACE,EAAKC,CAAK,IACrCF,EAAC,MAAG,MAAO,oCAAoCC,CAAG,IAC/C,OAAOC,GAAU,SAAWC,GAAMD,CAAK,EAAIA,CAC9C,CACD,CACH,CAEJ,CCAO,SAASE,GACdC,EACa,CACb,IAAMC,EAAU,kCAAkCD,CAAI,GACtD,OACEE,EAAC,OAAI,MAAOD,EAAS,OAAM,IACzBC,EAAC,UAAO,MAAM,gBAAgB,SAAU,GAAI,cAAY,OAAO,CACjE,CAEJ,CCpBO,SAASC,GAAYC,EAAiC,CAC3D,OACEC,EAAC,OAAI,MAAM,0BACTA,EAAC,OAAI,MAAM,qBACRD,CACH,CACF,CAEJ,CCcA,SAASE,GAAcC,EAA+B,CAzDtD,IAAAC,EA0DE,IAAMC,EAASC,GAAc,EAGvBC,EAAM,IAAI,IAAI,MAAMJ,EAAQ,OAAO,IAAKE,EAAO,IAAI,EACzD,OACEG,EAAC,MAAG,MAAM,oBACRA,EAAC,KAAE,KAAM,GAAGD,CAAG,GAAI,MAAM,oBACtBJ,EAAQ,QACRC,EAAAC,EAAO,UAAP,YAAAD,EAAgB,QAASD,EAAQ,QAAQ,OAAS,GACjDK,EAAC,QAAK,MAAM,qBACTL,EAAQ,QAAQ,CAAC,CACpB,CAEJ,CACF,CAEJ,CAcO,SAASM,GACdC,EAAqBC,EACR,CA1Ff,IAAAP,EA2FE,IAAMC,EAASC,GAAc,EAC7B,OAAAI,EAAWA,EAAS,OAAOP,GAAQ,CA5FrC,IAAAC,EA4FwC,SAACA,EAAAD,EAAQ,aAAR,MAAAC,EAAoB,QAAM,EAE/DI,EAAC,OAAI,MAAM,cACTA,EAAC,UACC,MAAM,sBACN,aAAYI,GAAY,gBAAgB,GAEvCD,EAAO,QACPP,EAAAC,EAAO,UAAP,YAAAD,EAAgB,QAASO,EAAO,QAAQ,OAAS,GAChDH,EAAC,QAAK,MAAM,qBACTG,EAAO,QAAQ,CAAC,CACnB,CAEJ,EACAH,EAAC,MAAG,MAAM,oBACPE,EAAS,IAAIR,EAAa,CAC7B,CACF,CAEJ,CCfA,IAAIW,GAAW,EAkBR,SAASC,GACdC,EACqB,CAMrB,IAAMC,EACJC,EAAc,CACZC,GAAkBH,CAAE,EACpBI,GAAkBJ,CAAE,CACtB,CAAC,EACE,KACCK,EAAI,CAAC,CAACC,EAAOC,CAAK,IAAMD,GAASC,CAAK,EACtCC,EAAqB,CACvB,EAMEC,EACJC,EAAM,IAAMC,GAAqBX,CAAE,CAAC,EAAE,KACpCY,GAASC,EAAyB,EAClCC,GAAa,CAAC,EACdT,EAAI,IAAMU,GAAyBf,CAAE,CAAC,CACxC,EAMF,OAAOC,EAAQ,KACbe,GAAMC,GAAUA,CAAM,EACtBC,EAAU,IAAMhB,EAAc,CAACD,EAASQ,CAAO,CAAC,CAAC,EACjDJ,EAAI,CAAC,CAACY,EAAQE,CAAM,KAAO,CAAE,OAAAF,EAAQ,OAAAE,CAAO,EAAE,EAC9CC,GAAM,CACR,CACF,CAoBO,SAASC,GACdrB,EAAiBsB,EACe,CAChC,GAAM,CAAE,SAAAC,EAAU,UAAAC,CAAU,EAAIF,EAI1BG,EAAK,cAAc3B,IAAU,GAGnC,OAAOY,EAAM,IAAM,CACjB,IAAMgB,EAAQ,IAAIC,EAMZC,EAAQ,IAAIC,GAAgB,EAAK,EACvCH,EAAM,KAAKI,EAAe,EAAGC,GAAQ,EAAK,CAAC,EACxC,UAAUH,CAAK,EAUlB,IAAMI,EAAQJ,EAAM,KAClBK,GAAShB,GAAUiB,GAAM,CAAC,CAACjB,EAAS,IAAKkB,EAAc,CAAC,EACxD3B,EAAqB,EACrBU,EAAUD,GAAUA,EAASM,EAAWa,CAAK,EAC7CC,EAAIC,GAAQA,EAAK,GAAKb,CAAE,EACxBL,GAAM,CACR,EAIAlB,EAAc,CACZwB,EAAM,KAAKrB,EAAI,CAAC,CAAE,OAAAY,CAAO,IAAMA,CAAM,CAAC,EACtCe,EAAM,KACJd,EAAUoB,GAAQlC,GAAkBkC,EAAM,GAAG,CAAC,E
AC9CC,EAAU,EAAK,CACjB,CACF,CAAC,EACE,KAAKlC,EAAImC,GAAUA,EAAO,KAAKvB,GAAUA,CAAM,CAAC,CAAC,EACjD,UAAUW,CAAK,EAMlB,IAAMa,EAAUb,EAAM,KACpBc,EAAOzB,GAAUA,CAAM,EACvB0B,GAAeX,EAAOR,CAAS,EAC/BnB,EAAI,CAAC,CAACuC,EAAGN,EAAM,CAAE,KAAAO,CAAK,CAAC,IAAM,CAC3B,IAAMC,EAAO9C,EAAG,sBAAsB,EAChC+C,EAAID,EAAK,MAAQ,EAIvB,GAAIR,EAAK,OAAS,UAChB,MAAO,CAAE,EAAAS,EAAG,EAAG,EAAID,EAAK,MAAO,EAI1B,GAAIA,EAAK,GAAKD,EAAK,OAAS,EAAG,CACpC,GAAM,CAAE,OAAAG,CAAO,EAAIC,GAAeX,CAAI,EACtC,MAAO,CAAE,EAAAS,EAAG,EAAG,IAAMC,CAAO,CAC9B,KACE,OAAO,CAAE,EAAAD,EAAG,EAAG,GAAMD,EAAK,MAAO,CAErC,CAAC,CACH,EAIA,OAAA5C,EAAc,CAAC8B,EAAON,EAAOe,CAAO,CAAC,EAClC,UAAU,CAAC,CAACH,EAAM,CAAE,OAAAnB,CAAO,EAAG+B,CAAM,IAAM,CACzCZ,EAAK,MAAM,YAAY,sBAAuB,GAAGnB,EAAO,CAAC,IAAI,EAC7DmB,EAAK,MAAM,YAAY,sBAAuB,GAAGnB,EAAO,CAAC,IAAI,EAI7DmB,EAAK,MAAM,YAAY,iBAAkB,GAAGY,EAAO,CAAC,IAAI,EACxDZ,EAAK,MAAM,YAAY,iBAAkB,GAAGY,EAAO,CAAC,IAAI,EAIxDZ,EAAK,UAAU,OAAO,mBAAuBY,EAAO,EAAK,CAAC,EAC1DZ,EAAK,UAAU,OAAO,sBAAuBY,EAAO,GAAK,CAAC,CAC5D,CAAC,EAIHtB,EAAM,KACJc,EAAOzB,GAAUA,CAAM,EACvB0B,GAAeX,EAAO,CAACY,EAAGN,IAASA,CAAI,EACvCI,EAAOJ,GAAQA,EAAK,OAAS,SAAS,CACxC,EACG,UAAUA,GAAQ,CACjB,IAAMO,EAAOI,GAAeE,EAAW,aAAcb,CAAI,CAAC,EAI1DA,EAAK,MAAM,YAAY,qBAAsB,GAAGO,EAAK,KAAK,IAAI,EAC9DP,EAAK,MAAM,YAAY,oBAAsB,KAAQ,CACvD,CAAC,EAMHV,EAAM,KACJpB,EAAqB,EACrB4C,GAAUC,EAAuB,EACjCV,GAAeX,CAAK,CACtB,EACG,UAAU,CAAC,CAACf,EAAQqB,CAAI,IAAM,CAC7BA,EAAK,UAAU,OAAO,sBAAuBrB,CAAM,CACrD,CAAC,EAGHf,EAAc,CACZ0B,EAAM,KAAKc,EAAOzB,GAAUA,CAAM,CAAC,EACnCe,CACF,CAAC,EACE,UAAU,CAAC,CAACY,EAAGN,CAAI,IAAM,CACpBA,EAAK,OAAS,UAChBtC,EAAG,aAAa,gBAAiByB,CAAE,EACnCzB,EAAG,aAAa,gBAAiB,QAAQ,GAEzCA,EAAG,aAAa,mBAAoByB,CAAE,CAE1C,CAAC,EAGHG,EAAM,KAAKc,EAAOzB,GAAU,CAACA,CAAM,CAAC,EACjC,UAAU,IAAM,CACfjB,EAAG,gBAAgB,eAAe,EAClCA,EAAG,gBAAgB,kBAAkB,EACrCA,EAAG,gBAAgB,eAAe,CACpC,CAAC,EAGID,GAAcC,CAAE,EACpB,KACCqC,EAAIiB,GAAS5B,EAAM,KAAK4B,CAAK,CAAC,EAC9BC,EAAS,IAAM7B,EAAM,SAAS,CAAC,EAC/BrB,EAAIiD,GAAUE,EAAA,CAAE,IAAKxD,GAAOsD,EAAQ,CACtC,CACJ,CAAC,CACH,CAeO,SAASG,GACdzD,EAAiB,CAAE,UAAAwB,CAAU,EAC7BkC,EAAY,SAAS,KACW,CAChC,OAAOrC,GAAcrB,EAAI,CACvB,SAAU,IAAI2D,EAAwBC,GAAY,CAChD,IAAMC,EAAQ7D,EAAG,MACXsC,EAAOwB,GAAqBD,CAAK,EACvC,OAAAD,EAAS,KAAKtB,CAAI,EAClBtC,EAAG,gBAAgB,OAAO,EAE1B0D,EAAU,OAAOpB,CAAI,EACd,IAAM,CACXA,EAAK,OAAO,EACZtC,EAAG,aAAa,QAAS6D,CAAK,CAChC,CACF,CAAC,EACD,UAAArC,CACF,CAAC,CACH,CC3QO,SAASuC,GACdC,EAAiBC,EACO,CACxB,IAAMC,EAAUC,EAAM,IAAMC,EAAc,CACxCC,GAAmBL,CAAE,EACrBM,GAA0BL,CAAS,CACrC,CAAC,CAAC,EACC,KACCM,EAAI,CAAC,CAAC,CAAE,EAAAC,EAAG,EAAAC,CAAE,EAAGC,CAAM,IAAqB,CACzC,GAAM,CAAE,MAAAC,EAAO,OAAAC,CAAO,EAAIC,GAAeb,CAAE,EAC3C,MAAQ,CACN,EAAGQ,EAAIE,EAAO,EAAIC,EAAS,EAC3B,EAAGF,EAAIC,EAAO,EAAIE,EAAS,CAC7B,CACF,CAAC,CACH,EAGF,OAAOE,GAAkBd,CAAE,EACxB,KACCe,EAAUC,GAAUd,EACjB,KACCK,EAAIU,IAAW,CAAE,OAAAD,EAAQ,OAAAC,CAAO,EAAE,EAClCC,GAAK,CAAC,CAACF,GAAU,GAAQ,CAC3B,CACF,CACF,CACJ,CAWO,SAASG,GACdnB,EAAiBC,EAAwB,CAAE,QAAAmB,CAAQ,EAChB,CACnC,GAAM,CAACC,EAASC,CAAK,EAAI,MAAM,KAAKtB,EAAG,QAAQ,EAG/C,OAAOG,EAAM,IAAM,CACjB,IAAMoB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxD,OAAAJ,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAN,CAAO,EAAG,CACfjB,EAAG,MAAM,YAAY,iBAAkB,GAAGiB,EAAO,CAAC,IAAI,EACtDjB,EAAG,MAAM,YAAY,iBAAkB,GAAGiB,EAAO,CAAC,IAAI,CACxD,EAGA,UAAW,CACTjB,EAAG,MAAM,eAAe,gBAAgB,EACxCA,EAAG,MAAM,eAAe,gBAAgB,CAC1C,CACF,CAAC,EAGD4B,GAAuB5B,CAAE,EACtB,KACC6B,EAAUJ,CAAK,CACjB,EACG,UAAUK,GAAW,CACpB9B,EAAG,gBAAgB,kBAAmB8B,CAAO,CAC/C,CAAC,EAGLC,EACER,EAAM,KAAKS,EAAO,CAAC,CAAE,OAAAhB,CAAO,IAAMA,CAAM,CAAC,EACzCO,EAAM,KAAKU,GAAa,GAAG,EAAGD,EAAO,CAAC,CAAE,OAAAhB,CAAO,IAAM,CAACA,CAAM,CAAC,CAC/D,EACG,UAAU,CAGT,KAAK,CAAE,OAAAA,CAAO,EAAG,CACXA,EACF
hB,EAAG,QAAQqB,CAAO,EAElBA,EAAQ,OAAO,CACnB,EAGA,UAAW,CACTrB,EAAG,QAAQqB,CAAO,CACpB,CACF,CAAC,EAGHE,EACG,KACCW,GAAU,GAAIC,EAAuB,CACvC,EACG,UAAU,CAAC,CAAE,OAAAnB,CAAO,IAAM,CACzBK,EAAQ,UAAU,OAAO,qBAAsBL,CAAM,CACvD,CAAC,EAGLO,EACG,KACCa,GAAa,IAAKD,EAAuB,EACzCH,EAAO,IAAM,CAAC,CAAChC,EAAG,YAAY,EAC9BO,EAAI,IAAMP,EAAG,aAAc,sBAAsB,CAAC,EAClDO,EAAI,CAAC,CAAE,EAAAC,CAAE,IAAMA,CAAC,CAClB,EACG,UAAU,CAGT,KAAK6B,EAAQ,CACPA,EACFrC,EAAG,MAAM,YAAY,iBAAkB,GAAG,CAACqC,CAAM,IAAI,EAErDrC,EAAG,MAAM,eAAe,gBAAgB,CAC5C,EAGA,UAAW,CACTA,EAAG,MAAM,eAAe,gBAAgB,CAC1C,CACF,CAAC,EAGLsC,EAAsBhB,EAAO,OAAO,EACjC,KACCO,EAAUJ,CAAK,EACfO,EAAOO,GAAM,EAAEA,EAAG,SAAWA,EAAG,QAAQ,CAC1C,EACG,UAAUA,GAAM,CACfA,EAAG,gBAAgB,EACnBA,EAAG,eAAe,CACpB,CAAC,EAGLD,EAAsBhB,EAAO,WAAW,EACrC,KACCO,EAAUJ,CAAK,EACfe,GAAejB,CAAK,CACtB,EACG,UAAU,CAAC,CAACgB,EAAI,CAAE,OAAAvB,CAAO,CAAC,IAAM,CA3OzC,IAAAyB,EA8OU,GAAIF,EAAG,SAAW,GAAKA,EAAG,SAAWA,EAAG,QACtCA,EAAG,eAAe,UAGTvB,EAAQ,CACjBuB,EAAG,eAAe,EAGlB,IAAMG,EAAS1C,EAAG,cAAe,QAAQ,gBAAgB,EACrD0C,aAAkB,YACpBA,EAAO,MAAM,GAEbD,EAAAE,GAAiB,IAAjB,MAAAF,EAAoB,MACxB,CACF,CAAC,EAGLrB,EACG,KACCS,EAAUJ,CAAK,EACfO,EAAOY,GAAUA,IAAWvB,CAAO,EACnCwB,GAAM,GAAG,CACX,EACG,UAAU,IAAM7C,EAAG,MAAM,CAAC,EAGxBD,GAAgBC,EAAIC,CAAS,EACjC,KACC6C,EAAIC,GAASxB,EAAM,KAAKwB,CAAK,CAAC,EAC9BC,EAAS,IAAMzB,EAAM,SAAS,CAAC,EAC/BhB,EAAIwC,GAAUE,EAAA,CAAE,IAAKjD,GAAO+C,EAAQ,CACtC,CACJ,CAAC,CACH,CCxMA,SAASG,GAAUC,EAAuC,CACxD,OAAOA,EAAU,UAAY,OACzBC,EAAY,eAAgBD,CAAS,EACrC,CAACA,CAAS,CAChB,CASA,SAASE,GAAYF,EAAgC,CACnD,IAAMG,EAAkB,CAAC,EACzB,QAAWC,KAAML,GAAUC,CAAS,EAAG,CACrC,IAAMK,EAAgB,CAAC,EAGjBC,EAAK,SAAS,mBAAmBF,EAAI,WAAW,SAAS,EAC/D,QAASG,EAAOD,EAAG,SAAS,EAAGC,EAAMA,EAAOD,EAAG,SAAS,EACtDD,EAAM,KAAKE,CAAY,EAGzB,QAASC,KAAQH,EAAO,CACtB,IAAII,EAGJ,KAAQA,EAAQ,gBAAgB,KAAKD,EAAK,WAAY,GAAI,CACxD,GAAM,CAAC,CAAEE,EAAIC,CAAK,EAAIF,EACtB,GAAI,OAAOE,GAAU,YAAa,CAChC,IAAMC,EAASJ,EAAK,UAAUC,EAAM,KAAK,EACzCD,EAAOI,EAAO,UAAUF,EAAG,MAAM,EACjCP,EAAQ,KAAKS,CAAM,CAGrB,KAAO,CACLJ,EAAK,YAAcE,EACnBP,EAAQ,KAAKK,CAAI,EACjB,KACF,CACF,CACF,CACF,CACA,OAAOL,CACT,CAQA,SAASU,GAAKC,EAAqBC,EAA2B,CAC5DA,EAAO,OAAO,GAAG,MAAM,KAAKD,EAAO,UAAU,CAAC,CAChD,CAoBO,SAASE,GACdZ,EAAiBJ,EAAwB,CAAE,QAAAiB,EAAS,OAAAC,CAAO,EACxB,CAGnC,IAAMC,EAASnB,EAAU,QAAQ,MAAM,EACjCoB,EAASD,GAAA,YAAAA,EAAQ,GAGjBE,EAAc,IAAI,IACxB,QAAWT,KAAUV,GAAYF,CAAS,EAAG,CAC3C,GAAM,CAAC,CAAEU,CAAE,EAAIE,EAAO,YAAa,MAAM,WAAW,EAChDU,GAAmB,yBAAyBZ,CAAE,IAAKN,CAAE,IACvDiB,EAAY,IAAIX,EAAIa,GAAiBb,EAAIU,CAAM,CAAC,EAChDR,EAAO,YAAYS,EAAY,IAAIX,CAAE,CAAE,EAE3C,CAGA,OAAIW,EAAY,OAAS,EAChBG,EAGFC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAGlDC,EAAsC,CAAC,EAC7C,OAAW,CAACrB,EAAIsB,CAAU,IAAKX,EAC7BU,EAAM,KAAK,CACTE,EAAW,cAAeD,CAAU,EACpCC,EAAW,yBAAyBvB,CAAE,IAAKN,CAAE,CAC/C,CAAC,EAGH,OAAAc,EAAO,KAAKgB,EAAUN,CAAK,CAAC,EACzB,UAAUO,GAAU,CACnB/B,EAAG,OAAS,CAAC+B,EAGb/B,EAAG,UAAU,OAAO,qBAAsB+B,CAAM,EAGhD,OAAW,CAACC,EAAOC,CAAK,IAAKN,EACtBI,EAGHtB,GAAKuB,EAAOC,CAAK,EAFjBxB,GAAKwB,EAAOD,CAAK,CAGvB,CAAC,EAGIE,EAAM,GAAG,CAAC,GAAGjB,CAAW,EAC5B,IAAI,CAAC,CAAC,CAAEW,CAAU,IACjBO,GAAgBP,EAAYhC,EAAW,CAAE,QAAAiB,CAAQ,CAAC,CACnD,CACH,EACG,KACCuB,EAAS,IAAMd,EAAM,SAAS,CAAC,EAC/Be,GAAM,CACR,CACJ,CAAC,CACH,CC7JA,SAASC,GAASC,EAA0C,CAC1D,GAAIA,EAAG,mBAAoB,CACzB,IAAMC,EAAUD,EAAG,mBACnB,GAAIC,EAAQ,UAAY,KACtB,OAAOA,EAGJ,GAAIA,EAAQ,UAAY,KAAO,CAACA,EAAQ,SAAS,OACpD,OAAOF,GAASE,CAAO,CAC3B,CAIF,CAcO,SAASC,GACdF,EAAiBG,EACkB,CACnC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAON,GAASC,CAAE,EACxB,OAAO,OAAOK,GAAS,YACnBC,GAAoBD,EAAML,EAAIG,CAAO,EACrCI,CACN,CAAC,CACH,CCjEA,IAAAC,GAAwB,SA4ExB,IAAIC,GAAW,EAaf,SAASC,GAAkBC,EAA0C,CACnE,GAAIA,EAAG,mBAAoB,CACzB,I
AAMC,EAAUD,EAAG,mBACnB,GAAIC,EAAQ,UAAY,KACtB,OAAOA,EAGJ,GAAIA,EAAQ,UAAY,KAAO,CAACA,EAAQ,SAAS,OACpD,OAAOF,GAAkBE,CAAO,CACpC,CAIF,CAgBO,SAASC,GACdF,EACsB,CACtB,OAAOG,GAAiBH,CAAE,EACvB,KACCI,EAAI,CAAC,CAAE,MAAAC,CAAM,KAEJ,CACL,WAFcC,GAAsBN,CAAE,EAElB,MAAQK,CAC9B,EACD,EACDE,EAAwB,YAAY,CACtC,CACJ,CAoBO,SAASC,GACdR,EAAiBS,EACiB,CAClC,GAAM,CAAE,QAASC,CAAM,EAAI,WAAW,SAAS,EAGzCC,EAAWC,EAAM,IAAM,CAC3B,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,GAAS,CAAC,CAAC,EACpCH,EAAM,UAAU,CAAC,CAAE,WAAAI,CAAW,IAAM,CAC9BA,GAAcP,EAChBV,EAAG,aAAa,WAAY,GAAG,EAE/BA,EAAG,gBAAgB,UAAU,CACjC,CAAC,EAGD,IAAMkB,EAAoD,CAAC,EAC3D,GAAI,GAAAC,QAAY,YAAY,IACtBnB,EAAG,QAAQ,OAAO,GACpBoB,EAAQ,mBAAmB,GAAK,CAACpB,EAAG,QAAQ,UAAU,GACrD,CACD,IAAMqB,EAASrB,EAAG,QAAQ,KAAK,EAC/BqB,EAAO,GAAK,UAAUvB,IAAU,GAGhC,IAAMwB,EAASC,GAAsBF,EAAO,EAAE,EAC9CA,EAAO,aAAaC,EAAQtB,CAAE,EAC1BoB,EAAQ,kBAAkB,GAC5BF,EAAS,KAAKM,GAAoBF,EAAQ,CAAE,SAAU,CAAC,CAAC,CAC5D,CAIF,IAAMG,EAAYzB,EAAG,QAAQ,YAAY,EACzC,GAAIyB,aAAqB,YAAa,CACpC,IAAMC,EAAO3B,GAAkB0B,CAAS,EAGxC,GAAI,OAAOC,GAAS,cAClBD,EAAU,UAAU,SAAS,UAAU,GACvCL,EAAQ,uBAAuB,GAC9B,CACD,IAAMO,EAAeC,GAAoBF,EAAM1B,EAAIS,CAAO,EAC1DS,EAAS,KACPf,GAAiBsB,CAAS,EACvB,KACCI,EAAUd,CAAK,EACfX,EAAI,CAAC,CAAE,MAAAC,EAAO,OAAAyB,CAAO,IAAMzB,GAASyB,CAAM,EAC1CC,EAAqB,EACrBC,EAAUC,GAAUA,EAASN,EAAeO,CAAK,CACnD,CACJ,CACF,CACF,CAOA,OADcC,EAAY,oBAAqBnC,CAAE,EACvC,QACRA,EAAG,UAAU,IAAI,kBAAkB,EAG9BE,GAAeF,CAAE,EACrB,KACCoC,EAAIC,GAASxB,EAAM,KAAKwB,CAAK,CAAC,EAC9BC,EAAS,IAAMzB,EAAM,SAAS,CAAC,EAC/BT,EAAIiC,GAAUE,EAAA,CAAE,IAAKvC,GAAOqC,EAAQ,EACpCG,GAAU,GAAGtB,CAAQ,CACvB,CACJ,CAAC,EAGD,OAAIE,EAAQ,cAAc,EACjBqB,GAAuBzC,CAAE,EAC7B,KACC0C,EAAOC,GAAWA,CAAO,EACzBC,GAAK,CAAC,EACNZ,EAAU,IAAMrB,CAAQ,CAC1B,EAGGA,CACT,CCnLO,SAASkC,GACdC,EAAwB,CAAE,QAAAC,EAAS,OAAAC,CAAO,EACrB,CACrB,IAAIC,EAAO,GACX,OAAOC,EAGLH,EACG,KACCI,EAAIC,GAAUA,EAAO,QAAQ,qBAAqB,CAAE,EACpDC,EAAOC,GAAWR,IAAOQ,CAAO,EAChCH,EAAI,KAAO,CACT,OAAQ,OAAQ,OAAQ,EAC1B,EAAa,CACf,EAGFH,EACG,KACCK,EAAOE,GAAUA,GAAU,CAACN,CAAI,EAChCO,EAAI,IAAMP,EAAOH,EAAG,IAAI,EACxBK,EAAII,IAAW,CACb,OAAQA,EAAS,OAAS,OAC5B,EAAa,CACf,CACJ,CACF,CAaO,SAASE,GACdX,EAAwBY,EACQ,CAChC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,OAAAE,EAAQ,OAAAC,CAAO,IAAM,CACtCjB,EAAG,gBAAgB,OAAQgB,IAAW,MAAM,EACxCC,GACFjB,EAAG,eAAe,CACtB,CAAC,EAGMD,GAAaC,EAAIY,CAAO,EAC5B,KACCF,EAAIQ,GAASJ,EAAM,KAAKI,CAAK,CAAC,EAC9BC,EAAS,IAAML,EAAM,SAAS,CAAC,EAC/BT,EAAIa,GAAUE,EAAA,CAAE,IAAKpB,GAAOkB,EAAQ,CACtC,CACJ,CAAC,CACH,CCzIA,IAAAG,GAAA,yvLCqDA,IAAIC,GAKAC,GAAW,EAWf,SAASC,IAAiC,CACxC,OAAO,OAAO,SAAY,aAAe,mBAAmB,QACxDC,GAAY,kDAAkD,EAC9DC,EAAG,MAAS,CAClB,CAaO,SAASC,GACdC,EACgC,CAChC,OAAAA,EAAG,UAAU,OAAO,SAAS,EAC7BN,QAAaE,GAAa,EACvB,KACCK,EAAI,IAAM,QAAQ,WAAW,CAC3B,YAAa,GACb,SAAAC,GACA,SAAU,CACR,cAAe,OACf,gBAAiB,OACjB,aAAc,MAChB,CACF,CAAC,CAAC,EACFC,EAAI,IAAG,EAAY,EACnBC,EAAY,CAAC,CACf,GAGFV,GAAS,UAAU,IAAYW,GAAA,sBAC7BL,EAAG,UAAU,IAAI,SAAS,EAC1B,IAAMM,EAAK,aAAaX,IAAU,GAG5BY,EAAOC,EAAE,MAAO,CAAE,MAAO,SAAU,CAAC,EACpCC,EAAOT,EAAG,YAGV,CAAE,IAAAU,EAAK,GAAAC,CAAG,EAAI,MAAM,QAAQ,OAAOL,EAAIG,CAAI,EAG3CG,EAASL,EAAK,aAAa,CAAE,KAAM,QAAS,CAAC,EACnDK,EAAO,UAAYF,EAGnBV,EAAG,YAAYO,CAAI,EACnBI,GAAA,MAAAA,EAAKC,EACP,EAAC,EAGMlB,GACJ,KACCS,EAAI,KAAO,CAAE,IAAKH,CAAG,EAAE,CACzB,CACJ,CCtFA,IAAMa,GAAWC,EAAE,OAAO,EAgBnB,SAASC,GACdC,EACkC,CAClC,OAAAA,EAAG,YAAYH,EAAQ,EACvBA,GAAS,YAAYI,GAAYD,CAAE,CAAC,EAG7BE,EAAG,CAAE,IAAKF,CAAG,CAAC,CACvB,CC4BO,SAASG,GACdC,EACyB,CACzB,IAAMC,EAAUD,EAAO,KAAKE,GAASA,EAAM,OAAO,GAAKF,EAAO,CAAC,EAC/D,OAAOG,EAAM,GAAGH,EAAO,IAAIE,GAASE,EAAUF,EAAO,QAAQ,EAC1D,KACCG,EAAI,IAAMC,EAA6B,cAAcJ,EAAM,EAAE,IAAI,CAAC,CACpE,CACF,CAAC,EACE,KACCK,EAAUD,EAA6B
,cAAcL,EAAQ,EAAE,IAAI,CAAC,EACpEI,EAAIG,IAAW,CAAE,OAAAA,CAAO,EAAE,CAC5B,CACJ,CAUO,SAASC,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACF,CACpC,IAAMC,EAAYP,EAAW,iBAAkBI,CAAE,EAC3CV,EAASc,EAA8B,iBAAkBJ,CAAE,EAG3DK,EAAOC,GAAoB,MAAM,EACvCN,EAAG,OAAOK,CAAI,EAGd,IAAME,EAAOD,GAAoB,MAAM,EACvC,OAAAN,EAAG,OAAOO,CAAI,EAGPC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxDC,EAAc,CAACL,EAAOM,GAAiBf,CAAE,EAAGgB,GAAuBhB,CAAE,CAAC,CAAC,EACpE,KACCiB,EAAUN,CAAK,EACfO,GAAU,EAAGC,EAAuB,CACtC,EACG,UAAU,CAGT,KAAK,CAAC,CAAE,OAAArB,CAAO,EAAGsB,CAAI,EAAG,CACvB,IAAMC,EAASC,GAAiBxB,CAAM,EAChC,CAAE,MAAAyB,CAAM,EAAIC,GAAe1B,CAAM,EAGvCE,EAAG,MAAM,YAAY,mBAAoB,GAAGqB,EAAO,CAAC,IAAI,EACxDrB,EAAG,MAAM,YAAY,uBAAwB,GAAGuB,CAAK,IAAI,EAGzD,IAAME,EAAUC,GAAwBvB,CAAS,GAE/CkB,EAAO,EAAYI,EAAQ,GAC3BJ,EAAO,EAAIE,EAAQE,EAAQ,EAAIL,EAAK,QAEpCjB,EAAU,SAAS,CACjB,KAAM,KAAK,IAAI,EAAGkB,EAAO,EAAI,EAAE,EAC/B,SAAU,QACZ,CAAC,CACL,EAGA,UAAW,CACTrB,EAAG,MAAM,eAAe,kBAAkB,EAC1CA,EAAG,MAAM,eAAe,sBAAsB,CAChD,CACF,CAAC,EAGLc,EAAc,CACZa,GAA0BxB,CAAS,EACnCY,GAAiBZ,CAAS,CAC5B,CAAC,EACE,KACCc,EAAUN,CAAK,CACjB,EACG,UAAU,CAAC,CAACU,EAAQD,CAAI,IAAM,CAC7B,IAAMK,EAAUG,GAAsBzB,CAAS,EAC/CE,EAAK,OAASgB,EAAO,EAAI,GACzBd,EAAK,OAASc,EAAO,EAAII,EAAQ,MAAQL,EAAK,MAAQ,EACxD,CAAC,EAGL3B,EACEC,EAAUW,EAAM,OAAO,EAAE,KAAKV,EAAI,IAAM,EAAE,CAAC,EAC3CD,EAAUa,EAAM,OAAO,EAAE,KAAKZ,EAAI,IAAM,CAAE,CAAC,CAC7C,EACG,KACCsB,EAAUN,CAAK,CACjB,EACG,UAAUkB,GAAa,CACtB,GAAM,CAAE,MAAAN,CAAM,EAAIC,GAAerB,CAAS,EAC1CA,EAAU,SAAS,CACjB,KAAMoB,EAAQM,EACd,SAAU,QACZ,CAAC,CACH,CAAC,EAGL3B,EACG,KACCe,EAAUN,CAAK,EACfmB,EAAOtC,GAASF,EAAO,SAASE,CAAyB,CAAC,CAC5D,EACG,UAAUA,GAASA,EAAM,MAAM,CAAC,EAGrCW,EAAU,UAAU,IAAI,uBAAuB,EAC/C,QAAWX,KAASF,EAAQ,CAC1B,IAAMyC,EAAQnC,EAA6B,cAAcJ,EAAM,EAAE,IAAI,EACrEuC,EAAM,gBAAgBC,EAAE,IAAK,CAC3B,KAAM,IAAID,EAAM,OAAO,GACvB,SAAU,EACZ,EAAG,GAAG,MAAM,KAAKA,EAAM,UAAU,CAAC,CAAC,EAGnCrC,EAAsBqC,EAAM,kBAAoB,OAAO,EACpD,KACCd,EAAUN,CAAK,EACfmB,EAAOG,GAAM,EAAEA,EAAG,SAAWA,EAAG,QAAQ,EACxCC,EAAID,GAAM,CACRA,EAAG,eAAe,EAClBA,EAAG,gBAAgB,CACrB,CAAC,CACH,EAEG,UAAU,IAAM,CACf,QAAQ,aAAa,CAAC,EAAG,GAAI,IAAIF,EAAM,OAAO,EAAE,EAChDA,EAAM,MAAM,CACd,CAAC,CACP,CAGA,OAAII,EAAQ,mBAAmB,GAC7B1B,EAAM,KACJ2B,GAAK,CAAC,EACNC,GAAepC,CAAS,CAC1B,EACG,UAAU,CAAC,CAAC,CAAE,OAAAH,CAAO,EAAG,CAAE,OAAAuB,CAAO,CAAC,IAAM,CACvC,IAAMiB,EAAMxC,EAAO,UAAU,KAAK,EAClC,GAAIA,EAAO,aAAa,mBAAmB,EACzCA,EAAO,gBAAgB,mBAAmB,MAGrC,CACL,IAAMyC,EAAIvC,EAAG,UAAYqB,EAAO,EAGhC,QAAWmB,KAAOpC,EAAY,aAAa,EACzC,QAAWZ,KAASY,EAClB,iBAAkBoC,CACpB,EAAG,CACD,IAAMT,GAAQnC,EAAW,cAAcJ,EAAM,EAAE,IAAI,EACnD,GACEuC,KAAUjC,GACViC,GAAM,UAAU,KAAK,IAAMO,EAC3B,CACAP,GAAM,aAAa,oBAAqB,EAAE,EAC1CvC,EAAM,MAAM,EACZ,KACF,CACF,CAGF,OAAO,SAAS,CACd,IAAKQ,EAAG,UAAYuC,CACtB,CAAC,EAGD,IAAME,EAAO,SAAmB,QAAQ,GAAK,CAAC,EAC9C,SAAS,SAAU,CAAC,GAAG,IAAI,IAAI,CAACH,EAAK,GAAGG,CAAI,CAAC,CAAC,CAAC,CACjD,CACF,CAAC,EAGLhC,EAAM,KAAKQ,EAAUN,CAAK,CAAC,EACxB,UAAU,IAAM,CACf,QAAW+B,KAAStC,EAA8B,eAAgBJ,CAAE,EAClE0C,EAAM,MAAM,CAChB,CAAC,EAGIrD,GAAiBC,CAAM,EAC3B,KACC4C,EAAIS,GAASlC,EAAM,KAAKkC,CAAK,CAAC,EAC9BC,EAAS,IAAMnC,EAAM,SAAS,CAAC,EAC/Bd,EAAIgD,GAAUE,EAAA,CAAE,IAAK7C,GAAO2C,EAAQ,CACtC,CACJ,CAAC,EACE,KACCG,GAAYC,EAAc,CAC5B,CACJ,CCpMO,SAASC,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,EAAS,OAAAC,CAAO,EACd,CAChC,OAAOC,EAGL,GAAGC,EAAY,4BAA6BL,CAAE,EAC3C,IAAIM,GAASC,GAAqBD,EAAO,CAAE,QAAAJ,EAAS,OAAAC,CAAO,CAAC,CAAC,EAGhE,GAAGE,EAAY,2BAA4BL,CAAE,EAC1C,IAAIM,GAASE,GAAeF,EAAO,CAAE,QAAAJ,EAAS,OAAAC,CAAO,CAAC,CAAC,EAG1D,GAAGE,EAAY,cAAeL,CAAE,EAC7B,IAAIM,GAASG,GAAaH,CAAK,CAAC,EAGnC,GAAGD,EAAY,qBAAsBL,CAAE,EACpC,IAAIM,GAASI,GAAeJ,CAAK,CAAC,EAGrC,GAAGD,EAAY,UAAWL,CAAE,EACzB,IAAIM
,GAASK,GAAaL,EAAO,CAAE,QAAAJ,EAAS,OAAAC,CAAO,CAAC,CAAC,EAGxD,GAAGE,EAAY,cAAeL,CAAE,EAC7B,IAAIM,GAASM,GAAiBN,EAAO,CAAE,UAAAL,EAAW,QAAAC,CAAQ,CAAC,CAAC,EAG/D,GAAGG,EAAY,UAAWL,CAAE,EACzB,OAAO,IAAMa,EAAQ,kBAAkB,CAAC,EACxC,IAAIP,GAASQ,GAAoBR,EAAO,CAAE,UAAAL,CAAU,CAAC,CAAC,CAC3D,CACF,CCtDO,SAASc,GACdC,EAAkB,CAAE,OAAAC,CAAO,EACP,CACpB,OAAOA,EACJ,KACCC,EAAUC,GAAWC,EACnBC,EAAG,EAAI,EACPA,EAAG,EAAK,EAAE,KAAKC,GAAM,GAAI,CAAC,CAC5B,EACG,KACCC,EAAIC,IAAW,CAAE,QAAAL,EAAS,OAAAK,CAAO,EAAE,CACrC,CACF,CACF,CACJ,CAaO,SAASC,GACdC,EAAiBC,EACc,CAC/B,IAAMC,EAAQC,EAAW,cAAeH,CAAE,EAC1C,OAAOI,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,QAAAZ,EAAS,OAAAK,CAAO,IAAM,CACvCE,EAAG,UAAU,OAAO,oBAAqBF,CAAM,EAC/CI,EAAM,YAAcT,CACtB,CAAC,EAGMJ,GAAYW,EAAIC,CAAO,EAC3B,KACCM,EAAIC,GAASH,EAAM,KAAKG,CAAK,CAAC,EAC9BC,EAAS,IAAMJ,EAAM,SAAS,CAAC,EAC/BR,EAAIW,GAAUE,EAAA,CAAE,IAAKV,GAAOQ,EAAQ,CACtC,CACJ,CAAC,CACH,CCnDA,IAAIG,GAAW,EAiBR,SAASC,GACdC,EAAiBC,EACI,CACrB,SAAS,KAAK,OAAOD,CAAE,EAGvB,GAAM,CAAE,MAAAE,CAAM,EAAIC,GAAeH,CAAE,EACnCA,EAAG,MAAM,YAAY,qBAAsB,GAAGE,CAAK,IAAI,EACvDF,EAAG,OAAO,EAGV,IAAMI,EAAYC,GAAoBJ,CAAI,EACpCK,EACJ,OAAOF,GAAc,YACjBG,GAA0BH,CAAS,EACnCI,EAAG,CAAE,EAAG,EAAG,EAAG,CAAE,CAAC,EAGjBC,EAAUC,EACdC,GAAkBV,CAAI,EACtBW,GAAkBX,CAAI,CACxB,EACG,KACCY,EAAqB,CACvB,EAGF,OAAOC,EAAc,CAACL,EAASH,CAAO,CAAC,EACpC,KACCS,EAAI,CAAC,CAACC,EAAQC,CAAM,IAAM,CACxB,GAAI,CAAE,EAAAC,EAAG,EAAAC,CAAE,EAAIC,GAAiBnB,CAAI,EAC9BoB,EAAOlB,GAAeF,CAAI,EAU1BqB,EAAQrB,EAAK,QAAQ,OAAO,EAClC,OAAIqB,GAASrB,EAAK,gBAChBiB,GAAKI,EAAM,WAAarB,EAAK,cAAc,WAC3CkB,GAAKG,EAAM,UAAarB,EAAK,cAAc,WAEtC,CACL,OAAAe,EACA,OAAQ,CACN,EAAGE,EAAID,EAAO,EAAII,EAAK,MAAS,EAAInB,EAAQ,EAC5C,EAAGiB,EAAIF,EAAO,EAAII,EAAK,OAAS,CAClC,CACF,CACF,CAAC,CACH,CACJ,CASO,SAASE,GACdvB,EACgC,CAChC,IAAMwB,EAAQxB,EAAG,MACjB,GAAI,CAACwB,EAAM,OACT,OAAOC,EAGT,IAAMC,EAAK,aAAa5B,IAAU,GAC5B6B,EAAUC,GAAcF,EAAI,QAAQ,EACpCG,EAAUC,EAAW,cAAeH,CAAO,EACjD,OAAAE,EAAQ,UAAYL,EAGbO,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAE,CAAO,EAAG,CACfP,EAAQ,MAAM,YAAY,iBAAkB,GAAGO,EAAO,CAAC,IAAI,EAC3DP,EAAQ,MAAM,YAAY,iBAAkB,GAAGO,EAAO,CAAC,IAAI,CAC7D,EAGA,UAAW,CACTP,EAAQ,MAAM,eAAe,gBAAgB,EAC7CA,EAAQ,MAAM,eAAe,gBAAgB,CAC/C,CACF,CAAC,EAGDjB,EACEsB,EAAM,KAAKG,EAAO,CAAC,CAAE,OAAAnB,CAAO,IAAMA,CAAM,CAAC,EACzCgB,EAAM,KAAKI,GAAa,GAAG,EAAGD,EAAO,CAAC,CAAE,OAAAnB,CAAO,IAAM,CAACA,CAAM,CAAC,CAC/D,EACG,UAAU,CAGT,KAAK,CAAE,OAAAA,CAAO,EAAG,CACXA,GACFhB,EAAG,sBAAsB,WAAY2B,CAAO,EAC5C3B,EAAG,aAAa,mBAAoB0B,CAAE,EACtC1B,EAAG,gBAAgB,OAAO,IAE1B2B,EAAQ,OAAO,EACf3B,EAAG,gBAAgB,kBAAkB,EACrCA,EAAG,aAAa,QAASwB,CAAK,EAElC,EAGA,UAAW,CACTG,EAAQ,OAAO,EACf3B,EAAG,gBAAgB,kBAAkB,EACrCA,EAAG,aAAa,QAASwB,CAAK,CAChC,CACF,CAAC,EAGHQ,EACG,KACCK,GAAU,GAAIC,EAAuB,CACvC,EACG,UAAU,CAAC,CAAE,OAAAtB,CAAO,IAAM,CACzBW,EAAQ,UAAU,OAAO,qBAAsBX,CAAM,CACvD,CAAC,EAMLgB,EACG,KACCO,GAAa,IAAKD,EAAuB,EACzCH,EAAO,IAAM,CAAC,CAACnC,EAAG,YAAY,EAC9Be,EAAI,IAAMf,EAAG,aAAc,sBAAsB,CAAC,EAClDe,EAAI,CAAC,CAAE,EAAAG,CAAE,IAAMA,CAAC,CAClB,EACC,UAAU,CAGT,KAAKsB,EAAQ,CACPA,EACFb,EAAQ,MAAM,YAAY,iBAAkB,GAAG,CAACa,CAAM,IAAI,EAE1Db,EAAQ,MAAM,eAAe,gBAAgB,CACjD,EAGA,UAAW,CACTA,EAAQ,MAAM,eAAe,gBAAgB,CAC/C,CACF,CAAC,EAGI5B,GAAa4B,EAAS3B,CAAE,EAC5B,KACCyC,EAAIC,GAASV,EAAM,KAAKU,CAAK,CAAC,EAC9BC,EAAS,IAAMX,EAAM,SAAS,CAAC,EAC/BjB,EAAI2B,GAAUE,EAAA,CAAE,IAAK5C,GAAO0C,EAAQ,CACtC,CACJ,CAAC,EACE,KACCG,GAAYC,EAAc,CAC5B,CACJ,CC7JA,SAASC,GAAS,CAAE,UAAAC,CAAU,EAAsC,CAClE,GAAI,CAACC,EAAQ,iBAAiB,EAC5B,OAAOC,EAAG,EAAK,EAGjB,IAAMC,EAAaH,EAChB,KACCI,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,IAAMA,CAAC,EAC5BC,GAAY,EAAG,CAAC,EAChBF,EAAI,CAAC,CAACG,EAAGC,CAAC,IAAM,CAACD,EA
AIC,EAAGA,CAAC,CAAU,EACnCC,EAAwB,CAAC,CAC3B,EAGIC,EAAUC,EAAc,CAACX,EAAWG,CAAU,CAAC,EAClD,KACCS,EAAO,CAAC,CAAC,CAAE,OAAAC,CAAO,EAAG,CAAC,CAAER,CAAC,CAAC,IAAM,KAAK,IAAIA,EAAIQ,EAAO,CAAC,EAAI,GAAG,EAC5DT,EAAI,CAAC,CAAC,CAAE,CAACU,CAAS,CAAC,IAAMA,CAAS,EAClCC,EAAqB,CACvB,EAGIC,EAAUC,GAAY,QAAQ,EACpC,OAAON,EAAc,CAACX,EAAWgB,CAAO,CAAC,EACtC,KACCZ,EAAI,CAAC,CAAC,CAAE,OAAAS,CAAO,EAAGK,CAAM,IAAML,EAAO,EAAI,KAAO,CAACK,CAAM,EACvDH,EAAqB,EACrBI,EAAUC,GAAUA,EAASV,EAAUR,EAAG,EAAK,CAAC,EAChDmB,EAAU,EAAK,CACjB,CACJ,CAcO,SAASC,GACdC,EAAiBC,EACG,CACpB,OAAOC,EAAM,IAAMd,EAAc,CAC/Be,GAAiBH,CAAE,EACnBxB,GAASyB,CAAO,CAClB,CAAC,CAAC,EACC,KACCpB,EAAI,CAAC,CAAC,CAAE,OAAAuB,CAAO,EAAGC,CAAM,KAAO,CAC7B,OAAAD,EACA,OAAAC,CACF,EAAE,EACFb,EAAqB,CAACR,EAAGC,IACvBD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,MAChB,EACDqB,EAAY,CAAC,CACf,CACJ,CAaO,SAASC,GACdP,EAAiB,CAAE,QAAAQ,EAAS,MAAAC,CAAM,EACO,CACzC,OAAOP,EAAM,IAAM,CACjB,IAAMQ,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxDJ,EACG,KACCxB,EAAwB,QAAQ,EAChC6B,GAAkBP,CAAO,CAC3B,EACG,UAAU,CAAC,CAAC,CAAE,OAAAX,CAAO,EAAG,CAAE,OAAAQ,CAAO,CAAC,IAAM,CACvCL,EAAG,UAAU,OAAO,oBAAqBH,GAAU,CAACQ,CAAM,EAC1DL,EAAG,OAASK,CACd,CAAC,EAGL,IAAMW,EAAWC,GAAKC,EAAY,UAAWlB,CAAE,CAAC,EAC7C,KACCX,EAAO,IAAMX,EAAQ,kBAAkB,CAAC,EACxCyC,GAASC,GAASC,GAAaD,CAAK,CAAC,CACvC,EAGF,OAAAX,EAAM,UAAUC,CAAK,EAGdF,EACJ,KACCc,EAAUV,CAAK,EACf/B,EAAI0C,GAAUC,EAAA,CAAE,IAAKxB,GAAOuB,EAAQ,EACpCE,GAAUT,EAAS,KAAKM,EAAUV,CAAK,CAAC,CAAC,CAC3C,CACJ,CAAC,CACH,CCjIO,SAASc,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACb,CACzB,OAAOC,GAAgBH,EAAI,CAAE,UAAAC,EAAW,QAAAC,CAAQ,CAAC,EAC9C,KACCE,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,IAAM,CACzB,GAAM,CAAE,OAAAC,CAAO,EAAIC,GAAeP,CAAE,EACpC,MAAO,CACL,OAAQK,GAAKC,CACf,CACF,CAAC,EACDE,EAAwB,QAAQ,CAClC,CACJ,CAaO,SAASC,GACdT,EAAiBU,EACmB,CACpC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClBD,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAE,CAAO,EAAG,CACfd,EAAG,UAAU,OAAO,2BAA4Bc,CAAM,CACxD,EAGA,UAAW,CACTd,EAAG,UAAU,OAAO,0BAA0B,CAChD,CACF,CAAC,EAGD,IAAMe,EAAUC,GAAmB,gBAAgB,EACnD,OAAI,OAAOD,GAAY,YACdE,EAGFlB,GAAiBgB,EAASL,CAAO,EACrC,KACCQ,EAAIC,GAASP,EAAM,KAAKO,CAAK,CAAC,EAC9BC,EAAS,IAAMR,EAAM,SAAS,CAAC,EAC/BR,EAAIe,GAAUE,EAAA,CAAE,IAAKrB,GAAOmB,EAAQ,CACtC,CACJ,CAAC,CACH,CChEO,SAASG,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACpB,CAGlB,IAAMC,EAAUD,EACb,KACCE,EAAI,CAAC,CAAE,OAAAC,CAAO,IAAMA,CAAM,EAC1BC,EAAqB,CACvB,EAGIC,EAAUJ,EACb,KACCK,EAAU,IAAMC,GAAiBT,CAAE,EAChC,KACCI,EAAI,CAAC,CAAE,OAAAC,CAAO,KAAO,CACnB,IAAQL,EAAG,UACX,OAAQA,EAAG,UAAYK,CACzB,EAAE,EACFK,EAAwB,QAAQ,CAClC,CACF,CACF,EAGF,OAAOC,EAAc,CAACR,EAASI,EAASN,CAAS,CAAC,EAC/C,KACCG,EAAI,CAAC,CAACQ,EAAQ,CAAE,IAAAC,EAAK,OAAAC,CAAO,EAAG,CAAE,OAAQ,CAAE,EAAAC,CAAE,EAAG,KAAM,CAAE,OAAAV,CAAO,CAAE,CAAC,KAChEA,EAAS,KAAK,IAAI,EAAGA,EACjB,KAAK,IAAI,EAAGQ,EAASE,EAAIH,CAAM,EAC/B,KAAK,IAAI,EAAGP,EAASU,EAAID,CAAM,CACnC,EACO,CACL,OAAQD,EAAMD,EACd,OAAAP,EACA,OAAQQ,EAAMD,GAAUG,CAC1B,EACD,EACDT,EAAqB,CAACU,EAAGC,IACvBD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,MAChB,CACH,CACJ,CCxCO,SAASC,GACdC,EACqB,CACrB,IAAMC,EAAU,SAAkB,WAAW,GAAK,CAChD,MAAOD,EAAO,UAAUE,GAAS,WAC/BA,EAAM,aAAa,qBAAqB,CAC1C,EAAE,OAAO,CACX,EAGMC,EAAQ,KAAK,IAAI,EAAG,KAAK,IAAIF,EAAQ,MAAOD,EAAO,OAAS,CAAC,CAAC,EACpE,OAAOI,EAAG,GAAGJ,CAAM,EAChB,KACCK,GAASH,GAASI,EAAUJ,EAAO,QAAQ,EAAE,KAAKK,EAAI,IAAML,CAAK,CAAC,CAAC,EACnEM,EAAUR,EAAOG,CAAK,CAAC,EACvBI,EAAIL,IAAU,CACZ,MAAOF,EAAO,QAAQE,CAAK,EAC3B,MAAO,CACL,MAASA,EAAM,aAAa,qBAAqB,EACjD,OAASA,EAAM,aAAa,sBAAsB,EAClD,QAASA,EAAM,aAAa,uBAAuB,EACnD,OAASA,EAAM,aAAa,sBAAsB,CACpD,CACF,EAAa,EACbO,EAAY,CAAC,CACf,CACJ,CASO,SAASC,GACdC,EACgC,CAChC,IAAMX,EAASY,EAA8B,QAASD,
CAAE,EAClDE,EAAOC,EAAE,OAAQ,CAAE,KAAM,aAAc,CAAC,EAC9C,SAAS,KAAK,YAAYD,CAAI,EAG9B,IAAME,EAASD,EAAE,OAAQ,CAAE,KAAM,cAAe,CAAC,EACjD,SAAS,KAAK,YAAYC,CAAM,EAGhC,IAAMC,EAASC,GAAW,+BAA+B,EACzD,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAUE,GAAW,CAIzB,GAHA,SAAS,KAAK,aAAa,0BAA2B,EAAE,EAGpDA,EAAQ,MAAM,QAAU,yBAA0B,CACpD,IAAMC,EAAQ,WAAW,+BAA+B,EAClDpB,EAAQ,SAAS,cAAcoB,EAAM,QACvC,wDACA,sDACJ,EAGAD,EAAQ,MAAM,OAAUnB,EAAM,aAAa,sBAAsB,EACjEmB,EAAQ,MAAM,QAAUnB,EAAM,aAAa,uBAAuB,EAClEmB,EAAQ,MAAM,OAAUnB,EAAM,aAAa,sBAAsB,CACnE,CAGA,OAAW,CAACqB,EAAKC,CAAK,IAAK,OAAO,QAAQH,EAAQ,KAAK,EACrD,SAAS,KAAK,aAAa,iBAAiBE,CAAG,GAAIC,CAAK,EAG1D,QAASrB,EAAQ,EAAGA,EAAQH,EAAO,OAAQG,IAAS,CAClD,IAAMsB,EAAQzB,EAAOG,CAAK,EAAE,mBACxBsB,aAAiB,cACnBA,EAAM,OAASJ,EAAQ,QAAUlB,EACrC,CAGA,SAAS,YAAakB,CAAO,CAC/B,CAAC,EAGDf,EAAyBK,EAAI,SAAS,EAAE,KACtCe,EAAOC,GAAMA,EAAG,MAAQ,OAAO,EAC/BC,GAAeT,EAAO,CAACU,EAAGR,IAAYA,CAAO,CAC/C,EACG,UAAU,CAAC,CAAE,MAAAlB,CAAM,IAAM,CACxBA,GAASA,EAAQ,GAAKH,EAAO,OAC7BA,EAAOG,CAAK,EAAE,MAAM,EACpBH,EAAOG,CAAK,EAAE,MAAM,CACtB,CAAC,EAGHgB,EACG,KACCZ,EAAI,IAAM,CACR,IAAMuB,EAASC,GAAoB,QAAQ,EACrCC,EAAS,OAAO,iBAAiBF,CAAM,EAG7C,OAAAf,EAAO,QAAUiB,EAAM,YAGhBA,EAAM,gBAAgB,MAAM,MAAM,EACtC,IAAIR,IAAU,CAACA,GAAO,SAAS,EAAE,EAAE,SAAS,EAAG,GAAG,CAAC,EACnD,KAAK,EAAE,CACZ,CAAC,CACH,EACG,UAAUS,GAASpB,EAAK,QAAU,IAAIoB,CAAK,EAAE,EAGlDd,EAAM,KAAKe,GAAUC,EAAc,CAAC,EACjC,UAAU,IAAM,CACf,SAAS,KAAK,gBAAgB,yBAAyB,CACzD,CAAC,EAGIpC,GAAaC,CAAM,EACvB,KACCoC,EAAUpB,EAAO,KAAKqB,GAAK,CAAC,CAAC,CAAC,EAC9BC,GAAO,EACPC,EAAIC,GAASrB,EAAM,KAAKqB,CAAK,CAAC,EAC9BC,EAAS,IAAMtB,EAAM,SAAS,CAAC,EAC/BZ,EAAIiC,GAAUE,EAAA,CAAE,IAAK/B,GAAO6B,EAAQ,CACtC,CACJ,CAAC,CACH,CChJO,SAASG,GACdC,EAAiB,CAAE,UAAAC,CAAU,EACI,CAGjC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,MAAAE,CAAM,IAAM,CAC7BL,EAAG,MAAM,YAAY,sBAAuB,GAAGK,CAAK,EAAE,CACxD,CAAC,EAGMJ,EACJ,KACCK,EAAID,GAASF,EAAM,KAAK,CAAE,MAAAE,CAAM,CAAC,CAAC,EAClCE,EAAS,IAAMJ,EAAM,SAAS,CAAC,EAC/BK,EAAIH,IAAU,CAAE,IAAKL,EAAI,MAAAK,CAAM,EAAE,CACnC,CACJ,CAAC,CACH,CChEA,IAAAI,GAAwB,SAiCxB,SAASC,GAAQC,EAAyB,CACxCA,EAAG,aAAa,kBAAmB,EAAE,EACrC,IAAMC,EAAOD,EAAG,QAAQ,aAAa,EAC/BE,EAAOD,EACTA,EAAK,aAAa,WAAW,EAC7BD,EAAG,UACP,OAAAA,EAAG,gBAAgB,iBAAiB,EAC7BE,EAAK,QAAQ,CACtB,CAWO,SAASC,GACd,CAAE,OAAAC,CAAO,EACH,CACF,GAAAC,QAAY,YAAY,GAC1B,IAAIC,EAA8BC,GAAc,CAC9C,IAAI,GAAAF,QAAY,iDAAkD,CAChE,KAAML,GACJA,EAAG,aAAa,qBAAqB,GACrCD,GAAQS,EACNR,EAAG,aAAa,uBAAuB,CACzC,CAAC,CAEL,CAAC,EACE,GAAG,UAAWS,GAAMF,EAAW,KAAKE,CAAE,CAAC,CAC5C,CAAC,EACE,KACCC,EAAID,GAAM,CACQA,EAAG,QACX,MAAM,CAChB,CAAC,EACDE,EAAI,IAAMC,GAAY,kBAAkB,CAAC,CAC3C,EACG,UAAUR,CAAM,CAEzB,CCrCA,SAASS,GAAQC,EAAUC,EAAW,CACpC,OAAAD,EAAI,SAAWC,EAAK,SACpBD,EAAI,SAAWC,EAAK,SACbD,CACT,CA2BA,SAASE,GAAQC,EAAoBF,EAAoB,CACvD,IAAMG,EAAmB,IAAI,IAC7B,QAAWC,KAAMC,EAAY,MAAOH,CAAQ,EAAG,CAC7C,IAAMH,EAAMO,EAAW,MAAOF,CAAE,EAG1BG,EAAQ,CAACT,GAAQ,IAAI,IAAIC,EAAI,WAAY,EAAGC,CAAI,CAAC,EACvDG,EAAQ,IAAI,GAAGI,EAAM,CAAC,CAAC,GAAIA,CAAK,EAGhC,QAAWC,KAAQH,EAAY,kBAAmBD,CAAE,EAAG,CACrD,IAAMK,EAAOD,EAAK,aAAa,MAAM,EACjCC,GAAQ,MACVF,EAAM,KAAKT,GAAQ,IAAI,IAAIW,CAAI,EAAGT,CAAI,CAAC,CAC3C,CACF,CAGA,OAAOG,CACT,CAgBO,SAASO,GAAaV,EAAyC,CACpE,OAAOW,GAAW,IAAI,IAAI,cAAeX,CAAI,CAAC,EAC3C,KACCY,EAAIV,GAAYD,GAAQC,EAAU,IAAI,IAAIF,CAAI,CAAC,CAAC,EAChDa,GAAW,IAAMC,EAAG,IAAI,GAAK,CAAC,CAChC,CACJ,CClDA,SAASC,GACPC,EAAgBC,EACC,CACjB,GAAI,EAAED,EAAG,kBAAkB,SACzB,OAAOE,EAIT,IAAMC,EAAKH,EAAG,OAAO,QAAQ,GAAG,EAChC,GAAIG,IAAO,KACT,OAAOD,EAMT,GAAIC,EAAG,QAAUH,EAAG,SAAWA,EAAG,QAChC,OAAOE,EAQT,IAAME,EAAM,IAAI,IAAID,EAAG,IAAI,EAO3B,OANAC,EAAI,OAASA,EAAI,KAAO,GAMnBH,EAAQ,IAAI,GAAGG,CAAG,EAAE,GASzBJ,EAAG,eAAe,EAC
XK,EAAG,IAAI,IAAIF,EAAG,IAAI,CAAC,GATjBD,CAUX,CASA,SAASI,GAAKC,EAA8C,CAC1D,IAAMC,EAAO,IAAI,IACjB,QAAWL,KAAMM,EAAY,aAAcF,EAAS,IAAI,EACtDC,EAAK,IAAIL,EAAG,UAAWA,CAAE,EAG3B,OAAOK,CACT,CAYA,SAASE,GAAQH,EAA0C,CACzD,QAAWJ,KAAMM,EAAY,gBAAiBF,CAAQ,EACpD,QAAWI,IAAO,CAAC,OAAQ,KAAK,EAAG,CACjC,IAAMC,EAAQT,EAAG,aAAaQ,CAAG,EACjC,GAAIC,GAAS,CAAC,qBAAqB,KAAKA,CAAK,EAAG,CAE9CT,EAAGQ,CAAG,EAAIR,EAAGQ,CAAG,EAChB,KACF,CACF,CAGF,OAAON,EAAGE,CAAQ,CACpB,CASA,SAASM,GAAOC,EAAsC,CACpD,QAAWC,IAAY,CACrB,+BACA,gCACA,mCACA,+BACA,2BACA,2BACA,GAAGC,EAAQ,wBAAwB,EAC/B,CAAC,0BAA0B,EAC3B,CAAC,CACP,EAAG,CACD,IAAMC,EAASC,GAAmBH,CAAQ,EACpCI,EAASD,GAAmBH,EAAUD,CAAI,EAE9C,OAAOG,GAAW,aAClB,OAAOE,GAAW,aAElBF,EAAO,YAAYE,CAAM,CAE7B,CAGA,IAAMX,EAAOF,GAAK,QAAQ,EAC1B,OAAW,CAACc,EAAMjB,CAAE,IAAKG,GAAKQ,CAAI,EAC5BN,EAAK,IAAIY,CAAI,EACfZ,EAAK,OAAOY,CAAI,EAEhB,SAAS,KAAK,YAAYjB,CAAE,EAGhC,QAAWA,KAAMK,EAAK,OAAO,EAAG,CAC9B,IAAMa,EAAOlB,EAAG,aAAa,MAAM,EAI/BkB,IAAS,eAAiBA,IAAS,gBACrClB,EAAG,OAAO,CACd,CAIA,IAAMmB,EAAYC,GAAoB,WAAW,EACjD,OAAOC,GAAOf,EAAY,SAAUa,CAAS,CAAC,EAC3C,KACCG,EAAUtB,GAAM,CACd,IAAMuB,EAASZ,EAAK,cAAc,QAAQ,EAC1C,GAAIX,EAAG,IAAK,CACV,QAAWkB,KAAQlB,EAAG,kBAAkB,EACtCuB,EAAO,aAAaL,EAAMlB,EAAG,aAAakB,CAAI,CAAE,EAClD,OAAAlB,EAAG,YAAYuB,CAAM,EAGd,IAAIC,EAAWC,GAAY,CAChCF,EAAO,OAAS,IAAME,EAAS,SAAS,CAC1C,CAAC,CAGH,KACE,QAAAF,EAAO,YAAcvB,EAAG,YACxBA,EAAG,YAAYuB,CAAM,EACdxB,CAEX,CAAC,EACD2B,EAAe,EACfC,GAAQ,QAAQ,CAClB,CACJ,CAgBO,SAASC,GACd,CAAE,UAAAC,EAAW,UAAAC,EAAW,UAAAC,CAAU,EACZ,CACtB,IAAMC,EAASC,GAAc,EAC7B,GAAI,SAAS,WAAa,QACxB,OAAOlC,EAIT,IAAMmC,EAAWC,GAAaH,EAAO,IAAI,EAUzC9B,EAAG,QAAQ,EACR,UAAUK,EAAO,EAUpB,IAAM6B,EACJC,EAAsB,SAAS,KAAM,OAAO,EACzC,KACCC,GAAkBJ,CAAQ,EAC1BZ,EAAU,CAAC,CAACzB,EAAIC,CAAO,IAAMF,GAAOC,EAAIC,CAAO,CAAC,EAChDyC,GAAM,CACR,EAIEC,EACJH,EAAyB,OAAQ,UAAU,EACxC,KACCI,EAAIC,EAAW,EACfH,GAAM,CACR,EAMJH,EAAS,KAAKO,GAAeb,CAAS,CAAC,EACpC,UAAU,CAAC,CAAC7B,EAAK,CAAE,OAAA2C,CAAO,CAAC,IAAM,CAChC,QAAQ,aAAaA,EAAQ,EAAE,EAC/B,QAAQ,UAAU,KAAM,GAAI3C,CAAG,CACjC,CAAC,EAMH4C,EAAMT,EAAUI,CAAQ,EACrB,UAAUX,CAAS,EActB,IAAMiB,EACJjB,EAAU,KACRkB,EAAwB,UAAU,EAClCzB,EAAUrB,GAAO+C,GAAY/C,EAAK,CAAE,UAAA8B,CAAU,CAAC,EAC5C,KACCkB,GAAW,KACTC,GAAYjD,EAAK,EAAI,EACdF,EACR,CACH,CACF,EAIAuB,EAAUf,EAAO,EACjBe,EAAUZ,EAAM,EAChB6B,GAAM,CACR,EAUF,OAAAM,EACEC,EAAU,KAAKH,GAAed,EAAW,CAACsB,EAAGlD,IAAQA,CAAG,CAAC,EASzD6C,EAAU,KACRxB,EAAU,IAAMO,CAAS,EACzBkB,EAAwB,UAAU,EAClCzB,EAAU,IAAMO,CAAS,EACzBkB,EAAwB,MAAM,CAChC,EAQAlB,EAAU,KACRuB,EAAqB,CAACC,EAAGC,IACvBD,EAAE,WAAaC,EAAE,UACjBD,EAAE,OAAaC,EAAE,IAClB,EACDhC,EAAU,IAAMc,CAAQ,EACxBmB,EAAI,IAAM,QAAQ,KAAK,CAAC,CAC1B,CACF,EACG,UAAUtD,GAAO,CA1YtB,IAAAuD,EAAAC,EAgZU,QAAQ,QAAU,MAAQ,CAACxD,EAAI,KACjC,OAAO,SAAS,GAAGwD,GAAAD,EAAA,QAAQ,QAAR,YAAAA,EAAe,IAAf,KAAAC,EAAoB,CAAC,GAExC,QAAQ,kBAAoB,OAC5BC,GAAgBzD,EAAI,IAAI,EACxB,QAAQ,kBAAoB,SAEhC,CAAC,EAMH4B,EAAU,UAAU,IAAM,CACxB,QAAQ,kBAAoB,QAC9B,CAAC,EAMDQ,EAAU,OAAQ,cAAc,EAC7B,UAAU,IAAM,CACf,QAAQ,kBAAoB,MAC9B,CAAC,EAMHP,EAAU,KACRiB,EAAwB,QAAQ,EAChCY,GAAa,GAAG,CAClB,EACG,UAAU,CAAC,CAAE,OAAAf,CAAO,IAAM,CACzB,QAAQ,aAAaA,EAAQ,EAAE,CACjC,CAAC,EAGIE,CACT,CClaA,IAAAc,GAAuB,SAqChB,SAASC,GACdC,EAC0B,CAE1B,IAAMC,EAAQD,EAAO,UAAU,MAAM,GAAG,EAAE,IAAIE,GAC/BA,EAAK,QAAQ,sBAAuB,EAAE,EACvC,SAAW,EAAI,SAAMA,CAClC,EACE,KAAK,GAAG,EAELC,EAAY,IAAI,OAAOF,EAAO,KAAK,EACnCG,EAAY,CAACC,EAAYC,EAAcJ,IACpC,GAAGI,CAAI,2BAA2BJ,CAAI,UAI/C,OAAQK,GAAkB,CACxBA,EAAQA,EACL,QAAQ,gBAAiB,GAAG,EAC5B,KAAK,EAGR,IAAMC,EAAQ,IAAI,OAAO,MAAMR,EAAO,SAAS,MAC7CO,EACG,QAAQ,uBAAwB,MAAM,EACtC,QAAQJ,EAAW,GAAG,CAC3B,IAAK,KAAK,EAGV,OAAOM,MAAS,GAAAC,SAAWD,CAAK,EAC7B,QAAQD,EAAOJ,CAAS,EACxB,QAAQ,8BAA+B,IAAI,CAChD,CACF,CCEO,SAASO,GACdC,EAC+B,CAC/B,
OAAOA,EAAQ,OAAS,CAC1B,CASO,SAASC,GACdD,EACgC,CAChC,OAAOA,EAAQ,OAAS,CAC1B,CC1CO,SAASE,GACdC,EAAaC,EACW,CACxB,IAAMC,EAAUC,GAA2BH,CAAG,EAC9C,OAAAI,EACEC,EAAG,SAAS,WAAa,OAAO,EAChCC,GAAY,QAAQ,CACtB,EACG,KACCC,GAAMC,GAAUA,CAAM,EACtBC,EAAU,IAAMR,CAAM,CACxB,EACG,UAAU,CAAC,CAAE,OAAAS,EAAQ,KAAAC,CAAK,IAAMT,EAAQ,KAAK,CAC5C,OACA,KAAM,CACJ,OAAAQ,EACA,KAAAC,EACA,QAAS,CACP,QAASC,EAAQ,gBAAgB,CACnC,CACF,CACF,CAAC,CAAC,EAGCV,CACT,CCxBO,SAASW,GACd,CAAE,UAAAC,CAAU,EACN,CACN,IAAMC,EAASC,GAAc,EACvBC,EAAYC,GAChB,IAAI,IAAI,mBAAoBH,EAAO,IAAI,CACzC,EACG,KACCI,GAAW,IAAMC,CAAK,CACxB,EAGIC,EAAWJ,EACd,KACCK,EAAIC,GAAY,CACd,GAAM,CAAC,CAAEC,CAAO,EAAIT,EAAO,KAAK,MAAM,aAAa,EACnD,OAAOQ,EAAS,KAAK,CAAC,CAAE,QAAAE,EAAS,QAAAC,CAAQ,IACvCD,IAAYD,GAAWE,EAAQ,SAASF,CAAO,CAChD,GAAKD,EAAS,CAAC,CAClB,CAAC,CACH,EAGFN,EACG,KACCK,EAAIC,GAAY,IAAI,IAAIA,EAAS,IAAIE,GAAW,CAC9C,GAAG,IAAI,IAAI,MAAMA,EAAQ,OAAO,IAAKV,EAAO,IAAI,CAAC,GACjDU,CACF,CAAC,CAAC,CAAC,EACHE,EAAUC,GAAQC,EAAsB,SAAS,KAAM,OAAO,EAC3D,KACCC,EAAOC,GAAM,CAACA,EAAG,SAAW,CAACA,EAAG,OAAO,EACvCC,GAAeX,CAAQ,EACvBM,EAAU,CAAC,CAACI,EAAIP,CAAO,IAAM,CAC3B,GAAIO,EAAG,kBAAkB,QAAS,CAChC,IAAME,EAAKF,EAAG,OAAO,QAAQ,GAAG,EAChC,GAAIE,GAAM,CAACA,EAAG,QAAUL,EAAK,IAAIK,EAAG,IAAI,EAAG,CACzC,IAAMC,EAAMD,EAAG,KAWf,MAAI,CAACF,EAAG,OAAO,QAAQ,aAAa,GAClBH,EAAK,IAAIM,CAAG,IACZV,EACPJ,GAEXW,EAAG,eAAe,EACXI,EAAGD,CAAG,EACf,CACF,CACA,OAAOd,CACT,CAAC,EACDO,EAAUO,GACDE,GAAa,IAAI,IAAIF,CAAG,CAAC,EAC7B,KACCZ,EAAIe,GAAW,CAEb,IAAMC,EADWC,GAAY,EACP,KAAK,QAAQxB,EAAO,KAAMmB,CAAG,EACnD,OAAOG,EAAQ,IAAIC,EAAK,MAAM,GAAG,EAAE,CAAC,CAAC,EACjC,IAAI,IAAIA,CAAI,EACZ,IAAI,IAAIJ,CAAG,CACjB,CAAC,CACH,CACH,CACH,CACF,CACF,EACG,UAAUA,GAAOM,GAAYN,EAAK,EAAI,CAAC,EAG5CO,EAAc,CAACxB,EAAWI,CAAQ,CAAC,EAChC,UAAU,CAAC,CAACE,EAAUC,CAAO,IAAM,CACpBkB,EAAW,mBAAmB,EACtC,YAAYC,GAAsBpB,EAAUC,CAAO,CAAC,CAC5D,CAAC,EAGHV,EAAU,KAAKa,EAAU,IAAMN,CAAQ,CAAC,EACrC,UAAUG,GAAW,CA3J1B,IAAAoB,EA8JM,IAAIC,EAAW,SAAS,aAAc,cAAc,EACpD,GAAIA,IAAa,KAAM,CACrBA,EAAW,GAGX,IAAIC,IAAUF,EAAA7B,EAAO,UAAP,YAAA6B,EAAgB,UAAW,SACpC,MAAM,QAAQE,CAAO,IACxBA,EAAU,CAACA,CAAO,GAGpBC,EAAM,QAAWC,KAAUF,EACzB,QAAWrB,KAAWD,EAAQ,QAAQ,OAAOA,EAAQ,OAAO,EAC1D,GAAI,IAAI,OAAOwB,EAAQ,GAAG,EAAE,KAAKvB,CAAO,EAAG,CACzCoB,EAAW,GACX,MAAME,CACR,CAGJ,SAAS,aAAcF,EAAU,cAAc,CACjD,CAGA,GAAIA,EACF,QAAWI,KAAWC,GAAqB,UAAU,EACnDD,EAAQ,OAAS,EACvB,CAAC,CACL,CCpFO,SAASE,GACdC,EAAsB,CAAE,QAAAC,CAAQ,EACP,CAGzB,GAAM,CAAE,aAAAC,CAAa,EAAIC,GAAY,EACjCD,EAAa,IAAI,GAAG,IACtBE,GAAU,SAAU,EAAI,EAGxBJ,EAAG,MAAQE,EAAa,IAAI,GAAG,EAC/BF,EAAG,MAAM,EAGTK,GAAY,QAAQ,EACjB,KACCC,GAAMC,GAAU,CAACA,CAAM,CACzB,EACG,UAAU,IAAM,CACf,IAAMC,EAAML,GAAY,EACxBK,EAAI,aAAa,OAAO,GAAG,EAC3B,QAAQ,aAAa,CAAC,EAAG,GAAI,GAAGA,CAAG,EAAE,CACvC,CAAC,GAIP,IAAMC,EAASC,GAAkBV,CAAE,EAC7BW,EAASC,EACbX,EAAQ,KAAKK,GAAMO,EAAoB,CAAC,EACxCC,EAAUd,EAAI,OAAO,EACrBS,CACF,EACG,KACCM,EAAI,IAAMf,EAAG,KAAK,EAClBgB,EAAqB,CACvB,EAGF,OAAOC,EAAc,CAACN,EAAQF,CAAM,CAAC,EAClC,KACCM,EAAI,CAAC,CAACG,EAAOC,CAAK,KAAO,CAAE,MAAAD,EAAO,MAAAC,CAAM,EAAE,EAC1CC,EAAY,CAAC,CACf,CACJ,CAUO,SAASC,GACdrB,EAAsB,CAAE,QAAAC,CAAQ,EACsB,CACtD,IAAMqB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAGxDT,EAAc,CACZhB,EAAQ,KAAKK,GAAMO,EAAoB,CAAC,EACxCS,CACF,EAAG,CAACK,EAAGC,IAAUA,CAAK,EACnB,KACCC,EAAwB,OAAO,CACjC,EACG,UAAU,CAAC,CAAE,MAAAX,CAAM,IAAMjB,EAAQ,KAAK,CACrC,OACA,KAAMiB,CACR,CAAC,CAAC,EAGNI,EACG,KACCO,EAAwB,OAAO,CACjC,EACG,UAAU,CAAC,CAAE,MAAAV,CAAM,IAAM,CACpBA,GACFf,GAAU,SAAUe,CAAK,CAC7B,CAAC,EAGLL,EAAUd,EAAG,KAAO,OAAO,EACxB,KACC8B,EAAUN,CAAK,CACjB,EACG,UAAU,IAAMxB,EAAG,MAAM,CAAC,EAM/B,IAAM+B,EAAQC,EAAW,uBAAuB,EAChD,OAAAlB,EAAUiB,EAAO,OAAO,EACrB,UAAU,IAAM/B,EAAG,MAAM,CAAC,EAGtBD,GAAiBC,
EAAI,CAAE,QAAAC,CAAQ,CAAC,EACpC,KACCgC,EAAIC,GAASZ,EAAM,KAAKY,CAAK,CAAC,EAC9BC,EAAS,IAAMb,EAAM,SAAS,CAAC,EAC/BP,EAAImB,GAAUE,EAAA,CAAE,IAAKpC,GAAOkC,EAAQ,EACpCd,EAAY,CAAC,CACf,CACJ,CCnHO,SAASiB,GACdC,EAAiB,CAAE,QAAAC,EAAS,OAAAC,CAAO,EACE,CACrC,IAAMC,EAAQ,IAAIC,EACZC,EAAYC,GAAqBN,EAAG,aAAc,EACrD,KACCO,EAAO,OAAO,CAChB,EAGIC,EAAYR,EAAG,cAGfS,EAAOC,EAAW,wBAAyBV,CAAE,EAC7CW,EAAOD,EAAW,uBAAwBV,CAAE,EAGlDY,GAAY,QAAQ,EACjB,UAAUC,GAAUF,EAAK,aACxB,OAAQE,EAAS,OAAS,cAC5B,CAAC,EAGHV,EACG,KACCW,GAAeZ,CAAM,EACrBa,GAAUd,EAAQ,KAAKe,GAAMC,EAAoB,CAAC,CAAC,CACrD,EACG,UAAU,CAAC,CAAC,CAAE,MAAAC,CAAM,EAAG,CAAE,MAAAC,CAAM,CAAC,IAAM,CACrC,OAAQD,EAAM,OAAQ,CAGpB,IAAK,GACHT,EAAK,YAAcU,EAAM,OACrBC,GAAY,oBAAoB,EAChCA,GAAY,2BAA2B,EAC3C,MAGF,IAAK,GACHX,EAAK,YAAcW,GAAY,mBAAmB,EAClD,MAGF,QACE,IAAMC,EAAQC,GAAMJ,EAAM,MAAM,EAChCT,EAAK,YAAcW,GAAY,sBAAuBC,CAAK,CAC/D,CACF,CAAC,EAGL,IAAME,EAAUpB,EACb,KACCqB,EAAI,IAAMb,EAAK,UAAY,EAAE,EAC7Bc,EAAU,CAAC,CAAE,MAAAP,CAAM,IAAMQ,EACvBC,EAAG,GAAGT,EAAM,MAAM,EAAG,EAAE,CAAC,EACxBS,EAAG,GAAGT,EAAM,MAAM,EAAE,CAAC,EAClB,KACCU,GAAY,CAAC,EACbC,GAAQxB,CAAS,EACjBoB,EAAU,CAAC,CAACK,CAAK,IAAMA,CAAK,CAC9B,CACJ,CAAC,EACDC,EAAIC,EAAsB,EAC1BC,GAAM,CACR,EAGF,OAAAV,EAAQ,UAAUW,GAAQvB,EAAK,YAAYuB,CAAI,CAAC,EAChDX,EACG,KACCY,GAASD,GAAQ,CACf,IAAME,EAAUC,GAAmB,UAAWH,CAAI,EAClD,OAAI,OAAOE,GAAY,YACdE,EAGFC,EAAUH,EAAS,QAAQ,EAC/B,KACCI,EAAUrC,CAAK,EACf4B,EAAI,IAAMK,CAAO,CACnB,CACJ,CAAC,CACH,EACG,UAAUA,GAAW,CAElBA,EAAQ,OAAS,IACjBA,EAAQ,WAAa5B,EAAU,WAE/BA,EAAU,SAAS,CAAE,IAAK4B,EAAQ,SAAU,CAAC,CACjD,CAAC,EAGWnC,EACb,KACCM,EAAOkC,EAAqB,EAC5BV,EAAI,CAAC,CAAE,KAAAW,CAAK,IAAMA,CAAI,CACxB,EAIC,KACClB,EAAImB,GAASxC,EAAM,KAAKwC,CAAK,CAAC,EAC9BC,EAAS,IAAMzC,EAAM,SAAS,CAAC,EAC/B4B,EAAIY,GAAUE,EAAA,CAAE,IAAK7C,GAAO2C,EAAQ,CACtC,CACJ,CCpHO,SAASG,GACdC,EAAkB,CAAE,OAAAC,CAAO,EACF,CACzB,OAAOA,EACJ,KACCC,EAAI,CAAC,CAAE,MAAAC,CAAM,IAAM,CACjB,IAAMC,EAAMC,GAAY,EACxB,OAAAD,EAAI,KAAO,GAGXD,EAAQA,EACL,QAAQ,OAAQ,GAAG,EACnB,QAAQ,KAAM,KAAK,EACnB,QAAQ,KAAM,KAAK,EAGtBC,EAAI,OAAS,KAAKD,CAAK,GAChB,CAAE,IAAAC,CAAI,CACf,CAAC,CACH,CACJ,CAUO,SAASE,GACdC,EAAuBC,EACa,CACpC,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxD,OAAAJ,EAAM,UAAU,CAAC,CAAE,IAAAL,CAAI,IAAM,CAC3BG,EAAG,aAAa,sBAAuBA,EAAG,IAAI,EAC9CA,EAAG,KAAO,GAAGH,CAAG,EAClB,CAAC,EAGDU,EAAUP,EAAI,OAAO,EAClB,KACCQ,EAAUJ,CAAK,CACjB,EACG,UAAUK,GAAMA,EAAG,eAAe,CAAC,EAGjCjB,GAAiBQ,EAAIC,CAAO,EAChC,KACCS,EAAIC,GAAST,EAAM,KAAKS,CAAK,CAAC,EAC9BC,EAAS,IAAMV,EAAM,SAAS,CAAC,EAC/BP,EAAIgB,GAAUE,EAAA,CAAE,IAAKb,GAAOW,EAAQ,CACtC,CACJ,CCpDO,SAASG,GACdC,EAAiB,CAAE,QAAAC,EAAS,UAAAC,CAAU,EACA,CACtC,IAAMC,EAAQ,IAAIC,EAGZC,EAASC,GAAoB,cAAc,EAC3CC,EAASC,EACbC,EAAUJ,EAAO,SAAS,EAC1BI,EAAUJ,EAAO,OAAO,CAC1B,EACG,KACCK,GAAUC,EAAc,EACxBC,EAAI,IAAMP,EAAM,KAAK,EACrBQ,EAAqB,CACvB,EAGF,OAAAV,EACG,KACCW,GAAkBP,CAAM,EACxBK,EAAI,CAAC,CAAC,CAAE,QAAAG,CAAQ,EAAGC,CAAK,IAAM,CAC5B,IAAMC,EAAQD,EAAM,MAAM,UAAU,EACpC,GAAID,GAAA,MAAAA,EAAS,QAAUE,EAAMA,EAAM,OAAS,CAAC,EAAG,CAC9C,IAAMC,EAAOH,EAAQA,EAAQ,OAAS,CAAC,EACnCG,EAAK,WAAWD,EAAMA,EAAM,OAAS,CAAC,CAAC,IACzCA,EAAMA,EAAM,OAAS,CAAC,EAAIC,EAC9B,MACED,EAAM,OAAS,EAEjB,OAAOA,CACT,CAAC,CACH,EACG,UAAUA,GAASjB,EAAG,UAAYiB,EAChC,KAAK,EAAE,EACP,QAAQ,MAAO,QAAQ,CAC1B,EAGJf,EACG,KACCiB,EAAO,CAAC,CAAE,KAAAC,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,OAAQA,EAAI,KAAM,CAGhB,IAAK,aAEDrB,EAAG,UAAU,QACbK,EAAM,iBAAmBA,EAAM,MAAM,SAErCA,EAAM,MAAQL,EAAG,WACnB,KACJ,CACF,CAAC,EAGWC,EACb,KACCkB,EAAOG,EAAqB,EAC5BV,EAAI,CAAC,CAAE,KAAAW,CAAK,IAAMA,CAAI,CACxB,EAIC,KACCC,EAAIC,GAAStB,EAAM,KAAKsB,CAAK,CAAC,EAC9BC,EAAS,IAAMvB,EAAM,SAAS,CAAC,EAC/BS,EAAI,KAAO,CAAE,IAAKZ,CAAG,EAAE,CACzB,CACJ,CCjDO,S
AAS2B,GACdC,EAAiB,CAAE,OAAAC,EAAQ,UAAAC,CAAU,EACN,CAC/B,IAAMC,EAASC,GAAc,EAC7B,GAAI,CACF,IAAMC,EAAUC,GAAkBH,EAAO,OAAQF,CAAM,EAGjDM,EAASC,GAAoB,eAAgBR,CAAE,EAC/CS,EAASD,GAAoB,gBAAiBR,CAAE,EAGtDU,EAAwBV,EAAI,OAAO,EAChC,KACCW,EAAO,CAAC,CAAE,OAAAC,CAAO,IACfA,aAAkB,SAAW,CAAC,CAACA,EAAO,QAAQ,GAAG,CAClD,CACH,EACG,UAAU,IAAMC,GAAU,SAAU,EAAK,CAAC,EAG/CX,EACG,KACCS,EAAO,CAAC,CAAE,KAAAG,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,IAAMC,EAASC,GAAiB,EAChC,OAAQF,EAAI,KAAM,CAGhB,IAAK,QACH,GAAIC,IAAWT,EAAO,CACpB,IAAMW,EAAU,IAAI,IACpB,QAAWC,KAAUC,EACnB,sBAAuBX,CACzB,EAAG,CACD,IAAMY,EAAUF,EAAO,kBACvBD,EAAQ,IAAIC,EAAQ,WAClBE,EAAQ,aAAa,eAAe,CACtC,CAAC,CACH,CAGA,GAAIH,EAAQ,KAAM,CAChB,GAAM,CAAC,CAACI,CAAI,CAAC,EAAI,CAAC,GAAGJ,CAAO,EAAE,KAAK,CAAC,CAAC,CAAEK,CAAC,EAAG,CAAC,CAAEC,CAAC,IAAMA,EAAID,CAAC,EAC1DD,EAAK,MAAM,CACb,CAGAP,EAAI,MAAM,CACZ,CACA,MAGF,IAAK,SACL,IAAK,MACHF,GAAU,SAAU,EAAK,EACzBN,EAAM,KAAK,EACX,MAGF,IAAK,UACL,IAAK,YACH,GAAI,OAAOS,GAAW,YACpBT,EAAM,MAAM,MACP,CACL,IAAMkB,EAAM,CAAClB,EAAO,GAAGa,EACrB,wDACAX,CACF,CAAC,EACKiB,EAAI,KAAK,IAAI,GACjB,KAAK,IAAI,EAAGD,EAAI,QAAQT,CAAM,CAAC,EAAIS,EAAI,QACrCV,EAAI,OAAS,UAAY,GAAK,IAE9BU,EAAI,MAAM,EACdA,EAAIC,CAAC,EAAE,MAAM,CACf,CAGAX,EAAI,MAAM,EACV,MAGF,QACMR,IAAUU,GAAiB,GAC7BV,EAAM,MAAM,CAClB,CACF,CAAC,EAGLL,EACG,KACCS,EAAO,CAAC,CAAE,KAAAG,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,OAAQA,EAAI,KAAM,CAGhB,IAAK,IACL,IAAK,IACL,IAAK,IACHR,EAAM,MAAM,EACZA,EAAM,OAAO,EAGbQ,EAAI,MAAM,EACV,KACJ,CACF,CAAC,EAGL,IAAMY,EAASC,GAAiBrB,EAAO,CAAE,QAAAF,CAAQ,CAAC,EAClD,OAAOwB,EACLF,EACAG,GAAkBrB,EAAQ,CAAE,QAAAJ,EAAS,OAAAsB,CAAO,CAAC,CAC/C,EACG,KACCI,GAGE,GAAGC,GAAqB,eAAgBhC,CAAE,EACvC,IAAIiC,GAASC,GAAiBD,EAAO,CAAE,OAAAN,CAAO,CAAC,CAAC,EAGnD,GAAGK,GAAqB,iBAAkBhC,CAAE,EACzC,IAAIiC,GAASE,GAAmBF,EAAO,CAAE,QAAA5B,EAAS,UAAAH,CAAU,CAAC,CAAC,CACnE,CACF,CAGJ,OAASkC,EAAK,CACZ,OAAApC,EAAG,OAAS,GACLqC,EACT,CACF,CCnKO,SAASC,GACdC,EAAiB,CAAE,OAAAC,EAAQ,UAAAC,CAAU,EACG,CACxC,OAAOC,EAAc,CACnBF,EACAC,EACG,KACCE,EAAUC,GAAY,CAAC,EACvBC,EAAOC,GAAO,CAAC,CAACA,EAAI,aAAa,IAAI,GAAG,CAAC,CAC3C,CACJ,CAAC,EACE,KACCC,EAAI,CAAC,CAACC,EAAOF,CAAG,IAAMG,GAAuBD,EAAM,MAAM,EACvDF,EAAI,aAAa,IAAI,GAAG,CAC1B,CAAC,EACDC,EAAIG,GAAM,CA1FhB,IAAAC,EA2FQ,IAAMC,EAAQ,IAAI,IAGZC,EAAK,SAAS,mBAAmBd,EAAI,WAAW,SAAS,EAC/D,QAASe,EAAOD,EAAG,SAAS,EAAGC,EAAMA,EAAOD,EAAG,SAAS,EACtD,IAAIF,EAAAG,EAAK,gBAAL,MAAAH,EAAoB,aAAc,CACpC,IAAMI,EAAWD,EAAK,YAChBE,EAAWN,EAAGK,CAAQ,EACxBC,EAAS,OAASD,EAAS,QAC7BH,EAAM,IAAIE,EAAmBE,CAAQ,CACzC,CAIF,OAAW,CAACF,EAAMG,CAAI,IAAKL,EAAO,CAChC,GAAM,CAAE,WAAAM,CAAW,EAAIC,EAAE,OAAQ,KAAMF,CAAI,EAC3CH,EAAK,YAAY,GAAG,MAAM,KAAKI,CAAU,CAAC,CAC5C,CAGA,MAAO,CAAE,IAAKnB,EAAI,MAAAa,CAAM,CAC1B,CAAC,CACH,CACJ,CCPO,SAASQ,GACdC,EAAiB,CAAE,UAAAC,EAAW,MAAAC,CAAM,EACf,CACrB,IAAMC,EAASH,EAAG,QAAqB,UAAU,EAC3CI,EACJD,EAAO,UACPA,EAAO,cAAe,UAGxB,OAAOE,EAAc,CAACH,EAAOD,CAAS,CAAC,EACpC,KACCK,EAAI,CAAC,CAAC,CAAE,OAAAC,EAAQ,OAAAC,CAAO,EAAG,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,CAAC,KACzCD,EAASA,EACL,KAAK,IAAIJ,EAAQ,KAAK,IAAI,EAAGK,EAAIF,CAAM,CAAC,EACxCH,EACG,CACL,OAAAI,EACA,OAAQC,GAAKF,EAASH,CACxB,EACD,EACDM,EAAqB,CAACC,EAAGC,IACvBD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,MAChB,CACH,CACJ,CAuBO,SAASC,GACdb,EAAiBc,EACe,CADf,IAAAC,EAAAD,EAAE,SAAAE,CA5JrB,EA4JmBD,EAAcE,EAAAC,GAAdH,EAAc,CAAZ,YAEnB,IAAMI,EAAQC,EAAW,0BAA2BpB,CAAE,EAChD,CAAE,EAAAS,CAAE,EAAIY,GAAiBF,CAAK,EACpC,OAAOG,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAClDC,EAAQL,EACX,KACCM,GAAU,EAAGC,EAAuB,CACtC,EAGF,OAAAF,EAAM,KAAKG,GAAef,CAAO,CAAC,EAC/B,UAAU,CAGT,KAAK,CAAC,CAAE,OAAAR,CAAO,EAAG,CAAE,OAAQD,CAAO,CAAC,EAAG,CACrCY,EAAM,MAAM,OAAS,GAAGX,EA
AS,EAAIC,CAAC,KACtCT,EAAG,MAAM,IAAY,GAAGO,CAAM,IAChC,EAGA,UAAW,CACTY,EAAM,MAAM,OAAS,GACrBnB,EAAG,MAAM,IAAY,EACvB,CACF,CAAC,EAGH4B,EAAM,KAAKI,GAAM,CAAC,EACf,UAAU,IAAM,CACf,QAAWC,KAAQC,EAAY,8BAA+BlC,CAAE,EAAG,CACjE,GAAI,CAACiC,EAAK,aACR,SACF,IAAME,EAAYF,EAAK,QAAqB,yBAAyB,EACrE,GAAI,OAAOE,GAAc,YAAa,CACpC,IAAM5B,EAAS0B,EAAK,UAAYE,EAAU,UACpC,CAAE,OAAA3B,CAAO,EAAI4B,GAAeD,CAAS,EAC3CA,EAAU,SAAS,CACjB,IAAK5B,EAASC,EAAS,CACzB,CAAC,CACH,CACF,CACF,CAAC,EAGH6B,GAAKH,EAA8B,kBAAmBlC,CAAE,CAAC,EACtD,KACCsC,GAASC,GAASC,EAAUD,EAAO,OAAO,EACvC,KACCE,GAAUC,EAAc,EACxBpC,EAAI,IAAMiC,CAAK,EACfI,EAAUlB,CAAK,CACjB,CACF,CACF,EACG,UAAUc,GAAS,CAClB,IAAMK,EAAQxB,EAA6B,QAAQmB,EAAM,OAAO,IAAI,EACxDnB,EAAW,qBAAqBmB,EAAM,EAAE,IAAI,EACpD,aAAa,gBAAiB,GAAGK,EAAM,OAAO,EAAE,CACtD,CAAC,EAGE7C,GAAaC,EAAIiB,CAAO,EAC5B,KACC4B,EAAIC,GAASvB,EAAM,KAAKuB,CAAK,CAAC,EAC9BC,EAAS,IAAMxB,EAAM,SAAS,CAAC,EAC/BjB,EAAIwC,GAAUE,EAAA,CAAE,IAAKhD,GAAO8C,EAAQ,CACtC,CACJ,CAAC,CACH,CCxKO,SAASG,GACdC,EAAcC,EACW,CACzB,GAAI,OAAOA,GAAS,YAAa,CAC/B,IAAMC,EAAM,gCAAgCF,CAAI,IAAIC,CAAI,GACxD,OAAOE,GAGLC,GAAqB,GAAGF,CAAG,kBAAkB,EAC1C,KACCG,GAAW,IAAMC,CAAK,EACtBC,EAAIC,IAAY,CACd,QAASA,EAAQ,QACnB,EAAE,EACFC,GAAe,CAAC,CAAC,CACnB,EAGFL,GAAkBF,CAAG,EAClB,KACCG,GAAW,IAAMC,CAAK,EACtBC,EAAIG,IAAS,CACX,MAAOA,EAAK,iBACZ,MAAOA,EAAK,WACd,EAAE,EACFD,GAAe,CAAC,CAAC,CACnB,CACJ,EACG,KACCF,EAAI,CAAC,CAACC,EAASE,CAAI,IAAOC,IAAA,GAAKH,GAAYE,EAAO,CACpD,CAGJ,KAAO,CACL,IAAMR,EAAM,gCAAgCF,CAAI,GAChD,OAAOI,GAAkBF,CAAG,EACzB,KACCK,EAAIG,IAAS,CACX,aAAcA,EAAK,YACrB,EAAE,EACFD,GAAe,CAAC,CAAC,CACnB,CACJ,CACF,CCvDO,SAASG,GACdC,EAAcC,EACW,CACzB,IAAMC,EAAM,WAAWF,CAAI,oBAAoB,mBAAmBC,CAAO,CAAC,GAC1E,OAAOE,GAA2BD,CAAG,EAClC,KACCE,GAAW,IAAMC,CAAK,EACtBC,EAAI,CAAC,CAAE,WAAAC,EAAY,YAAAC,CAAY,KAAO,CACpC,MAAOD,EACP,MAAOC,CACT,EAAE,EACFC,GAAe,CAAC,CAAC,CACnB,CACJ,CCOO,SAASC,GACdC,EACyB,CAGzB,IAAIC,EAAQD,EAAI,MAAM,qCAAqC,EAC3D,GAAIC,EAAO,CACT,GAAM,CAAC,CAAEC,EAAMC,CAAI,EAAIF,EACvB,OAAOG,GAA2BF,EAAMC,CAAI,CAC9C,CAIA,GADAF,EAAQD,EAAI,MAAM,oCAAoC,EAClDC,EAAO,CACT,GAAM,CAAC,CAAEI,EAAMC,CAAI,EAAIL,EACvB,OAAOM,GAA2BF,EAAMC,CAAI,CAC9C,CAGA,OAAOE,CACT,CCpBA,IAAIC,GAgBG,SAASC,GACdC,EACoB,CACpB,OAAOF,QAAWG,EAAM,IAAM,CAC5B,IAAMC,EAAS,SAAsB,WAAY,cAAc,EAC/D,GAAIA,EACF,OAAOC,EAAGD,CAAM,EAKhB,GADYE,GAAqB,SAAS,EAClC,OAAQ,CACd,IAAMC,EAAU,SAA0B,WAAW,EACrD,GAAI,EAAEA,GAAWA,EAAQ,QACvB,OAAOC,CACX,CAGA,OAAOC,GAAiBP,EAAG,IAAI,EAC5B,KACCQ,EAAIC,GAAS,SAAS,WAAYA,EAAO,cAAc,CAAC,CAC1D,CAEN,CAAC,EACE,KACCC,GAAW,IAAMJ,CAAK,EACtBK,EAAOF,GAAS,OAAO,KAAKA,CAAK,EAAE,OAAS,CAAC,EAC7CG,EAAIH,IAAU,CAAE,MAAAA,CAAM,EAAE,EACxBI,EAAY,CAAC,CACf,EACJ,CASO,SAASC,GACdd,EAC+B,CAC/B,IAAMe,EAAQC,EAAW,uBAAwBhB,CAAE,EACnD,OAAOC,EAAM,IAAM,CACjB,IAAMgB,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,MAAAR,CAAM,IAAM,CAC7BM,EAAM,YAAYI,GAAkBV,CAAK,CAAC,EAC1CM,EAAM,UAAU,IAAI,+BAA+B,CACrD,CAAC,EAGMhB,GAAYC,CAAE,EAClB,KACCQ,EAAIY,GAASH,EAAM,KAAKG,CAAK,CAAC,EAC9BC,EAAS,IAAMJ,EAAM,SAAS,CAAC,EAC/BL,EAAIQ,GAAUE,EAAA,CAAE,IAAKtB,GAAOoB,EAAQ,CACtC,CACJ,CAAC,CACH,CCtDO,SAASG,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACpB,CAClB,OAAOC,GAAiB,SAAS,IAAI,EAClC,KACCC,EAAU,IAAMC,GAAgBL,EAAI,CAAE,QAAAE,EAAS,UAAAD,CAAU,CAAC,CAAC,EAC3DK,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,KACZ,CACL,OAAQA,GAAK,EACf,EACD,EACDC,EAAwB,QAAQ,CAClC,CACJ,CAaO,SAASC,GACdT,EAAiBU,EACY,CAC7B,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAE,CAAO,EAAG,CACfd,EAAG,OAASc,CACd,EAGA,UAAW,CACTd,EAAG,OAAS,EACd,CACF,CAAC,GAICe,EAAQ,wBAAwB,EAC5BC,EAAG,CAAE,OAAQ,EAAM,CAAC,EACpBjB,GAAUC,EAAIU,CAAO,GAExB,KACCO,EAAIC,GAASN,EAAM,KAAKM,CAAK,CAAC,EAC9BC,EAAS,IAAMP,EAAM,SAAS,CAAC,EAC/BN,EAAIY,G
AAUE,EAAA,CAAE,IAAKpB,GAAOkB,EAAQ,CACtC,CACJ,CAAC,CACH,CCfO,SAASG,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACT,CAC7B,IAAMC,EAAQ,IAAI,IAGZC,EAAUC,EAA+B,gBAAiBL,CAAE,EAClE,QAAWM,KAAUF,EAAS,CAC5B,IAAMG,EAAK,mBAAmBD,EAAO,KAAK,UAAU,CAAC,CAAC,EAChDE,EAASC,GAAmB,QAAQF,CAAE,IAAI,EAC5C,OAAOC,GAAW,aACpBL,EAAM,IAAIG,EAAQE,CAAM,CAC5B,CAGA,IAAME,EAAUR,EACb,KACCS,EAAwB,QAAQ,EAChCC,EAAI,CAAC,CAAE,OAAAC,CAAO,IAAM,CAClB,IAAMC,EAAOC,GAAoB,MAAM,EACjCC,EAAOC,EAAW,wBAAyBH,CAAI,EACrD,OAAOD,EAAS,IACdG,EAAK,UACLF,EAAK,UAET,CAAC,EACDI,GAAM,CACR,EAqFF,OAlFmBC,GAAiB,SAAS,IAAI,EAC9C,KACCR,EAAwB,QAAQ,EAGhCS,EAAUC,GAAQC,EAAM,IAAM,CAC5B,IAAIC,EAA4B,CAAC,EACjC,OAAOC,EAAG,CAAC,GAAGrB,CAAK,EAAE,OAAO,CAACsB,EAAO,CAACnB,EAAQE,CAAM,IAAM,CACvD,KAAOe,EAAK,QACGpB,EAAM,IAAIoB,EAAKA,EAAK,OAAS,CAAC,CAAC,EACnC,SAAWf,EAAO,SACzBe,EAAK,IAAI,EAOb,IAAIG,EAASlB,EAAO,UACpB,KAAO,CAACkB,GAAUlB,EAAO,eACvBA,EAASA,EAAO,cAChBkB,EAASlB,EAAO,UAIlB,IAAImB,EAASnB,EAAO,aACpB,KAAOmB,EAAQA,EAASA,EAAO,aAC7BD,GAAUC,EAAO,UAGnB,OAAOF,EAAM,IACX,CAAC,GAAGF,EAAO,CAAC,GAAGA,EAAMjB,CAAM,CAAC,EAAE,QAAQ,EACtCoB,CACF,CACF,EAAG,IAAI,GAAkC,CAAC,CAC5C,CAAC,EACE,KAGCd,EAAIa,GAAS,IAAI,IAAI,CAAC,GAAGA,CAAK,EAAE,KAAK,CAAC,CAAC,CAAEG,CAAC,EAAG,CAAC,CAAEC,CAAC,IAAMD,EAAIC,CAAC,CAAC,CAAC,EAC9DC,GAAkBpB,CAAO,EAGzBU,EAAU,CAAC,CAACK,EAAOM,CAAM,IAAM9B,EAC5B,KACC+B,GAAK,CAAC,CAACC,EAAMC,CAAI,EAAG,CAAE,OAAQ,CAAE,EAAAC,CAAE,EAAG,KAAAC,CAAK,IAAM,CAC9C,IAAMC,EAAOF,EAAIC,EAAK,QAAU,KAAK,MAAMf,EAAK,MAAM,EAGtD,KAAOa,EAAK,QAAQ,CAClB,GAAM,CAAC,CAAER,CAAM,EAAIQ,EAAK,CAAC,EACzB,GAAIR,EAASK,EAASI,GAAKE,EACzBJ,EAAO,CAAC,GAAGA,EAAMC,EAAK,MAAM,CAAE,MAE9B,MAEJ,CAGA,KAAOD,EAAK,QAAQ,CAClB,GAAM,CAAC,CAAEP,CAAM,EAAIO,EAAKA,EAAK,OAAS,CAAC,EACvC,GAAIP,EAASK,GAAUI,GAAK,CAACE,EAC3BH,EAAO,CAACD,EAAK,IAAI,EAAI,GAAGC,CAAI,MAE5B,MAEJ,CAGA,MAAO,CAACD,EAAMC,CAAI,CACpB,EAAG,CAAC,CAAC,EAAG,CAAC,GAAGT,CAAK,CAAC,CAAC,EACnBa,EAAqB,CAACV,EAAGC,IACvBD,EAAE,CAAC,IAAMC,EAAE,CAAC,GACZD,EAAE,CAAC,IAAMC,EAAE,CAAC,CACb,CACH,CACF,CACF,CACF,CACF,EAIC,KACCjB,EAAI,CAAC,CAACqB,EAAMC,CAAI,KAAO,CACrB,KAAMD,EAAK,IAAI,CAAC,CAACV,CAAI,IAAMA,CAAI,EAC/B,KAAMW,EAAK,IAAI,CAAC,CAACX,CAAI,IAAMA,CAAI,CACjC,EAAE,EAGFgB,EAAU,CAAE,KAAM,CAAC,EAAG,KAAM,CAAC,CAAE,CAAC,EAChCC,GAAY,EAAG,CAAC,EAChB5B,EAAI,CAAC,CAACgB,EAAGC,CAAC,IAGJD,EAAE,KAAK,OAASC,EAAE,KAAK,OAClB,CACL,KAAMA,EAAE,KAAK,MAAM,KAAK,IAAI,EAAGD,EAAE,KAAK,OAAS,CAAC,EAAGC,EAAE,KAAK,MAAM,EAChE,KAAM,CAAC,CACT,EAIO,CACL,KAAMA,EAAE,KAAK,MAAM,EAAE,EACrB,KAAMA,EAAE,KAAK,MAAM,EAAGA,EAAE,KAAK,OAASD,EAAE,KAAK,MAAM,CACrD,CAEH,CACH,CACJ,CAYO,SAASa,GACdzC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,EAAS,MAAAwC,EAAO,QAAAC,CAAQ,EACd,CACxC,OAAOrB,EAAM,IAAM,CACjB,IAAMsB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAoBxD,GAnBAJ,EAAM,UAAU,CAAC,CAAE,KAAAX,EAAM,KAAAC,CAAK,IAAM,CAGlC,OAAW,CAAC5B,CAAM,IAAK4B,EACrB5B,EAAO,UAAU,OAAO,sBAAsB,EAC9CA,EAAO,UAAU,OAAO,sBAAsB,EAIhD,OAAW,CAACmB,EAAO,CAACnB,CAAM,CAAC,IAAK2B,EAAK,QAAQ,EAC3C3B,EAAO,UAAU,IAAI,sBAAsB,EAC3CA,EAAO,UAAU,OACf,uBACAmB,IAAUQ,EAAK,OAAS,CAC1B,CAEJ,CAAC,EAGGgB,EAAQ,YAAY,EAAG,CAGzB,IAAMC,EAAUC,EACdlD,EAAU,KAAKmD,GAAa,CAAC,EAAGxC,EAAI,IAAG,EAAY,CAAC,EACpDX,EAAU,KAAKmD,GAAa,GAAG,EAAGxC,EAAI,IAAM,QAAiB,CAAC,CAChE,EAGAgC,EACG,KACCS,EAAO,CAAC,CAAE,KAAApB,CAAK,IAAMA,EAAK,OAAS,CAAC,EACpCH,GAAkBY,EAAM,KAAKY,GAAUC,EAAc,CAAC,CAAC,EACvDC,GAAeN,CAAO,CACxB,EACG,UAAU,CAAC,CAAC,CAAC,CAAE,KAAAjB,CAAK,CAAC,EAAGwB,CAAQ,IAAM,CACrC,GAAM,CAACnD,CAAM,EAAI2B,EAAKA,EAAK,OAAS,CAAC,EACrC,GAAI3B,EAAO,aAAc,CAGvB,IAAMoD,EAAYC,GAAoBrD,CAAM,EAC5C,GAAI,OAAOoD,GAAc,YAAa,CACpC,IAAMhC,EAASpB,EAAO,UAAYoD,EAAU,UACtC,CAAE,OAAA7C,CAAO,EAAI+C,GAAeF,CAAS,EAC3CA,EAAU,SAAS,CACjB,IAAKhC,EAASb,EAA
S,EACvB,SAAA4C,CACF,CAAC,CACH,CACF,CACF,CAAC,CACP,CAGA,OAAIR,EAAQ,qBAAqB,GAC/BhD,EACG,KACC4D,EAAUf,CAAK,EACfnC,EAAwB,QAAQ,EAChCyC,GAAa,GAAG,EAChBU,GAAK,CAAC,EACND,EAAUlB,EAAQ,KAAKmB,GAAK,CAAC,CAAC,CAAC,EAC/BC,GAAO,CAAE,MAAO,GAAI,CAAC,EACrBP,GAAeZ,CAAK,CACtB,EACG,UAAU,CAAC,CAAC,CAAE,CAAE,KAAAX,CAAK,CAAC,IAAM,CAC3B,IAAM+B,EAAMC,GAAY,EAGlB3D,EAAS2B,EAAKA,EAAK,OAAS,CAAC,EACnC,GAAI3B,GAAUA,EAAO,OAAQ,CAC3B,GAAM,CAAC4D,CAAM,EAAI5D,EACX,CAAE,KAAA6D,CAAK,EAAI,IAAI,IAAID,EAAO,IAAI,EAChCF,EAAI,OAASG,IACfH,EAAI,KAAOG,EACX,QAAQ,aAAa,CAAC,EAAG,GAAI,GAAGH,CAAG,EAAE,EAIzC,MACEA,EAAI,KAAO,GACX,QAAQ,aAAa,CAAC,EAAG,GAAI,GAAGA,CAAG,EAAE,CAEzC,CAAC,EAGAjE,GAAqBC,EAAI,CAAE,UAAAC,EAAW,QAAAC,CAAQ,CAAC,EACnD,KACCkE,EAAIC,GAASzB,EAAM,KAAKyB,CAAK,CAAC,EAC9BC,EAAS,IAAM1B,EAAM,SAAS,CAAC,EAC/BhC,EAAIyD,GAAUE,EAAA,CAAE,IAAKvE,GAAOqE,EAAQ,CACtC,CACJ,CAAC,CACH,CC9RO,SAASG,GACdC,EAAkB,CAAE,UAAAC,EAAW,MAAAC,EAAO,QAAAC,CAAQ,EACvB,CAGvB,IAAMC,EAAaH,EAChB,KACCI,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,IAAMA,CAAC,EAC5BC,GAAY,EAAG,CAAC,EAChBF,EAAI,CAAC,CAAC,EAAGG,CAAC,IAAM,EAAIA,GAAKA,EAAI,CAAC,EAC9BC,EAAqB,CACvB,EAGIC,EAAUR,EACb,KACCG,EAAI,CAAC,CAAE,OAAAM,CAAO,IAAMA,CAAM,CAC5B,EAGF,OAAOC,EAAc,CAACF,EAASN,CAAU,CAAC,EACvC,KACCC,EAAI,CAAC,CAACM,EAAQE,CAAS,IAAM,EAAEF,GAAUE,EAAU,EACnDJ,EAAqB,EACrBK,EAAUX,EAAQ,KAAKY,GAAK,CAAC,CAAC,CAAC,EAC/BC,GAAQ,EAAI,EACZC,GAAO,CAAE,MAAO,GAAI,CAAC,EACrBZ,EAAIa,IAAW,CAAE,OAAAA,CAAO,EAAE,CAC5B,CACJ,CAYO,SAASC,GACdC,EAAiB,CAAE,UAAAnB,EAAW,QAAAoB,EAAS,MAAAnB,EAAO,QAAAC,CAAQ,EACpB,CAClC,IAAMmB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGT,GAAQ,EAAI,CAAC,EACxD,OAAAM,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAJ,CAAO,EAAG,CACfE,EAAG,OAASF,EACRA,GACFE,EAAG,aAAa,WAAY,IAAI,EAChCA,EAAG,KAAK,GAERA,EAAG,gBAAgB,UAAU,CAEjC,EAGA,UAAW,CACTA,EAAG,MAAM,IAAM,GACfA,EAAG,OAAS,GACZA,EAAG,gBAAgB,UAAU,CAC/B,CACF,CAAC,EAGDC,EACG,KACCP,EAAUU,CAAK,EACfE,EAAwB,QAAQ,CAClC,EACG,UAAU,CAAC,CAAE,OAAAC,CAAO,IAAM,CACzBP,EAAG,MAAM,IAAM,GAAGO,EAAS,EAAE,IAC/B,CAAC,EAGLC,EAAUR,EAAI,OAAO,EAClB,UAAUS,GAAM,CACfA,EAAG,eAAe,EAClB,OAAO,SAAS,CAAE,IAAK,CAAE,CAAC,CAC5B,CAAC,EAGI9B,GAAeqB,EAAI,CAAE,UAAAnB,EAAW,MAAAC,EAAO,QAAAC,CAAQ,CAAC,EACpD,KACC2B,EAAIC,GAAST,EAAM,KAAKS,CAAK,CAAC,EAC9BC,EAAS,IAAMV,EAAM,SAAS,CAAC,EAC/BjB,EAAI0B,GAAUE,EAAA,CAAE,IAAKb,GAAOW,EAAQ,CACtC,CACJ,CClHO,SAASG,GACd,CAAE,UAAAC,EAAW,UAAAC,CAAU,EACjB,CACND,EACG,KACCE,EAAU,IAAMC,EAAY,cAAc,CAAC,EAC3CC,GAASC,GAAMC,GAAuBD,CAAE,EACrC,KACCE,EAAUP,EAAU,KAAKQ,GAAK,CAAC,CAAC,CAAC,EACjCC,EAAOC,GAAWA,CAAO,EACzBC,EAAI,IAAMN,CAAE,EACZO,GAAK,CAAC,CACR,CACF,EACAH,EAAOJ,GAAMA,EAAG,YAAcA,EAAG,WAAW,EAC5CD,GAASC,GAAM,CACb,IAAMQ,EAAOR,EAAG,UACVS,EAAOT,EAAG,QAAQ,GAAG,GAAKA,EAIhC,OAHAS,EAAK,MAAQD,EAGRE,EAAQ,kBAAkB,EAIxBC,GAAoBF,EAAM,CAAE,UAAAb,CAAU,CAAC,EAC3C,KACCM,EAAUP,EAAU,KAAKQ,GAAK,CAAC,CAAC,CAAC,EACjCS,EAAS,IAAMH,EAAK,gBAAgB,OAAO,CAAC,CAC9C,EAPOI,CAQX,CAAC,CACH,EACG,UAAU,EAGXH,EAAQ,kBAAkB,GAC5Bf,EACG,KACCE,EAAU,IAAMC,EAAY,YAAY,CAAC,EACzCC,GAASC,GAAMW,GAAoBX,EAAI,CAAE,UAAAJ,CAAU,CAAC,CAAC,CACvD,EACG,UAAU,CACnB,CCpDO,SAASkB,GACd,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACf,CACND,EACG,KACCE,EAAU,IAAMC,EACd,2BACF,CAAC,EACDC,EAAIC,GAAM,CACRA,EAAG,cAAgB,GACnBA,EAAG,QAAU,EACf,CAAC,EACDC,GAASD,GAAME,EAAUF,EAAI,QAAQ,EAClC,KACCG,GAAU,IAAMH,EAAG,UAAU,SAAS,0BAA0B,CAAC,EACjEI,EAAI,IAAMJ,CAAE,CACd,CACF,EACAK,GAAeT,CAAO,CACxB,EACG,UAAU,CAAC,CAACI,EAAIM,CAAM,IAAM,CAC3BN,EAAG,UAAU,OAAO,0BAA0B,EAC1CM,IACFN,EAAG,QAAU,GACjB,CAAC,CACP,CC9BA,SAASO,IAAyB,CAChC,MAAO,qBAAqB,KAAK,UAAU,SAAS,CACtD,CAiBO,SAASC,GACd,CAAE,UAAAC,CAAU,EACN,CACNA,EACG,KACCC,EAAU,IAAMC,EAAY,qBAAqB,CAAC,EAClDC,EAAIC,GAAMA,EAAG,gBAAgB,mBAAmB,CAAC,EACjDC,EAAOP,EAAa,EACpBQ,GAA
SF,GAAMG,EAAUH,EAAI,YAAY,EACtC,KACCI,EAAI,IAAMJ,CAAE,CACd,CACF,CACF,EACG,UAAUA,GAAM,CACf,IAAMK,EAAML,EAAG,UAGXK,IAAQ,EACVL,EAAG,UAAY,EAGNK,EAAML,EAAG,eAAiBA,EAAG,eACtCA,EAAG,UAAYK,EAAM,EAEzB,CAAC,CACP,CCpCO,SAASC,GACd,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACf,CACNC,EAAc,CAACC,GAAY,QAAQ,EAAGF,CAAO,CAAC,EAC3C,KACCG,EAAI,CAAC,CAACC,EAAQC,CAAM,IAAMD,GAAU,CAACC,CAAM,EAC3CC,EAAUF,GAAUG,EAAGH,CAAM,EAC1B,KACCI,GAAMJ,EAAS,IAAM,GAAG,CAC1B,CACF,EACAK,GAAeV,CAAS,CAC1B,EACG,UAAU,CAAC,CAACK,EAAQ,CAAE,OAAQ,CAAE,EAAAM,CAAE,CAAC,CAAC,IAAM,CACzC,GAAIN,EACF,SAAS,KAAK,aAAa,qBAAsB,EAAE,EACnD,SAAS,KAAK,MAAM,IAAM,IAAIM,CAAC,SAC1B,CACL,IAAMC,EAAQ,GAAK,SAAS,SAAS,KAAK,MAAM,IAAK,EAAE,EACvD,SAAS,KAAK,gBAAgB,oBAAoB,EAClD,SAAS,KAAK,MAAM,IAAM,GACtBA,GACF,OAAO,SAAS,EAAGA,CAAK,CAC5B,CACF,CAAC,CACP,CC7DK,OAAO,UACV,OAAO,QAAU,SAAUC,EAAa,CACtC,IAAMC,EAA2B,CAAC,EAClC,QAAWC,KAAO,OAAO,KAAKF,CAAG,EAE/BC,EAAK,KAAK,CAACC,EAAKF,EAAIE,CAAG,CAAC,CAAC,EAG3B,OAAOD,CACT,GAGG,OAAO,SACV,OAAO,OAAS,SAAUD,EAAa,CACrC,IAAMC,EAAiB,CAAC,EACxB,QAAWC,KAAO,OAAO,KAAKF,CAAG,EAE/BC,EAAK,KAAKD,EAAIE,CAAG,CAAC,EAGpB,OAAOD,CACT,GAKE,OAAO,SAAY,cAGhB,QAAQ,UAAU,WACrB,QAAQ,UAAU,SAAW,SAC3BE,EAA8BC,EACxB,CACF,OAAOD,GAAM,UACf,KAAK,WAAaA,EAAE,KACpB,KAAK,UAAYA,EAAE,MAEnB,KAAK,WAAaA,EAClB,KAAK,UAAYC,EAErB,GAGG,QAAQ,UAAU,cACrB,QAAQ,UAAU,YAAc,YAC3BC,EACG,CACN,IAAMC,EAAS,KAAK,WACpB,GAAIA,EAAQ,CACND,EAAM,SAAW,GACnBC,EAAO,YAAY,IAAI,EAGzB,QAASC,EAAIF,EAAM,OAAS,EAAGE,GAAK,EAAGA,IAAK,CAC1C,IAAIC,EAAOH,EAAME,CAAC,EACd,OAAOC,GAAS,SAClBA,EAAO,SAAS,eAAeA,CAAI,EAC5BA,EAAK,YACZA,EAAK,WAAW,YAAYA,CAAI,EAG7BD,EAGHD,EAAO,aAAa,KAAK,gBAAkBE,CAAI,EAF/CF,EAAO,aAAaE,EAAM,IAAI,CAGlC,CACF,CACF,I1MMJ,SAASC,IAA4C,CACnD,OAAI,SAAS,WAAa,QACjBC,GACL,GAAG,IAAI,IAAI,yBAA0BC,GAAO,IAAI,CAAC,EACnD,EACG,KAECC,EAAI,IAAM,OAAO,EACjBC,EAAY,CAAC,CACf,EAEKC,GACL,IAAI,IAAI,2BAA4BH,GAAO,IAAI,CACjD,CAEJ,CAOA,SAAS,gBAAgB,UAAU,OAAO,OAAO,EACjD,SAAS,gBAAgB,UAAU,IAAI,IAAI,EAG3C,IAAMI,GAAYC,GAAc,EAC1BC,GAAYC,GAAc,EAC1BC,GAAYC,GAAoBH,EAAS,EACzCI,GAAYC,GAAc,EAG1BC,GAAYC,GAAc,EAC1BC,GAAYC,GAAW,oBAAoB,EAC3CC,GAAYD,GAAW,qBAAqB,EAC5CE,GAAYC,GAAW,EAGvBlB,GAASmB,GAAc,EACvBC,GAAS,SAAS,MAAM,UAAU,QAAQ,EAC5CtB,GAAiB,EACjBuB,GAGEC,GAAS,IAAIC,EACnBC,GAAiB,CAAE,OAAAF,EAAO,CAAC,EAG3B,IAAMG,GAAY,IAAIF,EAGlBG,EAAQ,oBAAoB,GAC9BC,GAAuB,CAAE,UAAArB,GAAW,UAAAM,GAAW,UAAAa,EAAU,CAAC,EACvD,UAAUrB,EAAS,EAzJxB,IAAAwB,KA4JIA,GAAA5B,GAAO,UAAP,YAAA4B,GAAgB,YAAa,QAC/BC,GAAqB,CAAE,UAAAzB,EAAU,CAAC,EAGpC0B,EAAMxB,GAAWE,EAAO,EACrB,KACCuB,GAAM,GAAG,CACX,EACG,UAAU,IAAM,CACfC,GAAU,SAAU,EAAK,EACzBA,GAAU,SAAU,EAAK,CAC3B,CAAC,EAGLtB,GACG,KACCuB,EAAO,CAAC,CAAE,KAAAC,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,OAAQA,EAAI,KAAM,CAGhB,IAAK,IACL,IAAK,IACH,IAAMC,EAAOC,GAAoC,gBAAgB,EAC7D,OAAOD,GAAS,aAClBE,GAAYF,CAAI,EAClB,MAGF,IAAK,IACL,IAAK,IACH,IAAMG,EAAOF,GAAoC,gBAAgB,EAC7D,OAAOE,GAAS,aAClBD,GAAYC,CAAI,EAClB,MAGF,IAAK,QACH,IAAMC,EAASC,GAAiB,EAC5BD,aAAkB,kBACpBA,EAAO,MAAM,CACnB,CACF,CAAC,EAGLE,GAAc,CAAE,UAAA9B,GAAW,UAAAR,EAAU,CAAC,EACtCuC,GAAmB,CAAE,UAAAvC,GAAW,QAAAU,EAAQ,CAAC,EACzC8B,GAAe,CAAE,UAAAxC,EAAU,CAAC,EAC5ByC,GAAgB,CAAE,UAAAjC,GAAW,QAAAE,EAAQ,CAAC,EAGtC,IAAMgC,GAAUC,GAAYC,GAAoB,QAAQ,EAAG,CAAE,UAAApC,EAAU,CAAC,EAClEqC,GAAQ7C,GACX,KACCH,EAAI,IAAM+C,GAAoB,MAAM,CAAC,EACrCE,EAAUC,GAAMC,GAAUD,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,EAAQ,CAAC,CAAC,EACrD5C,EAAY,CAAC,CACf,EAGImD,GAAWvB,EAGf,GAAGwB,GAAqB,SAAS,EAC9B,IAAIH,GAAMI,GAAaJ,EAAI,CAAE,QAAA3C,EAAQ,CAAC,CAAC,EAG1C,GAAG8C,GAAqB,QAAQ,EAC7B,IAAIH,GAAMK,GAAYL,EAAI,CAAE,OAAA7B,EAAO,CAAC,CAAC,EAGxC,GAAGgC,GAAqB,QAAQ,EAC7B,IAAIH,GAAMM,GAAYN,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,EAAM,CAAC,CAAC,EAG3D,GAAGK,GAAqB,SAAS,EAC9B,IAAIH
,GAAMO,GAAaP,CAAE,CAAC,EAG7B,GAAGG,GAAqB,UAAU,EAC/B,IAAIH,GAAMQ,GAAcR,EAAI,CAAE,UAAA1B,EAAU,CAAC,CAAC,EAG7C,GAAG6B,GAAqB,QAAQ,EAC7B,IAAIH,GAAMS,GAAYT,EAAI,CAAE,OAAA/B,GAAQ,UAAAV,EAAU,CAAC,CAAC,EAGnD,GAAG4C,GAAqB,QAAQ,EAC7B,IAAIH,GAAMU,GAAYV,CAAE,CAAC,CAC9B,EAGMW,GAAWC,EAAM,IAAMjC,EAG3B,GAAGwB,GAAqB,UAAU,EAC/B,IAAIH,GAAMa,GAAcb,CAAE,CAAC,EAG9B,GAAGG,GAAqB,SAAS,EAC9B,IAAIH,GAAMc,GAAad,EAAI,CAAE,UAAAvC,GAAW,QAAAJ,GAAS,OAAAS,EAAO,CAAC,CAAC,EAG7D,GAAGqC,GAAqB,SAAS,EAC9B,IAAIH,GAAMzB,EAAQ,kBAAkB,EACjCwC,GAAoBf,EAAI,CAAE,OAAA/B,GAAQ,UAAAd,EAAU,CAAC,EAC7C6D,CACJ,EAGF,GAAGb,GAAqB,cAAc,EACnC,IAAIH,GAAMiB,GAAiBjB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,EAAQ,CAAC,CAAC,EAGzD,GAAGQ,GAAqB,SAAS,EAC9B,IAAIH,GAAMA,EAAG,aAAa,cAAc,IAAM,aAC3CkB,GAAGrD,GAAS,IAAMsD,GAAanB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,EAAM,CAAC,CAAC,EACjEoB,GAAGvD,GAAS,IAAMwD,GAAanB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,EAAM,CAAC,CAAC,CACrE,EAGF,GAAGK,GAAqB,MAAM,EAC3B,IAAIH,GAAMoB,GAAUpB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,EAAQ,CAAC,CAAC,EAGlD,GAAGQ,GAAqB,KAAK,EAC1B,IAAIH,GAAMqB,GAAqBrB,EAAI,CAClC,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,GAAO,QAAAzC,EAC7B,CAAC,CAAC,EAGJ,GAAG8C,GAAqB,KAAK,EAC1B,IAAIH,GAAMsB,GAAetB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,GAAO,QAAAzC,EAAQ,CAAC,CAAC,CACzE,CAAC,EAGKkE,GAAatE,GAChB,KACC8C,EAAU,IAAMY,EAAQ,EACxBa,GAAUtB,EAAQ,EAClBnD,EAAY,CAAC,CACf,EAGFwE,GAAW,UAAU,EAMrB,OAAO,UAAatE,GACpB,OAAO,UAAaE,GACpB,OAAO,QAAaE,GACpB,OAAO,UAAaE,GACpB,OAAO,UAAaE,GACpB,OAAO,QAAaE,GACpB,OAAO,QAAaE,GACpB,OAAO,OAAaC,GACpB,OAAO,OAAaK,GACpB,OAAO,UAAaG,GACpB,OAAO,WAAaiD",
-  "names": ["require_focus_visible", "__commonJSMin", "exports", "module", "global", "factory", "applyFocusVisiblePolyfill", "scope", "hadKeyboardEvent", "hadFocusVisibleRecently", "hadFocusVisibleRecentlyTimeout", "inputTypesAllowlist", "isValidFocusTarget", "el", "focusTriggersKeyboardModality", "type", "tagName", "addFocusVisibleClass", "removeFocusVisibleClass", "onKeyDown", "e", "onPointerDown", "onFocus", "onBlur", "onVisibilityChange", "addInitialPointerMoveListeners", "onInitialPointerMove", "removeInitialPointerMoveListeners", "event", "error", "require_clipboard", "__commonJSMin", "exports", "module", "root", "factory", "__webpack_modules__", "__unused_webpack_module", "__webpack_exports__", "__webpack_require__", "clipboard", "tiny_emitter", "tiny_emitter_default", "listen", "listen_default", "src_select", "select_default", "command", "type", "err", "ClipboardActionCut", "target", "selectedText", "actions_cut", "createFakeElement", "value", "isRTL", "fakeElement", "yPosition", "fakeCopyAction", "options", "ClipboardActionCopy", "actions_copy", "_typeof", "obj", "ClipboardActionDefault", "_options$action", "action", "container", "text", "actions_default", "clipboard_typeof", "_classCallCheck", "instance", "Constructor", "_defineProperties", "props", "i", "descriptor", "_createClass", "protoProps", "staticProps", "_inherits", "subClass", "superClass", "_setPrototypeOf", "o", "p", "_createSuper", "Derived", "hasNativeReflectConstruct", "_isNativeReflectConstruct", "Super", "_getPrototypeOf", "result", "NewTarget", "_possibleConstructorReturn", "self", "call", "_assertThisInitialized", "e", "getAttributeValue", "suffix", "element", "attribute", "Clipboard", "_Emitter", "_super", "trigger", "_this", "_this2", "selector", "actions", "support", "DOCUMENT_NODE_TYPE", "proto", "closest", "__unused_webpack_exports", "_delegate", "callback", "useCapture", "listenerFn", "listener", "delegate", "elements", "is", "listenNode", "listenNodeList", "listenSelector", "node", "nodeList", "select", "isReadOnly", "selection", "range", "E", "name", "ctx", "data", "evtArr", "len", "evts", "liveEvents", "__webpack_module_cache__", "moduleId", "getter", "definition", "key", "prop", "require_escape_html", "__commonJSMin", "exports", "module", "matchHtmlRegExp", "escapeHtml", "string", "str", "match", "escape", "html", "index", "lastIndex", "import_focus_visible", "extendStatics", "d", "b", "p", "__extends", "__", "__awaiter", "thisArg", "_arguments", "P", "generator", "adopt", "value", "resolve", "reject", "fulfilled", "step", "e", "rejected", "result", "__generator", "body", "_", "t", "f", "y", "g", "verb", "n", "v", "op", "__values", "o", "s", "m", "i", "__read", "n", "r", "ar", "e", "error", "__spreadArray", "to", "from", "pack", "i", "l", "ar", "__await", "v", "__asyncGenerator", "thisArg", "_arguments", "generator", "g", "q", "verb", "n", "a", "b", "resume", "step", "e", "settle", "r", "fulfill", "reject", "value", "f", "__asyncValues", "o", "m", "i", "__values", "verb", "n", "v", "resolve", "reject", "settle", "d", "isFunction", "value", "createErrorClass", "createImpl", "_super", "instance", "ctorFunc", "UnsubscriptionError", "createErrorClass", "_super", "errors", "err", "i", "arrRemove", "arr", "item", "index", "Subscription", "initialTeardown", "errors", "_parentage", "_parentage_1", "__values", "_parentage_1_1", "parent_1", "initialFinalizer", "isFunction", "e", "UnsubscriptionError", "_finalizers", "_finalizers_1", "_finalizers_1_1", "finalizer", "execFinalizer", "err", "__spreadArray", 
"__read", "teardown", "_a", "parent", "arrRemove", "empty", "EMPTY_SUBSCRIPTION", "Subscription", "isSubscription", "value", "isFunction", "execFinalizer", "finalizer", "config", "timeoutProvider", "handler", "timeout", "args", "_i", "delegate", "__spreadArray", "__read", "handle", "reportUnhandledError", "err", "timeoutProvider", "onUnhandledError", "config", "noop", "COMPLETE_NOTIFICATION", "createNotification", "errorNotification", "error", "nextNotification", "value", "kind", "context", "errorContext", "cb", "config", "isRoot", "_a", "errorThrown", "error", "captureError", "err", "Subscriber", "_super", "__extends", "destination", "_this", "isSubscription", "EMPTY_OBSERVER", "next", "error", "complete", "SafeSubscriber", "value", "handleStoppedNotification", "nextNotification", "err", "errorNotification", "COMPLETE_NOTIFICATION", "Subscription", "_bind", "bind", "fn", "thisArg", "ConsumerObserver", "partialObserver", "value", "error", "handleUnhandledError", "err", "SafeSubscriber", "_super", "__extends", "observerOrNext", "complete", "_this", "isFunction", "context_1", "config", "Subscriber", "handleUnhandledError", "error", "config", "captureError", "reportUnhandledError", "defaultErrorHandler", "err", "handleStoppedNotification", "notification", "subscriber", "onStoppedNotification", "timeoutProvider", "EMPTY_OBSERVER", "noop", "observable", "identity", "x", "pipe", "fns", "_i", "pipeFromArray", "identity", "input", "prev", "fn", "Observable", "subscribe", "operator", "observable", "observerOrNext", "error", "complete", "_this", "subscriber", "isSubscriber", "SafeSubscriber", "errorContext", "_a", "source", "sink", "err", "next", "promiseCtor", "getPromiseCtor", "resolve", "reject", "value", "operations", "_i", "pipeFromArray", "x", "getPromiseCtor", "promiseCtor", "_a", "config", "isObserver", "value", "isFunction", "isSubscriber", "Subscriber", "isSubscription", "hasLift", "source", "isFunction", "operate", "init", "liftedSource", "err", "createOperatorSubscriber", "destination", "onNext", "onComplete", "onError", "onFinalize", "OperatorSubscriber", "_super", "__extends", "shouldUnsubscribe", "_this", "value", "err", "closed_1", "_a", "Subscriber", "animationFrameProvider", "callback", "request", "cancel", "delegate", "handle", "timestamp", "Subscription", "args", "_i", "__spreadArray", "__read", "ObjectUnsubscribedError", "createErrorClass", "_super", "Subject", "_super", "__extends", "_this", "operator", "subject", "AnonymousSubject", "ObjectUnsubscribedError", "value", "errorContext", "_b", "__values", "_c", "observer", "err", "observers", "_a", "subscriber", "hasError", "isStopped", "EMPTY_SUBSCRIPTION", "Subscription", "arrRemove", "thrownError", "observable", "Observable", "destination", "source", "AnonymousSubject", "_super", "__extends", "destination", "source", "_this", "value", "_b", "_a", "err", "subscriber", "EMPTY_SUBSCRIPTION", "Subject", "BehaviorSubject", "_super", "__extends", "_value", "_this", "subscriber", "subscription", "_a", "hasError", "thrownError", "value", "Subject", "dateTimestampProvider", "ReplaySubject", "_super", "__extends", "_bufferSize", "_windowTime", "_timestampProvider", "dateTimestampProvider", "_this", "value", "_a", "isStopped", "_buffer", "_infiniteTimeWindow", "subscriber", "subscription", "copy", "i", "adjustedBufferSize", "now", "last", "Subject", "Action", "_super", "__extends", "scheduler", "work", "state", "delay", "Subscription", "intervalProvider", "handler", "timeout", "args", "_i", "delegate", "__spreadArray", "__read", "handle", 
"AsyncAction", "_super", "__extends", "scheduler", "work", "_this", "state", "delay", "id", "_a", "_id", "intervalProvider", "_scheduler", "error", "_delay", "errored", "errorValue", "e", "actions", "arrRemove", "Action", "Scheduler", "schedulerActionCtor", "now", "work", "delay", "state", "dateTimestampProvider", "AsyncScheduler", "_super", "__extends", "SchedulerAction", "now", "Scheduler", "_this", "action", "actions", "error", "asyncScheduler", "AsyncScheduler", "AsyncAction", "async", "QueueAction", "_super", "__extends", "scheduler", "work", "_this", "state", "delay", "id", "AsyncAction", "QueueScheduler", "_super", "__extends", "AsyncScheduler", "queueScheduler", "QueueScheduler", "QueueAction", "AnimationFrameAction", "_super", "__extends", "scheduler", "work", "_this", "id", "delay", "animationFrameProvider", "actions", "_a", "AsyncAction", "AnimationFrameScheduler", "_super", "__extends", "action", "flushId", "actions", "error", "AsyncScheduler", "animationFrameScheduler", "AnimationFrameScheduler", "AnimationFrameAction", "EMPTY", "Observable", "subscriber", "isScheduler", "value", "isFunction", "last", "arr", "popResultSelector", "args", "isFunction", "popScheduler", "isScheduler", "popNumber", "defaultValue", "isArrayLike", "x", "isPromise", "value", "isFunction", "isInteropObservable", "input", "isFunction", "observable", "isAsyncIterable", "obj", "isFunction", "createInvalidObservableTypeError", "input", "getSymbolIterator", "iterator", "isIterable", "input", "isFunction", "iterator", "readableStreamLikeToAsyncGenerator", "readableStream", "reader", "__await", "_a", "_b", "value", "done", "isReadableStreamLike", "obj", "isFunction", "innerFrom", "input", "Observable", "isInteropObservable", "fromInteropObservable", "isArrayLike", "fromArrayLike", "isPromise", "fromPromise", "isAsyncIterable", "fromAsyncIterable", "isIterable", "fromIterable", "isReadableStreamLike", "fromReadableStreamLike", "createInvalidObservableTypeError", "obj", "subscriber", "obs", "observable", "isFunction", "array", "i", "promise", "value", "err", "reportUnhandledError", "iterable", "iterable_1", "__values", "iterable_1_1", "asyncIterable", "process", "readableStream", "readableStreamLikeToAsyncGenerator", "asyncIterable_1", "__asyncValues", "asyncIterable_1_1", "executeSchedule", "parentSubscription", "scheduler", "work", "delay", "repeat", "scheduleSubscription", "observeOn", "scheduler", "delay", "operate", "source", "subscriber", "createOperatorSubscriber", "value", "executeSchedule", "err", "subscribeOn", "scheduler", "delay", "operate", "source", "subscriber", "scheduleObservable", "input", "scheduler", "innerFrom", "subscribeOn", "observeOn", "schedulePromise", "input", "scheduler", "innerFrom", "subscribeOn", "observeOn", "scheduleArray", "input", "scheduler", "Observable", "subscriber", "i", "scheduleIterable", "input", "scheduler", "Observable", "subscriber", "iterator", "executeSchedule", "value", "done", "_a", "err", "isFunction", "scheduleAsyncIterable", "input", "scheduler", "Observable", "subscriber", "executeSchedule", "iterator", "result", "scheduleReadableStreamLike", "input", "scheduler", "scheduleAsyncIterable", "readableStreamLikeToAsyncGenerator", "scheduled", "input", "scheduler", "isInteropObservable", "scheduleObservable", "isArrayLike", "scheduleArray", "isPromise", "schedulePromise", "isAsyncIterable", "scheduleAsyncIterable", "isIterable", "scheduleIterable", "isReadableStreamLike", "scheduleReadableStreamLike", "createInvalidObservableTypeError", "from", "input", 
"scheduler", "scheduled", "innerFrom", "of", "args", "_i", "scheduler", "popScheduler", "from", "throwError", "errorOrErrorFactory", "scheduler", "errorFactory", "isFunction", "init", "subscriber", "Observable", "EmptyError", "createErrorClass", "_super", "isValidDate", "value", "map", "project", "thisArg", "operate", "source", "subscriber", "index", "createOperatorSubscriber", "value", "isArray", "callOrApply", "fn", "args", "__spreadArray", "__read", "mapOneOrManyArgs", "map", "isArray", "getPrototypeOf", "objectProto", "getKeys", "argsArgArrayOrObject", "args", "first_1", "isPOJO", "keys", "key", "obj", "createObject", "keys", "values", "result", "key", "i", "combineLatest", "args", "_i", "scheduler", "popScheduler", "resultSelector", "popResultSelector", "_a", "argsArgArrayOrObject", "observables", "keys", "from", "result", "Observable", "combineLatestInit", "values", "createObject", "identity", "mapOneOrManyArgs", "valueTransform", "subscriber", "maybeSchedule", "length", "active", "remainingFirstValues", "i", "source", "hasFirstValue", "createOperatorSubscriber", "value", "execute", "subscription", "executeSchedule", "mergeInternals", "source", "subscriber", "project", "concurrent", "onBeforeNext", "expand", "innerSubScheduler", "additionalFinalizer", "buffer", "active", "index", "isComplete", "checkComplete", "outerNext", "value", "doInnerSub", "innerComplete", "innerFrom", "createOperatorSubscriber", "innerValue", "bufferedValue", "executeSchedule", "err", "mergeMap", "project", "resultSelector", "concurrent", "isFunction", "a", "i", "map", "b", "ii", "innerFrom", "operate", "source", "subscriber", "mergeInternals", "mergeAll", "concurrent", "mergeMap", "identity", "concatAll", "mergeAll", "concat", "args", "_i", "concatAll", "from", "popScheduler", "defer", "observableFactory", "Observable", "subscriber", "innerFrom", "nodeEventEmitterMethods", "eventTargetMethods", "jqueryMethods", "fromEvent", "target", "eventName", "options", "resultSelector", "isFunction", "mapOneOrManyArgs", "_a", "__read", "isEventTarget", "methodName", "handler", "isNodeStyleEventEmitter", "toCommonHandlerRegistry", "isJQueryStyleEventEmitter", "add", "remove", "isArrayLike", "mergeMap", "subTarget", "innerFrom", "Observable", "subscriber", "args", "_i", "fromEventPattern", "addHandler", "removeHandler", "resultSelector", "mapOneOrManyArgs", "Observable", "subscriber", "handler", "e", "_i", "retValue", "isFunction", "timer", "dueTime", "intervalOrScheduler", "scheduler", "async", "intervalDuration", "isScheduler", "Observable", "subscriber", "due", "isValidDate", "n", "merge", "args", "_i", "scheduler", "popScheduler", "concurrent", "popNumber", "sources", "innerFrom", "mergeAll", "from", "EMPTY", "NEVER", "Observable", "noop", "isArray", "argsOrArgArray", "args", "filter", "predicate", "thisArg", "operate", "source", "subscriber", "index", "createOperatorSubscriber", "value", "zip", "args", "_i", "resultSelector", "popResultSelector", "sources", "argsOrArgArray", "Observable", "subscriber", "buffers", "completed", "sourceIndex", "innerFrom", "createOperatorSubscriber", "value", "buffer", "result", "__spreadArray", "__read", "i", "EMPTY", "audit", "durationSelector", "operate", "source", "subscriber", "hasValue", "lastValue", "durationSubscriber", "isComplete", "endDuration", "value", "cleanupDuration", "createOperatorSubscriber", "innerFrom", "auditTime", "duration", "scheduler", "asyncScheduler", "audit", "timer", "bufferCount", "bufferSize", "startBufferEvery", "operate", "source", "subscriber", 
"buffers", "count", "createOperatorSubscriber", "value", "toEmit", "buffers_1", "__values", "buffers_1_1", "buffer", "toEmit_1", "toEmit_1_1", "arrRemove", "buffers_2", "buffers_2_1", "catchError", "selector", "operate", "source", "subscriber", "innerSub", "syncUnsub", "handledResult", "createOperatorSubscriber", "err", "innerFrom", "scanInternals", "accumulator", "seed", "hasSeed", "emitOnNext", "emitBeforeComplete", "source", "subscriber", "hasState", "state", "index", "createOperatorSubscriber", "value", "i", "combineLatest", "args", "_i", "resultSelector", "popResultSelector", "pipe", "__spreadArray", "__read", "mapOneOrManyArgs", "operate", "source", "subscriber", "combineLatestInit", "argsOrArgArray", "combineLatestWith", "otherSources", "_i", "combineLatest", "__spreadArray", "__read", "debounce", "durationSelector", "operate", "source", "subscriber", "hasValue", "lastValue", "durationSubscriber", "emit", "value", "createOperatorSubscriber", "noop", "innerFrom", "debounceTime", "dueTime", "scheduler", "asyncScheduler", "operate", "source", "subscriber", "activeTask", "lastValue", "lastTime", "emit", "value", "emitWhenIdle", "targetTime", "now", "createOperatorSubscriber", "defaultIfEmpty", "defaultValue", "operate", "source", "subscriber", "hasValue", "createOperatorSubscriber", "value", "take", "count", "EMPTY", "operate", "source", "subscriber", "seen", "createOperatorSubscriber", "value", "ignoreElements", "operate", "source", "subscriber", "createOperatorSubscriber", "noop", "mapTo", "value", "map", "delayWhen", "delayDurationSelector", "subscriptionDelay", "source", "concat", "take", "ignoreElements", "mergeMap", "value", "index", "innerFrom", "mapTo", "delay", "due", "scheduler", "asyncScheduler", "duration", "timer", "delayWhen", "distinctUntilChanged", "comparator", "keySelector", "identity", "defaultCompare", "operate", "source", "subscriber", "previousKey", "first", "createOperatorSubscriber", "value", "currentKey", "a", "b", "distinctUntilKeyChanged", "key", "compare", "distinctUntilChanged", "x", "y", "throwIfEmpty", "errorFactory", "defaultErrorFactory", "operate", "source", "subscriber", "hasValue", "createOperatorSubscriber", "value", "EmptyError", "endWith", "values", "_i", "source", "concat", "of", "__spreadArray", "__read", "finalize", "callback", "operate", "source", "subscriber", "first", "predicate", "defaultValue", "hasDefaultValue", "source", "filter", "v", "identity", "take", "defaultIfEmpty", "throwIfEmpty", "EmptyError", "takeLast", "count", "EMPTY", "operate", "source", "subscriber", "buffer", "createOperatorSubscriber", "value", "buffer_1", "__values", "buffer_1_1", "merge", "args", "_i", "scheduler", "popScheduler", "concurrent", "popNumber", "argsOrArgArray", "operate", "source", "subscriber", "mergeAll", "from", "__spreadArray", "__read", "mergeWith", "otherSources", "_i", "merge", "__spreadArray", "__read", "repeat", "countOrConfig", "count", "delay", "_a", "EMPTY", "operate", "source", "subscriber", "soFar", "sourceSub", "resubscribe", "notifier", "timer", "innerFrom", "notifierSubscriber_1", "createOperatorSubscriber", "subscribeToSource", "syncUnsub", "scan", "accumulator", "seed", "operate", "scanInternals", "share", "options", "_a", "connector", "Subject", "_b", "resetOnError", "_c", "resetOnComplete", "_d", "resetOnRefCountZero", "wrapperSource", "connection", "resetConnection", "subject", "refCount", "hasCompleted", "hasErrored", "cancelReset", "reset", "resetAndUnsubscribe", "conn", "operate", "source", "subscriber", "dest", "handleReset", 
"SafeSubscriber", "value", "err", "innerFrom", "on", "args", "_i", "onSubscriber", "__spreadArray", "__read", "shareReplay", "configOrBufferSize", "windowTime", "scheduler", "bufferSize", "refCount", "_a", "_b", "_c", "share", "ReplaySubject", "skip", "count", "filter", "_", "index", "skipUntil", "notifier", "operate", "source", "subscriber", "taking", "skipSubscriber", "createOperatorSubscriber", "noop", "innerFrom", "value", "startWith", "values", "_i", "scheduler", "popScheduler", "operate", "source", "subscriber", "concat", "switchMap", "project", "resultSelector", "operate", "source", "subscriber", "innerSubscriber", "index", "isComplete", "checkComplete", "createOperatorSubscriber", "value", "innerIndex", "outerIndex", "innerFrom", "innerValue", "takeUntil", "notifier", "operate", "source", "subscriber", "innerFrom", "createOperatorSubscriber", "noop", "takeWhile", "predicate", "inclusive", "operate", "source", "subscriber", "index", "createOperatorSubscriber", "value", "result", "tap", "observerOrNext", "error", "complete", "tapObserver", "isFunction", "operate", "source", "subscriber", "_a", "isUnsub", "createOperatorSubscriber", "value", "err", "_b", "identity", "throttle", "durationSelector", "config", "operate", "source", "subscriber", "_a", "_b", "leading", "_c", "trailing", "hasValue", "sendValue", "throttled", "isComplete", "endThrottling", "send", "cleanupThrottling", "startThrottle", "value", "innerFrom", "createOperatorSubscriber", "throttleTime", "duration", "scheduler", "config", "asyncScheduler", "duration$", "timer", "throttle", "withLatestFrom", "inputs", "_i", "project", "popResultSelector", "operate", "source", "subscriber", "len", "otherValues", "hasValue", "ready", "i", "innerFrom", "createOperatorSubscriber", "value", "identity", "noop", "values", "__spreadArray", "__read", "zip", "sources", "_i", "operate", "source", "subscriber", "__spreadArray", "__read", "zipWith", "otherInputs", "_i", "zip", "__spreadArray", "__read", "watchDocument", "document$", "ReplaySubject", "fromEvent", "getElements", "selector", "node", "getElement", "el", "getOptionalElement", "getActiveElement", "_a", "_b", "_c", "_d", "observer$", "merge", "fromEvent", "debounceTime", "startWith", "map", "getActiveElement", "shareReplay", "watchElementFocus", "el", "active", "distinctUntilChanged", "watchElementHover", "el", "timeout", "defer", "merge", "fromEvent", "map", "debounce", "active", "timer", "identity", "startWith", "appendChild", "el", "child", "node", "h", "tag", "attributes", "children", "attr", "round", "value", "digits", "watchScript", "src", "script", "h", "defer", "merge", "fromEvent", "switchMap", "throwError", "map", "finalize", "take", "entry$", "Subject", "observer$", "defer", "watchScript", "of", "map", "entries", "entry", "switchMap", "observer", "merge", "NEVER", "finalize", "shareReplay", "getElementSize", "el", "watchElementSize", "target", "tap", "filter", "startWith", "getElementContentSize", "el", "getElementContainer", "parent", "getElementContainers", "containers", "getElementOffset", "el", "getElementOffsetAbsolute", "rect", "watchElementOffset", "merge", "fromEvent", "auditTime", "animationFrameScheduler", "map", "startWith", "getElementContentOffset", "el", "watchElementContentOffset", "merge", "fromEvent", "auditTime", "animationFrameScheduler", "map", "startWith", "entry$", "Subject", "observer$", "defer", "of", "entries", "entry", "switchMap", "observer", "merge", "NEVER", "finalize", "shareReplay", "watchElementVisibility", "el", "tap", "filter", "target", 
"map", "isIntersecting", "watchElementBoundary", "threshold", "watchElementContentOffset", "y", "visible", "getElementSize", "content", "getElementContentSize", "distinctUntilChanged", "toggles", "getElement", "getToggle", "name", "setToggle", "value", "watchToggle", "el", "fromEvent", "map", "startWith", "isSusceptibleToKeyboard", "el", "type", "watchComposition", "merge", "fromEvent", "map", "startWith", "watchKeyboard", "keyboard$", "filter", "ev", "getToggle", "mode", "active", "getActiveElement", "share", "switchMap", "EMPTY", "getLocation", "setLocation", "url", "navigate", "feature", "el", "h", "watchLocation", "Subject", "getLocationHash", "setLocationHash", "hash", "el", "h", "ev", "watchLocationHash", "location$", "merge", "fromEvent", "map", "startWith", "filter", "shareReplay", "watchLocationTarget", "id", "getOptionalElement", "watchMedia", "query", "media", "fromEventPattern", "next", "startWith", "watchPrint", "merge", "fromEvent", "map", "at", "query$", "factory", "switchMap", "active", "EMPTY", "request", "url", "options", "Observable", "observer", "req", "event", "_a", "length", "requestJSON", "switchMap", "res", "map", "body", "shareReplay", "requestHTML", "dom", "requestXML", "getViewportOffset", "watchViewportOffset", "merge", "fromEvent", "map", "startWith", "getViewportSize", "watchViewportSize", "fromEvent", "map", "startWith", "watchViewport", "combineLatest", "watchViewportOffset", "watchViewportSize", "map", "offset", "size", "shareReplay", "watchViewportAt", "el", "viewport$", "header$", "size$", "distinctUntilKeyChanged", "offset$", "combineLatest", "map", "getElementOffset", "height", "offset", "size", "x", "y", "recv", "worker", "fromEvent", "ev", "send", "send$", "Subject", "data", "watchWorker", "url", "recv$", "worker$", "done$", "ignoreElements", "endWith", "mergeWith", "takeUntil", "share", "script", "getElement", "config", "getLocation", "configuration", "feature", "flag", "translation", "key", "value", "getComponentElement", "type", "node", "getElement", "getComponentElements", "getElements", "watchAnnounce", "el", "button", "getElement", "fromEvent", "map", "content", "mountAnnounce", "feature", "EMPTY", "defer", "push$", "Subject", "hash", "tap", "state", "finalize", "__spreadValues", "watchConsent", "el", "target$", "map", "target", "mountConsent", "options", "internal$", "Subject", "hidden", "tap", "state", "finalize", "__spreadValues", "renderTooltip", "id", "style", "h", "renderInlineTooltip2", "children", "renderAnnotation", "id", "prefix", "anchor", "h", "renderTooltip", "renderClipboardButton", "id", "h", "translation", "renderSearchDocument", "document", "flag", "parent", "teaser", "missing", "key", "list", "h", "config", "configuration", "url", "feature", "match", "highlight", "value", "tags", "tag", "type", "translation", "renderSearchResultItem", "result", "threshold", "docs", "doc", "article", "index", "best", "more", "children", "section", "renderSourceFacts", "facts", "h", "key", "value", "round", "renderTabbedControl", "type", "classes", "h", "renderTable", "table", "h", "renderVersion", "version", "_a", "config", "configuration", "url", "h", "renderVersionSelector", "versions", "active", "translation", "sequence", "watchTooltip2", "el", "active$", "combineLatest", "watchElementFocus", "watchElementHover", "map", "focus", "hover", "distinctUntilChanged", "offset$", "defer", "getElementContainers", "mergeMap", "watchElementContentOffset", "throttleTime", "getElementOffsetAbsolute", "first", "active", "switchMap", "offset", "share", 
"mountTooltip2", "dependencies", "content$", "viewport$", "id", "push$", "Subject", "show$", "BehaviorSubject", "ignoreElements", "endWith", "node$", "debounce", "timer", "queueScheduler", "EMPTY", "tap", "node", "startWith", "states", "origin$", "filter", "withLatestFrom", "_", "size", "host", "x", "height", "getElementSize", "origin", "getElement", "observeOn", "animationFrameScheduler", "state", "finalize", "__spreadValues", "mountInlineTooltip2", "container", "Observable", "observer", "title", "renderInlineTooltip2", "watchAnnotation", "el", "container", "offset$", "defer", "combineLatest", "watchElementOffset", "watchElementContentOffset", "map", "x", "y", "scroll", "width", "height", "getElementSize", "watchElementFocus", "switchMap", "active", "offset", "take", "mountAnnotation", "target$", "tooltip", "index", "push$", "Subject", "done$", "ignoreElements", "endWith", "watchElementVisibility", "takeUntil", "visible", "merge", "filter", "debounceTime", "auditTime", "animationFrameScheduler", "throttleTime", "origin", "fromEvent", "ev", "withLatestFrom", "_a", "parent", "getActiveElement", "target", "delay", "tap", "state", "finalize", "__spreadValues", "findHosts", "container", "getElements", "findMarkers", "markers", "el", "nodes", "it", "node", "text", "match", "id", "force", "marker", "swap", "source", "target", "mountAnnotationList", "target$", "print$", "parent", "prefix", "annotations", "getOptionalElement", "renderAnnotation", "EMPTY", "defer", "push$", "Subject", "done$", "ignoreElements", "endWith", "pairs", "annotation", "getElement", "takeUntil", "active", "inner", "child", "merge", "mountAnnotation", "finalize", "share", "findList", "el", "sibling", "mountAnnotationBlock", "options", "defer", "list", "mountAnnotationList", "EMPTY", "import_clipboard", "sequence", "findCandidateList", "el", "sibling", "watchCodeBlock", "watchElementSize", "map", "width", "getElementContentSize", "distinctUntilKeyChanged", "mountCodeBlock", "options", "hover", "factory$", "defer", "push$", "Subject", "done$", "takeLast", "scrollable", "content$", "ClipboardJS", "feature", "parent", "button", "renderClipboardButton", "mountInlineTooltip2", "container", "list", "annotations$", "mountAnnotationList", "takeUntil", "height", "distinctUntilChanged", "switchMap", "active", "EMPTY", "getElements", "tap", "state", "finalize", "__spreadValues", "mergeWith", "watchElementVisibility", "filter", "visible", "take", "watchDetails", "el", "target$", "print$", "open", "merge", "map", "target", "filter", "details", "active", "tap", "mountDetails", "options", "defer", "push$", "Subject", "action", "reveal", "state", "finalize", "__spreadValues", "mermaid_default", "mermaid$", "sequence", "fetchScripts", "watchScript", "of", "mountMermaid", "el", "tap", "mermaid_default", "map", "shareReplay", "__async", "id", "host", "h", "text", "svg", "fn", "shadow", "sentinel", "h", "mountDataTable", "el", "renderTable", "of", "watchContentTabs", "inputs", "initial", "input", "merge", "fromEvent", "map", "getElement", "startWith", "active", "mountContentTabs", "el", "viewport$", "target$", "container", "getElements", "prev", "renderTabbedControl", "next", "defer", "push$", "Subject", "done$", "ignoreElements", "endWith", "combineLatest", "watchElementSize", "watchElementVisibility", "takeUntil", "auditTime", "animationFrameScheduler", "size", "offset", "getElementOffset", "width", "getElementSize", "content", "getElementContentOffset", "watchElementContentOffset", "getElementContentSize", "direction", "filter", "label", 
"h", "ev", "tap", "feature", "skip", "withLatestFrom", "tab", "y", "set", "tabs", "media", "state", "finalize", "__spreadValues", "subscribeOn", "asyncScheduler", "mountContent", "el", "viewport$", "target$", "print$", "merge", "getElements", "child", "mountAnnotationBlock", "mountCodeBlock", "mountMermaid", "mountDataTable", "mountDetails", "mountContentTabs", "feature", "mountInlineTooltip2", "watchDialog", "_el", "alert$", "switchMap", "message", "merge", "of", "delay", "map", "active", "mountDialog", "el", "options", "inner", "getElement", "defer", "push$", "Subject", "tap", "state", "finalize", "__spreadValues", "sequence", "watchTooltip", "el", "host", "width", "getElementSize", "container", "getElementContainer", "scroll$", "watchElementContentOffset", "of", "active$", "merge", "watchElementFocus", "watchElementHover", "distinctUntilChanged", "combineLatest", "map", "active", "scroll", "x", "y", "getElementOffset", "size", "table", "mountTooltip", "title", "EMPTY", "id", "tooltip", "renderTooltip", "typeset", "getElement", "defer", "push$", "Subject", "offset", "filter", "debounceTime", "auditTime", "animationFrameScheduler", "throttleTime", "origin", "tap", "state", "finalize", "__spreadValues", "subscribeOn", "asyncScheduler", "isHidden", "viewport$", "feature", "of", "direction$", "map", "y", "bufferCount", "a", "b", "distinctUntilKeyChanged", "hidden$", "combineLatest", "filter", "offset", "direction", "distinctUntilChanged", "search$", "watchToggle", "search", "switchMap", "active", "startWith", "watchHeader", "el", "options", "defer", "watchElementSize", "height", "hidden", "shareReplay", "mountHeader", "header$", "main$", "push$", "Subject", "done$", "ignoreElements", "endWith", "combineLatestWith", "tooltips", "from", "getElements", "mergeMap", "child", "mountTooltip", "takeUntil", "state", "__spreadValues", "mergeWith", "watchHeaderTitle", "el", "viewport$", "header$", "watchViewportAt", "map", "y", "height", "getElementSize", "distinctUntilKeyChanged", "mountHeaderTitle", "options", "defer", "push$", "Subject", "active", "heading", "getOptionalElement", "EMPTY", "tap", "state", "finalize", "__spreadValues", "watchMain", "el", "viewport$", "header$", "adjust$", "map", "height", "distinctUntilChanged", "border$", "switchMap", "watchElementSize", "distinctUntilKeyChanged", "combineLatest", "header", "top", "bottom", "y", "a", "b", "watchPalette", "inputs", "current", "input", "index", "of", "mergeMap", "fromEvent", "map", "startWith", "shareReplay", "mountPalette", "el", "getElements", "meta", "h", "scheme", "media$", "watchMedia", "defer", "push$", "Subject", "palette", "media", "key", "value", "label", "filter", "ev", "withLatestFrom", "_", "header", "getComponentElement", "style", "color", "observeOn", "asyncScheduler", "takeUntil", "skip", "repeat", "tap", "state", "finalize", "__spreadValues", "mountProgress", "el", "progress$", "defer", "push$", "Subject", "value", "tap", "finalize", "map", "import_clipboard", "extract", "el", "copy", "text", "setupClipboardJS", "alert$", "ClipboardJS", "Observable", "subscriber", "getElement", "ev", "tap", "map", "translation", "resolve", "url", "base", "extract", "document", "sitemap", "el", "getElements", "getElement", "links", "link", "href", "fetchSitemap", "requestXML", "map", "catchError", "of", "handle", "ev", "sitemap", "EMPTY", "el", "url", "of", "head", "document", "tags", "getElements", "resolve", "key", "value", "inject", "next", "selector", "feature", "source", "getOptionalElement", "target", "html", "name", "container", 
"getComponentElement", "concat", "switchMap", "script", "Observable", "observer", "ignoreElements", "endWith", "setupInstantNavigation", "location$", "viewport$", "progress$", "config", "configuration", "sitemap$", "fetchSitemap", "instant$", "fromEvent", "combineLatestWith", "share", "history$", "map", "getLocation", "withLatestFrom", "offset", "merge", "document$", "distinctUntilKeyChanged", "requestHTML", "catchError", "setLocation", "_", "distinctUntilChanged", "a", "b", "tap", "_a", "_b", "setLocationHash", "debounceTime", "import_escape_html", "setupSearchHighlighter", "config", "regex", "term", "separator", "highlight", "_", "data", "query", "match", "value", "escapeHTML", "isSearchReadyMessage", "message", "isSearchResultMessage", "setupSearchWorker", "url", "index$", "worker$", "watchWorker", "merge", "of", "watchToggle", "first", "active", "switchMap", "config", "docs", "feature", "setupVersionSelector", "document$", "config", "configuration", "versions$", "requestJSON", "catchError", "EMPTY", "current$", "map", "versions", "current", "version", "aliases", "switchMap", "urls", "fromEvent", "filter", "ev", "withLatestFrom", "el", "url", "of", "fetchSitemap", "sitemap", "path", "getLocation", "setLocation", "combineLatest", "getElement", "renderVersionSelector", "_a", "outdated", "ignored", "main", "ignore", "warning", "getComponentElements", "watchSearchQuery", "el", "worker$", "searchParams", "getLocation", "setToggle", "watchToggle", "first", "active", "url", "focus$", "watchElementFocus", "value$", "merge", "isSearchReadyMessage", "fromEvent", "map", "distinctUntilChanged", "combineLatest", "value", "focus", "shareReplay", "mountSearchQuery", "push$", "Subject", "done$", "ignoreElements", "endWith", "_", "query", "distinctUntilKeyChanged", "takeUntil", "label", "getElement", "tap", "state", "finalize", "__spreadValues", "mountSearchResult", "el", "worker$", "query$", "push$", "Subject", "boundary$", "watchElementBoundary", "filter", "container", "meta", "getElement", "list", "watchToggle", "active", "withLatestFrom", "skipUntil", "first", "isSearchReadyMessage", "items", "value", "translation", "count", "round", "render$", "tap", "switchMap", "merge", "of", "bufferCount", "zipWith", "chunk", "map", "renderSearchResultItem", "share", "item", "mergeMap", "details", "getOptionalElement", "EMPTY", "fromEvent", "takeUntil", "isSearchResultMessage", "data", "state", "finalize", "__spreadValues", "watchSearchShare", "_el", "query$", "map", "value", "url", "getLocation", "mountSearchShare", "el", "options", "push$", "Subject", "done$", "ignoreElements", "endWith", "fromEvent", "takeUntil", "ev", "tap", "state", "finalize", "__spreadValues", "mountSearchSuggest", "el", "worker$", "keyboard$", "push$", "Subject", "query", "getComponentElement", "query$", "merge", "fromEvent", "observeOn", "asyncScheduler", "map", "distinctUntilChanged", "combineLatestWith", "suggest", "value", "words", "last", "filter", "mode", "key", "isSearchResultMessage", "data", "tap", "state", "finalize", "mountSearch", "el", "index$", "keyboard$", "config", "configuration", "worker$", "setupSearchWorker", "query", "getComponentElement", "result", "fromEvent", "filter", "target", "setToggle", "mode", "key", "active", "getActiveElement", "anchors", "anchor", "getElements", "article", "best", "a", "b", "els", "i", "query$", "mountSearchQuery", "merge", "mountSearchResult", "mergeWith", "getComponentElements", "child", "mountSearchShare", "mountSearchSuggest", "err", "NEVER", "mountSearchHiglight", "el", "index$", 
"location$", "combineLatest", "startWith", "getLocation", "filter", "url", "map", "index", "setupSearchHighlighter", "fn", "_a", "nodes", "it", "node", "original", "replaced", "text", "childNodes", "h", "watchSidebar", "el", "viewport$", "main$", "parent", "adjust", "combineLatest", "map", "offset", "height", "y", "distinctUntilChanged", "a", "b", "mountSidebar", "_a", "_b", "header$", "options", "__objRest", "inner", "getElement", "getElementOffset", "defer", "push$", "Subject", "done$", "ignoreElements", "endWith", "next$", "auditTime", "animationFrameScheduler", "withLatestFrom", "first", "item", "getElements", "container", "getElementSize", "from", "mergeMap", "label", "fromEvent", "observeOn", "asyncScheduler", "takeUntil", "input", "tap", "state", "finalize", "__spreadValues", "fetchSourceFactsFromGitHub", "user", "repo", "url", "zip", "requestJSON", "catchError", "EMPTY", "map", "release", "defaultIfEmpty", "info", "__spreadValues", "fetchSourceFactsFromGitLab", "base", "project", "url", "requestJSON", "catchError", "EMPTY", "map", "star_count", "forks_count", "defaultIfEmpty", "fetchSourceFacts", "url", "match", "user", "repo", "fetchSourceFactsFromGitHub", "base", "slug", "fetchSourceFactsFromGitLab", "EMPTY", "fetch$", "watchSource", "el", "defer", "cached", "of", "getComponentElements", "consent", "EMPTY", "fetchSourceFacts", "tap", "facts", "catchError", "filter", "map", "shareReplay", "mountSource", "inner", "getElement", "push$", "Subject", "renderSourceFacts", "state", "finalize", "__spreadValues", "watchTabs", "el", "viewport$", "header$", "watchElementSize", "switchMap", "watchViewportAt", "map", "y", "distinctUntilKeyChanged", "mountTabs", "options", "defer", "push$", "Subject", "hidden", "feature", "of", "tap", "state", "finalize", "__spreadValues", "watchTableOfContents", "el", "viewport$", "header$", "table", "anchors", "getElements", "anchor", "id", "target", "getOptionalElement", "adjust$", "distinctUntilKeyChanged", "map", "height", "main", "getComponentElement", "grid", "getElement", "share", "watchElementSize", "switchMap", "body", "defer", "path", "of", "index", "offset", "parent", "a", "b", "combineLatestWith", "adjust", "scan", "prev", "next", "y", "size", "last", "distinctUntilChanged", "startWith", "bufferCount", "mountTableOfContents", "main$", "target$", "push$", "Subject", "done$", "ignoreElements", "endWith", "feature", "smooth$", "merge", "debounceTime", "filter", "observeOn", "asyncScheduler", "withLatestFrom", "behavior", "container", "getElementContainer", "getElementSize", "takeUntil", "skip", "repeat", "url", "getLocation", "active", "hash", "tap", "state", "finalize", "__spreadValues", "watchBackToTop", "_el", "viewport$", "main$", "target$", "direction$", "map", "y", "bufferCount", "b", "distinctUntilChanged", "active$", "active", "combineLatest", "direction", "takeUntil", "skip", "endWith", "repeat", "hidden", "mountBackToTop", "el", "header$", "push$", "Subject", "done$", "ignoreElements", "distinctUntilKeyChanged", "height", "fromEvent", "ev", "tap", "state", "finalize", "__spreadValues", "patchEllipsis", "document$", "viewport$", "switchMap", "getElements", "mergeMap", "el", "watchElementVisibility", "takeUntil", "skip", "filter", "visible", "map", "take", "text", "host", "feature", "mountInlineTooltip2", "finalize", "EMPTY", "patchIndeterminate", "document$", "tablet$", "switchMap", "getElements", "tap", "el", "mergeMap", "fromEvent", "takeWhile", "map", "withLatestFrom", "tablet", "isAppleDevice", "patchScrollfix", "document$", "switchMap", 
"getElements", "tap", "el", "filter", "mergeMap", "fromEvent", "map", "top", "patchScrolllock", "viewport$", "tablet$", "combineLatest", "watchToggle", "map", "active", "tablet", "switchMap", "of", "delay", "withLatestFrom", "y", "value", "obj", "data", "key", "x", "y", "nodes", "parent", "i", "node", "fetchSearchIndex", "watchScript", "config", "map", "shareReplay", "requestJSON", "document$", "watchDocument", "location$", "watchLocation", "target$", "watchLocationTarget", "keyboard$", "watchKeyboard", "viewport$", "watchViewport", "tablet$", "watchMedia", "screen$", "print$", "watchPrint", "configuration", "index$", "NEVER", "alert$", "Subject", "setupClipboardJS", "progress$", "feature", "setupInstantNavigation", "_a", "setupVersionSelector", "merge", "delay", "setToggle", "filter", "mode", "key", "prev", "getOptionalElement", "setLocation", "next", "active", "getActiveElement", "patchEllipsis", "patchIndeterminate", "patchScrollfix", "patchScrolllock", "header$", "watchHeader", "getComponentElement", "main$", "switchMap", "el", "watchMain", "control$", "getComponentElements", "mountConsent", "mountDialog", "mountHeader", "mountPalette", "mountProgress", "mountSearch", "mountSource", "content$", "defer", "mountAnnounce", "mountContent", "mountSearchHiglight", "EMPTY", "mountHeaderTitle", "at", "mountSidebar", "mountTabs", "mountTableOfContents", "mountBackToTop", "component$", "mergeWith"]
+  "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", 
"node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", 
"node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", 
"src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"],
+  "sourcesContent": ["(function (global, factory) {\n  typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n  typeof define === 'function' && define.amd ? define(factory) :\n  (factory());\n}(this, (function () { 'use strict';\n\n  /**\n   * Applies the :focus-visible polyfill at the given scope.\n   * A scope in this case is either the top-level Document or a Shadow Root.\n   *\n   * @param {(Document|ShadowRoot)} scope\n   * @see https://github.com/WICG/focus-visible\n   */\n  function applyFocusVisiblePolyfill(scope) {\n    var hadKeyboardEvent = true;\n    var hadFocusVisibleRecently = false;\n    var hadFocusVisibleRecentlyTimeout = null;\n\n    var inputTypesAllowlist = {\n      text: true,\n      search: true,\n      url: true,\n      tel: true,\n      email: true,\n      password: true,\n      number: true,\n      date: true,\n      month: true,\n      week: true,\n      time: true,\n      datetime: true,\n      'datetime-local': true\n    };\n\n    /**\n     * Helper function for legacy browsers and iframes which sometimes focus\n     * elements like document, body, and non-interactive SVG.\n     * @param {Element} el\n     */\n    function isValidFocusTarget(el) {\n      if (\n        el &&\n        el !== document &&\n        el.nodeName !== 'HTML' &&\n        el.nodeName !== 'BODY' &&\n        'classList' in el &&\n        'contains' in el.classList\n      ) {\n        return true;\n      }\n      return false;\n    }\n\n    /**\n     * Computes whether the given element should automatically trigger the\n     * `focus-visible` class being added, i.e. whether it should always match\n     * `:focus-visible` when focused.\n     * @param {Element} el\n     * @return {boolean}\n     */\n    function focusTriggersKeyboardModality(el) {\n      var type = el.type;\n      var tagName = el.tagName;\n\n      if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n        return true;\n      }\n\n      if (tagName === 'TEXTAREA' && !el.readOnly) {\n        return true;\n      }\n\n      if (el.isContentEditable) {\n        return true;\n      }\n\n      return false;\n    }\n\n    /**\n     * Add the `focus-visible` class to the given element if it was not added by\n     * the author.\n     * @param {Element} el\n     */\n    function addFocusVisibleClass(el) {\n      if (el.classList.contains('focus-visible')) {\n        return;\n      }\n      el.classList.add('focus-visible');\n      el.setAttribute('data-focus-visible-added', '');\n    }\n\n    /**\n     * Remove the `focus-visible` class from the given element if it was not\n     * originally added by the author.\n     * @param {Element} el\n     */\n    function removeFocusVisibleClass(el) {\n      if (!el.hasAttribute('data-focus-visible-added')) {\n        return;\n      }\n      el.classList.remove('focus-visible');\n      el.removeAttribute('data-focus-visible-added');\n    }\n\n    /**\n     * If the most recent user interaction was via the keyboard;\n     * and the key press did not include a meta, alt/option, or control key;\n     * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n     * Apply `focus-visible` to any current active element and keep track\n     * of our keyboard modality state with `hadKeyboardEvent`.\n     * @param {KeyboardEvent} e\n     */\n    function onKeyDown(e) {\n      if (e.metaKey || e.altKey || e.ctrlKey) {\n        return;\n      }\n\n      if (isValidFocusTarget(scope.activeElement)) {\n        addFocusVisibleClass(scope.activeElement);\n      }\n\n      hadKeyboardEvent = true;\n    }\n\n    /**\n     * If at any point a user clicks with a pointing device, ensure that we change\n     * the modality away from keyboard.\n     * This avoids the situation where a user presses a key on an already focused\n     * element, and then clicks on a different element, focusing it with a\n     * pointing device, while we still think we're in keyboard modality.\n     * @param {Event} e\n     */\n    function onPointerDown(e) {\n      hadKeyboardEvent = false;\n    }\n\n    /**\n     * On `focus`, add the `focus-visible` class to the target if:\n     * - the target received focus as a result of keyboard navigation, or\n     * - the event target is an element that will likely require interaction\n     *   via the keyboard (e.g. a text box)\n     * @param {Event} e\n     */\n    function onFocus(e) {\n      // Prevent IE from focusing the document or HTML element.\n      if (!isValidFocusTarget(e.target)) {\n        return;\n      }\n\n      if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n        addFocusVisibleClass(e.target);\n      }\n    }\n\n    /**\n     * On `blur`, remove the `focus-visible` class from the target.\n     * @param {Event} e\n     */\n    function onBlur(e) {\n      if (!isValidFocusTarget(e.target)) {\n        return;\n      }\n\n      if (\n        e.target.classList.contains('focus-visible') ||\n        e.target.hasAttribute('data-focus-visible-added')\n      ) {\n        // To detect a tab/window switch, we look for a blur event followed\n        // rapidly by a visibility change.\n        // If we don't see a visibility change within 100ms, it's probably a\n        // regular focus change.\n        hadFocusVisibleRecently = true;\n        window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n        hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n          hadFocusVisibleRecently = false;\n        }, 100);\n        removeFocusVisibleClass(e.target);\n      }\n    }\n\n    /**\n     * If the user changes tabs, keep track of whether or not the previously\n     * focused element had .focus-visible.\n     * @param {Event} e\n     */\n    function onVisibilityChange(e) {\n      if (document.visibilityState === 'hidden') {\n        // If the tab becomes active again, the browser will handle calling focus\n        // on the element (Safari actually calls it twice).\n        // If this tab change caused a blur on an element with focus-visible,\n        // re-apply the class when the user switches back to the tab.\n        if (hadFocusVisibleRecently) {\n          hadKeyboardEvent = true;\n        }\n        addInitialPointerMoveListeners();\n      }\n    }\n\n    /**\n     * Add a group of listeners to detect usage of any pointing devices.\n     * These listeners will be added when the polyfill first loads, and anytime\n     * the window is blurred, so that they are active when the window regains\n     * focus.\n     */\n    function addInitialPointerMoveListeners() {\n      document.addEventListener('mousemove', onInitialPointerMove);\n      
document.addEventListener('mousedown', onInitialPointerMove);\n      document.addEventListener('mouseup', onInitialPointerMove);\n      document.addEventListener('pointermove', onInitialPointerMove);\n      document.addEventListener('pointerdown', onInitialPointerMove);\n      document.addEventListener('pointerup', onInitialPointerMove);\n      document.addEventListener('touchmove', onInitialPointerMove);\n      document.addEventListener('touchstart', onInitialPointerMove);\n      document.addEventListener('touchend', onInitialPointerMove);\n    }\n\n    function removeInitialPointerMoveListeners() {\n      document.removeEventListener('mousemove', onInitialPointerMove);\n      document.removeEventListener('mousedown', onInitialPointerMove);\n      document.removeEventListener('mouseup', onInitialPointerMove);\n      document.removeEventListener('pointermove', onInitialPointerMove);\n      document.removeEventListener('pointerdown', onInitialPointerMove);\n      document.removeEventListener('pointerup', onInitialPointerMove);\n      document.removeEventListener('touchmove', onInitialPointerMove);\n      document.removeEventListener('touchstart', onInitialPointerMove);\n      document.removeEventListener('touchend', onInitialPointerMove);\n    }\n\n    /**\n     * When the polfyill first loads, assume the user is in keyboard modality.\n     * If any event is received from a pointing device (e.g. mouse, pointer,\n     * touch), turn off keyboard modality.\n     * This accounts for situations where focus enters the page from the URL bar.\n     * @param {Event} e\n     */\n    function onInitialPointerMove(e) {\n      // Work around a Safari quirk that fires a mousemove on <html> whenever the\n      // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n      if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n        return;\n      }\n\n      hadKeyboardEvent = false;\n      removeInitialPointerMoveListeners();\n    }\n\n    // For some kinds of state, we are interested in changes at the global scope\n    // only. For example, global pointer input, global key presses and global\n    // visibility change should affect the state at every scope:\n    document.addEventListener('keydown', onKeyDown, true);\n    document.addEventListener('mousedown', onPointerDown, true);\n    document.addEventListener('pointerdown', onPointerDown, true);\n    document.addEventListener('touchstart', onPointerDown, true);\n    document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n    addInitialPointerMoveListeners();\n\n    // For focus and blur, we specifically care about state changes in the local\n    // scope. This is because focus / blur events that originate from within a\n    // shadow root are not re-dispatched from the host element if it was already\n    // the active element in its own scope:\n    scope.addEventListener('focus', onFocus, true);\n    scope.addEventListener('blur', onBlur, true);\n\n    // We detect that a node is a ShadowRoot by ensuring that it is a\n    // DocumentFragment and also has a host property. This check covers native\n    // implementation and polyfill implementation transparently. If we only cared\n    // about the native implementation, we could just check if the scope was\n    // an instance of a ShadowRoot.\n    if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n      // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n      // have a root element to add a class to. 
So, we add this attribute to the\n      // host element instead:\n      scope.host.setAttribute('data-js-focus-visible', '');\n    } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n      document.documentElement.classList.add('js-focus-visible');\n      document.documentElement.setAttribute('data-js-focus-visible', '');\n    }\n  }\n\n  // It is important to wrap all references to global window and document in\n  // these checks to support server-side rendering use cases\n  // @see https://github.com/WICG/focus-visible/issues/199\n  if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n    // Make the polyfill helper globally available. This can be used as a signal\n    // to interested libraries that wish to coordinate with the polyfill for e.g.,\n    // applying the polyfill to a shadow root:\n    window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n    // Notify interested libraries of the polyfill's presence, in case the\n    // polyfill was loaded lazily:\n    var event;\n\n    try {\n      event = new CustomEvent('focus-visible-polyfill-ready');\n    } catch (error) {\n      // IE11 does not support using CustomEvent as a constructor directly:\n      event = document.createEvent('CustomEvent');\n      event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n    }\n\n    window.dispatchEvent(event);\n  }\n\n  if (typeof document !== 'undefined') {\n    // Apply the polyfill to the global document, so that no JavaScript\n    // coordination is required to use the polyfill in the top-level document:\n    applyFocusVisiblePolyfill(document);\n  }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param  {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n  var str = '' + string;\n  var match = matchHtmlRegExp.exec(str);\n\n  if (!match) {\n    return str;\n  }\n\n  var escape;\n  var html = '';\n  var index = 0;\n  var lastIndex = 0;\n\n  for (index = match.index; index < str.length; index++) {\n    switch (str.charCodeAt(index)) {\n      case 34: // \"\n        escape = '&quot;';\n        break;\n      case 38: // &\n        escape = '&amp;';\n        break;\n      case 39: // '\n        escape = '&#39;';\n        break;\n      case 60: // <\n        escape = '&lt;';\n        break;\n      case 62: // >\n        escape = '&gt;';\n        break;\n      default:\n        continue;\n    }\n\n    if (lastIndex !== index) {\n      html += str.substring(lastIndex, index);\n    }\n\n    lastIndex = index + 1;\n    html += escape;\n  }\n\n  return lastIndex !== index\n    ? 
html + str.substring(lastIndex, index)\n    : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n  \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n  try {\n    return document.execCommand(type);\n  } catch (err) {\n    return false;\n  }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n  var selectedText = select_default()(target);\n  command('cut');\n  return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n  var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n  var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n  fakeElement.style.fontSize = '12pt'; // Reset box model\n\n  fakeElement.style.border = '0';\n  fakeElement.style.padding = '0';\n  fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n  fakeElement.style.position = 'absolute';\n  fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n  var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n  fakeElement.style.top = \"\".concat(yPosition, \"px\");\n  fakeElement.setAttribute('readonly', '');\n  fakeElement.value = value;\n  return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n  var fakeElement = createFakeElement(value);\n  options.container.appendChild(fakeElement);\n  var selectedText = select_default()(fakeElement);\n  command('copy');\n  fakeElement.remove();\n  return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n  var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n    container: document.body\n  };\n  var selectedText = '';\n\n  if (typeof target === 'string') {\n    selectedText = fakeCopyAction(target, options);\n  } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n    // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n    selectedText = fakeCopyAction(target.value, options);\n  } else {\n    selectedText = select_default()(target);\n    command('copy');\n  }\n\n  return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n  var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n  // Defines base properties passed from constructor.\n  var _options$action = options.action,\n      action = _options$action === void 0 ? 'copy' : _options$action,\n      container = options.container,\n      target = options.target,\n      text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n  if (action !== 'copy' && action !== 'cut') {\n    throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n  } // Sets the `target` property using an element that will be have its content copied.\n\n\n  if (target !== undefined) {\n    if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n      if (action === 'copy' && target.hasAttribute('disabled')) {\n        throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n      }\n\n      if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n        throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n      }\n    } else {\n      throw new Error('Invalid \"target\" value, use a valid Element');\n    }\n  } // Define selection strategy based on `text` property.\n\n\n  if (text) {\n    return actions_copy(text, {\n      container: container\n    });\n  } // Defines which selection strategy based on `target` property.\n\n\n  if (target) {\n    return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n      container: container\n    });\n  }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if 
(typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n  var attribute = \"data-clipboard-\".concat(suffix);\n\n  if (!element.hasAttribute(attribute)) {\n    return;\n  }\n\n  return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n  _inherits(Clipboard, _Emitter);\n\n  var _super = _createSuper(Clipboard);\n\n  /**\n   * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n   * @param {Object} options\n   */\n  function Clipboard(trigger, options) {\n    var _this;\n\n    _classCallCheck(this, Clipboard);\n\n    _this = _super.call(this);\n\n    _this.resolveOptions(options);\n\n    _this.listenClick(trigger);\n\n    return _this;\n  }\n  /**\n   * Defines if attributes would be resolved using internal setter functions\n   * or custom functions that were passed in the constructor.\n   * @param {Object} options\n   */\n\n\n  _createClass(Clipboard, [{\n    key: \"resolveOptions\",\n    value: function resolveOptions() {\n      var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n      this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n      this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n      this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n      this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n    }\n    /**\n     * Adds a click event listener to the passed trigger.\n     * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n     */\n\n  }, {\n    key: \"listenClick\",\n    value: function listenClick(trigger) {\n      var _this2 = this;\n\n      this.listener = listen_default()(trigger, 'click', function (e) {\n        return _this2.onClick(e);\n      });\n    }\n    /**\n     * Defines a new `ClipboardAction` on each click event.\n     * @param {Event} e\n     */\n\n  }, {\n    key: \"onClick\",\n    value: function onClick(e) {\n      var trigger = e.delegateTarget || e.currentTarget;\n      var action = this.action(trigger) || 'copy';\n      var text = actions_default({\n        action: action,\n        container: this.container,\n        target: this.target(trigger),\n        text: this.text(trigger)\n      }); // Fires an event based on the copy operation result.\n\n      this.emit(text ? 
'success' : 'error', {\n        action: action,\n        text: text,\n        trigger: trigger,\n        clearSelection: function clearSelection() {\n          if (trigger) {\n            trigger.focus();\n          }\n\n          window.getSelection().removeAllRanges();\n        }\n      });\n    }\n    /**\n     * Default `action` lookup function.\n     * @param {Element} trigger\n     */\n\n  }, {\n    key: \"defaultAction\",\n    value: function defaultAction(trigger) {\n      return getAttributeValue('action', trigger);\n    }\n    /**\n     * Default `target` lookup function.\n     * @param {Element} trigger\n     */\n\n  }, {\n    key: \"defaultTarget\",\n    value: function defaultTarget(trigger) {\n      var selector = getAttributeValue('target', trigger);\n\n      if (selector) {\n        return document.querySelector(selector);\n      }\n    }\n    /**\n     * Allow fire programmatically a copy action\n     * @param {String|HTMLElement} target\n     * @param {Object} options\n     * @returns Text copied.\n     */\n\n  }, {\n    key: \"defaultText\",\n\n    /**\n     * Default `text` lookup function.\n     * @param {Element} trigger\n     */\n    value: function defaultText(trigger) {\n      return getAttributeValue('text', trigger);\n    }\n    /**\n     * Destroy lifecycle.\n     */\n\n  }, {\n    key: \"destroy\",\n    value: function destroy() {\n      this.listener.destroy();\n    }\n  }], [{\n    key: \"copy\",\n    value: function copy(target) {\n      var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n        container: document.body\n      };\n      return actions_copy(target, options);\n    }\n    /**\n     * Allow fire programmatically a cut action\n     * @param {String|HTMLElement} target\n     * @returns Text cutted.\n     */\n\n  }, {\n    key: \"cut\",\n    value: function cut(target) {\n      return actions_cut(target);\n    }\n    /**\n     * Returns the support of the given action, or all actions if no action is\n     * given.\n     * @param {String} [action]\n     */\n\n  }, {\n    key: \"isSupported\",\n    value: function isSupported() {\n      var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n      var actions = typeof action === 'string' ? 
[action] : action;\n      var support = !!document.queryCommandSupported;\n      actions.forEach(function (action) {\n        support = support && !!document.queryCommandSupported(action);\n      });\n      return support;\n    }\n  }]);\n\n  return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n    var proto = Element.prototype;\n\n    proto.matches = proto.matchesSelector ||\n                    proto.mozMatchesSelector ||\n                    proto.msMatchesSelector ||\n                    proto.oMatchesSelector ||\n                    proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n    while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n        if (typeof element.matches === 'function' &&\n            element.matches(selector)) {\n          return element;\n        }\n        element = element.parentNode;\n    }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n    var listenerFn = listener.apply(this, arguments);\n\n    element.addEventListener(type, listenerFn, useCapture);\n\n    return {\n        destroy: function() {\n            element.removeEventListener(type, listenerFn, useCapture);\n        }\n    }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n    // Handle the regular Element usage\n    if (typeof elements.addEventListener === 'function') {\n        return _delegate.apply(null, arguments);\n    }\n\n    // Handle Element-less usage, it defaults to global delegation\n    if (typeof type === 'function') {\n        // Use `document` as the first parameter, then apply arguments\n        // This is a short way to .unshift `arguments` without running into deoptimizations\n        return _delegate.bind(null, document).apply(null, arguments);\n    }\n\n    // Handle Selector-based usage\n    if (typeof elements === 'string') {\n        elements = document.querySelectorAll(elements);\n    }\n\n    // Handle Array-like based usage\n    return Array.prototype.map.call(elements, function (element) {\n        return _delegate(element, selector, type, callback, useCapture);\n    });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n    return function(e) {\n        e.delegateTarget = closest(e.target, selector);\n\n        if (e.delegateTarget) {\n            callback.call(element, 
e);\n        }\n    }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n    return value !== undefined\n        && value instanceof HTMLElement\n        && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n    var type = Object.prototype.toString.call(value);\n\n    return value !== undefined\n        && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n        && ('length' in value)\n        && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n    return typeof value === 'string'\n        || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n    var type = Object.prototype.toString.call(value);\n\n    return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n    if (!target && !type && !callback) {\n        throw new Error('Missing required arguments');\n    }\n\n    if (!is.string(type)) {\n        throw new TypeError('Second argument must be a String');\n    }\n\n    if (!is.fn(callback)) {\n        throw new TypeError('Third argument must be a Function');\n    }\n\n    if (is.node(target)) {\n        return listenNode(target, type, callback);\n    }\n    else if (is.nodeList(target)) {\n        return listenNodeList(target, type, callback);\n    }\n    else if (is.string(target)) {\n        return listenSelector(target, type, callback);\n    }\n    else {\n        throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n    }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n    node.addEventListener(type, callback);\n\n    return {\n        destroy: function() {\n            node.removeEventListener(type, callback);\n        }\n    }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n    Array.prototype.forEach.call(nodeList, function(node) {\n        node.addEventListener(type, callback);\n    });\n\n    return {\n        destroy: function() {\n            Array.prototype.forEach.call(nodeList, function(node) {\n                node.removeEventListener(type, callback);\n            });\n        }\n    }\n}\n\n/**\n * Add an event listener to a selector\n * 
and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n    return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n    var selectedText;\n\n    if (element.nodeName === 'SELECT') {\n        element.focus();\n\n        selectedText = element.value;\n    }\n    else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n        var isReadOnly = element.hasAttribute('readonly');\n\n        if (!isReadOnly) {\n            element.setAttribute('readonly', '');\n        }\n\n        element.select();\n        element.setSelectionRange(0, element.value.length);\n\n        if (!isReadOnly) {\n            element.removeAttribute('readonly');\n        }\n\n        selectedText = element.value;\n    }\n    else {\n        if (element.hasAttribute('contenteditable')) {\n            element.focus();\n        }\n\n        var selection = window.getSelection();\n        var range = document.createRange();\n\n        range.selectNodeContents(element);\n        selection.removeAllRanges();\n        selection.addRange(range);\n\n        selectedText = selection.toString();\n    }\n\n    return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n  // Keep this empty so it's easier to inherit from\n  // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n  on: function (name, callback, ctx) {\n    var e = this.e || (this.e = {});\n\n    (e[name] || (e[name] = [])).push({\n      fn: callback,\n      ctx: ctx\n    });\n\n    return this;\n  },\n\n  once: function (name, callback, ctx) {\n    var self = this;\n    function listener () {\n      self.off(name, listener);\n      callback.apply(ctx, arguments);\n    };\n\n    listener._ = callback\n    return this.on(name, listener, ctx);\n  },\n\n  emit: function (name) {\n    var data = [].slice.call(arguments, 1);\n    var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n    var i = 0;\n    var len = evtArr.length;\n\n    for (i; i < len; i++) {\n      evtArr[i].fn.apply(evtArr[i].ctx, data);\n    }\n\n    return this;\n  },\n\n  off: function (name, callback) {\n    var e = this.e || (this.e = {});\n    var evts = e[name];\n    var liveEvents = [];\n\n    if (evts && callback) {\n      for (var i = 0, len = evts.length; i < len; i++) {\n        if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n          liveEvents.push(evts[i]);\n      }\n    }\n\n    // Remove event from queue to prevent memory leak\n    // Suggested by https://github.com/lazd\n    // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n    (liveEvents.length)\n      ? 
e[name] = liveEvents\n      : delete e[name];\n\n    return this;\n  }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS 
PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n  EMPTY,\n  NEVER,\n  Observable,\n  Subject,\n  defer,\n  delay,\n  filter,\n  map,\n  merge,\n  mergeWith,\n  shareReplay,\n  switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n  at,\n  getActiveElement,\n  getOptionalElement,\n  requestJSON,\n  setLocation,\n  setToggle,\n  watchDocument,\n  watchKeyboard,\n  watchLocation,\n  watchLocationTarget,\n  watchMedia,\n  watchPrint,\n  watchScript,\n  watchViewport\n} from \"./browser\"\nimport {\n  getComponentElement,\n  getComponentElements,\n  mountAnnounce,\n  mountBackToTop,\n  mountConsent,\n  mountContent,\n  mountDialog,\n  mountHeader,\n  mountHeaderTitle,\n  mountPalette,\n  mountProgress,\n  mountSearch,\n  mountSearchHiglight,\n  mountSidebar,\n  mountSource,\n  mountTableOfContents,\n  mountTabs,\n  watchHeader,\n  watchMain\n} from \"./components\"\nimport {\n  SearchIndex,\n  setupClipboardJS,\n  setupInstantNavigation,\n  setupVersionSelector\n} from \"./integrations\"\nimport {\n  patchEllipsis,\n  patchIndeterminate,\n  patchScrollfix,\n  patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable<SearchIndex> {\n  if (location.protocol === \"file:\") {\n    return watchScript(\n      `${new URL(\"search/search_index.js\", config.base)}`\n    )\n      .pipe(\n        // @ts-ignore - @todo fix typings\n        map(() => __index),\n        shareReplay(1)\n      )\n  } else {\n    return requestJSON<SearchIndex>(\n      new URL(\"search/search_index.json\", config.base)\n    )\n  }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$   = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$   = watchMedia(\"(min-width: 960px)\")\nconst screen$   = watchMedia(\"(min-width: 1220px)\")\nconst print$    = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n  ? 
fetchSearchIndex()\n  : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject<string>()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject<number>()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n  setupInstantNavigation({ location$, viewport$, progress$ })\n    .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n  setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n  .pipe(\n    delay(125)\n  )\n    .subscribe(() => {\n      setToggle(\"drawer\", false)\n      setToggle(\"search\", false)\n    })\n\n/* Set up global keyboard handlers */\nkeyboard$\n  .pipe(\n    filter(({ mode }) => mode === \"global\")\n  )\n    .subscribe(key => {\n      switch (key.type) {\n\n        /* Go to previous page */\n        case \"p\":\n        case \",\":\n          const prev = getOptionalElement<HTMLLinkElement>(\"link[rel=prev]\")\n          if (typeof prev !== \"undefined\")\n            setLocation(prev)\n          break\n\n        /* Go to next page */\n        case \"n\":\n        case \".\":\n          const next = getOptionalElement<HTMLLinkElement>(\"link[rel=next]\")\n          if (typeof next !== \"undefined\")\n            setLocation(next)\n          break\n\n        /* Expand navigation, see https://bit.ly/3ZjG5io */\n        case \"Enter\":\n          const active = getActiveElement()\n          if (active instanceof HTMLLabelElement)\n            active.click()\n      }\n    })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n  .pipe(\n    map(() => getComponentElement(\"main\")),\n    switchMap(el => watchMain(el, { viewport$, header$ })),\n    shareReplay(1)\n  )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n  /* Consent */\n  ...getComponentElements(\"consent\")\n    .map(el => mountConsent(el, { target$ })),\n\n  /* Dialog */\n  ...getComponentElements(\"dialog\")\n    .map(el => mountDialog(el, { alert$ })),\n\n  /* Header */\n  ...getComponentElements(\"header\")\n    .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n  /* Color palette */\n  ...getComponentElements(\"palette\")\n    .map(el => mountPalette(el)),\n\n  /* Progress bar */\n  ...getComponentElements(\"progress\")\n    .map(el => mountProgress(el, { progress$ })),\n\n  /* Search */\n  ...getComponentElements(\"search\")\n    .map(el => mountSearch(el, { index$, keyboard$ })),\n\n  /* Repository information */\n  ...getComponentElements(\"source\")\n    .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n  /* Announcement bar */\n  ...getComponentElements(\"announce\")\n    .map(el => mountAnnounce(el)),\n\n  /* Content */\n  ...getComponentElements(\"content\")\n    .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n  /* Search highlighting */\n  ...getComponentElements(\"content\")\n    .map(el => feature(\"search.highlight\")\n      ? 
mountSearchHiglight(el, { index$, location$ })\n      : EMPTY\n    ),\n\n  /* Header title */\n  ...getComponentElements(\"header-title\")\n    .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n  /* Sidebar */\n  ...getComponentElements(\"sidebar\")\n    .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n      ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n      : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n    ),\n\n  /* Navigation tabs */\n  ...getComponentElements(\"tabs\")\n    .map(el => mountTabs(el, { viewport$, header$ })),\n\n  /* Table of contents */\n  ...getComponentElements(\"toc\")\n    .map(el => mountTableOfContents(el, {\n      viewport$, header$, main$, target$\n    })),\n\n  /* Back-to-top button */\n  ...getComponentElements(\"top\")\n    .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n  .pipe(\n    switchMap(() => content$),\n    mergeWith(control$),\n    shareReplay(1)\n  )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$  = document$          /* Document observable */\nwindow.location$  = location$          /* Location subject */\nwindow.target$    = target$            /* Location target observable */\nwindow.keyboard$  = keyboard$          /* Keyboard observable */\nwindow.viewport$  = viewport$          /* Viewport observable */\nwindow.tablet$    = tablet$            /* Media tablet observable */\nwindow.screen$    = screen$            /* Media screen observable */\nwindow.print$     = print$             /* Media print observable */\nwindow.alert$     = alert$             /* Alert subject */\nwindow.progress$  = progress$          /* Progress indicator subject */\nwindow.component$ = component$         /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n    extendStatics = Object.setPrototypeOf ||\r\n        ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n        function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n    return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n    if (typeof b !== \"function\" && b !== null)\r\n        throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n    extendStatics(d, b);\r\n    function __() { this.constructor = d; }\r\n    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n    __assign = Object.assign || function __assign(t) {\r\n        for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n            s = arguments[i];\r\n            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n        }\r\n        return t;\r\n    }\r\n    return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n    var t = {};\r\n    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n        t[p] = s[p];\r\n    if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n                t[p[i]] = s[p[i]];\r\n        }\r\n    return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n    if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n    return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n    return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n    if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n    return new (P || (P = Promise))(function (resolve, reject) {\r\n        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n        function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n        function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n        step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n    });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n    return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n    function verb(n) { return function (v) { return step([n, v]); }; }\r\n    function step(op) {\r\n        if (f) throw new TypeError(\"Generator is already executing.\");\r\n        while (_) try {\r\n            if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n            if (y = 0, t) op = [op[0] & 2, t.value];\r\n            switch (op[0]) {\r\n                case 0: case 1: t = op; break;\r\n                case 4: _.label++; return { value: op[1], done: false };\r\n                case 5: _.label++; y = op[1]; op = [0]; continue;\r\n                case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n                default:\r\n                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n                    if (t[2]) _.ops.pop();\r\n                    _.trys.pop(); continue;\r\n            }\r\n            op = body.call(thisArg, _);\r\n        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n    }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n    if (k2 === undefined) k2 = k;\r\n    Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n    if (k2 === undefined) k2 = k;\r\n    o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n    for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n    var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n    if (m) return m.call(o);\r\n    if (o && typeof o.length === \"number\") return {\r\n        next: function () {\r\n            if (o && i >= o.length) o = void 0;\r\n            return { value: o && o[i++], done: !o };\r\n        }\r\n    };\r\n    throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n    var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n    if (!m) return o;\r\n    var i = m.call(o), r, ar = [], e;\r\n    try {\r\n        while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n    }\r\n    catch (error) { e = { error: error }; }\r\n    finally {\r\n        try {\r\n            if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n        }\r\n        finally { if (e) throw e.error; }\r\n    }\r\n    return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n    for (var ar = [], i = 0; i < arguments.length; i++)\r\n        ar = ar.concat(__read(arguments[i]));\r\n    return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n    for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n    for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n        for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n            r[k] = a[j];\r\n    return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n    if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n        if (ar || !(i in from)) {\r\n            if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n            ar[i] = from[i];\r\n        }\r\n    }\r\n    return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n    return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n    if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n    var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n    return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n    function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n    function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n    function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n    function fulfill(value) { resume(\"next\", value); }\r\n    function reject(value) { resume(\"throw\", value); }\r\n    function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n    var i, p;\r\n    return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n    function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n    if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n    var m = o[Symbol.asyncIterator], i;\r\n    return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n    if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n    return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n    Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n    o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n    if (mod && mod.__esModule) return mod;\r\n    var result = {};\r\n    if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n    __setModuleDefault(result, mod);\r\n    return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n    return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n    if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n    if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n    return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n    if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n    if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n    if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n    return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n  return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass<T>(createImpl: (_super: any) => any): T {\n  const _super = (instance: any) => {\n    Error.call(instance);\n    instance.stack = new Error().stack;\n  };\n\n  const ctorFunc = createImpl(_super);\n  ctorFunc.prototype = Object.create(Error.prototype);\n  ctorFunc.prototype.constructor = ctorFunc;\n  return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n  readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n  /**\n   * @deprecated Internal implementation detail. Do not construct error instances.\n   * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n   */\n  new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n  (_super) =>\n    function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n      _super(this);\n      this.message = errors\n        ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n  ')}`\n        : '';\n      this.name = 'UnsubscriptionError';\n      this.errors = errors;\n    }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove<T>(arr: T[] | undefined | null, item: T) {\n  if (arr) {\n    const index = arr.indexOf(item);\n    0 <= index && arr.splice(index, 1);\n  }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n  /** @nocollapse */\n  public static EMPTY = (() => {\n    const empty = new Subscription();\n    empty.closed = true;\n    return empty;\n  })();\n\n  /**\n   * A flag to indicate whether this Subscription has already been unsubscribed.\n   */\n  public closed = false;\n\n  private _parentage: Subscription[] | Subscription | null = null;\n\n  /**\n   * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n   * list occurs in the {@link #add} and {@link #remove} methods.\n   */\n  private _finalizers: Exclude<TeardownLogic, void>[] | null = null;\n\n  /**\n   * @param initialTeardown A function executed first as part of the finalization\n   * process that is kicked off when {@link #unsubscribe} is called.\n   */\n  constructor(private initialTeardown?: () => void) {}\n\n  /**\n   * Disposes the resources held by the subscription. 
May, for instance, cancel\n   * an ongoing Observable execution or cancel any other type of work that\n   * started when the Subscription was created.\n   * @return {void}\n   */\n  unsubscribe(): void {\n    let errors: any[] | undefined;\n\n    if (!this.closed) {\n      this.closed = true;\n\n      // Remove this from it's parents.\n      const { _parentage } = this;\n      if (_parentage) {\n        this._parentage = null;\n        if (Array.isArray(_parentage)) {\n          for (const parent of _parentage) {\n            parent.remove(this);\n          }\n        } else {\n          _parentage.remove(this);\n        }\n      }\n\n      const { initialTeardown: initialFinalizer } = this;\n      if (isFunction(initialFinalizer)) {\n        try {\n          initialFinalizer();\n        } catch (e) {\n          errors = e instanceof UnsubscriptionError ? e.errors : [e];\n        }\n      }\n\n      const { _finalizers } = this;\n      if (_finalizers) {\n        this._finalizers = null;\n        for (const finalizer of _finalizers) {\n          try {\n            execFinalizer(finalizer);\n          } catch (err) {\n            errors = errors ?? [];\n            if (err instanceof UnsubscriptionError) {\n              errors = [...errors, ...err.errors];\n            } else {\n              errors.push(err);\n            }\n          }\n        }\n      }\n\n      if (errors) {\n        throw new UnsubscriptionError(errors);\n      }\n    }\n  }\n\n  /**\n   * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n   * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n   * because it has already been unsubscribed, then whatever finalizer is passed to it\n   * will automatically be executed (unless the finalizer itself is also a closed subscription).\n   *\n   * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n   * subscription to a any subscription will result in no operation. (A noop).\n   *\n   * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n   * operation at all. (A noop).\n   *\n   * `Subscription` instances that are added to this instance will automatically remove themselves\n   * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n   * will need to be removed manually with {@link #remove}\n   *\n   * @param teardown The finalization logic to add to this subscription.\n   */\n  add(teardown: TeardownLogic): void {\n    // Only add the finalizer if it's not undefined\n    // and don't add a subscription to itself.\n    if (teardown && teardown !== this) {\n      if (this.closed) {\n        // If this subscription is already closed,\n        // execute whatever finalizer is handed to it automatically.\n        execFinalizer(teardown);\n      } else {\n        if (teardown instanceof Subscription) {\n          // We don't add closed subscriptions, and we don't add the same subscription\n          // twice. Subscription unsubscribe is idempotent.\n          if (teardown.closed || teardown._hasParent(this)) {\n            return;\n          }\n          teardown._addParent(this);\n        }\n        (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n      }\n    }\n  }\n\n  /**\n   * Checks to see if a this subscription already has a particular parent.\n   * This will signal that this subscription has already been added to the parent in question.\n   * @param parent the parent to check for\n   */\n  private _hasParent(parent: Subscription) {\n    const { _parentage } = this;\n    return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n  }\n\n  /**\n   * Adds a parent to this subscription so it can be removed from the parent if it\n   * unsubscribes on it's own.\n   *\n   * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n   * @param parent The parent subscription to add\n   */\n  private _addParent(parent: Subscription) {\n    const { _parentage } = this;\n    this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n  }\n\n  /**\n   * Called on a child when it is removed via {@link #remove}.\n   * @param parent The parent to remove\n   */\n  private _removeParent(parent: Subscription) {\n    const { _parentage } = this;\n    if (_parentage === parent) {\n      this._parentage = null;\n    } else if (Array.isArray(_parentage)) {\n      arrRemove(_parentage, parent);\n    }\n  }\n\n  /**\n   * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n   *\n   * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n   * from every other `Subscription` they have been added to. This means that using the `remove` method\n   * is not a common thing and should be used thoughtfully.\n   *\n   * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n   * more than once, you will need to call `remove` the same number of times to remove all instances.\n   *\n   * All finalizer instances are removed to free up memory upon unsubscription.\n   *\n   * @param teardown The finalizer to remove from this subscription\n   */\n  remove(teardown: Exclude<TeardownLogic, void>): void {\n    const { _finalizers } = this;\n    _finalizers && arrRemove(_finalizers, teardown);\n\n    if (teardown instanceof Subscription) {\n      teardown._removeParent(this);\n    }\n  }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n  return (\n    value instanceof Subscription ||\n    (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n  );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n  if (isFunction(finalizer)) {\n    finalizer();\n  } else {\n    finalizer.unsubscribe();\n  }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n  onUnhandledError: null,\n  onStoppedNotification: null,\n  Promise: undefined,\n  useDeprecatedSynchronousErrorHandling: false,\n  useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n  /**\n   * A registration point for unhandled errors from RxJS. 
These are errors that\n   * cannot were not handled by consuming code in the usual subscription path. For\n   * example, if you have this configured, and you subscribe to an observable without\n   * providing an error handler, errors from that subscription will end up here. This\n   * will _always_ be called asynchronously on another job in the runtime. This is because\n   * we do not want errors thrown in this user-configured handler to interfere with the\n   * behavior of the library.\n   */\n  onUnhandledError: ((err: any) => void) | null;\n\n  /**\n   * A registration point for notifications that cannot be sent to subscribers because they\n   * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n   * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n   * might want a different behavior. For example, with sources that attempt to report errors\n   * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n   * This will _always_ be called asynchronously on another job in the runtime. This is because\n   * we do not want errors thrown in this user-configured handler to interfere with the\n   * behavior of the library.\n   */\n  onStoppedNotification: ((notification: ObservableNotification<any>, subscriber: Subscriber<any>) => void) | null;\n\n  /**\n   * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n   * methods.\n   *\n   * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n   * Promise constructor. If you need a Promise implementation other than native promises,\n   * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n   */\n  Promise?: PromiseConstructorLike;\n\n  /**\n   * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n   * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n   * call in a try/catch block. It also enables producer interference, a nasty bug\n   * where a multicast can be broken for all observers by a downstream consumer with\n   * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n   * FOR MIGRATION REASONS.\n   *\n   * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n   * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n   * behaviors described above. Will be removed in v8.\n   */\n  useDeprecatedSynchronousErrorHandling: boolean;\n\n  /**\n   * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n   * `unsubscribe()` via `this` context in `next` functions created in observers passed\n   * to `subscribe`.\n   *\n   * This is being removed because the performance was severely problematic, and it could also cause\n   * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n   * their `this` context overwritten.\n   *\n   * @deprecated As of version 8, RxJS will no longer support altering the\n   * context of next functions provided as part of an observer to Subscribe. Instead,\n   * you will have access to a subscription or a signal or token that will allow you to do things like\n   * unsubscribe and test closed status. 
Will be removed in v8.\n   */\n  useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n  setTimeout: SetTimeoutFunction;\n  clearTimeout: ClearTimeoutFunction;\n  delegate:\n    | {\n        setTimeout: SetTimeoutFunction;\n        clearTimeout: ClearTimeoutFunction;\n      }\n    | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n  // When accessing the delegate, use the variable rather than `this` so that\n  // the functions can be called without being bound to the provider.\n  setTimeout(handler: () => void, timeout?: number, ...args) {\n    const { delegate } = timeoutProvider;\n    if (delegate?.setTimeout) {\n      return delegate.setTimeout(handler, timeout, ...args);\n    }\n    return setTimeout(handler, timeout, ...args);\n  },\n  clearTimeout(handle) {\n    const { delegate } = timeoutProvider;\n    return (delegate?.clearTimeout || clearTimeout)(handle as any);\n  },\n  delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n  timeoutProvider.setTimeout(() => {\n    const { onUnhandledError } = config;\n    if (onUnhandledError) {\n      // Execute the user-configured error handler.\n      onUnhandledError(err);\n    } else {\n      // Throw so it is picked up by the runtime's uncaught error mechanism.\n      throw err;\n    }\n  });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n  return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification<T>(value: T) {\n  return createNotification('N', value, undefined) as NextNotification<T>;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n  return {\n    kind,\n    value,\n    error,\n  };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n  if (config.useDeprecatedSynchronousErrorHandling) {\n    const isRoot = !context;\n    if (isRoot) {\n      context = { errorThrown: false, error: null };\n    }\n    cb();\n    if (isRoot) {\n      const { errorThrown, error } = context!;\n      context = null;\n      if (errorThrown) {\n        throw error;\n      }\n    }\n  } else {\n    // This is the general non-deprecated path for everyone that\n    // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n    cb();\n  }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n  if (config.useDeprecatedSynchronousErrorHandling && context) {\n    context.errorThrown = true;\n    context.error = err;\n  }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber<T>\n */\nexport class Subscriber<T> extends Subscription implements Observer<T> {\n  /**\n   * A static factory for a Subscriber, given a (potentially partial) definition\n   * of an Observer.\n   * @param next The `next` callback of an Observer.\n   * @param error The `error` callback of an\n   * Observer.\n   * @param complete The `complete` callback of an\n   * Observer.\n   * @return A Subscriber wrapping the (partially defined)\n   * Observer represented by the given arguments.\n   * @nocollapse\n   * @deprecated Do not use. Will be removed in v8. 
There is no replacement for this\n   * method, and there is no reason to be creating instances of `Subscriber` directly.\n   * If you have a specific use case, please file an issue.\n   */\n  static create<T>(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber<T> {\n    return new SafeSubscriber(next, error, complete);\n  }\n\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  protected isStopped: boolean = false;\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  protected destination: Subscriber<any> | Observer<any>; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n   */\n  constructor(destination?: Subscriber<any> | Observer<any>) {\n    super();\n    if (destination) {\n      this.destination = destination;\n      // Automatically chain subscriptions together here.\n      // if destination is a Subscription, then it is a Subscriber.\n      if (isSubscription(destination)) {\n        destination.add(this);\n      }\n    } else {\n      this.destination = EMPTY_OBSERVER;\n    }\n  }\n\n  /**\n   * The {@link Observer} callback to receive notifications of type `next` from\n   * the Observable, with a value. The Observable may call this method 0 or more\n   * times.\n   * @param {T} [value] The `next` value.\n   * @return {void}\n   */\n  next(value?: T): void {\n    if (this.isStopped) {\n      handleStoppedNotification(nextNotification(value), this);\n    } else {\n      this._next(value!);\n    }\n  }\n\n  /**\n   * The {@link Observer} callback to receive notifications of type `error` from\n   * the Observable, with an attached `Error`. Notifies the Observer that\n   * the Observable has experienced an error condition.\n   * @param {any} [err] The `error` exception.\n   * @return {void}\n   */\n  error(err?: any): void {\n    if (this.isStopped) {\n      handleStoppedNotification(errorNotification(err), this);\n    } else {\n      this.isStopped = true;\n      this._error(err);\n    }\n  }\n\n  /**\n   * The {@link Observer} callback to receive a valueless notification of type\n   * `complete` from the Observable. Notifies the Observer that the Observable\n   * has finished sending push-based notifications.\n   * @return {void}\n   */\n  complete(): void {\n    if (this.isStopped) {\n      handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n    } else {\n      this.isStopped = true;\n      this._complete();\n    }\n  }\n\n  unsubscribe(): void {\n    if (!this.closed) {\n      this.isStopped = true;\n      super.unsubscribe();\n      this.destination = null!;\n    }\n  }\n\n  protected _next(value: T): void {\n    this.destination.next(value);\n  }\n\n  protected _error(err: any): void {\n    try {\n      this.destination.error(err);\n    } finally {\n      this.unsubscribe();\n    }\n  }\n\n  protected _complete(): void {\n    try {\n      this.destination.complete();\n    } finally {\n      this.unsubscribe();\n    }\n  }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind<Fn extends (...args: any[]) => any>(fn: Fn, thisArg: any): Fn {\n  return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver<T> implements Observer<T> {\n  constructor(private partialObserver: Partial<Observer<T>>) {}\n\n  next(value: T): void {\n    const { partialObserver } = this;\n    if (partialObserver.next) {\n      try {\n        partialObserver.next(value);\n      } catch (error) {\n        handleUnhandledError(error);\n      }\n    }\n  }\n\n  error(err: any): void {\n    const { partialObserver } = this;\n    if (partialObserver.error) {\n      try {\n        partialObserver.error(err);\n      } catch (error) {\n        handleUnhandledError(error);\n      }\n    } else {\n      handleUnhandledError(err);\n    }\n  }\n\n  complete(): void {\n    const { partialObserver } = this;\n    if (partialObserver.complete) {\n      try {\n        partialObserver.complete();\n      } catch (error) {\n        handleUnhandledError(error);\n      }\n    }\n  }\n}\n\nexport class SafeSubscriber<T> extends Subscriber<T> {\n  constructor(\n    observerOrNext?: Partial<Observer<T>> | ((value: T) => void) | null,\n    error?: ((e?: any) => void) | null,\n    complete?: (() => void) | null\n  ) {\n    super();\n\n    let partialObserver: Partial<Observer<T>>;\n    if (isFunction(observerOrNext) || !observerOrNext) {\n      // The first argument is a function, not an observer. The next\n      // two arguments *could* be observers, or they could be empty.\n      partialObserver = {\n        next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n        error: error ?? undefined,\n        complete: complete ?? undefined,\n      };\n    } else {\n      // The first argument is a partial observer.\n      let context: any;\n      if (this && config.useDeprecatedNextContext) {\n        // This is a deprecated path that made `this.unsubscribe()` available in\n        // next handler functions passed to subscribe. This only exists behind a flag\n        // now, as it is *very* slow.\n        context = Object.create(observerOrNext);\n        context.unsubscribe = () => this.unsubscribe();\n        partialObserver = {\n          next: observerOrNext.next && bind(observerOrNext.next, context),\n          error: observerOrNext.error && bind(observerOrNext.error, context),\n          complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n        };\n      } else {\n        // The \"normal\" path. 
Just use the partial observer directly.\n        partialObserver = observerOrNext;\n      }\n    }\n\n    // Wrap the partial observer to ensure it's a full observer, and\n    // make sure proper error handling is accounted for.\n    this.destination = new ConsumerObserver(partialObserver);\n  }\n}\n\nfunction handleUnhandledError(error: any) {\n  if (config.useDeprecatedSynchronousErrorHandling) {\n    captureError(error);\n  } else {\n    // Ideal path, we report this as an unhandled error,\n    // which is thrown on a new call stack.\n    reportUnhandledError(error);\n  }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n  throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification<any>, subscriber: Subscriber<any>) {\n  const { onStoppedNotification } = config;\n  onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly<Observer<any>> & { closed: true } = {\n  closed: true,\n  next: noop,\n  error: defaultErrorHandler,\n  complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `<T>(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n *   map(i => range(i)),\n *   mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n *   next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n *   next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity<T>(x: T): T {\n  return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe<T, A>(fn1: UnaryFunction<T, A>): UnaryFunction<T, A>;\nexport function pipe<T, A, B>(fn1: UnaryFunction<T, A>, fn2: UnaryFunction<A, B>): UnaryFunction<T, B>;\nexport function pipe<T, A, B, C>(fn1: UnaryFunction<T, A>, fn2: UnaryFunction<A, B>, fn3: UnaryFunction<B, C>): UnaryFunction<T, C>;\nexport function pipe<T, A, B, C, D>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>\n): UnaryFunction<T, D>;\nexport function pipe<T, A, B, C, D, E>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>\n): UnaryFunction<T, E>;\nexport function pipe<T, A, B, C, D, E, F>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>\n): UnaryFunction<T, F>;\nexport function pipe<T, A, B, C, D, E, F, G>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>\n): UnaryFunction<T, G>;\nexport function pipe<T, A, B, C, D, E, F, G, H>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>,\n  fn8: UnaryFunction<G, H>\n): UnaryFunction<T, H>;\nexport function pipe<T, A, B, C, D, E, F, G, H, I>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>,\n  fn8: UnaryFunction<G, H>,\n  fn9: UnaryFunction<H, I>\n): UnaryFunction<T, I>;\nexport function pipe<T, A, B, C, D, E, F, G, H, I>(\n  fn1: UnaryFunction<T, A>,\n  fn2: UnaryFunction<A, B>,\n  fn3: UnaryFunction<B, C>,\n  fn4: UnaryFunction<C, D>,\n  fn5: UnaryFunction<D, E>,\n  fn6: UnaryFunction<E, F>,\n  fn7: UnaryFunction<F, G>,\n  fn8: UnaryFunction<G, H>,\n  fn9: UnaryFunction<H, I>,\n  ...fns: UnaryFunction<any, any>[]\n): UnaryFunction<T, unknown>;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on.  
\n */\nexport function pipe(...fns: Array<UnaryFunction<any, any>>): UnaryFunction<any, any> {\n  return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray<T, R>(fns: Array<UnaryFunction<T, R>>): UnaryFunction<T, R> {\n  if (fns.length === 0) {\n    return identity as UnaryFunction<any, any>;\n  }\n\n  if (fns.length === 1) {\n    return fns[0];\n  }\n\n  return function piped(input: T): R {\n    return fns.reduce((prev: any, fn: UnaryFunction<T, R>) => fn(prev), input as any);\n  };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable<T>\n */\nexport class Observable<T> implements Subscribable<T> {\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   */\n  source: Observable<any> | undefined;\n\n  /**\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   */\n  operator: Operator<any, T> | undefined;\n\n  /**\n   * @constructor\n   * @param {Function} subscribe the function that is called when the Observable is\n   * initially subscribed to. This function is given a Subscriber, to which new values\n   * can be `next`ed, or an `error` method can be called to raise an error, or\n   * `complete` can be called to notify of a successful completion.\n   */\n  constructor(subscribe?: (this: Observable<T>, subscriber: Subscriber<T>) => TeardownLogic) {\n    if (subscribe) {\n      this._subscribe = subscribe;\n    }\n  }\n\n  // HACK: Since TypeScript inherits static properties too, we have to\n  // fight against TypeScript here so Subject can have a different static create signature\n  /**\n   * Creates a new Observable by calling the Observable constructor\n   * @owner Observable\n   * @method create\n   * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n   * @return {Observable} a new observable\n   * @nocollapse\n   * @deprecated Use `new Observable()` instead. Will be removed in v8.\n   */\n  static create: (...args: any[]) => any = <T>(subscribe?: (subscriber: Subscriber<T>) => TeardownLogic) => {\n    return new Observable<T>(subscribe);\n  };\n\n  /**\n   * Creates a new Observable, with this Observable instance as the source, and the passed\n   * operator defined as the new observable's operator.\n   * @method lift\n   * @param operator the operator defining the operation to take on the observable\n   * @return a new observable with the Operator applied\n   * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n   * If you have implemented an operator using `lift`, it is recommended that you create an\n   * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n   * scratch\" section here: https://rxjs.dev/guide/operators\n   */\n  lift<R>(operator?: Operator<T, R>): Observable<R> {\n    const observable = new Observable<R>();\n    observable.source = this;\n    observable.operator = operator;\n    return observable;\n  }\n\n  subscribe(observerOrNext?: Partial<Observer<T>> | ((value: T) => void)): Subscription;\n  /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n  subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n  /**\n   * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n   *\n   * <span class=\"informal\">Use it when you have all these Observables, but still nothing is happening.</span>\n   *\n   * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n   * might be for example a function that you passed to Observable's constructor, but most of the time it is\n   * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n   * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n   * the thought.\n   *\n   * Apart from starting the execution of an Observable, this method allows you to listen for values\n   * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n   * of the following ways.\n   *\n   * The first way is creating an object that implements {@link Observer} interface. It should have methods\n   * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n   * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n   * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n   * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n   * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n   * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n   * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n   * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n   * an `error` method to avoid missing thrown errors.\n   *\n   * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n   * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n   * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n   * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n   * since `subscribe` recognizes these functions by where they were placed in function call. 
When it comes\n   * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n   *\n   * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n   * and you also handled emissions internally by using operators (e.g. using `tap`).\n   *\n   * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n   * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n   * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n   * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n   *\n   * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n   * It is an Observable itself that decides when these functions will be called. For example {@link of}\n   * by default emits all its values synchronously. Always check documentation for how given Observable\n   * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n   *\n   * #### Examples\n   *\n   * Subscribe with an {@link guide/observer Observer}\n   *\n   * ```ts\n   * import { of } from 'rxjs';\n   *\n   * const sumObserver = {\n   *   sum: 0,\n   *   next(value) {\n   *     console.log('Adding: ' + value);\n   *     this.sum = this.sum + value;\n   *   },\n   *   error() {\n   *     // We actually could just remove this method,\n   *     // since we do not really care about errors right now.\n   *   },\n   *   complete() {\n   *     console.log('Sum equals: ' + this.sum);\n   *   }\n   * };\n   *\n   * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n   *   .subscribe(sumObserver);\n   *\n   * // Logs:\n   * // 'Adding: 1'\n   * // 'Adding: 2'\n   * // 'Adding: 3'\n   * // 'Sum equals: 6'\n   * ```\n   *\n   * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n   *\n   * ```ts\n   * import { of } from 'rxjs'\n   *\n   * let sum = 0;\n   *\n   * of(1, 2, 3).subscribe(\n   *   value => {\n   *     console.log('Adding: ' + value);\n   *     sum = sum + value;\n   *   },\n   *   undefined,\n   *   () => console.log('Sum equals: ' + sum)\n   * );\n   *\n   * // Logs:\n   * // 'Adding: 1'\n   * // 'Adding: 2'\n   * // 'Adding: 3'\n   * // 'Sum equals: 6'\n   * ```\n   *\n   * Cancel a subscription\n   *\n   * ```ts\n   * import { interval } from 'rxjs';\n   *\n   * const subscription = interval(1000).subscribe({\n   *   next(num) {\n   *     console.log(num)\n   *   },\n   *   complete() {\n   *     // Will not be called, even when cancelling subscription.\n   *     console.log('completed!');\n   *   }\n   * });\n   *\n   * setTimeout(() => {\n   *   subscription.unsubscribe();\n   *   console.log('unsubscribed!');\n   * }, 2500);\n   *\n   * // Logs:\n   * // 0 after 1s\n   * // 1 after 2s\n   * // 'unsubscribed!' after 2.5s\n   * ```\n   *\n   * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n   * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n   * Observable.\n   * @param {Function} error (optional) A handler for a terminal event resulting from an error. 
If no error handler is provided,\n   * the error will be thrown asynchronously as unhandled.\n   * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n   * @return {Subscription} a subscription reference to the registered handlers\n   * @method subscribe\n   */\n  subscribe(\n    observerOrNext?: Partial<Observer<T>> | ((value: T) => void) | null,\n    error?: ((error: any) => void) | null,\n    complete?: (() => void) | null\n  ): Subscription {\n    const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n    errorContext(() => {\n      const { operator, source } = this;\n      subscriber.add(\n        operator\n          ? // We're dealing with a subscription in the\n            // operator chain to one of our lifted operators.\n            operator.call(subscriber, source)\n          : source\n          ? // If `source` has a value, but `operator` does not, something that\n            // had intimate knowledge of our API, like our `Subject`, must have\n            // set it. We're going to just call `_subscribe` directly.\n            this._subscribe(subscriber)\n          : // In all other cases, we're likely wrapping a user-provided initializer\n            // function, so we need to catch errors and handle them appropriately.\n            this._trySubscribe(subscriber)\n      );\n    });\n\n    return subscriber;\n  }\n\n  /** @internal */\n  protected _trySubscribe(sink: Subscriber<T>): TeardownLogic {\n    try {\n      return this._subscribe(sink);\n    } catch (err) {\n      // We don't need to return anything in this case,\n      // because it's just going to try to `add()` to a subscription\n      // above.\n      sink.error(err);\n    }\n  }\n\n  /**\n   * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n   * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n   *\n   * **WARNING**: Only use this with observables you *know* will complete. If the source\n   * observable does not complete, you will end up with a promise that is hung up, and\n   * potentially all of the state of an async function hanging out in memory. 
To avoid\n   * this situation, look into adding something like {@link timeout}, {@link take},\n   * {@link takeWhile}, or {@link takeUntil} amongst others.\n   *\n   * #### Example\n   *\n   * ```ts\n   * import { interval, take } from 'rxjs';\n   *\n   * const source$ = interval(1000).pipe(take(4));\n   *\n   * async function getTotal() {\n   *   let total = 0;\n   *\n   *   await source$.forEach(value => {\n   *     total += value;\n   *     console.log('observable -> ' + value);\n   *   });\n   *\n   *   return total;\n   * }\n   *\n   * getTotal().then(\n   *   total => console.log('Total: ' + total)\n   * );\n   *\n   * // Expected:\n   * // 'observable -> 0'\n   * // 'observable -> 1'\n   * // 'observable -> 2'\n   * // 'observable -> 3'\n   * // 'Total: 6'\n   * ```\n   *\n   * @param next a handler for each value emitted by the observable\n   * @return a promise that either resolves on observable completion or\n   *  rejects with the handled error\n   */\n  forEach(next: (value: T) => void): Promise<void>;\n\n  /**\n   * @param next a handler for each value emitted by the observable\n   * @param promiseCtor a constructor function used to instantiate the Promise\n   * @return a promise that either resolves on observable completion or\n   *  rejects with the handled error\n   * @deprecated Passing a Promise constructor will no longer be available\n   * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n   * little benefit. If you need this functionality, it is recommended that you either\n   * polyfill Promise, or you create an adapter to convert the returned native promise\n   * to whatever promise implementation you wanted. Will be removed in v8.\n   */\n  forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise<void>;\n\n  forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise<void> {\n    promiseCtor = getPromiseCtor(promiseCtor);\n\n    return new promiseCtor<void>((resolve, reject) => {\n      const subscriber = new SafeSubscriber<T>({\n        next: (value) => {\n          try {\n            next(value);\n          } catch (err) {\n            reject(err);\n            subscriber.unsubscribe();\n          }\n        },\n        error: reject,\n        complete: resolve,\n      });\n      this.subscribe(subscriber);\n    }) as Promise<void>;\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<any>): TeardownLogic {\n    return this.source?.subscribe(subscriber);\n  }\n\n  /**\n   * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n   * @method Symbol.observable\n   * @return {Observable} this instance of the observable\n   */\n  [Symbol_observable]() {\n    return this;\n  }\n\n  /* tslint:disable:max-line-length */\n  pipe(): Observable<T>;\n  pipe<A>(op1: OperatorFunction<T, A>): Observable<A>;\n  pipe<A, B>(op1: OperatorFunction<T, A>, op2: OperatorFunction<A, B>): Observable<B>;\n  pipe<A, B, C>(op1: OperatorFunction<T, A>, op2: OperatorFunction<A, B>, op3: OperatorFunction<B, C>): Observable<C>;\n  pipe<A, B, C, D>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>\n  ): Observable<D>;\n  pipe<A, B, C, D, E>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>\n  ): Observable<E>;\n  pipe<A, B, C, D, E, F>(\n    op1: 
OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>\n  ): Observable<F>;\n  pipe<A, B, C, D, E, F, G>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>\n  ): Observable<G>;\n  pipe<A, B, C, D, E, F, G, H>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>,\n    op8: OperatorFunction<G, H>\n  ): Observable<H>;\n  pipe<A, B, C, D, E, F, G, H, I>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>,\n    op8: OperatorFunction<G, H>,\n    op9: OperatorFunction<H, I>\n  ): Observable<I>;\n  pipe<A, B, C, D, E, F, G, H, I>(\n    op1: OperatorFunction<T, A>,\n    op2: OperatorFunction<A, B>,\n    op3: OperatorFunction<B, C>,\n    op4: OperatorFunction<C, D>,\n    op5: OperatorFunction<D, E>,\n    op6: OperatorFunction<E, F>,\n    op7: OperatorFunction<F, G>,\n    op8: OperatorFunction<G, H>,\n    op9: OperatorFunction<H, I>,\n    ...operations: OperatorFunction<any, any>[]\n  ): Observable<unknown>;\n  /* tslint:enable:max-line-length */\n\n  /**\n   * Used to stitch together functional operators into a chain.\n   * @method pipe\n   * @return {Observable} the Observable result of all of the operators having\n   * been called in the order they were passed in.\n   *\n   * ## Example\n   *\n   * ```ts\n   * import { interval, filter, map, scan } from 'rxjs';\n   *\n   * interval(1000)\n   *   .pipe(\n   *     filter(x => x % 2 === 0),\n   *     map(x => x + x),\n   *     scan((acc, x) => acc + x)\n   *   )\n   *   .subscribe(x => console.log(x));\n   * ```\n   */\n  pipe(...operations: OperatorFunction<any, any>[]): Observable<any> {\n    return pipeFromArray(operations)(this);\n  }\n\n  /* tslint:disable:max-line-length */\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(): Promise<T | undefined>;\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(PromiseCtor: typeof Promise): Promise<T | undefined>;\n  /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n  toPromise(PromiseCtor: PromiseConstructorLike): Promise<T | undefined>;\n  /* tslint:enable:max-line-length */\n\n  /**\n   * Subscribe to this Observable and get a Promise resolving on\n   * `complete` with the last emission (if any).\n   *\n   * **WARNING**: Only use this with observables you *know* will complete. If the source\n   * observable does not complete, you will end up with a promise that is hung up, and\n   * potentially all of the state of an async function hanging out in memory. 
To avoid\n   * this situation, look into adding something like {@link timeout}, {@link take},\n   * {@link takeWhile}, or {@link takeUntil} amongst others.\n   *\n   * @method toPromise\n   * @param [promiseCtor] a constructor function used to instantiate\n   * the Promise\n   * @return A Promise that resolves with the last value emit, or\n   * rejects on an error. If there were no emissions, Promise\n   * resolves with undefined.\n   * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n   */\n  toPromise(promiseCtor?: PromiseConstructorLike): Promise<T | undefined> {\n    promiseCtor = getPromiseCtor(promiseCtor);\n\n    return new promiseCtor((resolve, reject) => {\n      let value: T | undefined;\n      this.subscribe(\n        (x: T) => (value = x),\n        (err: any) => reject(err),\n        () => resolve(value)\n      );\n    }) as Promise<T | undefined>;\n  }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n  return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver<T>(value: any): value is Observer<T> {\n  return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber<T>(value: any): value is Subscriber<T> {\n  return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType<typeof Observable>['lift'] } {\n  return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate<T, R>(\n  init: (liftedSource: Observable<T>, subscriber: Subscriber<R>) => (() => void) | void\n): OperatorFunction<T, R> {\n  return (source: Observable<T>) => {\n    if (hasLift(source)) {\n      return source.lift(function (this: Subscriber<R>, liftedSource: Observable<T>) {\n        try {\n          return init(liftedSource, this);\n        } catch (err) {\n          this.error(err);\n        }\n      });\n    }\n    throw new TypeError('Unable to lift unknown Observable type');\n  };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber<T>(\n  destination: Subscriber<any>,\n  onNext?: (value: T) => void,\n  onComplete?: () => void,\n  onError?: (err: any) => void,\n  onFinalize?: () => void\n): Subscriber<T> {\n  return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber<T> extends Subscriber<T> {\n  /**\n   * Creates an instance of an `OperatorSubscriber`.\n   * @param destination The downstream subscriber.\n   * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n   * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n   * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n   * and send to the `destination` error handler.\n   * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n   * this handler are sent to the `destination` error handler.\n   * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n   * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n   * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n   * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n   * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n   * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n   */\n  constructor(\n    destination: Subscriber<any>,\n    onNext?: (value: T) => void,\n    onComplete?: () => void,\n    onError?: (err: any) => void,\n    private onFinalize?: () => void,\n    private shouldUnsubscribe?: () => boolean\n  ) {\n    // It's important - for performance reasons - that all of this class's\n    // members are initialized and that they are always initialized in the same\n    // order. This will ensure that all OperatorSubscriber instances have the\n    // same hidden class in V8. This, in turn, will help keep the number of\n    // hidden classes involved in property accesses within the base class as\n    // low as possible. If the number of hidden classes involved exceeds four,\n    // the property accesses will become megamorphic and performance penalties\n    // will be incurred - i.e. inline caches won't be used.\n    //\n    // The reasons for ensuring all instances have the same hidden class are\n    // further discussed in this blog post from Benedikt Meurer:\n    // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n    super(destination);\n    this._next = onNext\n      ? function (this: OperatorSubscriber<T>, value: T) {\n          try {\n            onNext(value);\n          } catch (err) {\n            destination.error(err);\n          }\n        }\n      : super._next;\n    this._error = onError\n      ? 
function (this: OperatorSubscriber<T>, err: any) {\n          try {\n            onError(err);\n          } catch (err) {\n            // Send any errors that occur down stream.\n            destination.error(err);\n          } finally {\n            // Ensure finalization.\n            this.unsubscribe();\n          }\n        }\n      : super._error;\n    this._complete = onComplete\n      ? function (this: OperatorSubscriber<T>) {\n          try {\n            onComplete();\n          } catch (err) {\n            // Send any errors that occur down stream.\n            destination.error(err);\n          } finally {\n            // Ensure finalization.\n            this.unsubscribe();\n          }\n        }\n      : super._complete;\n  }\n\n  unsubscribe() {\n    if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n      const { closed } = this;\n      super.unsubscribe();\n      // Execute additional teardown if we have any and we didn't already do so.\n      !closed && this.onFinalize?.();\n    }\n  }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n  schedule(callback: FrameRequestCallback): Subscription;\n  requestAnimationFrame: typeof requestAnimationFrame;\n  cancelAnimationFrame: typeof cancelAnimationFrame;\n  delegate:\n    | {\n        requestAnimationFrame: typeof requestAnimationFrame;\n        cancelAnimationFrame: typeof cancelAnimationFrame;\n      }\n    | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n  // When accessing the delegate, use the variable rather than `this` so that\n  // the functions can be called without being bound to the provider.\n  schedule(callback) {\n    let request = requestAnimationFrame;\n    let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n    const { delegate } = animationFrameProvider;\n    if (delegate) {\n      request = delegate.requestAnimationFrame;\n      cancel = delegate.cancelAnimationFrame;\n    }\n    const handle = request((timestamp) => {\n      // Clear the cancel function. The request has been fulfilled, so\n      // attempting to cancel the request upon unsubscription would be\n      // pointless.\n      cancel = undefined;\n      callback(timestamp);\n    });\n    return new Subscription(() => cancel?.(handle));\n  },\n  requestAnimationFrame(...args) {\n    const { delegate } = animationFrameProvider;\n    return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n  },\n  cancelAnimationFrame(...args) {\n    const { delegate } = animationFrameProvider;\n    return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n  },\n  delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n  /**\n   * @deprecated Internal implementation detail. 
Do not construct error instances.\n   * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n   */\n  new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n  (_super) =>\n    function ObjectUnsubscribedErrorImpl(this: any) {\n      _super(this);\n      this.name = 'ObjectUnsubscribedError';\n      this.message = 'object unsubscribed';\n    }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject<T> extends Observable<T> implements SubscriptionLike {\n  closed = false;\n\n  private currentObservers: Observer<T>[] | null = null;\n\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  observers: Observer<T>[] = [];\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  isStopped = false;\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  hasError = false;\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n  thrownError: any = null;\n\n  /**\n   * Creates a \"subject\" by basically gluing an observer to an observable.\n   *\n   * @nocollapse\n   * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n   */\n  static create: (...args: any[]) => any = <T>(destination: Observer<T>, source: Observable<T>): AnonymousSubject<T> => {\n    return new AnonymousSubject<T>(destination, source);\n  };\n\n  constructor() {\n    // NOTE: This must be here to obscure Observable's constructor.\n    super();\n  }\n\n  /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n  lift<R>(operator: Operator<T, R>): Observable<R> {\n    const subject = new AnonymousSubject(this, this);\n    subject.operator = operator as any;\n    return subject as any;\n  }\n\n  /** @internal */\n  protected _throwIfClosed() {\n    if (this.closed) {\n      throw new ObjectUnsubscribedError();\n    }\n  }\n\n  next(value: T) {\n    errorContext(() => {\n      this._throwIfClosed();\n      if (!this.isStopped) {\n        if (!this.currentObservers) {\n          this.currentObservers = Array.from(this.observers);\n        }\n        for (const observer of this.currentObservers) {\n          observer.next(value);\n        }\n      }\n    });\n  }\n\n  error(err: any) {\n    errorContext(() => {\n      this._throwIfClosed();\n      if (!this.isStopped) {\n        this.hasError = this.isStopped = true;\n        this.thrownError = err;\n        const { observers } = this;\n        while (observers.length) {\n          observers.shift()!.error(err);\n        }\n      }\n    });\n  }\n\n  complete() {\n    errorContext(() => {\n      this._throwIfClosed();\n      if (!this.isStopped) {\n        this.isStopped = true;\n        const { observers } = this;\n        while (observers.length) {\n          observers.shift()!.complete();\n        }\n      }\n    });\n  }\n\n  unsubscribe() {\n    this.isStopped = this.closed = true;\n    this.observers = this.currentObservers = null!;\n  }\n\n  get observed() {\n    return this.observers?.length > 0;\n  }\n\n  /** @internal */\n  protected _trySubscribe(subscriber: Subscriber<T>): TeardownLogic {\n    this._throwIfClosed();\n    return super._trySubscribe(subscriber);\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    this._throwIfClosed();\n    this._checkFinalizedStatuses(subscriber);\n    return this._innerSubscribe(subscriber);\n  }\n\n  /** @internal */\n  protected _innerSubscribe(subscriber: Subscriber<any>) {\n    const { hasError, isStopped, observers } = this;\n    if (hasError || isStopped) {\n      return EMPTY_SUBSCRIPTION;\n    }\n    this.currentObservers = null;\n    observers.push(subscriber);\n    return new Subscription(() => {\n      this.currentObservers = null;\n      arrRemove(observers, subscriber);\n    });\n  }\n\n  /** @internal */\n  protected _checkFinalizedStatuses(subscriber: Subscriber<any>) {\n    const { hasError, thrownError, isStopped } = this;\n    if (hasError) {\n      subscriber.error(thrownError);\n    } else if (isStopped) {\n      subscriber.complete();\n    }\n  }\n\n  /**\n   * Creates a new Observable with this Subject as the source. You can do this\n   * to create custom Observer-side logic of the Subject and conceal it from\n   * code that uses the Observable.\n   * @return {Observable} Observable that the Subject casts to\n   */\n  asObservable(): Observable<T> {\n    const observable: any = new Observable<T>();\n    observable.source = this;\n    return observable;\n  }\n}\n\n/**\n * @class AnonymousSubject<T>\n */\nexport class AnonymousSubject<T> extends Subject<T> {\n  constructor(\n    /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n    public destination?: Observer<T>,\n    source?: Observable<T>\n  ) {\n    super();\n    this.source = source;\n  }\n\n  next(value: T) {\n    this.destination?.next?.(value);\n  }\n\n  error(err: any) {\n    this.destination?.error?.(err);\n  }\n\n  complete() {\n    this.destination?.complete?.();\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    return this.source?.subscribe(subscriber) ?? EMPTY_SUBSCRIPTION;\n  }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject<T>\n */\nexport class BehaviorSubject<T> extends Subject<T> {\n  constructor(private _value: T) {\n    super();\n  }\n\n  get value(): T {\n    return this.getValue();\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    const subscription = super._subscribe(subscriber);\n    !subscription.closed && subscriber.next(this._value);\n    return subscription;\n  }\n\n  getValue(): T {\n    const { hasError, thrownError, _value } = this;\n    if (hasError) {\n      throw thrownError;\n    }\n    this._throwIfClosed();\n    return _value;\n  }\n\n  next(value: T): void {\n    super.next((this._value = value));\n  }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n  delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n  now() {\n    // Use the variable rather than `this` so that the function can be called\n    // without being bound to the provider.\n    return (dateTimestampProvider.delegate || Date).now();\n  },\n  delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. 
So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject<T> extends Subject<T> {\n  private _buffer: (T | number)[] = [];\n  private _infiniteTimeWindow = true;\n\n  /**\n   * @param bufferSize The size of the buffer to replay on subscription\n   * @param windowTime The amount of time the buffered items will stay buffered\n   * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n   * calculate the amount of time something has been buffered.\n   */\n  constructor(\n    private _bufferSize = Infinity,\n    private _windowTime = Infinity,\n    private _timestampProvider: TimestampProvider = dateTimestampProvider\n  ) {\n    super();\n    this._infiniteTimeWindow = _windowTime === Infinity;\n    this._bufferSize = Math.max(1, _bufferSize);\n    this._windowTime = Math.max(1, _windowTime);\n  }\n\n  next(value: T): void {\n    const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n    if (!isStopped) {\n      _buffer.push(value);\n      !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n    }\n    this._trimBuffer();\n    super.next(value);\n  }\n\n  /** @internal */\n  protected _subscribe(subscriber: Subscriber<T>): Subscription {\n    this._throwIfClosed();\n    this._trimBuffer();\n\n    const subscription = this._innerSubscribe(subscriber);\n\n    const { _infiniteTimeWindow, _buffer } = this;\n    // We use a copy here, so reentrant code does not mutate our array while we're\n    // emitting it to a new subscriber.\n    const copy = _buffer.slice();\n    for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n      subscriber.next(copy[i] as T);\n    }\n\n    this._checkFinalizedStatuses(subscriber);\n\n    return subscription;\n  }\n\n  private _trimBuffer() {\n    const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n    // If we don't have an infinite buffer size, and we're over the length,\n    // use splice to truncate the old buffer values off. Note that we have to\n    // double the size for instances where we're not using an infinite time window\n    // because we're storing the values and the timestamps in the same array.\n    const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n    _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n    // Now, if we're not in an infinite time window, remove all values where the time is\n    // older than what is allowed.\n    if (!_infiniteTimeWindow) {\n      const now = _timestampProvider.now();\n      let last = 0;\n      // Search the array for the first timestamp that isn't expired and\n      // truncate the buffer up to that point.\n      for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n        last = i;\n      }\n      last && _buffer.splice(0, last + 1);\n    }\n  }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action<T> extends Subscription {\n *   new (scheduler: Scheduler, work: (state?: T) => void);\n *   schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action<T>\n */\nexport class Action<T> extends Subscription {\n  constructor(scheduler: Scheduler, work: (this: SchedulerAction<T>, state?: T) => void) {\n    super();\n  }\n  /**\n   * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n   * some context object, `state`. May happen at some point in the future,\n   * according to the `delay` parameter, if specified.\n   * @param {T} [state] Some contextual data that the `work` function uses when\n   * called by the Scheduler.\n   * @param {number} [delay] Time to wait before executing the work, where the\n   * time unit is implicit and defined by the Scheduler.\n   * @return {void}\n   */\n  public schedule(state?: T, delay: number = 0): Subscription {\n    return this;\n  }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n  setInterval: SetIntervalFunction;\n  clearInterval: ClearIntervalFunction;\n  delegate:\n    | {\n        setInterval: SetIntervalFunction;\n        clearInterval: ClearIntervalFunction;\n      }\n    | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n  // When accessing the delegate, use the variable rather than `this` so that\n  // the functions can be called without being bound to the provider.\n  setInterval(handler: () => void, timeout?: number, ...args) {\n    const { delegate } = intervalProvider;\n    if (delegate?.setInterval) {\n      return delegate.setInterval(handler, timeout, ...args);\n    }\n    return setInterval(handler, timeout, ...args);\n  },\n  clearInterval(handle) {\n    const { delegate } = intervalProvider;\n    return (delegate?.clearInterval || clearInterval)(handle as any);\n  },\n  delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction<T> extends Action<T> {\n  public id: TimerHandle 
| undefined;\n  public state?: T;\n  // @ts-ignore: Property has no initializer and is not definitely assigned\n  public delay: number;\n  protected pending: boolean = false;\n\n  constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  public schedule(state?: T, delay: number = 0): Subscription {\n    if (this.closed) {\n      return this;\n    }\n\n    // Always replace the current state with the new state.\n    this.state = state;\n\n    const id = this.id;\n    const scheduler = this.scheduler;\n\n    //\n    // Important implementation note:\n    //\n    // Actions only execute once by default, unless rescheduled from within the\n    // scheduled callback. This allows us to implement single and repeat\n    // actions via the same code path, without adding API surface area, as well\n    // as mimic traditional recursion but across asynchronous boundaries.\n    //\n    // However, JS runtimes and timers distinguish between intervals achieved by\n    // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n    // serial `setTimeout` calls can be individually delayed, which delays\n    // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n    // guarantee the interval callback will be invoked more precisely to the\n    // interval period, regardless of load.\n    //\n    // Therefore, we use `setInterval` to schedule single and repeat actions.\n    // If the action reschedules itself with the same delay, the interval is not\n    // canceled. If the action doesn't reschedule, or reschedules with a\n    // different delay, the interval will be canceled after scheduled callback\n    // execution.\n    //\n    if (id != null) {\n      this.id = this.recycleAsyncId(scheduler, id, delay);\n    }\n\n    // Set the pending flag indicating that this action has been scheduled, or\n    // has recursively rescheduled itself.\n    this.pending = true;\n\n    this.delay = delay;\n    // If this action has already an async Id, don't request a new one.\n    this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n    return this;\n  }\n\n  protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n    return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n  }\n\n  protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n    // If this action is rescheduled with the same delay time, don't clear the interval id.\n    if (delay != null && this.delay === delay && this.pending === false) {\n      return id;\n    }\n    // Otherwise, if the action's delay time is different from the current delay,\n    // or the action has been rescheduled before it's executed, clear the interval id\n    if (id != null) {\n      intervalProvider.clearInterval(id);\n    }\n\n    return undefined;\n  }\n\n  /**\n   * Immediately executes this action and the `work` it contains.\n   * @return {any}\n   */\n  public execute(state: T, delay: number): any {\n    if (this.closed) {\n      return new Error('executing a cancelled action');\n    }\n\n    this.pending = false;\n    const error = this._execute(state, delay);\n    if (error) {\n      return error;\n    } else if (this.pending === false && this.id != null) {\n      // Dequeue if the action didn't reschedule itself. 
Don't call\n      // unsubscribe(), because the action could reschedule later.\n      // For example:\n      // ```\n      // scheduler.schedule(function doWork(counter) {\n      //   /* ... I'm a busy worker bee ... */\n      //   var originalAction = this;\n      //   /* wait 100ms before rescheduling the action */\n      //   setTimeout(function () {\n      //     originalAction.schedule(counter + 1);\n      //   }, 100);\n      // }, 1000);\n      // ```\n      this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n    }\n  }\n\n  protected _execute(state: T, _delay: number): any {\n    let errored: boolean = false;\n    let errorValue: any;\n    try {\n      this.work(state);\n    } catch (e) {\n      errored = true;\n      // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n      // return here, we can't have it return \"\" or 0 or false.\n      // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n      errorValue = e ? e : new Error('Scheduled action threw falsy error');\n    }\n    if (errored) {\n      this.unsubscribe();\n      return errorValue;\n    }\n  }\n\n  unsubscribe() {\n    if (!this.closed) {\n      const { id, scheduler } = this;\n      const { actions } = scheduler;\n\n      this.work = this.state = this.scheduler = null!;\n      this.pending = false;\n\n      arrRemove(actions, this);\n      if (id != null) {\n        this.id = this.recycleAsyncId(scheduler, id, null);\n      }\n\n      this.delay = null!;\n      super.unsubscribe();\n    }\n  }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n *   now(): number;\n *   schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n  public static now: () => number = dateTimestampProvider.now;\n\n  constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n    this.now = now;\n  }\n\n  /**\n   * A getter method that returns a number representing the current time\n   * (at the time this function was called) according to the scheduler's own\n   * internal clock.\n   * @return {number} A number that represents the current time. May or may not\n   * have a relation to wall-clock time. May or may not refer to a time unit\n   * (e.g. milliseconds).\n   */\n  public now: () => number;\n\n  /**\n   * Schedules a function, `work`, for execution. May happen at some point in\n   * the future, according to the `delay` parameter, if specified. 
May be passed\n   * some context object, `state`, which will be passed to the `work` function.\n   *\n   * The given arguments will be processed an stored as an Action object in a\n   * queue of actions.\n   *\n   * @param {function(state: ?T): ?Subscription} work A function representing a\n   * task, or some unit of work to be executed by the Scheduler.\n   * @param {number} [delay] Time to wait before executing the work, where the\n   * time unit is implicit and defined by the Scheduler itself.\n   * @param {T} [state] Some contextual data that the `work` function uses when\n   * called by the Scheduler.\n   * @return {Subscription} A subscription in order to be able to unsubscribe\n   * the scheduled work.\n   */\n  public schedule<T>(work: (this: SchedulerAction<T>, state?: T) => void, delay: number = 0, state?: T): Subscription {\n    return new this.schedulerActionCtor<T>(this, work).schedule(state, delay);\n  }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n  public actions: Array<AsyncAction<any>> = [];\n  /**\n   * A flag to indicate whether the Scheduler is currently executing a batch of\n   * queued actions.\n   * @type {boolean}\n   * @internal\n   */\n  public _active: boolean = false;\n  /**\n   * An internal ID used to track the latest asynchronous task such as those\n   * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n   * others.\n   * @type {any}\n   * @internal\n   */\n  public _scheduled: TimerHandle | undefined;\n\n  constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n    super(SchedulerAction, now);\n  }\n\n  public flush(action: AsyncAction<any>): void {\n    const { actions } = this;\n\n    if (this._active) {\n      actions.push(action);\n      return;\n    }\n\n    let error: any;\n    this._active = true;\n\n    do {\n      if ((error = action.execute(action.state, action.delay))) {\n        break;\n      }\n    } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n    this._active = false;\n\n    if (error) {\n      while ((action = actions.shift()!)) {\n        action.unsubscribe();\n      }\n      throw error;\n    }\n  }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * <span class=\"informal\">Schedule task as if you used setTimeout(task, duration)</span>\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n *   console.log(state);\n *   this.schedule(state + 1, 1000); // `this` references currently executing Action,\n *                                   // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction<T> extends AsyncAction<T> {\n  constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  public schedule(state?: T, delay: number = 0): Subscription {\n    if (delay > 0) {\n      return super.schedule(state, delay);\n    }\n    this.delay = delay;\n    this.state = state;\n    this.scheduler.flush(this);\n    return this;\n  }\n\n  public execute(state: T, delay: number): any {\n    return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n  }\n\n  protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n    // If delay exists and is greater than 0, or if the delay is null (the\n    // action wasn't rescheduled) but was originally scheduled as an async\n    // action, then recycle as an async action.\n\n    if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n      return super.requestAsyncId(scheduler, id, delay);\n    }\n\n    // Otherwise flush the scheduler starting with this action.\n    scheduler.flush(this);\n\n    // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n    // `TimerHandle`, and generally the return value here isn't really used. 
So the\n    // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n    // as opposed to refactoring every other instanceo of `requestAsyncId`.\n    return 0;\n  }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * <span class=\"informal\">Put every next task on a queue, instead of executing it immediately</span>\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n *   queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n *   console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n *   if (state !== 0) {\n *     console.log('before', state);\n *     this.schedule(state - 1); // `this` references currently executing Action,\n *                               // which we reschedule with new state\n *     console.log('after', state);\n *   }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction<T> extends AsyncAction<T> {\n  constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n    super(scheduler, work);\n  }\n\n  protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n    // If delay is greater than 0, request as an async action.\n    if (delay !== null && delay > 0) {\n      return super.requestAsyncId(scheduler, id, delay);\n    }\n    // Push the action to the end of the scheduler queue.\n    scheduler.actions.push(this);\n    // If an animation frame has already been requested, don't request another\n    // one. If an animation frame hasn't been requested yet, request one. 
Return\n    // the current animation frame request id.\n    return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n  }\n\n  protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n    // If delay exists and is greater than 0, or if the delay is null (the\n    // action wasn't rescheduled) but was originally scheduled as an async\n    // action, then recycle as an async action.\n    if (delay != null ? delay > 0 : this.delay > 0) {\n      return super.recycleAsyncId(scheduler, id, delay);\n    }\n    // If the scheduler queue has no remaining actions with the same async id,\n    // cancel the requested animation frame and set the scheduled flag to\n    // undefined so the next AnimationFrameAction will request its own.\n    const { actions } = scheduler;\n    if (id != null && actions[actions.length - 1]?.id !== id) {\n      animationFrameProvider.cancelAnimationFrame(id as number);\n      scheduler._scheduled = undefined;\n    }\n    // Return undefined so the action knows to request a new async id if it's rescheduled.\n    return undefined;\n  }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n  public flush(action?: AsyncAction<any>): void {\n    this._active = true;\n    // The async id that effects a call to flush is stored in _scheduled.\n    // Before executing an action, it's necessary to check the action's async\n    // id to determine whether it's supposed to be executed in the current\n    // flush.\n    // Previous implementations of this method used a count to determine this,\n    // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n    // are removed from the actions array and that can shift actions that are\n    // scheduled to be executed in a subsequent flush into positions at which\n    // they are executed within the current flush.\n    const flushId = this._scheduled;\n    this._scheduled = undefined;\n\n    const { actions } = this;\n    let error: any;\n    action = action || actions.shift()!;\n\n    do {\n      if ((error = action.execute(action.state, action.delay))) {\n        break;\n      }\n    } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n    this._active = false;\n\n    if (error) {\n      while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n        action.unsubscribe();\n      }\n      throw error;\n    }\n  }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * <span class=\"informal\">Perform task when `window.requestAnimationFrame` would fire</span>\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html: <div style=\"background: #0ff;\"></div>\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n *   div.style.height = height + \"px\";\n *\n *   this.schedule(height + 1);  // `this` references currently executing Action,\n *                               // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * <span class=\"informal\">Just emits 'complete', and nothing else.</span>\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n *   next: () => console.log('Next'),\n *   complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n *   mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable<never>((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n  return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n  return new Observable<never>((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n  return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last<T>(arr: T[]): T | undefined {\n  return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n  return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n  return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n  return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = (<T>(x: any): x is ArrayLike<T> => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike<any> {\n  return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable<any> {\n  return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable<T>(obj: any): obj is AsyncIterable<T> {\n  return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n  // TODO: We should create error codes that can be looked up, so this can be less verbose.\n  return new TypeError(\n    `You provided ${\n      input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n    } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n  );\n}\n", "export function getSymbolIterator(): symbol {\n  if (typeof Symbol !== 'function' || !Symbol.iterator) {\n    return '@@iterator' as any;\n  }\n\n  return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable<any> {\n  return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator<T>(readableStream: ReadableStreamLike<T>): AsyncGenerator<T> {\n  const reader = readableStream.getReader();\n  try {\n    while (true) {\n      const { value, done } = await reader.read();\n      if (done) {\n        return;\n      }\n      yield value!;\n    }\n  } finally {\n    reader.releaseLock();\n  }\n}\n\nexport function isReadableStreamLike<T>(obj: any): obj is ReadableStreamLike<T> {\n  // We don't want to use instanceof checks because they would return\n  // false for instances from another Realm, like an <iframe>.\n  return isFunction(obj?.getReader);\n}\n", "import { isArrayLike } from '../util/isArrayLike';\nimport { isPromise } from '../util/isPromise';\nimport { Observable } from '../Observable';\nimport { ObservableInput, ObservedValueOf, ReadableStreamLike } from '../types';\nimport { isInteropObservable } from '../util/isInteropObservable';\nimport { isAsyncIterable } from '../util/isAsyncIterable';\nimport { createInvalidObservableTypeError } from '../util/throwUnobservableError';\nimport { isIterable } from '../util/isIterable';\nimport { isReadableStreamLike, readableStreamLikeToAsyncGenerator } from '../util/isReadableStreamLike';\nimport { Subscriber } from '../Subscriber';\nimport { isFunction } from '../util/isFunction';\nimport { reportUnhandledError } from '../util/reportUnhandledError';\nimport { observable as Symbol_observable } from '../symbol/observable';\n\nexport function innerFrom<O extends ObservableInput<any>>(input: O): Observable<ObservedValueOf<O>>;\nexport function innerFrom<T>(input: ObservableInput<T>): Observable<T> {\n  if (input instanceof Observable) {\n    return input;\n  }\n  if (input != null) {\n    if (isInteropObservable(input)) {\n      return fromInteropObservable(input);\n    }\n    if (isArrayLike(input)) {\n      return fromArrayLike(input);\n    }\n    if (isPromise(input)) {\n      return fromPromise(input);\n    }\n    if (isAsyncIterable(input)) {\n      return fromAsyncIterable(input);\n    }\n    if (isIterable(input)) {\n      return fromIterable(input);\n    }\n    if (isReadableStreamLike(input)) {\n      return fromReadableStreamLike(input);\n    }\n  }\n\n  throw createInvalidObservableTypeError(input);\n}\n\n/**\n * Creates an RxJS Observable from an object that implements `Symbol.observable`.\n * @param obj An object that properly implements `Symbol.observable`.\n */\nexport function fromInteropObservable<T>(obj: any) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    const obs = obj[Symbol_observable]();\n    if (isFunction(obs.subscribe)) {\n      return obs.subscribe(subscriber);\n    }\n    // Should be caught by observable subscribe function error handling.\n    throw new TypeError('Provided object does not correctly implement 
Symbol.observable');\n  });\n}\n\n/**\n * Synchronously emits the values of an array like and completes.\n * This is exported because there are creation functions and operators that need to\n * make direct use of the same logic, and there's no reason to make them run through\n * `from` conditionals because we *know* they're dealing with an array.\n * @param array The array to emit values from\n */\nexport function fromArrayLike<T>(array: ArrayLike<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    // Loop over the array and emit each value. Note two things here:\n    // 1. We're making sure that the subscriber is not closed on each loop.\n    //    This is so we don't continue looping over a very large array after\n    //    something like a `take`, `takeWhile`, or other synchronous unsubscription\n    //    has already unsubscribed.\n    // 2. In this form, reentrant code can alter that array we're looping over.\n    //    This is a known issue, but considered an edge case. The alternative would\n    //    be to copy the array before executing the loop, but this has\n    //    performance implications.\n    for (let i = 0; i < array.length && !subscriber.closed; i++) {\n      subscriber.next(array[i]);\n    }\n    subscriber.complete();\n  });\n}\n\nexport function fromPromise<T>(promise: PromiseLike<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    promise\n      .then(\n        (value) => {\n          if (!subscriber.closed) {\n            subscriber.next(value);\n            subscriber.complete();\n          }\n        },\n        (err: any) => subscriber.error(err)\n      )\n      .then(null, reportUnhandledError);\n  });\n}\n\nexport function fromIterable<T>(iterable: Iterable<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    for (const value of iterable) {\n      subscriber.next(value);\n      if (subscriber.closed) {\n        return;\n      }\n    }\n    subscriber.complete();\n  });\n}\n\nexport function fromAsyncIterable<T>(asyncIterable: AsyncIterable<T>) {\n  return new Observable((subscriber: Subscriber<T>) => {\n    process(asyncIterable, subscriber).catch((err) => subscriber.error(err));\n  });\n}\n\nexport function fromReadableStreamLike<T>(readableStream: ReadableStreamLike<T>) {\n  return fromAsyncIterable(readableStreamLikeToAsyncGenerator(readableStream));\n}\n\nasync function process<T>(asyncIterable: AsyncIterable<T>, subscriber: Subscriber<T>) {\n  for await (const value of asyncIterable) {\n    subscriber.next(value);\n    // A side-effect may have closed our subscriber,\n    // check before the next iteration.\n    if (subscriber.closed) {\n      return;\n    }\n  }\n  subscriber.complete();\n}\n", "import { Subscription } from '../Subscription';\nimport { SchedulerAction, SchedulerLike } from '../types';\n\nexport function executeSchedule(\n  parentSubscription: Subscription,\n  scheduler: SchedulerLike,\n  work: () => void,\n  delay: number,\n  repeat: true\n): void;\nexport function executeSchedule(\n  parentSubscription: Subscription,\n  scheduler: SchedulerLike,\n  work: () => void,\n  delay?: number,\n  repeat?: false\n): Subscription;\n\nexport function executeSchedule(\n  parentSubscription: Subscription,\n  scheduler: SchedulerLike,\n  work: () => void,\n  delay = 0,\n  repeat = false\n): Subscription | void {\n  const scheduleSubscription = scheduler.schedule(function (this: SchedulerAction<any>) {\n    work();\n    if (repeat) {\n      parentSubscription.add(this.schedule(null, delay));\n    } else 
{\n      this.unsubscribe();\n    }\n  }, delay);\n\n  parentSubscription.add(scheduleSubscription);\n\n  if (!repeat) {\n    // Because user-land scheduler implementations are unlikely to properly reuse\n    // Actions for repeat scheduling, we can't trust that the returned subscription\n    // will control repeat subscription scenarios. So we're trying to avoid using them\n    // incorrectly within this library.\n    return scheduleSubscription;\n  }\n}\n", "/** @prettier */\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { executeSchedule } from '../util/executeSchedule';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Re-emits all notifications from source Observable with specified scheduler.\n *\n * <span class=\"informal\">Ensure a specific scheduler is used, from outside of an Observable.</span>\n *\n * `observeOn` is an operator that accepts a scheduler as a first parameter, which will be used to reschedule\n * notifications emitted by the source Observable. It might be useful, if you do not have control over\n * internal scheduler of a given Observable, but want to control when its values are emitted nevertheless.\n *\n * Returned Observable emits the same notifications (nexted values, complete and error events) as the source Observable,\n * but rescheduled with provided scheduler. Note that this doesn't mean that source Observables internal\n * scheduler will be replaced in any way. Original scheduler still will be used, but when the source Observable emits\n * notification, it will be immediately scheduled again - this time with scheduler passed to `observeOn`.\n * An anti-pattern would be calling `observeOn` on Observable that emits lots of values synchronously, to split\n * that emissions into asynchronous chunks. For this to happen, scheduler would have to be passed into the source\n * Observable directly (usually into the operator that creates it). `observeOn` simply delays notifications a\n * little bit more, to ensure that they are emitted at expected moments.\n *\n * As a matter of fact, `observeOn` accepts second parameter, which specifies in milliseconds with what delay notifications\n * will be emitted. The main difference between {@link delay} operator and `observeOn` is that `observeOn`\n * will delay all notifications - including error notifications - while `delay` will pass through error\n * from source Observable immediately when it is emitted. 
In general it is highly recommended to use `delay` operator\n * for any kind of delaying of values in the stream, while using `observeOn` to specify which scheduler should be used\n * for notification emissions in general.\n *\n * ## Example\n *\n * Ensure values in subscribe are called just before browser repaint\n *\n * ```ts\n * import { interval, observeOn, animationFrameScheduler } from 'rxjs';\n *\n * const someDiv = document.createElement('div');\n * someDiv.style.cssText = 'width: 200px;background: #09c';\n * document.body.appendChild(someDiv);\n * const intervals = interval(10);      // Intervals are scheduled\n *                                      // with async scheduler by default...\n * intervals.pipe(\n *   observeOn(animationFrameScheduler) // ...but we will observe on animationFrame\n * )                                    // scheduler to ensure smooth animation.\n * .subscribe(val => {\n *   someDiv.style.height = val + 'px';\n * });\n * ```\n *\n * @see {@link delay}\n *\n * @param scheduler Scheduler that will be used to reschedule notifications from source Observable.\n * @param delay Number of milliseconds that states with what delay every notification should be rescheduled.\n * @return A function that returns an Observable that emits the same\n * notifications as the source Observable, but with provided scheduler.\n */\nexport function observeOn<T>(scheduler: SchedulerLike, delay = 0): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => executeSchedule(subscriber, scheduler, () => subscriber.next(value), delay),\n        () => executeSchedule(subscriber, scheduler, () => subscriber.complete(), delay),\n        (err) => executeSchedule(subscriber, scheduler, () => subscriber.error(err), delay)\n      )\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { operate } from '../util/lift';\n\n/**\n * Asynchronously subscribes Observers to this Observable on the specified {@link SchedulerLike}.\n *\n * With `subscribeOn` you can decide what type of scheduler a specific Observable will be using when it is subscribed to.\n *\n * Schedulers control the speed and order of emissions to observers from an Observable stream.\n *\n * ![](subscribeOn.png)\n *\n * ## Example\n *\n * Given the following code:\n *\n * ```ts\n * import { of, merge } from 'rxjs';\n *\n * const a = of(1, 2, 3);\n * const b = of(4, 5, 6);\n *\n * merge(a, b).subscribe(console.log);\n *\n * // Outputs\n * // 1\n * // 2\n * // 3\n * // 4\n * // 5\n * // 6\n * ```\n *\n * Both Observable `a` and `b` will emit their values directly and synchronously once they are subscribed to.\n *\n * If we instead use the `subscribeOn` operator declaring that we want to use the {@link asyncScheduler} for values emitted by Observable `a`:\n *\n * ```ts\n * import { of, subscribeOn, asyncScheduler, merge } from 'rxjs';\n *\n * const a = of(1, 2, 3).pipe(subscribeOn(asyncScheduler));\n * const b = of(4, 5, 6);\n *\n * merge(a, b).subscribe(console.log);\n *\n * // Outputs\n * // 4\n * // 5\n * // 6\n * // 1\n * // 2\n * // 3\n * ```\n *\n * The reason for this is that Observable `b` emits its values directly and synchronously like before\n * but the emissions from `a` are scheduled on the event loop because we are now using the {@link asyncScheduler} for that specific Observable.\n *\n * @param scheduler The {@link SchedulerLike} to perform subscription actions 
on.\n * @param delay A delay to pass to the scheduler to delay subscriptions\n * @return A function that returns an Observable modified so that its\n * subscriptions happen on the specified {@link SchedulerLike}.\n */\nexport function subscribeOn<T>(scheduler: SchedulerLike, delay: number = 0): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    subscriber.add(scheduler.schedule(() => source.subscribe(subscriber), delay));\n  });\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { observeOn } from '../operators/observeOn';\nimport { subscribeOn } from '../operators/subscribeOn';\nimport { InteropObservable, SchedulerLike } from '../types';\n\nexport function scheduleObservable<T>(input: InteropObservable<T>, scheduler: SchedulerLike) {\n  return innerFrom(input).pipe(subscribeOn(scheduler), observeOn(scheduler));\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { observeOn } from '../operators/observeOn';\nimport { subscribeOn } from '../operators/subscribeOn';\nimport { SchedulerLike } from '../types';\n\nexport function schedulePromise<T>(input: PromiseLike<T>, scheduler: SchedulerLike) {\n  return innerFrom(input).pipe(subscribeOn(scheduler), observeOn(scheduler));\n}\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\nexport function scheduleArray<T>(input: ArrayLike<T>, scheduler: SchedulerLike) {\n  return new Observable<T>((subscriber) => {\n    // The current array index.\n    let i = 0;\n    // Start iterating over the array like on a schedule.\n    return scheduler.schedule(function () {\n      if (i === input.length) {\n        // If we have hit the end of the array like in the\n        // previous job, we can complete.\n        subscriber.complete();\n      } else {\n        // Otherwise let's next the value at the current index,\n        // then increment our index.\n        subscriber.next(input[i++]);\n        // If the last emission didn't cause us to close the subscriber\n        // (via take or some side effect), reschedule the job and we'll\n        // make another pass.\n        if (!subscriber.closed) {\n          this.schedule();\n        }\n      }\n    });\n  });\n}\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\nimport { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from '../util/isFunction';\nimport { executeSchedule } from '../util/executeSchedule';\n\n/**\n * Used in {@link scheduled} to create an observable from an Iterable.\n * @param input The iterable to create an observable from\n * @param scheduler The scheduler to use\n */\nexport function scheduleIterable<T>(input: Iterable<T>, scheduler: SchedulerLike) {\n  return new Observable<T>((subscriber) => {\n    let iterator: Iterator<T, T>;\n\n    // Schedule the initial creation of the iterator from\n    // the iterable. 
This is so the code in the iterable is\n    // not called until the scheduled job fires.\n    executeSchedule(subscriber, scheduler, () => {\n      // Create the iterator.\n      iterator = (input as any)[Symbol_iterator]();\n\n      executeSchedule(\n        subscriber,\n        scheduler,\n        () => {\n          let value: T;\n          let done: boolean | undefined;\n          try {\n            // Pull the value out of the iterator\n            ({ value, done } = iterator.next());\n          } catch (err) {\n            // We got an error while pulling from the iterator\n            subscriber.error(err);\n            return;\n          }\n\n          if (done) {\n            // If it is \"done\" we just complete. This mimics the\n            // behavior of JavaScript's `for..of` consumption of\n            // iterables, which will not emit the value from an iterator\n            // result of `{ done: true: value: 'here' }`.\n            subscriber.complete();\n          } else {\n            // The iterable is not done, emit the value.\n            subscriber.next(value);\n          }\n        },\n        0,\n        true\n      );\n    });\n\n    // During finalization, if we see this iterator has a `return` method,\n    // then we know it is a Generator, and not just an Iterator. So we call\n    // the `return()` function. This will ensure that any `finally { }` blocks\n    // inside of the generator we can hit will be hit properly.\n    return () => isFunction(iterator?.return) && iterator.return();\n  });\n}\n", "import { SchedulerLike } from '../types';\nimport { Observable } from '../Observable';\nimport { executeSchedule } from '../util/executeSchedule';\n\nexport function scheduleAsyncIterable<T>(input: AsyncIterable<T>, scheduler: SchedulerLike) {\n  if (!input) {\n    throw new Error('Iterable cannot be null');\n  }\n  return new Observable<T>((subscriber) => {\n    executeSchedule(subscriber, scheduler, () => {\n      const iterator = input[Symbol.asyncIterator]();\n      executeSchedule(\n        subscriber,\n        scheduler,\n        () => {\n          iterator.next().then((result) => {\n            if (result.done) {\n              // This will remove the subscriptions from\n              // the parent subscription.\n              subscriber.complete();\n            } else {\n              subscriber.next(result.value);\n            }\n          });\n        },\n        0,\n        true\n      );\n    });\n  });\n}\n", "import { SchedulerLike, ReadableStreamLike } from '../types';\nimport { Observable } from '../Observable';\nimport { scheduleAsyncIterable } from './scheduleAsyncIterable';\nimport { readableStreamLikeToAsyncGenerator } from '../util/isReadableStreamLike';\n\nexport function scheduleReadableStreamLike<T>(input: ReadableStreamLike<T>, scheduler: SchedulerLike): Observable<T> {\n  return scheduleAsyncIterable(readableStreamLikeToAsyncGenerator(input), scheduler);\n}\n", "import { scheduleObservable } from './scheduleObservable';\nimport { schedulePromise } from './schedulePromise';\nimport { scheduleArray } from './scheduleArray';\nimport { scheduleIterable } from './scheduleIterable';\nimport { scheduleAsyncIterable } from './scheduleAsyncIterable';\nimport { isInteropObservable } from '../util/isInteropObservable';\nimport { isPromise } from '../util/isPromise';\nimport { isArrayLike } from '../util/isArrayLike';\nimport { isIterable } from '../util/isIterable';\nimport { ObservableInput, SchedulerLike } from '../types';\nimport { Observable } from 
'../Observable';\nimport { isAsyncIterable } from '../util/isAsyncIterable';\nimport { createInvalidObservableTypeError } from '../util/throwUnobservableError';\nimport { isReadableStreamLike } from '../util/isReadableStreamLike';\nimport { scheduleReadableStreamLike } from './scheduleReadableStreamLike';\n\n/**\n * Converts from a common {@link ObservableInput} type to an observable where subscription and emissions\n * are scheduled on the provided scheduler.\n *\n * @see {@link from}\n * @see {@link of}\n *\n * @param input The observable, array, promise, iterable, etc you would like to schedule\n * @param scheduler The scheduler to use to schedule the subscription and emissions from\n * the returned observable.\n */\nexport function scheduled<T>(input: ObservableInput<T>, scheduler: SchedulerLike): Observable<T> {\n  if (input != null) {\n    if (isInteropObservable(input)) {\n      return scheduleObservable(input, scheduler);\n    }\n    if (isArrayLike(input)) {\n      return scheduleArray(input, scheduler);\n    }\n    if (isPromise(input)) {\n      return schedulePromise(input, scheduler);\n    }\n    if (isAsyncIterable(input)) {\n      return scheduleAsyncIterable(input, scheduler);\n    }\n    if (isIterable(input)) {\n      return scheduleIterable(input, scheduler);\n    }\n    if (isReadableStreamLike(input)) {\n      return scheduleReadableStreamLike(input, scheduler);\n    }\n  }\n  throw createInvalidObservableTypeError(input);\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInput, SchedulerLike, ObservedValueOf } from '../types';\nimport { scheduled } from '../scheduled/scheduled';\nimport { innerFrom } from './innerFrom';\n\nexport function from<O extends ObservableInput<any>>(input: O): Observable<ObservedValueOf<O>>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function from<O extends ObservableInput<any>>(input: O, scheduler: SchedulerLike | undefined): Observable<ObservedValueOf<O>>;\n\n/**\n * Creates an Observable from an Array, an array-like object, a Promise, an iterable object, or an Observable-like object.\n *\n * <span class=\"informal\">Converts almost anything to an Observable.</span>\n *\n * ![](from.png)\n *\n * `from` converts various other objects and data types into Observables. It also converts a Promise, an array-like, or an\n * <a href=\"https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols#iterable\" target=\"_blank\">iterable</a>\n * object into an Observable that emits the items in that promise, array, or iterable. A String, in this context, is treated\n * as an array of characters. 
Observable-like objects (contains a function named with the ES2015 Symbol for Observable) can also be\n * converted through this operator.\n *\n * ## Examples\n *\n * Converts an array to an Observable\n *\n * ```ts\n * import { from } from 'rxjs';\n *\n * const array = [10, 20, 30];\n * const result = from(array);\n *\n * result.subscribe(x => console.log(x));\n *\n * // Logs:\n * // 10\n * // 20\n * // 30\n * ```\n *\n * Convert an infinite iterable (from a generator) to an Observable\n *\n * ```ts\n * import { from, take } from 'rxjs';\n *\n * function* generateDoubles(seed) {\n *    let i = seed;\n *    while (true) {\n *      yield i;\n *      i = 2 * i; // double it\n *    }\n * }\n *\n * const iterator = generateDoubles(3);\n * const result = from(iterator).pipe(take(10));\n *\n * result.subscribe(x => console.log(x));\n *\n * // Logs:\n * // 3\n * // 6\n * // 12\n * // 24\n * // 48\n * // 96\n * // 192\n * // 384\n * // 768\n * // 1536\n * ```\n *\n * With `asyncScheduler`\n *\n * ```ts\n * import { from, asyncScheduler } from 'rxjs';\n *\n * console.log('start');\n *\n * const array = [10, 20, 30];\n * const result = from(array, asyncScheduler);\n *\n * result.subscribe(x => console.log(x));\n *\n * console.log('end');\n *\n * // Logs:\n * // 'start'\n * // 'end'\n * // 10\n * // 20\n * // 30\n * ```\n *\n * @see {@link fromEvent}\n * @see {@link fromEventPattern}\n *\n * @param {ObservableInput<T>} A subscription object, a Promise, an Observable-like,\n * an Array, an iterable, or an array-like object to be converted.\n * @param {SchedulerLike} An optional {@link SchedulerLike} on which to schedule the emission of values.\n * @return {Observable<T>}\n */\nexport function from<T>(input: ObservableInput<T>, scheduler?: SchedulerLike): Observable<T> {\n  return scheduler ? scheduled(input, scheduler) : innerFrom(input);\n}\n", "import { SchedulerLike, ValueFromArray } from '../types';\nimport { Observable } from '../Observable';\nimport { popScheduler } from '../util/args';\nimport { from } from './from';\n\n// Devs are more likely to pass null or undefined than they are a scheduler\n// without accompanying values. To make things easier for (naughty) devs who\n// use the `strictNullChecks: false` TypeScript compiler option, these\n// overloads with explicit null and undefined values are included.\n\nexport function of(value: null): Observable<null>;\nexport function of(value: undefined): Observable<undefined>;\n\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function of(scheduler: SchedulerLike): Observable<never>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function of<A extends readonly unknown[]>(...valuesAndScheduler: [...A, SchedulerLike]): Observable<ValueFromArray<A>>;\n\nexport function of(): Observable<never>;\n/** @deprecated Do not specify explicit type parameters. Signatures with type parameters that cannot be inferred will be removed in v8. 
*/\nexport function of<T>(): Observable<T>;\nexport function of<T>(value: T): Observable<T>;\nexport function of<A extends readonly unknown[]>(...values: A): Observable<ValueFromArray<A>>;\n\n/**\n * Converts the arguments to an observable sequence.\n *\n * <span class=\"informal\">Each argument becomes a `next` notification.</span>\n *\n * ![](of.png)\n *\n * Unlike {@link from}, it does not do any flattening and emits each argument in whole\n * as a separate `next` notification.\n *\n * ## Examples\n *\n * Emit the values `10, 20, 30`\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * of(10, 20, 30)\n *   .subscribe({\n *     next: value => console.log('next:', value),\n *     error: err => console.log('error:', err),\n *     complete: () => console.log('the end'),\n *   });\n *\n * // Outputs\n * // next: 10\n * // next: 20\n * // next: 30\n * // the end\n * ```\n *\n * Emit the array `[1, 2, 3]`\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * of([1, 2, 3])\n *   .subscribe({\n *     next: value => console.log('next:', value),\n *     error: err => console.log('error:', err),\n *     complete: () => console.log('the end'),\n *   });\n *\n * // Outputs\n * // next: [1, 2, 3]\n * // the end\n * ```\n *\n * @see {@link from}\n * @see {@link range}\n *\n * @param {...T} values A comma separated list of arguments you want to be emitted\n * @return {Observable} An Observable that emits the arguments\n * described above and then completes.\n */\nexport function of<T>(...args: Array<T | SchedulerLike>): Observable<T> {\n  const scheduler = popScheduler(args);\n  return from(args as T[], scheduler);\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { SchedulerLike } from '../types';\nimport { isFunction } from '../util/isFunction';\n\n/**\n * Creates an observable that will create an error instance and push it to the consumer as an error\n * immediately upon subscription.\n *\n * <span class=\"informal\">Just errors and does nothing else</span>\n *\n * ![](throw.png)\n *\n * This creation function is useful for creating an observable that will create an error and error every\n * time it is subscribed to. Generally, inside of most operators when you might want to return an errored\n * observable, this is unnecessary. 
In most cases, such as in the inner return of {@link concatMap},\n * {@link mergeMap}, {@link defer}, and many others, you can simply throw the error, and RxJS will pick\n * that up and notify the consumer of the error.\n *\n * ## Example\n *\n * Create a simple observable that will create a new error with a timestamp and log it\n * and the message every time you subscribe to it\n *\n * ```ts\n * import { throwError } from 'rxjs';\n *\n * let errorCount = 0;\n *\n * const errorWithTimestamp$ = throwError(() => {\n *   const error: any = new Error(`This is error number ${ ++errorCount }`);\n *   error.timestamp = Date.now();\n *   return error;\n * });\n *\n * errorWithTimestamp$.subscribe({\n *   error: err => console.log(err.timestamp, err.message)\n * });\n *\n * errorWithTimestamp$.subscribe({\n *   error: err => console.log(err.timestamp, err.message)\n * });\n *\n * // Logs the timestamp and a new error message for each subscription\n * ```\n *\n * ### Unnecessary usage\n *\n * Using `throwError` inside of an operator or creation function\n * with a callback, is usually not necessary\n *\n * ```ts\n * import { of, concatMap, timer, throwError } from 'rxjs';\n *\n * const delays$ = of(1000, 2000, Infinity, 3000);\n *\n * delays$.pipe(\n *   concatMap(ms => {\n *     if (ms < 10000) {\n *       return timer(ms);\n *     } else {\n *       // This is probably overkill.\n *       return throwError(() => new Error(`Invalid time ${ ms }`));\n *     }\n *   })\n * )\n * .subscribe({\n *   next: console.log,\n *   error: console.error\n * });\n * ```\n *\n * You can just throw the error instead\n *\n * ```ts\n * import { of, concatMap, timer } from 'rxjs';\n *\n * const delays$ = of(1000, 2000, Infinity, 3000);\n *\n * delays$.pipe(\n *   concatMap(ms => {\n *     if (ms < 10000) {\n *       return timer(ms);\n *     } else {\n *       // Cleaner and easier to read for most folks.\n *       throw new Error(`Invalid time ${ ms }`);\n *     }\n *   })\n * )\n * .subscribe({\n *   next: console.log,\n *   error: console.error\n * });\n * ```\n *\n * @param errorFactory A factory function that will create the error instance that is pushed.\n */\nexport function throwError(errorFactory: () => any): Observable<never>;\n\n/**\n * Returns an observable that will error with the specified error immediately upon subscription.\n *\n * @param error The error instance to emit\n * @deprecated Support for passing an error value will be removed in v8. Instead, pass a factory function to `throwError(() => new Error('test'))`. This is\n * because it will create the error at the moment it should be created and capture a more appropriate stack trace. 
If\n * for some reason you need to create the error ahead of time, you can still do that: `const err = new Error('test'); throwError(() => err);`.\n */\nexport function throwError(error: any): Observable<never>;\n\n/**\n * Notifies the consumer of an error using a given scheduler by scheduling it at delay `0` upon subscription.\n *\n * @param errorOrErrorFactory An error instance or error factory\n * @param scheduler A scheduler to use to schedule the error notification\n * @deprecated The `scheduler` parameter will be removed in v8.\n * Use `throwError` in combination with {@link observeOn}: `throwError(() => new Error('test')).pipe(observeOn(scheduler));`.\n * Details: https://rxjs.dev/deprecations/scheduler-argument\n */\nexport function throwError(errorOrErrorFactory: any, scheduler: SchedulerLike): Observable<never>;\n\nexport function throwError(errorOrErrorFactory: any, scheduler?: SchedulerLike): Observable<never> {\n  const errorFactory = isFunction(errorOrErrorFactory) ? errorOrErrorFactory : () => errorOrErrorFactory;\n  const init = (subscriber: Subscriber<never>) => subscriber.error(errorFactory());\n  return new Observable(scheduler ? (subscriber) => scheduler.schedule(init as any, 0, subscriber) : init);\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface EmptyError extends Error {}\n\nexport interface EmptyErrorCtor {\n  /**\n   * @deprecated Internal implementation detail. Do not construct error instances.\n   * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n   */\n  new (): EmptyError;\n}\n\n/**\n * An error thrown when an Observable or a sequence was queried but has no\n * elements.\n *\n * @see {@link first}\n * @see {@link last}\n * @see {@link single}\n * @see {@link firstValueFrom}\n * @see {@link lastValueFrom}\n *\n * @class EmptyError\n */\nexport const EmptyError: EmptyErrorCtor = createErrorClass((_super) => function EmptyErrorImpl(this: any) {\n  _super(this);\n  this.name = 'EmptyError';\n  this.message = 'no elements in sequence';\n});\n", "/**\n * Checks to see if a value is not only a `Date` object,\n * but a *valid* `Date` object that can be converted to a\n * number. For example, `new Date('blah')` is indeed an\n * `instanceof Date`, however it cannot be converted to a\n * number.\n */\nexport function isValidDate(value: any): value is Date {\n  return value instanceof Date && !isNaN(value as any);\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\nexport function map<T, R>(project: (value: T, index: number) => R): OperatorFunction<T, R>;\n/** @deprecated Use a closure instead of a `thisArg`. Signatures accepting a `thisArg` will be removed in v8. 
*/\nexport function map<T, R, A>(project: (this: A, value: T, index: number) => R, thisArg: A): OperatorFunction<T, R>;\n\n/**\n * Applies a given `project` function to each value emitted by the source\n * Observable, and emits the resulting values as an Observable.\n *\n * <span class=\"informal\">Like [Array.prototype.map()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/map),\n * it passes each source value through a transformation function to get\n * corresponding output values.</span>\n *\n * ![](map.png)\n *\n * Similar to the well known `Array.prototype.map` function, this operator\n * applies a projection to each value and emits that projection in the output\n * Observable.\n *\n * ## Example\n *\n * Map every click to the `clientX` position of that click\n *\n * ```ts\n * import { fromEvent, map } from 'rxjs';\n *\n * const clicks = fromEvent<PointerEvent>(document, 'click');\n * const positions = clicks.pipe(map(ev => ev.clientX));\n *\n * positions.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link mapTo}\n * @see {@link pluck}\n *\n * @param {function(value: T, index: number): R} project The function to apply\n * to each `value` emitted by the source Observable. The `index` parameter is\n * the number `i` for the i-th emission that has happened since the\n * subscription, starting from the number `0`.\n * @param {any} [thisArg] An optional argument to define what `this` is in the\n * `project` function.\n * @return A function that returns an Observable that emits the values from the\n * source Observable transformed by the given `project` function.\n */\nexport function map<T, R>(project: (value: T, index: number) => R, thisArg?: any): OperatorFunction<T, R> {\n  return operate((source, subscriber) => {\n    // The index of the value from the source. Used with projection.\n    let index = 0;\n    // Subscribe to the source, all errors and completions are sent along\n    // to the consumer.\n    source.subscribe(\n      createOperatorSubscriber(subscriber, (value: T) => {\n        // Call the projection function with the appropriate this context,\n        // and send the resulting value to the consumer.\n        subscriber.next(project.call(thisArg, value, index++));\n      })\n    );\n  });\n}\n", "import { OperatorFunction } from \"../types\";\nimport { map } from \"../operators/map\";\n\nconst { isArray } = Array;\n\nfunction callOrApply<T, R>(fn: ((...values: T[]) => R), args: T|T[]): R {\n    return isArray(args) ? fn(...args) : fn(args);\n}\n\n/**\n * Used in several -- mostly deprecated -- situations where we need to \n * apply a list of arguments or a single argument to a result selector.\n */\nexport function mapOneOrManyArgs<T, R>(fn: ((...values: T[]) => R)): OperatorFunction<T|T[], R> {\n    return map(args => callOrApply(fn, args))\n}", "const { isArray } = Array;\nconst { getPrototypeOf, prototype: objectProto, keys: getKeys } = Object;\n\n/**\n * Used in functions where either a list of arguments, a single array of arguments, or a\n * dictionary of arguments can be returned. 
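For a rough sketch of the three input shapes this handles (results as implied by the implementation below):\n *\n * ```ts\n * argsArgArrayOrObject([1, 2, 3]);        // { args: [1, 2, 3], keys: null }\n * argsArgArrayOrObject([[1, 2, 3]]);      // { args: [1, 2, 3], keys: null }\n * argsArgArrayOrObject([{ a: 1, b: 2 }]); // { args: [1, 2], keys: ['a', 'b'] }\n * ```\n *\n * 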
Returns an object with an `args` property with\n * the arguments in an array, if it is a dictionary, it will also return the `keys` in another\n * property.\n */\nexport function argsArgArrayOrObject<T, O extends Record<string, T>>(args: T[] | [O] | [T[]]): { args: T[]; keys: string[] | null } {\n  if (args.length === 1) {\n    const first = args[0];\n    if (isArray(first)) {\n      return { args: first, keys: null };\n    }\n    if (isPOJO(first)) {\n      const keys = getKeys(first);\n      return {\n        args: keys.map((key) => first[key]),\n        keys,\n      };\n    }\n  }\n\n  return { args: args as T[], keys: null };\n}\n\nfunction isPOJO(obj: any): obj is object {\n  return obj && typeof obj === 'object' && getPrototypeOf(obj) === objectProto;\n}\n", "export function createObject(keys: string[], values: any[]) {\n  return keys.reduce((result, key, i) => ((result[key] = values[i]), result), {} as any);\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInput, SchedulerLike, ObservedValueOf, ObservableInputTuple } from '../types';\nimport { argsArgArrayOrObject } from '../util/argsArgArrayOrObject';\nimport { Subscriber } from '../Subscriber';\nimport { from } from './from';\nimport { identity } from '../util/identity';\nimport { Subscription } from '../Subscription';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\nimport { popResultSelector, popScheduler } from '../util/args';\nimport { createObject } from '../util/createObject';\nimport { createOperatorSubscriber } from '../operators/OperatorSubscriber';\nimport { AnyCatcher } from '../AnyCatcher';\nimport { executeSchedule } from '../util/executeSchedule';\n\n// combineLatest(any)\n// We put this first because we need to catch cases where the user has supplied\n// _exactly `any`_ as the argument. Since `any` literally matches _anything_,\n// we don't want it to randomly hit one of the other type signatures below,\n// as we have no idea at build-time what type we should be returning when given an any.\n\n/**\n * You have passed `any` here, we can't figure out if it is\n * an array or an object, so you're getting `unknown`. Use better types.\n * @param arg Something typed as `any`\n */\nexport function combineLatest<T extends AnyCatcher>(arg: T): Observable<unknown>;\n\n// combineLatest([a, b, c])\nexport function combineLatest(sources: []): Observable<never>;\nexport function combineLatest<A extends readonly unknown[]>(sources: readonly [...ObservableInputTuple<A>]): Observable<A>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[], R>(\n  sources: readonly [...ObservableInputTuple<A>],\n  resultSelector: (...values: A) => R,\n  scheduler: SchedulerLike\n): Observable<R>;\nexport function combineLatest<A extends readonly unknown[], R>(\n  sources: readonly [...ObservableInputTuple<A>],\n  resultSelector: (...values: A) => R\n): Observable<R>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[]>(\n  sources: readonly [...ObservableInputTuple<A>],\n  scheduler: SchedulerLike\n): Observable<A>;\n\n// combineLatest(a, b, c)\n/** @deprecated Pass an array of sources instead. The rest-parameters signature will be removed in v8. 
Details: https://rxjs.dev/deprecations/array-argument */\nexport function combineLatest<A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): Observable<A>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[], R>(\n  ...sourcesAndResultSelectorAndScheduler: [...ObservableInputTuple<A>, (...values: A) => R, SchedulerLike]\n): Observable<R>;\n/** @deprecated Pass an array of sources instead. The rest-parameters signature will be removed in v8. Details: https://rxjs.dev/deprecations/array-argument */\nexport function combineLatest<A extends readonly unknown[], R>(\n  ...sourcesAndResultSelector: [...ObservableInputTuple<A>, (...values: A) => R]\n): Observable<R>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `combineLatestAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function combineLatest<A extends readonly unknown[]>(\n  ...sourcesAndScheduler: [...ObservableInputTuple<A>, SchedulerLike]\n): Observable<A>;\n\n// combineLatest({a, b, c})\nexport function combineLatest(sourcesObject: { [K in any]: never }): Observable<never>;\nexport function combineLatest<T extends Record<string, ObservableInput<any>>>(\n  sourcesObject: T\n): Observable<{ [K in keyof T]: ObservedValueOf<T[K]> }>;\n\n/**\n * Combines multiple Observables to create an Observable whose values are\n * calculated from the latest values of each of its input Observables.\n *\n * <span class=\"informal\">Whenever any input Observable emits a value, it\n * computes a formula using the latest values from all the inputs, then emits\n * the output of that formula.</span>\n *\n * ![](combineLatest.png)\n *\n * `combineLatest` combines the values from all the Observables passed in the\n * observables array. This is done by subscribing to each Observable in order and,\n * whenever any Observable emits, collecting an array of the most recent\n * values from each Observable. So if you pass `n` Observables to this operator,\n * the returned Observable will always emit an array of `n` values, in an order\n * corresponding to the order of the passed Observables (the value from the first Observable\n * will be at index 0 of the array and so on).\n *\n * Static version of `combineLatest` accepts an array of Observables. Note that an array of\n * Observables is a good choice, if you don't know beforehand how many Observables\n * you will combine. Passing an empty array will result in an Observable that\n * completes immediately.\n *\n * To ensure the output array always has the same length, `combineLatest` will\n * actually wait for all input Observables to emit at least once,\n * before it starts emitting results. This means if some Observable emits\n * values before other Observables started emitting, all these values but the last\n * will be lost. On the other hand, if some Observable does not emit a value but\n * completes, resulting Observable will complete at the same moment without\n * emitting anything, since it will now be impossible to include a value from the\n * completed Observable in the resulting array. 
Also, if some input Observable does\n * not emit any value and never completes, `combineLatest` will also never emit\n * and never complete, since, again, it will wait for all streams to emit some\n * value.\n *\n * If at least one Observable was passed to `combineLatest` and all passed Observables\n * emitted something, the resulting Observable will complete when all combined\n * streams complete. So even if some Observable completes, the result of\n * `combineLatest` will still emit values when other Observables do. In case\n * of a completed Observable, its value from now on will always be the last\n * emitted value. On the other hand, if any Observable errors, `combineLatest`\n * will error immediately as well, and all other Observables will be unsubscribed.\n *\n * ## Examples\n *\n * Combine two timer Observables\n *\n * ```ts\n * import { timer, combineLatest } from 'rxjs';\n *\n * const firstTimer = timer(0, 1000); // emit 0, 1, 2... after every second, starting from now\n * const secondTimer = timer(500, 1000); // emit 0, 1, 2... after every second, starting 0,5s from now\n * const combinedTimers = combineLatest([firstTimer, secondTimer]);\n * combinedTimers.subscribe(value => console.log(value));\n * // Logs\n * // [0, 0] after 0.5s\n * // [1, 0] after 1s\n * // [1, 1] after 1.5s\n * // [2, 1] after 2s\n * ```\n *\n * Combine a dictionary of Observables\n *\n * ```ts\n * import { of, delay, startWith, combineLatest } from 'rxjs';\n *\n * const observables = {\n *   a: of(1).pipe(delay(1000), startWith(0)),\n *   b: of(5).pipe(delay(5000), startWith(0)),\n *   c: of(10).pipe(delay(10000), startWith(0))\n * };\n * const combined = combineLatest(observables);\n * combined.subscribe(value => console.log(value));\n * // Logs\n * // { a: 0, b: 0, c: 0 } immediately\n * // { a: 1, b: 0, c: 0 } after 1s\n * // { a: 1, b: 5, c: 0 } after 5s\n * // { a: 1, b: 5, c: 10 } after 10s\n * ```\n *\n * Combine an array of Observables\n *\n * ```ts\n * import { of, delay, startWith, combineLatest } from 'rxjs';\n *\n * const observables = [1, 5, 10].map(\n *   n => of(n).pipe(\n *     delay(n * 1000), // emit 0 and then emit n after n seconds\n *     startWith(0)\n *   )\n * );\n * const combined = combineLatest(observables);\n * combined.subscribe(value => console.log(value));\n * // Logs\n * // [0, 0, 0] immediately\n * // [1, 0, 0] after 1s\n * // [1, 5, 0] after 5s\n * // [1, 5, 10] after 10s\n * ```\n *\n * Use map operator to dynamically calculate the Body-Mass Index\n *\n * ```ts\n * import { of, combineLatest, map } from 'rxjs';\n *\n * const weight = of(70, 72, 76, 79, 75);\n * const height = of(1.76, 1.77, 1.78);\n * const bmi = combineLatest([weight, height]).pipe(\n *   map(([w, h]) => w / (h * h)),\n * );\n * bmi.subscribe(x => console.log('BMI is ' + x));\n *\n * // With output to console:\n * // BMI is 24.212293388429753\n * // BMI is 23.93948099205209\n * // BMI is 23.671253629592222\n * ```\n *\n * @see {@link combineLatestAll}\n * @see {@link merge}\n * @see {@link withLatestFrom}\n *\n * @param {ObservableInput} [observables] An array of input Observables to combine with each other.\n * An array of Observables must be given as the first argument.\n * @param {function} [project] An optional function to project the values from\n * the combined latest values into a new value on the output Observable.\n * @param {SchedulerLike} [scheduler=null] The {@link SchedulerLike} to use for subscribing to\n * each input Observable.\n * @return {Observable} An Observable of projected values 
from the most recent\n * values from each input Observable, or an array of the most recent values from\n * each input Observable.\n */\nexport function combineLatest<O extends ObservableInput<any>, R>(...args: any[]): Observable<R> | Observable<ObservedValueOf<O>[]> {\n  const scheduler = popScheduler(args);\n  const resultSelector = popResultSelector(args);\n\n  const { args: observables, keys } = argsArgArrayOrObject(args);\n\n  if (observables.length === 0) {\n    // If no observables are passed, or someone has passed an empty array\n    // of observables, or even an empty object POJO, we need to just\n    // complete (EMPTY), but we have to honor the scheduler provided if any.\n    return from([], scheduler as any);\n  }\n\n  const result = new Observable<ObservedValueOf<O>[]>(\n    combineLatestInit(\n      observables as ObservableInput<ObservedValueOf<O>>[],\n      scheduler,\n      keys\n        ? // A handler for scrubbing the array of args into a dictionary.\n          (values) => createObject(keys, values)\n        : // A passthrough to just return the array\n          identity\n    )\n  );\n\n  return resultSelector ? (result.pipe(mapOneOrManyArgs(resultSelector)) as Observable<R>) : result;\n}\n\nexport function combineLatestInit(\n  observables: ObservableInput<any>[],\n  scheduler?: SchedulerLike,\n  valueTransform: (values: any[]) => any = identity\n) {\n  return (subscriber: Subscriber<any>) => {\n    // The outer subscription. We're capturing this in a function\n    // because we may have to schedule it.\n    maybeSchedule(\n      scheduler,\n      () => {\n        const { length } = observables;\n        // A store for the values each observable has emitted so far. We match observable to value on index.\n        const values = new Array(length);\n        // The number of currently active subscriptions, as they complete, we decrement this number to see if\n        // we are all done combining values, so we can complete the result.\n        let active = length;\n        // The number of inner sources that still haven't emitted the first value\n        // We need to track this because all sources need to emit one value in order\n        // to start emitting values.\n        let remainingFirstValues = length;\n        // The loop to kick off subscription. 
We're keying everything on index `i` to relate the observables passed\n        // in to the slot in the output array or the key in the array of keys in the output dictionary.\n        for (let i = 0; i < length; i++) {\n          maybeSchedule(\n            scheduler,\n            () => {\n              const source = from(observables[i], scheduler as any);\n              let hasFirstValue = false;\n              source.subscribe(\n                createOperatorSubscriber(\n                  subscriber,\n                  (value) => {\n                    // When we get a value, record it in our set of values.\n                    values[i] = value;\n                    if (!hasFirstValue) {\n                      // If this is our first value, record that.\n                      hasFirstValue = true;\n                      remainingFirstValues--;\n                    }\n                    if (!remainingFirstValues) {\n                      // We're not waiting for any more\n                      // first values, so we can emit!\n                      subscriber.next(valueTransform(values.slice()));\n                    }\n                  },\n                  () => {\n                    if (!--active) {\n                      // We only complete the result if we have no more active\n                      // inner observables.\n                      subscriber.complete();\n                    }\n                  }\n                )\n              );\n            },\n            subscriber\n          );\n        }\n      },\n      subscriber\n    );\n  };\n}\n\n/**\n * A small utility to handle the couple of locations where we want to schedule if a scheduler was provided,\n * but we don't if there was no scheduler.\n */\nfunction maybeSchedule(scheduler: SchedulerLike | undefined, execute: () => void, subscription: Subscription) {\n  if (scheduler) {\n    executeSchedule(subscription, scheduler, execute);\n  } else {\n    execute();\n  }\n}\n", "import { Observable } from '../Observable';\nimport { innerFrom } from '../observable/innerFrom';\nimport { Subscriber } from '../Subscriber';\nimport { ObservableInput, SchedulerLike } from '../types';\nimport { executeSchedule } from '../util/executeSchedule';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * A process embodying the general \"merge\" strategy. 
This is used in\n * `mergeMap` and `mergeScan` because the logic is otherwise nearly identical.\n * @param source The original source observable\n * @param subscriber The consumer subscriber\n * @param project The projection function to get our inner sources\n * @param concurrent The number of concurrent inner subscriptions\n * @param onBeforeNext Additional logic to apply before nexting to our consumer\n * @param expand If `true` this will perform an \"expand\" strategy, which differs only\n * in that it recurses, and the inner subscription must be schedule-able.\n * @param innerSubScheduler A scheduler to use to schedule inner subscriptions,\n * this is to support the expand strategy, mostly, and should be deprecated\n */\nexport function mergeInternals<T, R>(\n  source: Observable<T>,\n  subscriber: Subscriber<R>,\n  project: (value: T, index: number) => ObservableInput<R>,\n  concurrent: number,\n  onBeforeNext?: (innerValue: R) => void,\n  expand?: boolean,\n  innerSubScheduler?: SchedulerLike,\n  additionalFinalizer?: () => void\n) {\n  // Buffered values, in the event of going over our concurrency limit\n  const buffer: T[] = [];\n  // The number of active inner subscriptions.\n  let active = 0;\n  // An index to pass to our accumulator function\n  let index = 0;\n  // Whether or not the outer source has completed.\n  let isComplete = false;\n\n  /**\n   * Checks to see if we can complete our result or not.\n   */\n  const checkComplete = () => {\n    // If the outer has completed, and nothing is left in the buffer,\n    // and we don't have any active inner subscriptions, then we can\n    // Emit the state and complete.\n    if (isComplete && !buffer.length && !active) {\n      subscriber.complete();\n    }\n  };\n\n  // If we're under our concurrency limit, just start the inner subscription, otherwise buffer and wait.\n  const outerNext = (value: T) => (active < concurrent ? doInnerSub(value) : buffer.push(value));\n\n  const doInnerSub = (value: T) => {\n    // If we're expanding, we need to emit the outer values and the inner values\n    // as the inners will \"become outers\" in a way as they are recursively fed\n    // back to the projection mechanism.\n    expand && subscriber.next(value as any);\n\n    // Increment the number of active subscriptions so we can track it\n    // against our concurrency limit later.\n    active++;\n\n    // A flag used to show that the inner observable completed.\n    // This is checked during finalization to see if we should\n    // move to the next item in the buffer, if there is on.\n    let innerComplete = false;\n\n    // Start our inner subscription.\n    innerFrom(project(value, index++)).subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (innerValue) => {\n          // `mergeScan` has additional handling here. For example\n          // taking the inner value and updating state.\n          onBeforeNext?.(innerValue);\n\n          if (expand) {\n            // If we're expanding, then just recurse back to our outer\n            // handler. 
It will emit the value first thing.\n            outerNext(innerValue as any);\n          } else {\n            // Otherwise, emit the inner value.\n            subscriber.next(innerValue);\n          }\n        },\n        () => {\n          // Flag that we have completed, so we know to check the buffer\n          // during finalization.\n          innerComplete = true;\n        },\n        // Errors are passed to the destination.\n        undefined,\n        () => {\n          // During finalization, if the inner completed (it wasn't errored or\n          // cancelled), then we want to try the next item in the buffer if\n          // there is one.\n          if (innerComplete) {\n            // We have to wrap this in a try/catch because it happens during\n            // finalization, possibly asynchronously, and we want to pass\n            // any errors that happen (like in a projection function) to\n            // the outer Subscriber.\n            try {\n              // INNER SOURCE COMPLETE\n              // Decrement the active count to ensure that the next time\n              // we try to call `doInnerSub`, the number is accurate.\n              active--;\n              // If we have more values in the buffer, try to process those\n              // Note that this call will increment `active` ahead of the\n              // next conditional, if there were any more inner subscriptions\n              // to start.\n              while (buffer.length && active < concurrent) {\n                const bufferedValue = buffer.shift()!;\n                // Particularly for `expand`, we need to check to see if a scheduler was provided\n                // for when we want to start our inner subscription. Otherwise, we just start\n                // are next inner subscription.\n                if (innerSubScheduler) {\n                  executeSchedule(subscriber, innerSubScheduler, () => doInnerSub(bufferedValue));\n                } else {\n                  doInnerSub(bufferedValue);\n                }\n              }\n              // Check to see if we can complete, and complete if so.\n              checkComplete();\n            } catch (err) {\n              subscriber.error(err);\n            }\n          }\n        }\n      )\n    );\n  };\n\n  // Subscribe to our source observable.\n  source.subscribe(\n    createOperatorSubscriber(subscriber, outerNext, () => {\n      // Outer completed, make a note of it, and check to see if we can complete everything.\n      isComplete = true;\n      checkComplete();\n    })\n  );\n\n  // Additional finalization (for when the destination is torn down).\n  // Other finalization is added implicitly via subscription above.\n  return () => {\n    additionalFinalizer?.();\n  };\n}\n", "import { ObservableInput, OperatorFunction, ObservedValueOf } from '../types';\nimport { map } from './map';\nimport { innerFrom } from '../observable/innerFrom';\nimport { operate } from '../util/lift';\nimport { mergeInternals } from './mergeInternals';\nimport { isFunction } from '../util/isFunction';\n\n/* tslint:disable:max-line-length */\nexport function mergeMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  concurrent?: number\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. 
Details: https://rxjs.dev/deprecations/resultSelector */\nexport function mergeMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: undefined,\n  concurrent?: number\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. Details: https://rxjs.dev/deprecations/resultSelector */\nexport function mergeMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: (outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R,\n  concurrent?: number\n): OperatorFunction<T, R>;\n/* tslint:enable:max-line-length */\n\n/**\n * Projects each source value to an Observable which is merged in the output\n * Observable.\n *\n * <span class=\"informal\">Maps each value to an Observable, then flattens all of\n * these inner Observables using {@link mergeAll}.</span>\n *\n * ![](mergeMap.png)\n *\n * Returns an Observable that emits items based on applying a function that you\n * supply to each item emitted by the source Observable, where that function\n * returns an Observable, and then merging those resulting Observables and\n * emitting the results of this merger.\n *\n * ## Example\n *\n * Map and flatten each letter to an Observable ticking every 1 second\n *\n * ```ts\n * import { of, mergeMap, interval, map } from 'rxjs';\n *\n * const letters = of('a', 'b', 'c');\n * const result = letters.pipe(\n *   mergeMap(x => interval(1000).pipe(map(i => x + i)))\n * );\n *\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // a0\n * // b0\n * // c0\n * // a1\n * // b1\n * // c1\n * // continues to list a, b, c every second with respective ascending integers\n * ```\n *\n * @see {@link concatMap}\n * @see {@link exhaustMap}\n * @see {@link merge}\n * @see {@link mergeAll}\n * @see {@link mergeMapTo}\n * @see {@link mergeScan}\n * @see {@link switchMap}\n *\n * @param {function(value: T, ?index: number): ObservableInput} project A function\n * that, when applied to an item emitted by the source Observable, returns an\n * Observable.\n * @param {number} [concurrent=Infinity] Maximum number of input\n * Observables being subscribed to concurrently.\n * @return A function that returns an Observable that emits the result of\n * applying the projection function (and the optional deprecated\n * `resultSelector`) to each item emitted by the source Observable and merging\n * the results of the Observables obtained from this transformation.\n */\nexport function mergeMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector?: ((outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R) | number,\n  concurrent: number = Infinity\n): OperatorFunction<T, ObservedValueOf<O> | R> {\n  if (isFunction(resultSelector)) {\n    // DEPRECATED PATH\n    return mergeMap((a, i) => map((b: any, ii: number) => resultSelector(a, b, i, ii))(innerFrom(project(a, i))), concurrent);\n  } else if (typeof resultSelector === 'number') {\n    concurrent = resultSelector;\n  }\n\n  return operate((source, subscriber) => mergeInternals(source, subscriber, project, concurrent));\n}\n", "import { mergeMap } from './mergeMap';\nimport { identity } from '../util/identity';\nimport { OperatorFunction, ObservableInput, ObservedValueOf } from '../types';\n\n/**\n * Converts a higher-order Observable into a first-order Observable 
which\n * concurrently delivers all values that are emitted on the inner Observables.\n *\n * <span class=\"informal\">Flattens an Observable-of-Observables.</span>\n *\n * ![](mergeAll.png)\n *\n * `mergeAll` subscribes to an Observable that emits Observables, also known as\n * a higher-order Observable. Each time it observes one of these emitted inner\n * Observables, it subscribes to that and delivers all the values from the\n * inner Observable on the output Observable. The output Observable only\n * completes once all inner Observables have completed. Any error delivered by\n * a inner Observable will be immediately emitted on the output Observable.\n *\n * ## Examples\n *\n * Spawn a new interval Observable for each click event, and blend their outputs as one Observable\n *\n * ```ts\n * import { fromEvent, map, interval, mergeAll } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const higherOrder = clicks.pipe(map(() => interval(1000)));\n * const firstOrder = higherOrder.pipe(mergeAll());\n *\n * firstOrder.subscribe(x => console.log(x));\n * ```\n *\n * Count from 0 to 9 every second for each click, but only allow 2 concurrent timers\n *\n * ```ts\n * import { fromEvent, map, interval, take, mergeAll } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const higherOrder = clicks.pipe(\n *   map(() => interval(1000).pipe(take(10)))\n * );\n * const firstOrder = higherOrder.pipe(mergeAll(2));\n *\n * firstOrder.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link combineLatestAll}\n * @see {@link concatAll}\n * @see {@link exhaustAll}\n * @see {@link merge}\n * @see {@link mergeMap}\n * @see {@link mergeMapTo}\n * @see {@link mergeScan}\n * @see {@link switchAll}\n * @see {@link switchMap}\n * @see {@link zipAll}\n *\n * @param {number} [concurrent=Infinity] Maximum number of inner\n * Observables being subscribed to concurrently.\n * @return A function that returns an Observable that emits values coming from\n * all the inner Observables emitted by the source Observable.\n */\nexport function mergeAll<O extends ObservableInput<any>>(concurrent: number = Infinity): OperatorFunction<O, ObservedValueOf<O>> {\n  return mergeMap(identity, concurrent);\n}\n", "import { mergeAll } from './mergeAll';\nimport { OperatorFunction, ObservableInput, ObservedValueOf } from '../types';\n\n/**\n * Converts a higher-order Observable into a first-order Observable by\n * concatenating the inner Observables in order.\n *\n * <span class=\"informal\">Flattens an Observable-of-Observables by putting one\n * inner Observable after the other.</span>\n *\n * ![](concatAll.svg)\n *\n * Joins every Observable emitted by the source (a higher-order Observable), in\n * a serial fashion. 
It subscribes to each inner Observable only after the\n * previous inner Observable has completed, and merges all of their values into\n * the returned observable.\n *\n * __Warning:__ If the source Observable emits Observables quickly and\n * endlessly, and the inner Observables it emits generally complete slower than\n * the source emits, you can run into memory issues as the incoming Observables\n * collect in an unbounded buffer.\n *\n * Note: `concatAll` is equivalent to `mergeAll` with concurrency parameter set\n * to `1`.\n *\n * ## Example\n *\n * For each click event, tick every second from 0 to 3, with no concurrency\n *\n * ```ts\n * import { fromEvent, map, interval, take, concatAll } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const higherOrder = clicks.pipe(\n *   map(() => interval(1000).pipe(take(4)))\n * );\n * const firstOrder = higherOrder.pipe(concatAll());\n * firstOrder.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // (results are not concurrent)\n * // For every click on the \"document\" it will emit values 0 to 3 spaced\n * // on a 1000ms interval\n * // one click = 1000ms-> 0 -1000ms-> 1 -1000ms-> 2 -1000ms-> 3\n * ```\n *\n * @see {@link combineLatestAll}\n * @see {@link concat}\n * @see {@link concatMap}\n * @see {@link concatMapTo}\n * @see {@link exhaustAll}\n * @see {@link mergeAll}\n * @see {@link switchAll}\n * @see {@link switchMap}\n * @see {@link zipAll}\n *\n * @return A function that returns an Observable emitting values from all the\n * inner Observables concatenated.\n */\nexport function concatAll<O extends ObservableInput<any>>(): OperatorFunction<O, ObservedValueOf<O>> {\n  return mergeAll(1);\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInputTuple, SchedulerLike } from '../types';\nimport { concatAll } from '../operators/concatAll';\nimport { popScheduler } from '../util/args';\nimport { from } from './from';\n\nexport function concat<T extends readonly unknown[]>(...inputs: [...ObservableInputTuple<T>]): Observable<T[number]>;\nexport function concat<T extends readonly unknown[]>(\n  ...inputsAndScheduler: [...ObservableInputTuple<T>, SchedulerLike]\n): Observable<T[number]>;\n\n/**\n * Creates an output Observable which sequentially emits all values from the first given\n * Observable and then moves on to the next.\n *\n * <span class=\"informal\">Concatenates multiple Observables together by\n * sequentially emitting their values, one Observable after the other.</span>\n *\n * ![](concat.png)\n *\n * `concat` joins multiple Observables together, by subscribing to them one at a time and\n * merging their results into the output Observable. You can pass either an array of\n * Observables, or put them directly as arguments. Passing an empty array will result\n * in Observable that completes immediately.\n *\n * `concat` will subscribe to first input Observable and emit all its values, without\n * changing or affecting them in any way. When that Observable completes, it will\n * subscribe to then next Observable passed and, again, emit its values. This will be\n * repeated, until the operator runs out of Observables. When last input Observable completes,\n * `concat` will complete as well. At any given moment only one Observable passed to operator\n * emits values. If you would like to emit values from passed Observables concurrently, check out\n * {@link merge} instead, especially with optional `concurrent` parameter. 
As a matter of fact,\n * `concat` is an equivalent of `merge` operator with `concurrent` parameter set to `1`.\n *\n * Note that if some input Observable never completes, `concat` will also never complete\n * and Observables following the one that did not complete will never be subscribed. On the other\n * hand, if some Observable simply completes immediately after it is subscribed, it will be\n * invisible for `concat`, which will just move on to the next Observable.\n *\n * If any Observable in chain errors, instead of passing control to the next Observable,\n * `concat` will error immediately as well. Observables that would be subscribed after\n * the one that emitted error, never will.\n *\n * If you pass to `concat` the same Observable many times, its stream of values\n * will be \"replayed\" on every subscription, which means you can repeat given Observable\n * as many times as you like. If passing the same Observable to `concat` 1000 times becomes tedious,\n * you can always use {@link repeat}.\n *\n * ## Examples\n *\n * Concatenate a timer counting from 0 to 3 with a synchronous sequence from 1 to 10\n *\n * ```ts\n * import { interval, take, range, concat } from 'rxjs';\n *\n * const timer = interval(1000).pipe(take(4));\n * const sequence = range(1, 10);\n * const result = concat(timer, sequence);\n * result.subscribe(x => console.log(x));\n *\n * // results in:\n * // 0 -1000ms-> 1 -1000ms-> 2 -1000ms-> 3 -immediate-> 1 ... 10\n * ```\n *\n * Concatenate 3 Observables\n *\n * ```ts\n * import { interval, take, concat } from 'rxjs';\n *\n * const timer1 = interval(1000).pipe(take(10));\n * const timer2 = interval(2000).pipe(take(6));\n * const timer3 = interval(500).pipe(take(10));\n *\n * const result = concat(timer1, timer2, timer3);\n * result.subscribe(x => console.log(x));\n *\n * // results in the following:\n * // (Prints to console sequentially)\n * // -1000ms-> 0 -1000ms-> 1 -1000ms-> ... 9\n * // -2000ms-> 0 -2000ms-> 1 -2000ms-> ... 5\n * // -500ms-> 0 -500ms-> 1 -500ms-> ... 9\n * ```\n *\n * Concatenate the same Observable to repeat it\n *\n * ```ts\n * import { interval, take, concat } from 'rxjs';\n *\n * const timer = interval(1000).pipe(take(2));\n *\n * concat(timer, timer) // concatenating the same Observable!\n *   .subscribe({\n *     next: value => console.log(value),\n *     complete: () => console.log('...and it is done!')\n *   });\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 0 after 3s\n * // 1 after 4s\n * // '...and it is done!' also after 4s\n * ```\n *\n * @see {@link concatAll}\n * @see {@link concatMap}\n * @see {@link concatMapTo}\n * @see {@link startWith}\n * @see {@link endWith}\n *\n * @param args Input Observables to concatenate.\n */\nexport function concat(...args: any[]): Observable<unknown> {\n  return concatAll()(from(args, popScheduler(args)));\n}\n", "import { Observable } from '../Observable';\nimport { ObservedValueOf, ObservableInput } from '../types';\nimport { innerFrom } from './innerFrom';\n\n/**\n * Creates an Observable that, on subscribe, calls an Observable factory to\n * make an Observable for each new Observer.\n *\n * <span class=\"informal\">Creates the Observable lazily, that is, only when it\n * is subscribed.\n * </span>\n *\n * ![](defer.png)\n *\n * `defer` allows you to create an Observable only when the Observer\n * subscribes. 
It waits until an Observer subscribes to it, calls the given\n * factory function to get an Observable -- where a factory function typically\n * generates a new Observable -- and subscribes the Observer to this Observable.\n * In case the factory function returns a falsy value, then EMPTY is used as\n * Observable instead. Last but not least, an exception during the factory\n * function call is transferred to the Observer by calling `error`.\n *\n * ## Example\n *\n * Subscribe to either an Observable of clicks or an Observable of interval, at random\n *\n * ```ts\n * import { defer, fromEvent, interval } from 'rxjs';\n *\n * const clicksOrInterval = defer(() => {\n *   return Math.random() > 0.5\n *     ? fromEvent(document, 'click')\n *     : interval(1000);\n * });\n * clicksOrInterval.subscribe(x => console.log(x));\n *\n * // Results in the following behavior:\n * // If the result of Math.random() is greater than 0.5 it will listen\n * // for clicks anywhere on the \"document\"; when document is clicked it\n * // will log a MouseEvent object to the console. If the result is less\n * // than 0.5 it will emit ascending numbers, one every second(1000ms).\n * ```\n *\n * @see {@link Observable}\n *\n * @param {function(): ObservableInput} observableFactory The Observable\n * factory function to invoke for each Observer that subscribes to the output\n * Observable. May also return a Promise, which will be converted on the fly\n * to an Observable.\n * @return {Observable} An Observable whose Observers' subscriptions trigger\n * an invocation of the given Observable factory function.\n */\nexport function defer<R extends ObservableInput<any>>(observableFactory: () => R): Observable<ObservedValueOf<R>> {\n  return new Observable<ObservedValueOf<R>>((subscriber) => {\n    innerFrom(observableFactory()).subscribe(subscriber);\n  });\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { Observable } from '../Observable';\nimport { mergeMap } from '../operators/mergeMap';\nimport { isArrayLike } from '../util/isArrayLike';\nimport { isFunction } from '../util/isFunction';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\n\n// These constants are used to create handler registry functions using array mapping below.\nconst nodeEventEmitterMethods = ['addListener', 'removeListener'] as const;\nconst eventTargetMethods = ['addEventListener', 'removeEventListener'] as const;\nconst jqueryMethods = ['on', 'off'] as const;\n\nexport interface NodeStyleEventEmitter {\n  addListener(eventName: string | symbol, handler: NodeEventHandler): this;\n  removeListener(eventName: string | symbol, handler: NodeEventHandler): this;\n}\n\nexport type NodeEventHandler = (...args: any[]) => void;\n\n// For APIs that implement `addListener` and `removeListener` methods that may\n// not use the same arguments or return EventEmitter values\n// such as React Native\nexport interface NodeCompatibleEventEmitter {\n  addListener(eventName: string, handler: NodeEventHandler): void | {};\n  removeListener(eventName: string, handler: NodeEventHandler): void | {};\n}\n\n// Use handler types like those in @types/jquery. 
See:\n// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/847731ba1d7fa6db6b911c0e43aa0afe596e7723/types/jquery/misc.d.ts#L6395\nexport interface JQueryStyleEventEmitter<TContext, T> {\n  on(eventName: string, handler: (this: TContext, t: T, ...args: any[]) => any): void;\n  off(eventName: string, handler: (this: TContext, t: T, ...args: any[]) => any): void;\n}\n\nexport interface EventListenerObject<E> {\n  handleEvent(evt: E): void;\n}\n\nexport interface HasEventTargetAddRemove<E> {\n  addEventListener(\n    type: string,\n    listener: ((evt: E) => void) | EventListenerObject<E> | null,\n    options?: boolean | AddEventListenerOptions\n  ): void;\n  removeEventListener(\n    type: string,\n    listener: ((evt: E) => void) | EventListenerObject<E> | null,\n    options?: EventListenerOptions | boolean\n  ): void;\n}\n\nexport interface EventListenerOptions {\n  capture?: boolean;\n  passive?: boolean;\n  once?: boolean;\n}\n\nexport interface AddEventListenerOptions extends EventListenerOptions {\n  once?: boolean;\n  passive?: boolean;\n}\n\nexport function fromEvent<T>(target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>, eventName: string): Observable<T>;\nexport function fromEvent<T, R>(\n  target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>,\n  eventName: string,\n  resultSelector: (event: T) => R\n): Observable<R>;\nexport function fromEvent<T>(\n  target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>,\n  eventName: string,\n  options: EventListenerOptions\n): Observable<T>;\nexport function fromEvent<T, R>(\n  target: HasEventTargetAddRemove<T> | ArrayLike<HasEventTargetAddRemove<T>>,\n  eventName: string,\n  options: EventListenerOptions,\n  resultSelector: (event: T) => R\n): Observable<R>;\n\nexport function fromEvent(target: NodeStyleEventEmitter | ArrayLike<NodeStyleEventEmitter>, eventName: string): Observable<unknown>;\n/** @deprecated Do not specify explicit type parameters. Signatures with type parameters that cannot be inferred will be removed in v8. */\nexport function fromEvent<T>(target: NodeStyleEventEmitter | ArrayLike<NodeStyleEventEmitter>, eventName: string): Observable<T>;\nexport function fromEvent<R>(\n  target: NodeStyleEventEmitter | ArrayLike<NodeStyleEventEmitter>,\n  eventName: string,\n  resultSelector: (...args: any[]) => R\n): Observable<R>;\n\nexport function fromEvent(\n  target: NodeCompatibleEventEmitter | ArrayLike<NodeCompatibleEventEmitter>,\n  eventName: string\n): Observable<unknown>;\n/** @deprecated Do not specify explicit type parameters. Signatures with type parameters that cannot be inferred will be removed in v8. 
*/\nexport function fromEvent<T>(target: NodeCompatibleEventEmitter | ArrayLike<NodeCompatibleEventEmitter>, eventName: string): Observable<T>;\nexport function fromEvent<R>(\n  target: NodeCompatibleEventEmitter | ArrayLike<NodeCompatibleEventEmitter>,\n  eventName: string,\n  resultSelector: (...args: any[]) => R\n): Observable<R>;\n\nexport function fromEvent<T>(\n  target: JQueryStyleEventEmitter<any, T> | ArrayLike<JQueryStyleEventEmitter<any, T>>,\n  eventName: string\n): Observable<T>;\nexport function fromEvent<T, R>(\n  target: JQueryStyleEventEmitter<any, T> | ArrayLike<JQueryStyleEventEmitter<any, T>>,\n  eventName: string,\n  resultSelector: (value: T, ...args: any[]) => R\n): Observable<R>;\n\n/**\n * Creates an Observable that emits events of a specific type coming from the\n * given event target.\n *\n * <span class=\"informal\">Creates an Observable from DOM events, or Node.js\n * EventEmitter events or others.</span>\n *\n * ![](fromEvent.png)\n *\n * `fromEvent` accepts as a first argument event target, which is an object with methods\n * for registering event handler functions. As a second argument it takes string that indicates\n * type of event we want to listen for. `fromEvent` supports selected types of event targets,\n * which are described in detail below. If your event target does not match any of the ones listed,\n * you should use {@link fromEventPattern}, which can be used on arbitrary APIs.\n * When it comes to APIs supported by `fromEvent`, their methods for adding and removing event\n * handler functions have different names, but they all accept a string describing event type\n * and function itself, which will be called whenever said event happens.\n *\n * Every time resulting Observable is subscribed, event handler function will be registered\n * to event target on given event type. When that event fires, value\n * passed as a first argument to registered function will be emitted by output Observable.\n * When Observable is unsubscribed, function will be unregistered from event target.\n *\n * Note that if event target calls registered function with more than one argument, second\n * and following arguments will not appear in resulting stream. In order to get access to them,\n * you can pass to `fromEvent` optional project function, which will be called with all arguments\n * passed to event handler. Output Observable will then emit value returned by project function,\n * instead of the usual value.\n *\n * Remember that event targets listed below are checked via duck typing. It means that\n * no matter what kind of object you have and no matter what environment you work in,\n * you can safely use `fromEvent` on that object if it exposes described methods (provided\n * of course they behave as was described above). 
So for example if Node.js library exposes\n * event target which has the same method names as DOM EventTarget, `fromEvent` is still\n * a good choice.\n *\n * If the API you use is more callback then event handler oriented (subscribed\n * callback function fires only once and thus there is no need to manually\n * unregister it), you should use {@link bindCallback} or {@link bindNodeCallback}\n * instead.\n *\n * `fromEvent` supports following types of event targets:\n *\n * **DOM EventTarget**\n *\n * This is an object with `addEventListener` and `removeEventListener` methods.\n *\n * In the browser, `addEventListener` accepts - apart from event type string and event\n * handler function arguments - optional third parameter, which is either an object or boolean,\n * both used for additional configuration how and when passed function will be called. When\n * `fromEvent` is used with event target of that type, you can provide this values\n * as third parameter as well.\n *\n * **Node.js EventEmitter**\n *\n * An object with `addListener` and `removeListener` methods.\n *\n * **JQuery-style event target**\n *\n * An object with `on` and `off` methods\n *\n * **DOM NodeList**\n *\n * List of DOM Nodes, returned for example by `document.querySelectorAll` or `Node.childNodes`.\n *\n * Although this collection is not event target in itself, `fromEvent` will iterate over all Nodes\n * it contains and install event handler function in every of them. When returned Observable\n * is unsubscribed, function will be removed from all Nodes.\n *\n * **DOM HtmlCollection**\n *\n * Just as in case of NodeList it is a collection of DOM nodes. Here as well event handler function is\n * installed and removed in each of elements.\n *\n *\n * ## Examples\n *\n * Emit clicks happening on the DOM document\n *\n * ```ts\n * import { fromEvent } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * clicks.subscribe(x => console.log(x));\n *\n * // Results in:\n * // MouseEvent object logged to console every time a click\n * // occurs on the document.\n * ```\n *\n * Use `addEventListener` with capture option\n *\n * ```ts\n * import { fromEvent } from 'rxjs';\n *\n * const div = document.createElement('div');\n * div.style.cssText = 'width: 200px; height: 200px; background: #09c;';\n * document.body.appendChild(div);\n *\n * // note optional configuration parameter which will be passed to addEventListener\n * const clicksInDocument = fromEvent(document, 'click', { capture: true });\n * const clicksInDiv = fromEvent(div, 'click');\n *\n * clicksInDocument.subscribe(() => console.log('document'));\n * clicksInDiv.subscribe(() => console.log('div'));\n *\n * // By default events bubble UP in DOM tree, so normally\n * // when we would click on div in document\n * // \"div\" would be logged first and then \"document\".\n * // Since we specified optional `capture` option, document\n * // will catch event when it goes DOWN DOM tree, so console\n * // will log \"document\" and then \"div\".\n * ```\n *\n * @see {@link bindCallback}\n * @see {@link bindNodeCallback}\n * @see {@link fromEventPattern}\n *\n * @param {FromEventTarget<T>} target The DOM EventTarget, Node.js\n * EventEmitter, JQuery-like event target, NodeList or HTMLCollection to attach the event handler to.\n * @param {string} eventName The event name of interest, being emitted by the\n * `target`.\n * @param {EventListenerOptions} [options] Options to pass through to addEventListener\n * @return {Observable<T>}\n */\nexport function 
fromEvent<T>(\n  target: any,\n  eventName: string,\n  options?: EventListenerOptions | ((...args: any[]) => T),\n  resultSelector?: (...args: any[]) => T\n): Observable<T> {\n  if (isFunction(options)) {\n    resultSelector = options;\n    options = undefined;\n  }\n  if (resultSelector) {\n    return fromEvent<T>(target, eventName, options as EventListenerOptions).pipe(mapOneOrManyArgs(resultSelector));\n  }\n\n  // Figure out our add and remove methods. In order to do this,\n  // we are going to analyze the target in a preferred order, if\n  // the target matches a given signature, we take the two \"add\" and \"remove\"\n  // method names and apply them to a map to create opposite versions of the\n  // same function. This is because they all operate in duplicate pairs,\n  // `addListener(name, handler)`, `removeListener(name, handler)`, for example.\n  // The call only differs by method name, as to whether or not you're adding or removing.\n  const [add, remove] =\n    // If it is an EventTarget, we need to use a slightly different method than the other two patterns.\n    isEventTarget(target)\n      ? eventTargetMethods.map((methodName) => (handler: any) => target[methodName](eventName, handler, options as EventListenerOptions))\n      : // In all other cases, the call pattern is identical with the exception of the method names.\n      isNodeStyleEventEmitter(target)\n      ? nodeEventEmitterMethods.map(toCommonHandlerRegistry(target, eventName))\n      : isJQueryStyleEventEmitter(target)\n      ? jqueryMethods.map(toCommonHandlerRegistry(target, eventName))\n      : [];\n\n  // If add is falsy, it's because we didn't match a pattern above.\n  // Check to see if it is an ArrayLike, because if it is, we want to\n  // try to apply fromEvent to all of it's items. We do this check last,\n  // because there are may be some types that are both ArrayLike *and* implement\n  // event registry points, and we'd rather delegate to that when possible.\n  if (!add) {\n    if (isArrayLike(target)) {\n      return mergeMap((subTarget: any) => fromEvent(subTarget, eventName, options as EventListenerOptions))(\n        innerFrom(target)\n      ) as Observable<T>;\n    }\n  }\n\n  // If add is falsy and we made it here, it's because we didn't\n  // match any valid target objects above.\n  if (!add) {\n    throw new TypeError('Invalid event target');\n  }\n\n  return new Observable<T>((subscriber) => {\n    // The handler we are going to register. Forwards the event object, by itself, or\n    // an array of arguments to the event handler, if there is more than one argument,\n    // to the consumer.\n    const handler = (...args: any[]) => subscriber.next(1 < args.length ? args : args[0]);\n    // Do the work of adding the handler to the target.\n    add(handler);\n    // When we finalize, we want to remove the handler and free up memory.\n    return () => remove!(handler);\n  });\n}\n\n/**\n * Used to create `add` and `remove` functions to register and unregister event handlers\n * from a target in the most common handler pattern, where there are only two arguments.\n * (e.g.  
`on(name, fn)`, `off(name, fn)`, `addListener(name, fn)`, or `removeListener(name, fn)`)\n * @param target The target we're calling methods on\n * @param eventName The event name for the event we're creating register or unregister functions for\n */\nfunction toCommonHandlerRegistry(target: any, eventName: string) {\n  return (methodName: string) => (handler: any) => target[methodName](eventName, handler);\n}\n\n/**\n * Checks to see if the target implements the required node-style EventEmitter methods\n * for adding and removing event handlers.\n * @param target the object to check\n */\nfunction isNodeStyleEventEmitter(target: any): target is NodeStyleEventEmitter {\n  return isFunction(target.addListener) && isFunction(target.removeListener);\n}\n\n/**\n * Checks to see if the target implements the required jQuery-style EventEmitter methods\n * for adding and removing event handlers.\n * @param target the object to check\n */\nfunction isJQueryStyleEventEmitter(target: any): target is JQueryStyleEventEmitter<any, any> {\n  return isFunction(target.on) && isFunction(target.off);\n}\n\n/**\n * Checks to see if the target implements the required EventTarget methods\n * for adding and removing event handlers.\n * @param target the object to check\n */\nfunction isEventTarget(target: any): target is HasEventTargetAddRemove<any> {\n  return isFunction(target.addEventListener) && isFunction(target.removeEventListener);\n}\n", "import { Observable } from '../Observable';\nimport { isFunction } from '../util/isFunction';\nimport { NodeEventHandler } from './fromEvent';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\n\n/* tslint:disable:max-line-length */\nexport function fromEventPattern<T>(\n  addHandler: (handler: NodeEventHandler) => any,\n  removeHandler?: (handler: NodeEventHandler, signal?: any) => void\n): Observable<T>;\nexport function fromEventPattern<T>(\n  addHandler: (handler: NodeEventHandler) => any,\n  removeHandler?: (handler: NodeEventHandler, signal?: any) => void,\n  resultSelector?: (...args: any[]) => T\n): Observable<T>;\n/* tslint:enable:max-line-length */\n\n/**\n * Creates an Observable from an arbitrary API for registering event handlers.\n *\n * <span class=\"informal\">When that method for adding event handler was something {@link fromEvent}\n * was not prepared for.</span>\n *\n * ![](fromEventPattern.png)\n *\n * `fromEventPattern` allows you to convert into an Observable any API that supports registering handler functions\n * for events. It is similar to {@link fromEvent}, but far\n * more flexible. In fact, all use cases of {@link fromEvent} could be easily handled by\n * `fromEventPattern` (although in slightly more verbose way).\n *\n * This operator accepts as a first argument an `addHandler` function, which will be injected with\n * handler parameter. That handler is actually an event handler function that you now can pass\n * to API expecting it. `addHandler` will be called whenever Observable\n * returned by the operator is subscribed, so registering handler in API will not\n * necessarily happen when `fromEventPattern` is called.\n *\n * After registration, every time an event that we listen to happens,\n * Observable returned by `fromEventPattern` will emit value that event handler\n * function was called with. 
Note that if event handler was called with more\n * than one argument, second and following arguments will not appear in the Observable.\n *\n * If API you are using allows to unregister event handlers as well, you can pass to `fromEventPattern`\n * another function - `removeHandler` - as a second parameter. It will be injected\n * with the same handler function as before, which now you can use to unregister\n * it from the API. `removeHandler` will be called when consumer of resulting Observable\n * unsubscribes from it.\n *\n * In some APIs unregistering is actually handled differently. Method registering an event handler\n * returns some kind of token, which is later used to identify which function should\n * be unregistered or it itself has method that unregisters event handler.\n * If that is the case with your API, make sure token returned\n * by registering method is returned by `addHandler`. Then it will be passed\n * as a second argument to `removeHandler`, where you will be able to use it.\n *\n * If you need access to all event handler parameters (not only the first one),\n * or you need to transform them in any way, you can call `fromEventPattern` with optional\n * third parameter - project function which will accept all arguments passed to\n * event handler when it is called. Whatever is returned from project function will appear on\n * resulting stream instead of usual event handlers first argument. This means\n * that default project can be thought of as function that takes its first parameter\n * and ignores the rest.\n *\n * ## Examples\n *\n * Emits clicks happening on the DOM document\n *\n * ```ts\n * import { fromEventPattern } from 'rxjs';\n *\n * function addClickHandler(handler) {\n *   document.addEventListener('click', handler);\n * }\n *\n * function removeClickHandler(handler) {\n *   document.removeEventListener('click', handler);\n * }\n *\n * const clicks = fromEventPattern(\n *   addClickHandler,\n *   removeClickHandler\n * );\n * clicks.subscribe(x => console.log(x));\n *\n * // Whenever you click anywhere in the browser, DOM MouseEvent\n * // object will be logged.\n * ```\n *\n * Use with API that returns cancellation token\n *\n * ```ts\n * import { fromEventPattern } from 'rxjs';\n *\n * const token = someAPI.registerEventHandler(function() {});\n * someAPI.unregisterEventHandler(token); // this APIs cancellation method accepts\n *                                        // not handler itself, but special token.\n *\n * const someAPIObservable = fromEventPattern(\n *   function(handler) { return someAPI.registerEventHandler(handler); }, // Note that we return the token here...\n *   function(handler, token) { someAPI.unregisterEventHandler(token); }  // ...to then use it here.\n * );\n * ```\n *\n * Use with project function\n *\n * ```ts\n * import { fromEventPattern } from 'rxjs';\n *\n * someAPI.registerEventHandler((eventType, eventMessage) => {\n *   console.log(eventType, eventMessage); // Logs 'EVENT_TYPE' 'EVENT_MESSAGE' to console.\n * });\n *\n * const someAPIObservable = fromEventPattern(\n *   handler => someAPI.registerEventHandler(handler),\n *   handler => someAPI.unregisterEventHandler(handler)\n *   (eventType, eventMessage) => eventType + ' --- ' + eventMessage // without that function only 'EVENT_TYPE'\n * );                                                                // would be emitted by the Observable\n *\n * someAPIObservable.subscribe(value => console.log(value));\n *\n * // Logs:\n * // 'EVENT_TYPE --- EVENT_MESSAGE'\n * 
```\n *\n * @see {@link fromEvent}\n * @see {@link bindCallback}\n * @see {@link bindNodeCallback}\n *\n * @param {function(handler: Function): any} addHandler A function that takes\n * a `handler` function as argument and attaches it somehow to the actual\n * source of events.\n * @param {function(handler: Function, token?: any): void} [removeHandler] A function that\n * takes a `handler` function as an argument and removes it from the event source. If `addHandler`\n * returns some kind of token, `removeHandler` function will have it as a second parameter.\n * @param {function(...args: any): T} [project] A function to\n * transform results. It takes the arguments from the event handler and\n * should return a single value.\n * @return {Observable<T>} Observable which, when an event happens, emits first parameter\n * passed to registered event handler. Alternatively it emits whatever project function returns\n * at that moment.\n */\nexport function fromEventPattern<T>(\n  addHandler: (handler: NodeEventHandler) => any,\n  removeHandler?: (handler: NodeEventHandler, signal?: any) => void,\n  resultSelector?: (...args: any[]) => T\n): Observable<T | T[]> {\n  if (resultSelector) {\n    return fromEventPattern<T>(addHandler, removeHandler).pipe(mapOneOrManyArgs(resultSelector));\n  }\n\n  return new Observable<T | T[]>((subscriber) => {\n    const handler = (...e: T[]) => subscriber.next(e.length === 1 ? e[0] : e);\n    const retValue = addHandler(handler);\n    return isFunction(removeHandler) ? () => removeHandler(handler, retValue) : undefined;\n  });\n}\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\nimport { async as asyncScheduler } from '../scheduler/async';\nimport { isScheduler } from '../util/isScheduler';\nimport { isValidDate } from '../util/isDate';\n\n/**\n * Creates an observable that will wait for a specified time period, or exact date, before\n * emitting the number 0.\n *\n * <span class=\"informal\">Used to emit a notification after a delay.</span>\n *\n * This observable is useful for creating delays in code, or racing against other values\n * for ad-hoc timeouts.\n *\n * The `delay` is specified by default in milliseconds, however providing a custom scheduler could\n * create a different behavior.\n *\n * ## Examples\n *\n * Wait 3 seconds and start another observable\n *\n * You might want to use `timer` to delay subscription to an\n * observable by a set amount of time. Here we use a timer with\n * {@link concatMapTo} or {@link concatMap} in order to wait\n * a few seconds and start a subscription to a source.\n *\n * ```ts\n * import { of, timer, concatMap } from 'rxjs';\n *\n * // This could be any observable\n * const source = of(1, 2, 3);\n *\n * timer(3000)\n *   .pipe(concatMap(() => source))\n *   .subscribe(console.log);\n * ```\n *\n * Take all values until the start of the next minute\n *\n * Using a `Date` as the trigger for the first emission, you can\n * do things like wait until midnight to fire an event, or in this case,\n * wait until a new minute starts (chosen so the example wouldn't take\n * too long to run) in order to stop watching a stream. 
Leveraging\n * {@link takeUntil}.\n *\n * ```ts\n * import { interval, takeUntil, timer } from 'rxjs';\n *\n * // Build a Date object that marks the\n * // next minute.\n * const currentDate = new Date();\n * const startOfNextMinute = new Date(\n *   currentDate.getFullYear(),\n *   currentDate.getMonth(),\n *   currentDate.getDate(),\n *   currentDate.getHours(),\n *   currentDate.getMinutes() + 1\n * );\n *\n * // This could be any observable stream\n * const source = interval(1000);\n *\n * const result = source.pipe(\n *   takeUntil(timer(startOfNextMinute))\n * );\n *\n * result.subscribe(console.log);\n * ```\n *\n * ### Known Limitations\n *\n * - The {@link asyncScheduler} uses `setTimeout` which has limitations for how far in the future it can be scheduled.\n *\n * - If a `scheduler` is provided that returns a timestamp other than an epoch from `now()`, and\n * a `Date` object is passed to the `dueTime` argument, the calculation for when the first emission\n * should occur will be incorrect. In this case, it would be best to do your own calculations\n * ahead of time, and pass a `number` in as the `dueTime`.\n *\n * @param due If a `number`, the amount of time in milliseconds to wait before emitting.\n * If a `Date`, the exact time at which to emit.\n * @param scheduler The scheduler to use to schedule the delay. Defaults to {@link asyncScheduler}.\n */\nexport function timer(due: number | Date, scheduler?: SchedulerLike): Observable<0>;\n\n/**\n * Creates an observable that starts an interval after a specified delay, emitting incrementing numbers -- starting at `0` --\n * on each interval after words.\n *\n * The `delay` and `intervalDuration` are specified by default in milliseconds, however providing a custom scheduler could\n * create a different behavior.\n *\n * ## Example\n *\n * ### Start an interval that starts right away\n *\n * Since {@link interval} waits for the passed delay before starting,\n * sometimes that's not ideal. You may want to start an interval immediately.\n * `timer` works well for this. Here we have both side-by-side so you can\n * see them in comparison.\n *\n * Note that this observable will never complete.\n *\n * ```ts\n * import { timer, interval } from 'rxjs';\n *\n * timer(0, 1000).subscribe(n => console.log('timer', n));\n * interval(1000).subscribe(n => console.log('interval', n));\n * ```\n *\n * ### Known Limitations\n *\n * - The {@link asyncScheduler} uses `setTimeout` which has limitations for how far in the future it can be scheduled.\n *\n * - If a `scheduler` is provided that returns a timestamp other than an epoch from `now()`, and\n * a `Date` object is passed to the `dueTime` argument, the calculation for when the first emission\n * should occur will be incorrect. In this case, it would be best to do your own calculations\n * ahead of time, and pass a `number` in as the `startDue`.\n * @param startDue If a `number`, is the time to wait before starting the interval.\n * If a `Date`, is the exact time at which to start the interval.\n * @param intervalDuration The delay between each value emitted in the interval. Passing a\n * negative number here will result in immediate completion after the first value is emitted, as though\n * no `intervalDuration` was passed at all.\n * @param scheduler The scheduler to use to schedule the delay. 
Defaults to {@link asyncScheduler}.\n */\nexport function timer(startDue: number | Date, intervalDuration: number, scheduler?: SchedulerLike): Observable<number>;\n\n/**\n * @deprecated The signature allowing `undefined` to be passed for `intervalDuration` will be removed in v8. Use the `timer(dueTime, scheduler?)` signature instead.\n */\nexport function timer(dueTime: number | Date, unused: undefined, scheduler?: SchedulerLike): Observable<0>;\n\nexport function timer(\n  dueTime: number | Date = 0,\n  intervalOrScheduler?: number | SchedulerLike,\n  scheduler: SchedulerLike = asyncScheduler\n): Observable<number> {\n  // Since negative intervalDuration is treated as though no\n  // interval was specified at all, we start with a negative number.\n  let intervalDuration = -1;\n\n  if (intervalOrScheduler != null) {\n    // If we have a second argument, and it's a scheduler,\n    // override the scheduler we had defaulted. Otherwise,\n    // it must be an interval.\n    if (isScheduler(intervalOrScheduler)) {\n      scheduler = intervalOrScheduler;\n    } else {\n      // Note that this *could* be negative, in which case\n      // it's like not passing an intervalDuration at all.\n      intervalDuration = intervalOrScheduler;\n    }\n  }\n\n  return new Observable((subscriber) => {\n    // If a valid date is passed, calculate how long to wait before\n    // executing the first value... otherwise, if it's a number just schedule\n    // that many milliseconds (or scheduler-specified unit size) in the future.\n    let due = isValidDate(dueTime) ? +dueTime - scheduler!.now() : dueTime;\n\n    if (due < 0) {\n      // Ensure we don't schedule in the future.\n      due = 0;\n    }\n\n    // The incrementing value we emit.\n    let n = 0;\n\n    // Start the timer.\n    return scheduler.schedule(function () {\n      if (!subscriber.closed) {\n        // Emit the next value and increment.\n        subscriber.next(n++);\n\n        if (0 <= intervalDuration) {\n          // If we have a interval after the initial timer,\n          // reschedule with the period.\n          this.schedule(undefined, intervalDuration);\n        } else {\n          // We didn't have an interval. So just complete.\n          subscriber.complete();\n        }\n      }\n    }, due);\n  });\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInput, ObservableInputTuple, SchedulerLike } from '../types';\nimport { mergeAll } from '../operators/mergeAll';\nimport { innerFrom } from './innerFrom';\nimport { EMPTY } from './empty';\nimport { popNumber, popScheduler } from '../util/args';\nimport { from } from './from';\n\nexport function merge<A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): Observable<A[number]>;\nexport function merge<A extends readonly unknown[]>(...sourcesAndConcurrency: [...ObservableInputTuple<A>, number?]): Observable<A[number]>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `mergeAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function merge<A extends readonly unknown[]>(\n  ...sourcesAndScheduler: [...ObservableInputTuple<A>, SchedulerLike?]\n): Observable<A[number]>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `mergeAll`. 
Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function merge<A extends readonly unknown[]>(\n  ...sourcesAndConcurrencyAndScheduler: [...ObservableInputTuple<A>, number?, SchedulerLike?]\n): Observable<A[number]>;\n\n/**\n * Creates an output Observable which concurrently emits all values from every\n * given input Observable.\n *\n * <span class=\"informal\">Flattens multiple Observables together by blending\n * their values into one Observable.</span>\n *\n * ![](merge.png)\n *\n * `merge` subscribes to each given input Observable (as arguments), and simply\n * forwards (without doing any transformation) all the values from all the input\n * Observables to the output Observable. The output Observable only completes\n * once all input Observables have completed. Any error delivered by an input\n * Observable will be immediately emitted on the output Observable.\n *\n * ## Examples\n *\n * Merge together two Observables: 1s interval and clicks\n *\n * ```ts\n * import { merge, fromEvent, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const timer = interval(1000);\n * const clicksOrTimer = merge(clicks, timer);\n * clicksOrTimer.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // timer will emit ascending values, one every second(1000ms) to console\n * // clicks logs MouseEvents to console every time the \"document\" is clicked\n * // Since the two streams are merged you see these happening\n * // as they occur.\n * ```\n *\n * Merge together 3 Observables, but run only 2 concurrently\n *\n * ```ts\n * import { interval, take, merge } from 'rxjs';\n *\n * const timer1 = interval(1000).pipe(take(10));\n * const timer2 = interval(2000).pipe(take(6));\n * const timer3 = interval(500).pipe(take(10));\n *\n * const concurrent = 2; // the argument\n * const merged = merge(timer1, timer2, timer3, concurrent);\n * merged.subscribe(x => console.log(x));\n *\n * // Results in the following:\n * // - First timer1 and timer2 will run concurrently\n * // - timer1 will emit a value every 1000ms for 10 iterations\n * // - timer2 will emit a value every 2000ms for 6 iterations\n * // - after timer1 hits its max iteration, timer2 will\n * //   continue, and timer3 will start to run concurrently with timer2\n * // - when timer2 hits its max iteration it terminates, and\n * //   timer3 will continue to emit a value every 500ms until it is complete\n * ```\n *\n * @see {@link mergeAll}\n * @see {@link mergeMap}\n * @see {@link mergeMapTo}\n * @see {@link mergeScan}\n *\n * @param {...ObservableInput} observables Input Observables to merge together.\n * @param {number} [concurrent=Infinity] Maximum number of input\n * Observables being subscribed to concurrently.\n * @param {SchedulerLike} [scheduler=null] The {@link SchedulerLike} to use for managing\n * concurrency of input Observables.\n * @return {Observable} an Observable that emits items that are the result of\n * every input Observable.\n */\nexport function merge(...args: (ObservableInput<unknown> | number | SchedulerLike)[]): Observable<unknown> {\n  const scheduler = popScheduler(args);\n  const concurrent = popNumber(args, Infinity);\n  const sources = args as ObservableInput<unknown>[];\n  return !sources.length\n    ? // No source provided\n      EMPTY\n    : sources.length === 1\n    ? // One source? 
Just return it.\n      innerFrom(sources[0])\n    : // Merge all sources\n      mergeAll(concurrent)(from(sources, scheduler));\n}\n", "import { Observable } from '../Observable';\nimport { noop } from '../util/noop';\n\n/**\n * An Observable that emits no items to the Observer and never completes.\n *\n * ![](never.png)\n *\n * A simple Observable that emits neither values nor errors nor the completion\n * notification. It can be used for testing purposes or for composing with other\n * Observables. Please note that by never emitting a complete notification, this\n * Observable keeps the subscription from being disposed automatically.\n * Subscriptions need to be manually disposed.\n *\n * ##  Example\n *\n * Emit the number 7, then never emit anything else (not even complete)\n *\n * ```ts\n * import { NEVER, startWith } from 'rxjs';\n *\n * const info = () => console.log('Will not be called');\n *\n * const result = NEVER.pipe(startWith(7));\n * result.subscribe({\n *   next: x => console.log(x),\n *   error: info,\n *   complete: info\n * });\n * ```\n *\n * @see {@link Observable}\n * @see {@link EMPTY}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const NEVER = new Observable<never>(noop);\n\n/**\n * @deprecated Replaced with the {@link NEVER} constant. Will be removed in v8.\n */\nexport function never() {\n  return NEVER;\n}\n", "const { isArray } = Array;\n\n/**\n * Used in operators and functions that accept either a list of arguments, or an array of arguments\n * as a single argument.\n */\nexport function argsOrArgArray<T>(args: (T | T[])[]): T[] {\n  return args.length === 1 && isArray(args[0]) ? args[0] : (args as T[]);\n}\n", "import { OperatorFunction, MonoTypeOperatorFunction, TruthyTypesOf } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/** @deprecated Use a closure instead of a `thisArg`. Signatures accepting a `thisArg` will be removed in v8. */\nexport function filter<T, S extends T, A>(predicate: (this: A, value: T, index: number) => value is S, thisArg: A): OperatorFunction<T, S>;\nexport function filter<T, S extends T>(predicate: (value: T, index: number) => value is S): OperatorFunction<T, S>;\nexport function filter<T>(predicate: BooleanConstructor): OperatorFunction<T, TruthyTypesOf<T>>;\n/** @deprecated Use a closure instead of a `thisArg`. Signatures accepting a `thisArg` will be removed in v8. 
*/\nexport function filter<T, A>(predicate: (this: A, value: T, index: number) => boolean, thisArg: A): MonoTypeOperatorFunction<T>;\nexport function filter<T>(predicate: (value: T, index: number) => boolean): MonoTypeOperatorFunction<T>;\n\n/**\n * Filter items emitted by the source Observable by only emitting those that\n * satisfy a specified predicate.\n *\n * <span class=\"informal\">Like\n * [Array.prototype.filter()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/filter),\n * it only emits a value from the source if it passes a criterion function.</span>\n *\n * ![](filter.png)\n *\n * Similar to the well-known `Array.prototype.filter` method, this operator\n * takes values from the source Observable, passes them through a `predicate`\n * function and only emits those values that yielded `true`.\n *\n * ## Example\n *\n * Emit only click events whose target was a DIV element\n *\n * ```ts\n * import { fromEvent, filter } from 'rxjs';\n *\n * const div = document.createElement('div');\n * div.style.cssText = 'width: 200px; height: 200px; background: #09c;';\n * document.body.appendChild(div);\n *\n * const clicks = fromEvent(document, 'click');\n * const clicksOnDivs = clicks.pipe(filter(ev => (<HTMLElement>ev.target).tagName === 'DIV'));\n * clicksOnDivs.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link distinct}\n * @see {@link distinctUntilChanged}\n * @see {@link distinctUntilKeyChanged}\n * @see {@link ignoreElements}\n * @see {@link partition}\n * @see {@link skip}\n *\n * @param predicate A function that\n * evaluates each value emitted by the source Observable. If it returns `true`,\n * the value is emitted, if `false` the value is not passed to the output\n * Observable. The `index` parameter is the number `i` for the i-th source\n * emission that has happened since the subscription, starting from the number\n * `0`.\n * @param thisArg An optional argument to determine the value of `this`\n * in the `predicate` function.\n * @return A function that returns an Observable that emits items from the\n * source Observable that satisfy the specified `predicate`.\n */\nexport function filter<T>(predicate: (value: T, index: number) => boolean, thisArg?: any): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    // An index passed to our predicate function on each call.\n    let index = 0;\n\n    // Subscribe to the source, all errors and completions are\n    // forwarded to the consumer.\n    source.subscribe(\n      // Call the predicate with the appropriate `this` context,\n      // if the predicate returns `true`, then send the value\n      // to the consumer.\n      createOperatorSubscriber(subscriber, (value) => predicate.call(thisArg, value, index++) && subscriber.next(value))\n    );\n  });\n}\n", "import { Observable } from '../Observable';\nimport { ObservableInputTuple } from '../types';\nimport { innerFrom } from './innerFrom';\nimport { argsOrArgArray } from '../util/argsOrArgArray';\nimport { EMPTY } from './empty';\nimport { createOperatorSubscriber } from '../operators/OperatorSubscriber';\nimport { popResultSelector } from '../util/args';\n\nexport function zip<A extends readonly unknown[]>(sources: [...ObservableInputTuple<A>]): Observable<A>;\nexport function zip<A extends readonly unknown[], R>(\n  sources: [...ObservableInputTuple<A>],\n  resultSelector: (...values: A) => R\n): Observable<R>;\nexport function zip<A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): 
Observable<A>;\nexport function zip<A extends readonly unknown[], R>(\n  ...sourcesAndResultSelector: [...ObservableInputTuple<A>, (...values: A) => R]\n): Observable<R>;\n\n/**\n * Combines multiple Observables to create an Observable whose values are calculated from the values, in order, of each\n * of its input Observables.\n *\n * If the last parameter is a function, this function is used to compute the created value from the input values.\n * Otherwise, an array of the input values is returned.\n *\n * ## Example\n *\n * Combine age and name from different sources\n *\n * ```ts\n * import { of, zip, map } from 'rxjs';\n *\n * const age$ = of(27, 25, 29);\n * const name$ = of('Foo', 'Bar', 'Beer');\n * const isDev$ = of(true, true, false);\n *\n * zip(age$, name$, isDev$).pipe(\n *   map(([age, name, isDev]) => ({ age, name, isDev }))\n * )\n * .subscribe(x => console.log(x));\n *\n * // Outputs\n * // { age: 27, name: 'Foo', isDev: true }\n * // { age: 25, name: 'Bar', isDev: true }\n * // { age: 29, name: 'Beer', isDev: false }\n * ```\n *\n * @param sources\n * @return {Observable<R>}\n */\nexport function zip(...args: unknown[]): Observable<unknown> {\n  const resultSelector = popResultSelector(args);\n\n  const sources = argsOrArgArray(args) as Observable<unknown>[];\n\n  return sources.length\n    ? new Observable<unknown[]>((subscriber) => {\n        // A collection of buffers of values from each source.\n        // Keyed by the same index with which the sources were passed in.\n        let buffers: unknown[][] = sources.map(() => []);\n\n        // An array of flags of whether or not the sources have completed.\n        // This is used to check to see if we should complete the result.\n        // Keyed by the same index with which the sources were passed in.\n        let completed = sources.map(() => false);\n\n        // When everything is done, release the arrays above.\n        subscriber.add(() => {\n          buffers = completed = null!;\n        });\n\n        // Loop over our sources and subscribe to each one. The index `i` is\n        // especially important here, because we use it in closures below to\n        // access the related buffers and completion properties\n        for (let sourceIndex = 0; !subscriber.closed && sourceIndex < sources.length; sourceIndex++) {\n          innerFrom(sources[sourceIndex]).subscribe(\n            createOperatorSubscriber(\n              subscriber,\n              (value) => {\n                buffers[sourceIndex].push(value);\n                // if every buffer has at least one value in it, then we\n                // can shift out the oldest value from each buffer and emit\n                // them as an array.\n                if (buffers.every((buffer) => buffer.length)) {\n                  const result: any = buffers.map((buffer) => buffer.shift()!);\n                  // Emit the array. If theres' a result selector, use that.\n                  subscriber.next(resultSelector ? resultSelector(...result) : result);\n                  // If any one of the sources is both complete and has an empty buffer\n                  // then we complete the result. This is because we cannot possibly have\n                  // any more values to zip together.\n                  if (buffers.some((buffer, i) => !buffer.length && completed[i])) {\n                    subscriber.complete();\n                  }\n                }\n              },\n              () => {\n                // This source completed. 
Mark it as complete so we can check it later\n                // if we have to.\n                completed[sourceIndex] = true;\n                // But, if this complete source has nothing in its buffer, then we\n                // can complete the result, because we can't possibly have any more\n                // values from this to zip together with the other values.\n                !buffers[sourceIndex].length && subscriber.complete();\n              }\n            )\n          );\n        }\n\n        // When everything is done, release the arrays above.\n        return () => {\n          buffers = completed = null!;\n        };\n      })\n    : EMPTY;\n}\n", "import { Subscriber } from '../Subscriber';\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\n\nimport { operate } from '../util/lift';\nimport { innerFrom } from '../observable/innerFrom';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Ignores source values for a duration determined by another Observable, then\n * emits the most recent value from the source Observable, then repeats this\n * process.\n *\n * <span class=\"informal\">It's like {@link auditTime}, but the silencing\n * duration is determined by a second Observable.</span>\n *\n * ![](audit.svg)\n *\n * `audit` is similar to `throttle`, but emits the last value from the silenced\n * time window, instead of the first value. `audit` emits the most recent value\n * from the source Observable on the output Observable as soon as its internal\n * timer becomes disabled, and ignores source values while the timer is enabled.\n * Initially, the timer is disabled. As soon as the first source value arrives,\n * the timer is enabled by calling the `durationSelector` function with the\n * source value, which returns the \"duration\" Observable. 
When the duration\n * Observable emits a value, the timer is disabled, then the most\n * recent source value is emitted on the output Observable, and this process\n * repeats for the next source value.\n *\n * ## Example\n *\n * Emit clicks at a rate of at most one click per second\n *\n * ```ts\n * import { fromEvent, audit, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(audit(ev => interval(1000)));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link auditTime}\n * @see {@link debounce}\n * @see {@link delayWhen}\n * @see {@link sample}\n * @see {@link throttle}\n *\n * @param durationSelector A function\n * that receives a value from the source Observable, for computing the silencing\n * duration, returned as an Observable or a Promise.\n * @return A function that returns an Observable that performs rate-limiting of\n * emissions from the source Observable.\n */\nexport function audit<T>(durationSelector: (value: T) => ObservableInput<any>): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let hasValue = false;\n    let lastValue: T | null = null;\n    let durationSubscriber: Subscriber<any> | null = null;\n    let isComplete = false;\n\n    const endDuration = () => {\n      durationSubscriber?.unsubscribe();\n      durationSubscriber = null;\n      if (hasValue) {\n        hasValue = false;\n        const value = lastValue!;\n        lastValue = null;\n        subscriber.next(value);\n      }\n      isComplete && subscriber.complete();\n    };\n\n    const cleanupDuration = () => {\n      durationSubscriber = null;\n      isComplete && subscriber.complete();\n    };\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          hasValue = true;\n          lastValue = value;\n          if (!durationSubscriber) {\n            innerFrom(durationSelector(value)).subscribe(\n              (durationSubscriber = createOperatorSubscriber(subscriber, endDuration, cleanupDuration))\n            );\n          }\n        },\n        () => {\n          isComplete = true;\n          (!hasValue || !durationSubscriber || durationSubscriber.closed) && subscriber.complete();\n        }\n      )\n    );\n  });\n}\n", "import { asyncScheduler } from '../scheduler/async';\nimport { audit } from './audit';\nimport { timer } from '../observable/timer';\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\n\n/**\n * Ignores source values for `duration` milliseconds, then emits the most recent\n * value from the source Observable, then repeats this process.\n *\n * <span class=\"informal\">When it sees a source value, it ignores that plus\n * the next ones for `duration` milliseconds, and then it emits the most recent\n * value from the source.</span>\n *\n * ![](auditTime.png)\n *\n * `auditTime` is similar to `throttleTime`, but emits the last value from the\n * silenced time window, instead of the first value. `auditTime` emits the most\n * recent value from the source Observable on the output Observable as soon as\n * its internal timer becomes disabled, and ignores source values while the\n * timer is enabled. Initially, the timer is disabled. As soon as the first\n * source value arrives, the timer is enabled. 
After `duration` milliseconds (or\n * the time unit determined internally by the optional `scheduler`) has passed,\n * the timer is disabled, then the most recent source value is emitted on the\n * output Observable, and this process repeats for the next source value.\n * Optionally takes a {@link SchedulerLike} for managing timers.\n *\n * ## Example\n *\n * Emit clicks at a rate of at most one click per second\n *\n * ```ts\n * import { fromEvent, auditTime } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(auditTime(1000));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link audit}\n * @see {@link debounceTime}\n * @see {@link delay}\n * @see {@link sampleTime}\n * @see {@link throttleTime}\n *\n * @param {number} duration Time to wait before emitting the most recent source\n * value, measured in milliseconds or the time unit determined internally\n * by the optional `scheduler`.\n * @param {SchedulerLike} [scheduler=async] The {@link SchedulerLike} to use for\n * managing the timers that handle the rate-limiting behavior.\n * @return A function that returns an Observable that performs rate-limiting of\n * emissions from the source Observable.\n */\nexport function auditTime<T>(duration: number, scheduler: SchedulerLike = asyncScheduler): MonoTypeOperatorFunction<T> {\n  return audit(() => timer(duration, scheduler));\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { arrRemove } from '../util/arrRemove';\n\n/**\n * Buffers the source Observable values until the size hits the maximum\n * `bufferSize` given.\n *\n * <span class=\"informal\">Collects values from the past as an array, and emits\n * that array only when its size reaches `bufferSize`.</span>\n *\n * ![](bufferCount.png)\n *\n * Buffers a number of values from the source Observable by `bufferSize` then\n * emits the buffer and clears it, and starts a new buffer each\n * `startBufferEvery` values. If `startBufferEvery` is not provided or is\n * `null`, then new buffers are started immediately at the start of the source\n * and when each buffer closes and is emitted.\n *\n * ## Examples\n *\n * Emit the last two click events as an array\n *\n * ```ts\n * import { fromEvent, bufferCount } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const buffered = clicks.pipe(bufferCount(2));\n * buffered.subscribe(x => console.log(x));\n * ```\n *\n * On every click, emit the last two click events as an array\n *\n * ```ts\n * import { fromEvent, bufferCount } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const buffered = clicks.pipe(bufferCount(2, 1));\n * buffered.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link buffer}\n * @see {@link bufferTime}\n * @see {@link bufferToggle}\n * @see {@link bufferWhen}\n * @see {@link pairwise}\n * @see {@link windowCount}\n *\n * @param {number} bufferSize The maximum size of the buffer emitted.\n * @param {number} [startBufferEvery] Interval at which to start a new buffer.\n * For example if `startBufferEvery` is `2`, then a new buffer will be started\n * on every other value from the source. 
A new buffer is started at the\n * beginning of the source by default.\n * @return A function that returns an Observable of arrays of buffered values.\n */\nexport function bufferCount<T>(bufferSize: number, startBufferEvery: number | null = null): OperatorFunction<T, T[]> {\n  // If no `startBufferEvery` value was supplied, then we're\n  // opening and closing on the bufferSize itself.\n  startBufferEvery = startBufferEvery ?? bufferSize;\n\n  return operate((source, subscriber) => {\n    let buffers: T[][] = [];\n    let count = 0;\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          let toEmit: T[][] | null = null;\n\n          // Check to see if we need to start a buffer.\n          // This will start one at the first value, and then\n          // a new one every N after that.\n          if (count++ % startBufferEvery! === 0) {\n            buffers.push([]);\n          }\n\n          // Push our value into our active buffers.\n          for (const buffer of buffers) {\n            buffer.push(value);\n            // Check to see if we're over the bufferSize\n            // if we are, record it so we can emit it later.\n            // If we emitted it now and removed it, it would\n            // mutate the `buffers` array while we're looping\n            // over it.\n            if (bufferSize <= buffer.length) {\n              toEmit = toEmit ?? [];\n              toEmit.push(buffer);\n            }\n          }\n\n          if (toEmit) {\n            // We have found some buffers that are over the\n            // `bufferSize`. Emit them, and remove them from our\n            // buffers list.\n            for (const buffer of toEmit) {\n              arrRemove(buffers, buffer);\n              subscriber.next(buffer);\n            }\n          }\n        },\n        () => {\n          // When the source completes, emit all of our\n          // active buffers.\n          for (const buffer of buffers) {\n            subscriber.next(buffer);\n          }\n          subscriber.complete();\n        },\n        // Pass all errors through to consumer.\n        undefined,\n        () => {\n          // Clean up our memory when we finalize\n          buffers = null!;\n        }\n      )\n    );\n  });\n}\n", "import { Observable } from '../Observable';\n\nimport { ObservableInput, OperatorFunction, ObservedValueOf } from '../types';\nimport { Subscription } from '../Subscription';\nimport { innerFrom } from '../observable/innerFrom';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { operate } from '../util/lift';\n\n/* tslint:disable:max-line-length */\nexport function catchError<T, O extends ObservableInput<any>>(\n  selector: (err: any, caught: Observable<T>) => O\n): OperatorFunction<T, T | ObservedValueOf<O>>;\n/* tslint:enable:max-line-length */\n\n/**\n * Catches errors on the observable to be handled by returning a new observable or throwing an error.\n *\n * <span class=\"informal\">\n * It only listens to the error channel and ignores notifications.\n * Handles errors from the source observable, and maps them to a new observable.\n * The error may also be rethrown, or a new error can be thrown to emit an error from the result.\n * </span>\n *\n * ![](catch.png)\n *\n * This operator handles errors, but forwards along all other events to the resulting observable.\n * If the source observable terminates with an error, it will map that error to a new observable,\n * subscribe to it, and forward all of its 
events to the resulting observable.\n *\n * ## Examples\n *\n * Continue with a different Observable when there's an error\n *\n * ```ts\n * import { of, map, catchError } from 'rxjs';\n *\n * of(1, 2, 3, 4, 5)\n *   .pipe(\n *     map(n => {\n *       if (n === 4) {\n *         throw 'four!';\n *       }\n *       return n;\n *     }),\n *     catchError(err => of('I', 'II', 'III', 'IV', 'V'))\n *   )\n *   .subscribe(x => console.log(x));\n *   // 1, 2, 3, I, II, III, IV, V\n * ```\n *\n * Retry the caught source Observable again in case of error, similar to `retry()` operator\n *\n * ```ts\n * import { of, map, catchError, take } from 'rxjs';\n *\n * of(1, 2, 3, 4, 5)\n *   .pipe(\n *     map(n => {\n *       if (n === 4) {\n *         throw 'four!';\n *       }\n *       return n;\n *     }),\n *     catchError((err, caught) => caught),\n *     take(30)\n *   )\n *   .subscribe(x => console.log(x));\n *   // 1, 2, 3, 1, 2, 3, ...\n * ```\n *\n * Throw a new error when the source Observable throws an error\n *\n * ```ts\n * import { of, map, catchError } from 'rxjs';\n *\n * of(1, 2, 3, 4, 5)\n *   .pipe(\n *     map(n => {\n *       if (n === 4) {\n *         throw 'four!';\n *       }\n *       return n;\n *     }),\n *     catchError(err => {\n *       throw 'error in source. Details: ' + err;\n *     })\n *   )\n *   .subscribe({\n *     next: x => console.log(x),\n *     error: err => console.log(err)\n *   });\n *   // 1, 2, 3, error in source. Details: four!\n * ```\n *\n * @see {@link onErrorResumeNext}\n * @see {@link repeat}\n * @see {@link repeatWhen}\n * @see {@link retry }\n * @see {@link retryWhen}\n *\n * @param {function} selector a function that takes as arguments `err`, which is the error, and `caught`, which\n * is the source observable, in case you'd like to \"retry\" that observable by returning it again. Whatever observable\n * is returned by the `selector` will be used to continue the observable chain.\n * @return A function that returns an Observable that originates from either\n * the source or the Observable returned by the `selector` function.\n */\nexport function catchError<T, O extends ObservableInput<any>>(\n  selector: (err: any, caught: Observable<T>) => O\n): OperatorFunction<T, T | ObservedValueOf<O>> {\n  return operate((source, subscriber) => {\n    let innerSub: Subscription | null = null;\n    let syncUnsub = false;\n    let handledResult: Observable<ObservedValueOf<O>>;\n\n    innerSub = source.subscribe(\n      createOperatorSubscriber(subscriber, undefined, undefined, (err) => {\n        handledResult = innerFrom(selector(err, catchError(selector)(source)));\n        if (innerSub) {\n          innerSub.unsubscribe();\n          innerSub = null;\n          handledResult.subscribe(subscriber);\n        } else {\n          // We don't have an innerSub yet, that means the error was synchronous\n          // because the subscribe call hasn't returned yet.\n          syncUnsub = true;\n        }\n      })\n    );\n\n    if (syncUnsub) {\n      // We have a synchronous error, we need to make sure to\n      // finalize right away. 
This ensures that callbacks in the `finalize` operator are called\n      // at the right time, and that finalization occurs at the expected\n      // time between the source error and the subscription to the\n      // next observable.\n      innerSub.unsubscribe();\n      innerSub = null;\n      handledResult!.subscribe(subscriber);\n    }\n  });\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * A basic scan operation. This is used for `scan` and `reduce`.\n * @param accumulator The accumulator to use\n * @param seed The seed value for the state to accumulate\n * @param hasSeed Whether or not a seed was provided\n * @param emitOnNext Whether or not to emit the state on next\n * @param emitBeforeComplete Whether or not to emit the before completion\n */\n\nexport function scanInternals<V, A, S>(\n  accumulator: (acc: V | A | S, value: V, index: number) => A,\n  seed: S,\n  hasSeed: boolean,\n  emitOnNext: boolean,\n  emitBeforeComplete?: undefined | true\n) {\n  return (source: Observable<V>, subscriber: Subscriber<any>) => {\n    // Whether or not we have state yet. This will only be\n    // false before the first value arrives if we didn't get\n    // a seed value.\n    let hasState = hasSeed;\n    // The state that we're tracking, starting with the seed,\n    // if there is one, and then updated by the return value\n    // from the accumulator on each emission.\n    let state: any = seed;\n    // An index to pass to the accumulator function.\n    let index = 0;\n\n    // Subscribe to our source. All errors and completions are passed through.\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          // Always increment the index.\n          const i = index++;\n          // Set the state\n          state = hasState\n            ? // We already have state, so we can get the new state from the accumulator\n              accumulator(state, value, i)\n            : // We didn't have state yet, a seed value was not provided, so\n\n              // we set the state to the first value, and mark that we have state now\n              ((hasState = true), value);\n\n          // Maybe send it to the consumer.\n          emitOnNext && subscriber.next(state);\n        },\n        // If an onComplete was given, call it, otherwise\n        // just pass through the complete notification to the consumer.\n        emitBeforeComplete &&\n          (() => {\n            hasState && subscriber.next(state);\n            subscriber.complete();\n          })\n      )\n    );\n  };\n}\n", "import { combineLatestInit } from '../observable/combineLatest';\nimport { ObservableInput, ObservableInputTuple, OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { argsOrArgArray } from '../util/argsOrArgArray';\nimport { mapOneOrManyArgs } from '../util/mapOneOrManyArgs';\nimport { pipe } from '../util/pipe';\nimport { popResultSelector } from '../util/args';\n\n/** @deprecated Replaced with {@link combineLatestWith}. Will be removed in v8. */\nexport function combineLatest<T, A extends readonly unknown[], R>(\n  sources: [...ObservableInputTuple<A>],\n  project: (...values: [T, ...A]) => R\n): OperatorFunction<T, R>;\n/** @deprecated Replaced with {@link combineLatestWith}. Will be removed in v8. 
*/\nexport function combineLatest<T, A extends readonly unknown[], R>(sources: [...ObservableInputTuple<A>]): OperatorFunction<T, [T, ...A]>;\n\n/** @deprecated Replaced with {@link combineLatestWith}. Will be removed in v8. */\nexport function combineLatest<T, A extends readonly unknown[], R>(\n  ...sourcesAndProject: [...ObservableInputTuple<A>, (...values: [T, ...A]) => R]\n): OperatorFunction<T, R>;\n/** @deprecated Replaced with {@link combineLatestWith}. Will be removed in v8. */\nexport function combineLatest<T, A extends readonly unknown[], R>(...sources: [...ObservableInputTuple<A>]): OperatorFunction<T, [T, ...A]>;\n\n/**\n * @deprecated Replaced with {@link combineLatestWith}. Will be removed in v8.\n */\nexport function combineLatest<T, R>(...args: (ObservableInput<any> | ((...values: any[]) => R))[]): OperatorFunction<T, unknown> {\n  const resultSelector = popResultSelector(args);\n  return resultSelector\n    ? pipe(combineLatest(...(args as Array<ObservableInput<any>>)), mapOneOrManyArgs(resultSelector))\n    : operate((source, subscriber) => {\n        combineLatestInit([source, ...argsOrArgArray(args)])(subscriber);\n      });\n}\n", "import { ObservableInputTuple, OperatorFunction, Cons } from '../types';\nimport { combineLatest } from './combineLatest';\n\n/**\n * Create an observable that combines the latest values from all passed observables and the source\n * into arrays and emits them.\n *\n * Returns an observable, that when subscribed to, will subscribe to the source observable and all\n * sources provided as arguments. Once all sources emit at least one value, all of the latest values\n * will be emitted as an array. After that, every time any source emits a value, all of the latest values\n * will be emitted as an array.\n *\n * This is a useful operator for eagerly calculating values based off of changed inputs.\n *\n * ## Example\n *\n * Simple concatenation of values from two inputs\n *\n * ```ts\n * import { fromEvent, combineLatestWith, map } from 'rxjs';\n *\n * // Setup: Add two inputs to the page\n * const input1 = document.createElement('input');\n * document.body.appendChild(input1);\n * const input2 = document.createElement('input');\n * document.body.appendChild(input2);\n *\n * // Get streams of changes\n * const input1Changes$ = fromEvent(input1, 'change');\n * const input2Changes$ = fromEvent(input2, 'change');\n *\n * // Combine the changes by adding them together\n * input1Changes$.pipe(\n *   combineLatestWith(input2Changes$),\n *   map(([e1, e2]) => (<HTMLInputElement>e1.target).value + ' - ' + (<HTMLInputElement>e2.target).value)\n * )\n * .subscribe(x => console.log(x));\n * ```\n *\n * @param otherSources the other sources to subscribe to.\n * @return A function that returns an Observable that emits the latest\n * emissions from both source and provided Observables.\n */\nexport function combineLatestWith<T, A extends readonly unknown[]>(\n  ...otherSources: [...ObservableInputTuple<A>]\n): OperatorFunction<T, Cons<T, A>> {\n  return combineLatest(...otherSources);\n}\n", "import { Subscriber } from '../Subscriber';\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { noop } from '../util/noop';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\n\n/**\n * Emits a notification from the source Observable only after a particular time span\n * determined by another Observable has passed without another 
source emission.\n *\n * <span class=\"informal\">It's like {@link debounceTime}, but the time span of\n * emission silence is determined by a second Observable.</span>\n *\n * ![](debounce.svg)\n *\n * `debounce` delays notifications emitted by the source Observable, but drops previous\n * pending delayed emissions if a new notification arrives on the source Observable.\n * This operator keeps track of the most recent notification from the source\n * Observable, and spawns a duration Observable by calling the\n * `durationSelector` function. The notification is emitted only when the duration\n * Observable emits a next notification, and if no other notification was emitted on\n * the source Observable since the duration Observable was spawned. If a new\n * notification appears before the duration Observable emits, the previous notification will\n * not be emitted and a new duration is scheduled from `durationSelector` is scheduled.\n * If the completing event happens during the scheduled duration the last cached notification\n * is emitted before the completion event is forwarded to the output observable.\n * If the error event happens during the scheduled duration or after it only the error event is\n * forwarded to the output observable. The cache notification is not emitted in this case.\n *\n * Like {@link debounceTime}, this is a rate-limiting operator, and also a\n * delay-like operator since output emissions do not necessarily occur at the\n * same time as they did on the source Observable.\n *\n * ## Example\n *\n * Emit the most recent click after a burst of clicks\n *\n * ```ts\n * import { fromEvent, scan, debounce, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(\n *   scan(i => ++i, 1),\n *   debounce(i => interval(200 * i))\n * );\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link audit}\n * @see {@link auditTime}\n * @see {@link debounceTime}\n * @see {@link delay}\n * @see {@link sample}\n * @see {@link sampleTime}\n * @see {@link throttle}\n * @see {@link throttleTime}\n *\n * @param durationSelector A function\n * that receives a value from the source Observable, for computing the timeout\n * duration for each source value, returned as an Observable or a Promise.\n * @return A function that returns an Observable that delays the emissions of\n * the source Observable by the specified duration Observable returned by\n * `durationSelector`, and may drop some values if they occur too frequently.\n */\nexport function debounce<T>(durationSelector: (value: T) => ObservableInput<any>): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let hasValue = false;\n    let lastValue: T | null = null;\n    // The subscriber/subscription for the current debounce, if there is one.\n    let durationSubscriber: Subscriber<any> | null = null;\n\n    const emit = () => {\n      // Unsubscribe any current debounce subscription we have,\n      // we only cared about the first notification from it, and we\n      // want to clean that subscription up as soon as possible.\n      durationSubscriber?.unsubscribe();\n      durationSubscriber = null;\n      if (hasValue) {\n        // We have a value! 
Free up memory first, then emit the value.\n        hasValue = false;\n        const value = lastValue!;\n        lastValue = null;\n        subscriber.next(value);\n      }\n    };\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value: T) => {\n          // Cancel any pending debounce duration. We don't\n          // need to null it out here yet tho, because we're just going\n          // to create another one in a few lines.\n          durationSubscriber?.unsubscribe();\n          hasValue = true;\n          lastValue = value;\n          // Capture our duration subscriber, so we can unsubscribe it when we're notified\n          // and we're going to emit the value.\n          durationSubscriber = createOperatorSubscriber(subscriber, emit, noop);\n          // Subscribe to the duration.\n          innerFrom(durationSelector(value)).subscribe(durationSubscriber);\n        },\n        () => {\n          // Source completed.\n          // Emit any pending debounced values then complete\n          emit();\n          subscriber.complete();\n        },\n        // Pass all errors through to consumer\n        undefined,\n        () => {\n          // Finalization.\n          lastValue = durationSubscriber = null;\n        }\n      )\n    );\n  });\n}\n", "import { asyncScheduler } from '../scheduler/async';\nimport { Subscription } from '../Subscription';\nimport { MonoTypeOperatorFunction, SchedulerAction, SchedulerLike } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Emits a notification from the source Observable only after a particular time span\n * has passed without another source emission.\n *\n * <span class=\"informal\">It's like {@link delay}, but passes only the most\n * recent notification from each burst of emissions.</span>\n *\n * ![](debounceTime.png)\n *\n * `debounceTime` delays notifications emitted by the source Observable, but drops\n * previous pending delayed emissions if a new notification arrives on the source\n * Observable. This operator keeps track of the most recent notification from the\n * source Observable, and emits that only when `dueTime` has passed\n * without any other notification appearing on the source Observable. If a new value\n * appears before `dueTime` silence occurs, the previous notification will be dropped\n * and will not be emitted and a new `dueTime` is scheduled.\n * If the completing event happens during `dueTime` the last cached notification\n * is emitted before the completion event is forwarded to the output observable.\n * If the error event happens during `dueTime` or after it only the error event is\n * forwarded to the output observable. The cache notification is not emitted in this case.\n *\n * This is a rate-limiting operator, because it is impossible for more than one\n * notification to be emitted in any time window of duration `dueTime`, but it is also\n * a delay-like operator since output emissions do not occur at the same time as\n * they did on the source Observable. 
Optionally takes a {@link SchedulerLike} for\n * managing timers.\n *\n * ## Example\n *\n * Emit the most recent click after a burst of clicks\n *\n * ```ts\n * import { fromEvent, debounceTime } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(debounceTime(1000));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link audit}\n * @see {@link auditTime}\n * @see {@link debounce}\n * @see {@link sample}\n * @see {@link sampleTime}\n * @see {@link throttle}\n * @see {@link throttleTime}\n *\n * @param {number} dueTime The timeout duration in milliseconds (or the time\n * unit determined internally by the optional `scheduler`) for the window of\n * time required to wait for emission silence before emitting the most recent\n * source value.\n * @param {SchedulerLike} [scheduler=async] The {@link SchedulerLike} to use for\n * managing the timers that handle the timeout for each value.\n * @return A function that returns an Observable that delays the emissions of\n * the source Observable by the specified `dueTime`, and may drop some values\n * if they occur too frequently.\n */\nexport function debounceTime<T>(dueTime: number, scheduler: SchedulerLike = asyncScheduler): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let activeTask: Subscription | null = null;\n    let lastValue: T | null = null;\n    let lastTime: number | null = null;\n\n    const emit = () => {\n      if (activeTask) {\n        // We have a value! Free up memory first, then emit the value.\n        activeTask.unsubscribe();\n        activeTask = null;\n        const value = lastValue!;\n        lastValue = null;\n        subscriber.next(value);\n      }\n    };\n    function emitWhenIdle(this: SchedulerAction<unknown>) {\n      // This is called `dueTime` after the first value\n      // but we might have received new values during this window!\n\n      const targetTime = lastTime! 
+ dueTime;\n      const now = scheduler.now();\n      if (now < targetTime) {\n        // On that case, re-schedule to the new target\n        activeTask = this.schedule(undefined, targetTime - now);\n        subscriber.add(activeTask);\n        return;\n      }\n\n      emit();\n    }\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value: T) => {\n          lastValue = value;\n          lastTime = scheduler.now();\n\n          // Only set up a task if it's not already up\n          if (!activeTask) {\n            activeTask = scheduler.schedule(emitWhenIdle, dueTime);\n            subscriber.add(activeTask);\n          }\n        },\n        () => {\n          // Source completed.\n          // Emit any pending debounced values then complete\n          emit();\n          subscriber.complete();\n        },\n        // Pass all errors through to consumer.\n        undefined,\n        () => {\n          // Finalization.\n          lastValue = activeTask = null;\n        }\n      )\n    );\n  });\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Emits a given value if the source Observable completes without emitting any\n * `next` value, otherwise mirrors the source Observable.\n *\n * <span class=\"informal\">If the source Observable turns out to be empty, then\n * this operator will emit a default value.</span>\n *\n * ![](defaultIfEmpty.png)\n *\n * `defaultIfEmpty` emits the values emitted by the source Observable or a\n * specified default value if the source Observable is empty (completes without\n * having emitted any `next` value).\n *\n * ## Example\n *\n * If no clicks happen in 5 seconds, then emit 'no clicks'\n *\n * ```ts\n * import { fromEvent, takeUntil, interval, defaultIfEmpty } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const clicksBeforeFive = clicks.pipe(takeUntil(interval(5000)));\n * const result = clicksBeforeFive.pipe(defaultIfEmpty('no clicks'));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link empty}\n * @see {@link last}\n *\n * @param defaultValue The default value used if the source\n * Observable is empty.\n * @return A function that returns an Observable that emits either the\n * specified `defaultValue` if the source Observable emits no items, or the\n * values emitted by the source Observable.\n */\nexport function defaultIfEmpty<T, R>(defaultValue: R): OperatorFunction<T, T | R> {\n  return operate((source, subscriber) => {\n    let hasValue = false;\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          hasValue = true;\n          subscriber.next(value);\n        },\n        () => {\n          if (!hasValue) {\n            subscriber.next(defaultValue!);\n          }\n          subscriber.complete();\n        }\n      )\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction } from '../types';\nimport { EMPTY } from '../observable/empty';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Emits only the first `count` values emitted by the source Observable.\n *\n * <span class=\"informal\">Takes the first `count` values from the source, then\n * completes.</span>\n *\n * ![](take.png)\n *\n * `take` returns an Observable that emits only the first `count` values emitted\n * by the source Observable. 
If the source emits fewer than `count` values then\n * all of its values are emitted. After that, it completes, regardless if the\n * source completes.\n *\n * ## Example\n *\n * Take the first 5 seconds of an infinite 1-second interval Observable\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const intervalCount = interval(1000);\n * const takeFive = intervalCount.pipe(take(5));\n * takeFive.subscribe(x => console.log(x));\n *\n * // Logs:\n * // 0\n * // 1\n * // 2\n * // 3\n * // 4\n * ```\n *\n * @see {@link takeLast}\n * @see {@link takeUntil}\n * @see {@link takeWhile}\n * @see {@link skip}\n *\n * @param count The maximum number of `next` values to emit.\n * @return A function that returns an Observable that emits only the first\n * `count` values emitted by the source Observable, or all of the values from\n * the source if the source emits fewer than `count` values.\n */\nexport function take<T>(count: number): MonoTypeOperatorFunction<T> {\n  return count <= 0\n    ? // If we are taking no values, that's empty.\n      () => EMPTY\n    : operate((source, subscriber) => {\n        let seen = 0;\n        source.subscribe(\n          createOperatorSubscriber(subscriber, (value) => {\n            // Increment the number of values we have seen,\n            // then check it against the allowed count to see\n            // if we are still letting values through.\n            if (++seen <= count) {\n              subscriber.next(value);\n              // If we have met or passed our allowed count,\n              // we need to complete. We have to do <= here,\n              // because re-entrant code will increment `seen` twice.\n              if (count <= seen) {\n                subscriber.complete();\n              }\n            }\n          })\n        );\n      });\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { noop } from '../util/noop';\n\n/**\n * Ignores all items emitted by the source Observable and only passes calls of `complete` or `error`.\n *\n * ![](ignoreElements.png)\n *\n * The `ignoreElements` operator suppresses all items emitted by the source Observable,\n * but allows its termination notification (either `error` or `complete`) to pass through unchanged.\n *\n * If you do not care about the items being emitted by an Observable, but you do want to be notified\n * when it completes or when it terminates with an error, you can apply the `ignoreElements` operator\n * to the Observable, which will ensure that it will never call its observers\u2019 `next` handlers.\n *\n * ## Example\n *\n * Ignore all `next` emissions from the source\n *\n * ```ts\n * import { of, ignoreElements } from 'rxjs';\n *\n * of('you', 'talking', 'to', 'me')\n *   .pipe(ignoreElements())\n *   .subscribe({\n *     next: word => console.log(word),\n *     error: err => console.log('error:', err),\n *     complete: () => console.log('the end'),\n *   });\n *\n * // result:\n * // 'the end'\n * ```\n *\n * @return A function that returns an empty Observable that only calls\n * `complete` or `error`, based on which one is called by the source\n * Observable.\n */\nexport function ignoreElements(): OperatorFunction<unknown, never> {\n  return operate((source, subscriber) => {\n    source.subscribe(createOperatorSubscriber(subscriber, noop));\n  });\n}\n", "import { OperatorFunction } from '../types';\nimport { map } from './map';\n\n/** @deprecated To be removed in 
v9. Use {@link map} instead: `map(() => value)`. */\nexport function mapTo<R>(value: R): OperatorFunction<unknown, R>;\n/**\n * @deprecated Do not specify explicit type parameters. Signatures with type parameters\n * that cannot be inferred will be removed in v8. `mapTo` itself will be removed in v9,\n * use {@link map} instead: `map(() => value)`.\n * */\nexport function mapTo<T, R>(value: R): OperatorFunction<T, R>;\n\n/**\n * Emits the given constant value on the output Observable every time the source\n * Observable emits a value.\n *\n * <span class=\"informal\">Like {@link map}, but it maps every source value to\n * the same output value every time.</span>\n *\n * ![](mapTo.png)\n *\n * Takes a constant `value` as argument, and emits that whenever the source\n * Observable emits a value. In other words, ignores the actual source value,\n * and simply uses the emission moment to know when to emit the given `value`.\n *\n * ## Example\n *\n * Map every click to the string `'Hi'`\n *\n * ```ts\n * import { fromEvent, mapTo } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const greetings = clicks.pipe(mapTo('Hi'));\n *\n * greetings.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link map}\n *\n * @param value The value to map each source value to.\n * @return A function that returns an Observable that emits the given `value`\n * every time the source Observable emits.\n * @deprecated To be removed in v9. Use {@link map} instead: `map(() => value)`.\n */\nexport function mapTo<R>(value: R): OperatorFunction<unknown, R> {\n  return map(() => value);\n}\n", "import { Observable } from '../Observable';\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { concat } from '../observable/concat';\nimport { take } from './take';\nimport { ignoreElements } from './ignoreElements';\nimport { mapTo } from './mapTo';\nimport { mergeMap } from './mergeMap';\nimport { innerFrom } from '../observable/innerFrom';\n\n/** @deprecated The `subscriptionDelay` parameter will be removed in v8. */\nexport function delayWhen<T>(\n  delayDurationSelector: (value: T, index: number) => ObservableInput<any>,\n  subscriptionDelay: Observable<any>\n): MonoTypeOperatorFunction<T>;\nexport function delayWhen<T>(delayDurationSelector: (value: T, index: number) => ObservableInput<any>): MonoTypeOperatorFunction<T>;\n\n/**\n * Delays the emission of items from the source Observable by a given time span\n * determined by the emissions of another Observable.\n *\n * <span class=\"informal\">It's like {@link delay}, but the time span of the\n * delay duration is determined by a second Observable.</span>\n *\n * ![](delayWhen.png)\n *\n * `delayWhen` operator shifts each emitted value from the source Observable by\n * a time span determined by another Observable. 
When the source emits a value,\n * the `delayDurationSelector` function is called with the value emitted from\n * the source Observable as the first argument to the `delayDurationSelector`.\n * The `delayDurationSelector` function should return an {@link ObservableInput},\n * that is internally converted to an Observable that is called the \"duration\"\n * Observable.\n *\n * The source value is emitted on the output Observable only when the \"duration\"\n * Observable emits ({@link guide/glossary-and-semantics#next next}s) any value.\n * Upon that, the \"duration\" Observable gets unsubscribed.\n *\n * Before RxJS V7, the {@link guide/glossary-and-semantics#complete completion}\n * of the \"duration\" Observable would have been triggering the emission of the\n * source value to the output Observable, but with RxJS V7, this is not the case\n * anymore.\n *\n * Only next notifications (from the \"duration\" Observable) trigger values from\n * the source Observable to be passed to the output Observable. If the \"duration\"\n * Observable only emits the complete notification (without next), the value\n * emitted by the source Observable will never get to the output Observable - it\n * will be swallowed. If the \"duration\" Observable errors, the error will be\n * propagated to the output Observable.\n *\n * Optionally, `delayWhen` takes a second argument, `subscriptionDelay`, which\n * is an Observable. When `subscriptionDelay` emits its first value or\n * completes, the source Observable is subscribed to and starts behaving like\n * described in the previous paragraph. If `subscriptionDelay` is not provided,\n * `delayWhen` will subscribe to the source Observable as soon as the output\n * Observable is subscribed.\n *\n * ## Example\n *\n * Delay each click by a random amount of time, between 0 and 5 seconds\n *\n * ```ts\n * import { fromEvent, delayWhen, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const delayedClicks = clicks.pipe(\n *   delayWhen(() => interval(Math.random() * 5000))\n * );\n * delayedClicks.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link delay}\n * @see {@link throttle}\n * @see {@link throttleTime}\n * @see {@link debounce}\n * @see {@link debounceTime}\n * @see {@link sample}\n * @see {@link sampleTime}\n * @see {@link audit}\n * @see {@link auditTime}\n *\n * @param delayDurationSelector A function that returns an `ObservableInput` for\n * each `value` emitted by the source Observable, which is then used to delay the\n * emission of that `value` on the output Observable until the `ObservableInput`\n * returned from this function emits a next value. 
When called, beside `value`,\n * this function receives a zero-based `index` of the emission order.\n * @param subscriptionDelay An Observable that triggers the subscription to the\n * source Observable once it emits any value.\n * @return A function that returns an Observable that delays the emissions of\n * the source Observable by an amount of time specified by the Observable\n * returned by `delayDurationSelector`.\n */\nexport function delayWhen<T>(\n  delayDurationSelector: (value: T, index: number) => ObservableInput<any>,\n  subscriptionDelay?: Observable<any>\n): MonoTypeOperatorFunction<T> {\n  if (subscriptionDelay) {\n    // DEPRECATED PATH\n    return (source: Observable<T>) =>\n      concat(subscriptionDelay.pipe(take(1), ignoreElements()), source.pipe(delayWhen(delayDurationSelector)));\n  }\n\n  return mergeMap((value, index) => innerFrom(delayDurationSelector(value, index)).pipe(take(1), mapTo(value)));\n}\n", "import { asyncScheduler } from '../scheduler/async';\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { delayWhen } from './delayWhen';\nimport { timer } from '../observable/timer';\n\n/**\n * Delays the emission of items from the source Observable by a given timeout or\n * until a given Date.\n *\n * <span class=\"informal\">Time shifts each item by some specified amount of\n * milliseconds.</span>\n *\n * ![](delay.svg)\n *\n * If the delay argument is a Number, this operator time shifts the source\n * Observable by that amount of time expressed in milliseconds. The relative\n * time intervals between the values are preserved.\n *\n * If the delay argument is a Date, this operator time shifts the start of the\n * Observable execution until the given date occurs.\n *\n * ## Examples\n *\n * Delay each click by one second\n *\n * ```ts\n * import { fromEvent, delay } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const delayedClicks = clicks.pipe(delay(1000)); // each click emitted after 1 second\n * delayedClicks.subscribe(x => console.log(x));\n * ```\n *\n * Delay all clicks until a future date happens\n *\n * ```ts\n * import { fromEvent, delay } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const date = new Date('March 15, 2050 12:00:00'); // in the future\n * const delayedClicks = clicks.pipe(delay(date)); // click emitted only after that date\n * delayedClicks.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link delayWhen}\n * @see {@link throttle}\n * @see {@link throttleTime}\n * @see {@link debounce}\n * @see {@link debounceTime}\n * @see {@link sample}\n * @see {@link sampleTime}\n * @see {@link audit}\n * @see {@link auditTime}\n *\n * @param {number|Date} due The delay duration in milliseconds (a `number`) or\n * a `Date` until which the emission of the source items is delayed.\n * @param {SchedulerLike} [scheduler=async] The {@link SchedulerLike} to use for\n * managing the timers that handle the time-shift for each item.\n * @return A function that returns an Observable that delays the emissions of\n * the source Observable by the specified timeout or Date.\n */\nexport function delay<T>(due: number | Date, scheduler: SchedulerLike = asyncScheduler): MonoTypeOperatorFunction<T> {\n  const duration = timer(due, scheduler);\n  return delayWhen(() => duration);\n}\n", "import { MonoTypeOperatorFunction } from '../types';\nimport { identity } from '../util/identity';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from 
'./OperatorSubscriber';\n\nexport function distinctUntilChanged<T>(comparator?: (previous: T, current: T) => boolean): MonoTypeOperatorFunction<T>;\nexport function distinctUntilChanged<T, K>(\n  comparator: (previous: K, current: K) => boolean,\n  keySelector: (value: T) => K\n): MonoTypeOperatorFunction<T>;\n\n/**\n * Returns a result {@link Observable} that emits all values pushed by the source observable if they\n * are distinct in comparison to the last value the result observable emitted.\n *\n * When provided without parameters or with the first parameter (`{@link distinctUntilChanged#comparator comparator}`),\n * it behaves like this:\n *\n * 1. It will always emit the first value from the source.\n * 2. For all subsequent values pushed by the source, they will be compared to the previously emitted values\n *    using the provided `comparator` or an `===` equality check.\n * 3. If the value pushed by the source is determined to be unequal by this check, that value is emitted and\n *    becomes the new \"previously emitted value\" internally.\n *\n * When the second parameter (`{@link distinctUntilChanged#keySelector keySelector}`) is provided, the behavior\n * changes:\n *\n * 1. It will always emit the first value from the source.\n * 2. The `keySelector` will be run against all values, including the first value.\n * 3. For all values after the first, the selected key will be compared against the key selected from\n *    the previously emitted value using the `comparator`.\n * 4. If the keys are determined to be unequal by this check, the value (not the key), is emitted\n *    and the selected key from that value is saved for future comparisons against other keys.\n *\n * ## Examples\n *\n * A very basic example with no `{@link distinctUntilChanged#comparator comparator}`. Note that `1` is emitted more than once,\n * because it's distinct in comparison to the _previously emitted_ value,\n * not in comparison to _all other emitted values_.\n *\n * ```ts\n * import { of, distinctUntilChanged } from 'rxjs';\n *\n * of(1, 1, 1, 2, 2, 2, 1, 1, 3, 3)\n *   .pipe(distinctUntilChanged())\n *   .subscribe(console.log);\n * // Logs: 1, 2, 1, 3\n * ```\n *\n * With a `{@link distinctUntilChanged#comparator comparator}`, you can do custom comparisons. Let's say\n * you only want to emit a value when all of its components have\n * changed:\n *\n * ```ts\n * import { of, distinctUntilChanged } from 'rxjs';\n *\n * const totallyDifferentBuilds$ = of(\n *   { engineVersion: '1.1.0', transmissionVersion: '1.2.0' },\n *   { engineVersion: '1.1.0', transmissionVersion: '1.4.0' },\n *   { engineVersion: '1.3.0', transmissionVersion: '1.4.0' },\n *   { engineVersion: '1.3.0', transmissionVersion: '1.5.0' },\n *   { engineVersion: '2.0.0', transmissionVersion: '1.5.0' }\n * ).pipe(\n *   distinctUntilChanged((prev, curr) => {\n *     return (\n *       prev.engineVersion === curr.engineVersion ||\n *       prev.transmissionVersion === curr.transmissionVersion\n *     );\n *   })\n * );\n *\n * totallyDifferentBuilds$.subscribe(console.log);\n *\n * // Logs:\n * // { engineVersion: '1.1.0', transmissionVersion: '1.2.0' }\n * // { engineVersion: '1.3.0', transmissionVersion: '1.4.0' }\n * // { engineVersion: '2.0.0', transmissionVersion: '1.5.0' }\n * ```\n *\n * You can also provide a custom `{@link distinctUntilChanged#comparator comparator}` to check that emitted\n * changes are only in one direction. 
Let's say you only want to get\n * the next record temperature:\n *\n * ```ts\n * import { of, distinctUntilChanged } from 'rxjs';\n *\n * const temps$ = of(30, 31, 20, 34, 33, 29, 35, 20);\n *\n * const recordHighs$ = temps$.pipe(\n *   distinctUntilChanged((prevHigh, temp) => {\n *     // If the current temp is less than\n *     // or the same as the previous record,\n *     // the record hasn't changed.\n *     return temp <= prevHigh;\n *   })\n * );\n *\n * recordHighs$.subscribe(console.log);\n * // Logs: 30, 31, 34, 35\n * ```\n *\n * Selecting update events only when the `updatedBy` field shows\n * the account changed hands.\n *\n * ```ts\n * import { of, distinctUntilChanged } from 'rxjs';\n *\n * // A stream of updates to a given account\n * const accountUpdates$ = of(\n *   { updatedBy: 'blesh', data: [] },\n *   { updatedBy: 'blesh', data: [] },\n *   { updatedBy: 'ncjamieson', data: [] },\n *   { updatedBy: 'ncjamieson', data: [] },\n *   { updatedBy: 'blesh', data: [] }\n * );\n *\n * // We only want the events where it changed hands\n * const changedHands$ = accountUpdates$.pipe(\n *   distinctUntilChanged(undefined, update => update.updatedBy)\n * );\n *\n * changedHands$.subscribe(console.log);\n * // Logs:\n * // { updatedBy: 'blesh', data: Array[0] }\n * // { updatedBy: 'ncjamieson', data: Array[0] }\n * // { updatedBy: 'blesh', data: Array[0] }\n * ```\n *\n * @see {@link distinct}\n * @see {@link distinctUntilKeyChanged}\n *\n * @param comparator A function used to compare the previous and current keys for\n * equality. Defaults to a `===` check.\n * @param keySelector Used to select a key value to be passed to the `comparator`.\n *\n * @return A function that returns an Observable that emits items from the\n * source Observable with distinct values.\n */\nexport function distinctUntilChanged<T, K>(\n  comparator?: (previous: K, current: K) => boolean,\n  keySelector: (value: T) => K = identity as (value: T) => K\n): MonoTypeOperatorFunction<T> {\n  // We've been allowing `null` do be passed as the `compare`, so we can't do\n  // a default value for the parameter, because that will only work\n  // for `undefined`.\n  comparator = comparator ?? defaultCompare;\n\n  return operate((source, subscriber) => {\n    // The previous key, used to compare against keys selected\n    // from new arrivals to determine \"distinctiveness\".\n    let previousKey: K;\n    // Whether or not this is the first value we've gotten.\n    let first = true;\n\n    source.subscribe(\n      createOperatorSubscriber(subscriber, (value) => {\n        // We always call the key selector.\n        const currentKey = keySelector(value);\n\n        // If it's the first value, we always emit it.\n        // Otherwise, we compare this key to the previous key, and\n        // if the comparer returns false, we emit.\n        if (first || !comparator!(previousKey, currentKey)) {\n          // Update our state *before* we emit the value\n          // as emission can be the source of re-entrant code\n          // in functional libraries like this. 
We only really\n          // need to do this if it's the first value, or if the\n          // key we're tracking in previous needs to change.\n          first = false;\n          previousKey = currentKey;\n\n          // Emit the value!\n          subscriber.next(value);\n        }\n      })\n    );\n  });\n}\n\nfunction defaultCompare(a: any, b: any) {\n  return a === b;\n}\n", "import { distinctUntilChanged } from './distinctUntilChanged';\nimport { MonoTypeOperatorFunction } from '../types';\n\n/* tslint:disable:max-line-length */\nexport function distinctUntilKeyChanged<T>(key: keyof T): MonoTypeOperatorFunction<T>;\nexport function distinctUntilKeyChanged<T, K extends keyof T>(key: K, compare: (x: T[K], y: T[K]) => boolean): MonoTypeOperatorFunction<T>;\n/* tslint:enable:max-line-length */\n\n/**\n * Returns an Observable that emits all items emitted by the source Observable that are distinct by comparison from the previous item,\n * using a property accessed by using the key provided to check if the two items are distinct.\n *\n * If a comparator function is provided, then it will be called for each item to test for whether or not that value should be emitted.\n *\n * If a comparator function is not provided, an equality check is used by default.\n *\n * ## Examples\n *\n * An example comparing the name of persons\n *\n * ```ts\n * import { of, distinctUntilKeyChanged } from 'rxjs';\n *\n * of(\n *   { age: 4, name: 'Foo' },\n *   { age: 7, name: 'Bar' },\n *   { age: 5, name: 'Foo' },\n *   { age: 6, name: 'Foo' }\n * ).pipe(\n *   distinctUntilKeyChanged('name')\n * )\n * .subscribe(x => console.log(x));\n *\n * // displays:\n * // { age: 4, name: 'Foo' }\n * // { age: 7, name: 'Bar' }\n * // { age: 5, name: 'Foo' }\n * ```\n *\n * An example comparing the first letters of the name\n *\n * ```ts\n * import { of, distinctUntilKeyChanged } from 'rxjs';\n *\n * of(\n *   { age: 4, name: 'Foo1' },\n *   { age: 7, name: 'Bar' },\n *   { age: 5, name: 'Foo2' },\n *   { age: 6, name: 'Foo3' }\n * ).pipe(\n *   distinctUntilKeyChanged('name', (x, y) => x.substring(0, 3) === y.substring(0, 3))\n * )\n * .subscribe(x => console.log(x));\n *\n * // displays:\n * // { age: 4, name: 'Foo1' }\n * // { age: 7, name: 'Bar' }\n * // { age: 5, name: 'Foo2' }\n * ```\n *\n * @see {@link distinct}\n * @see {@link distinctUntilChanged}\n *\n * @param {string} key String key for object property lookup on each item.\n * @param {function} [compare] Optional comparison function called to test if an item is distinct from the previous item in the source.\n * @return A function that returns an Observable that emits items from the\n * source Observable with distinct values based on the key specified.\n */\nexport function distinctUntilKeyChanged<T, K extends keyof T>(key: K, compare?: (x: T[K], y: T[K]) => boolean): MonoTypeOperatorFunction<T> {\n  return distinctUntilChanged((x: T, y: T) => compare ? compare(x[key], y[key]) : x[key] === y[key]);\n}\n", "import { EmptyError } from '../util/EmptyError';\nimport { MonoTypeOperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * If the source observable completes without emitting a value, it will emit\n * an error. 
The error will be created at that time by the optional\n * `errorFactory` argument, otherwise, the error will be {@link EmptyError}.\n *\n * ![](throwIfEmpty.png)\n *\n * ## Example\n *\n * Throw an error if the document wasn't clicked within 1 second\n *\n * ```ts\n * import { fromEvent, takeUntil, timer, throwIfEmpty } from 'rxjs';\n *\n * const click$ = fromEvent(document, 'click');\n *\n * click$.pipe(\n *   takeUntil(timer(1000)),\n *   throwIfEmpty(() => new Error('The document was not clicked within 1 second'))\n * )\n * .subscribe({\n *   next() {\n *    console.log('The document was clicked');\n *   },\n *   error(err) {\n *     console.error(err.message);\n *   }\n * });\n * ```\n *\n * @param errorFactory A factory function called to produce the\n * error to be thrown when the source observable completes without emitting a\n * value.\n * @return A function that returns an Observable that throws an error if the\n * source Observable completed without emitting.\n */\nexport function throwIfEmpty<T>(errorFactory: () => any = defaultErrorFactory): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let hasValue = false;\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          hasValue = true;\n          subscriber.next(value);\n        },\n        () => (hasValue ? subscriber.complete() : subscriber.error(errorFactory()))\n      )\n    );\n  });\n}\n\nfunction defaultErrorFactory() {\n  return new EmptyError();\n}\n", "/** prettier */\nimport { Observable } from '../Observable';\nimport { concat } from '../observable/concat';\nimport { of } from '../observable/of';\nimport { MonoTypeOperatorFunction, SchedulerLike, OperatorFunction, ValueFromArray } from '../types';\n\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `concatAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function endWith<T>(scheduler: SchedulerLike): MonoTypeOperatorFunction<T>;\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `concatAll`. Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function endWith<T, A extends unknown[] = T[]>(\n  ...valuesAndScheduler: [...A, SchedulerLike]\n): OperatorFunction<T, T | ValueFromArray<A>>;\n\nexport function endWith<T, A extends unknown[] = T[]>(...values: A): OperatorFunction<T, T | ValueFromArray<A>>;\n\n/**\n * Returns an observable that will emit all values from the source, then synchronously emit\n * the provided value(s) immediately after the source completes.\n *\n * NOTE: Passing a last argument of a Scheduler is _deprecated_, and may result in incorrect\n * types in TypeScript.\n *\n * This is useful for knowing when an observable ends. Particularly when paired with an\n * operator like {@link takeUntil}\n *\n * ![](endWith.png)\n *\n * ## Example\n *\n * Emit values to know when an interval starts and stops. 
The interval will\n * stop when a user clicks anywhere on the document.\n *\n * ```ts\n * import { interval, map, fromEvent, startWith, takeUntil, endWith } from 'rxjs';\n *\n * const ticker$ = interval(5000).pipe(\n *   map(() => 'tick')\n * );\n *\n * const documentClicks$ = fromEvent(document, 'click');\n *\n * ticker$.pipe(\n *   startWith('interval started'),\n *   takeUntil(documentClicks$),\n *   endWith('interval ended by click')\n * )\n * .subscribe(x => console.log(x));\n *\n * // Result (assuming a user clicks after 15 seconds)\n * // 'interval started'\n * // 'tick'\n * // 'tick'\n * // 'tick'\n * // 'interval ended by click'\n * ```\n *\n * @see {@link startWith}\n * @see {@link concat}\n * @see {@link takeUntil}\n *\n * @param values Items you want the modified Observable to emit last.\n * @return A function that returns an Observable that emits all values from the\n * source, then synchronously emits the provided value(s) immediately after the\n * source completes.\n */\nexport function endWith<T>(...values: Array<T | SchedulerLike>): MonoTypeOperatorFunction<T> {\n  return (source: Observable<T>) => concat(source, of(...values)) as Observable<T>;\n}\n", "import { MonoTypeOperatorFunction } from '../types';\nimport { operate } from '../util/lift';\n\n/**\n * Returns an Observable that mirrors the source Observable, but will call a specified function when\n * the source terminates on complete or error.\n * The specified function will also be called when the subscriber explicitly unsubscribes.\n *\n * ## Examples\n *\n * Execute callback function when the observable completes\n *\n * ```ts\n * import { interval, take, finalize } from 'rxjs';\n *\n * // emit value in sequence every 1 second\n * const source = interval(1000);\n * const example = source.pipe(\n *   take(5), //take only the first 5 values\n *   finalize(() => console.log('Sequence complete')) // Execute when the observable completes\n * );\n * const subscribe = example.subscribe(val => console.log(val));\n *\n * // results:\n * // 0\n * // 1\n * // 2\n * // 3\n * // 4\n * // 'Sequence complete'\n * ```\n *\n * Execute callback function when the subscriber explicitly unsubscribes\n *\n * ```ts\n * import { interval, finalize, tap, noop, timer } from 'rxjs';\n *\n * const source = interval(100).pipe(\n *   finalize(() => console.log('[finalize] Called')),\n *   tap({\n *     next: () => console.log('[next] Called'),\n *     error: () => console.log('[error] Not called'),\n *     complete: () => console.log('[tap complete] Not called')\n *   })\n * );\n *\n * const sub = source.subscribe({\n *   next: x => console.log(x),\n *   error: noop,\n *   complete: () => console.log('[complete] Not called')\n * });\n *\n * timer(150).subscribe(() => sub.unsubscribe());\n *\n * // results:\n * // '[next] Called'\n * // 0\n * // '[finalize] Called'\n * ```\n *\n * @param {function} callback Function to be called when source terminates.\n * @return A function that returns an Observable that mirrors the source, but\n * will call the specified function on termination.\n */\nexport function finalize<T>(callback: () => void): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    // TODO: This try/finally was only added for `useDeprecatedSynchronousErrorHandling`.\n    // REMOVE THIS WHEN THAT HOT GARBAGE IS REMOVED IN V8.\n    try {\n      source.subscribe(subscriber);\n    } finally {\n      subscriber.add(callback);\n    }\n  });\n}\n", "import { Observable } from '../Observable';\nimport { EmptyError } 
from '../util/EmptyError';\nimport { OperatorFunction, TruthyTypesOf } from '../types';\nimport { filter } from './filter';\nimport { take } from './take';\nimport { defaultIfEmpty } from './defaultIfEmpty';\nimport { throwIfEmpty } from './throwIfEmpty';\nimport { identity } from '../util/identity';\n\nexport function first<T, D = T>(predicate?: null, defaultValue?: D): OperatorFunction<T, T | D>;\nexport function first<T>(predicate: BooleanConstructor): OperatorFunction<T, TruthyTypesOf<T>>;\nexport function first<T, D>(predicate: BooleanConstructor, defaultValue: D): OperatorFunction<T, TruthyTypesOf<T> | D>;\nexport function first<T, S extends T>(\n  predicate: (value: T, index: number, source: Observable<T>) => value is S,\n  defaultValue?: S\n): OperatorFunction<T, S>;\nexport function first<T, S extends T, D>(\n  predicate: (value: T, index: number, source: Observable<T>) => value is S,\n  defaultValue: D\n): OperatorFunction<T, S | D>;\nexport function first<T, D = T>(\n  predicate: (value: T, index: number, source: Observable<T>) => boolean,\n  defaultValue?: D\n): OperatorFunction<T, T | D>;\n\n/**\n * Emits only the first value (or the first value that meets some condition)\n * emitted by the source Observable.\n *\n * <span class=\"informal\">Emits only the first value. Or emits only the first\n * value that passes some test.</span>\n *\n * ![](first.png)\n *\n * If called with no arguments, `first` emits the first value of the source\n * Observable, then completes. If called with a `predicate` function, `first`\n * emits the first value of the source that matches the specified condition. Throws an error if\n * `defaultValue` was not provided and a matching element is not found.\n *\n * ## Examples\n *\n * Emit only the first click that happens on the DOM\n *\n * ```ts\n * import { fromEvent, first } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(first());\n * result.subscribe(x => console.log(x));\n * ```\n *\n * Emits the first click that happens on a DIV\n *\n * ```ts\n * import { fromEvent, first } from 'rxjs';\n *\n * const div = document.createElement('div');\n * div.style.cssText = 'width: 200px; height: 200px; background: #09c;';\n * document.body.appendChild(div);\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(first(ev => (<HTMLElement>ev.target).tagName === 'DIV'));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link filter}\n * @see {@link find}\n * @see {@link take}\n *\n * @throws {EmptyError} Delivers an EmptyError to the Observer's `error`\n * callback if the Observable completes before any `next` notification was sent.\n * This is how `first()` is different from {@link take}(1) which completes instead.\n *\n * @param {function(value: T, index: number, source: Observable<T>): boolean} [predicate]\n * An optional function called with each item to test for condition matching.\n * @param {D} [defaultValue] The default value emitted in case no valid value\n * was found on the source.\n * @return A function that returns an Observable that emits the first item that\n * matches the condition.\n */\nexport function first<T, D>(\n  predicate?: ((value: T, index: number, source: Observable<T>) => boolean) | null,\n  defaultValue?: D\n): OperatorFunction<T, T | D> {\n  const hasDefaultValue = arguments.length >= 2;\n  return (source: Observable<T>) =>\n    source.pipe(\n      predicate ? 
filter((v, i) => predicate(v, i, source)) : identity,\n      take(1),\n      hasDefaultValue ? defaultIfEmpty(defaultValue!) : throwIfEmpty(() => new EmptyError())\n    );\n}\n", "import { EMPTY } from '../observable/empty';\nimport { MonoTypeOperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/**\n * Waits for the source to complete, then emits the last N values from the source,\n * as specified by the `count` argument.\n *\n * ![](takeLast.png)\n *\n * `takeLast` results in an observable that will hold values up to `count` values in memory,\n * until the source completes. It then pushes all values in memory to the consumer, in the\n * order they were received from the source, then notifies the consumer that it is\n * complete.\n *\n * If for some reason the source completes before the `count` supplied to `takeLast` is reached,\n * all values received until that point are emitted, and then completion is notified.\n *\n * **Warning**: Using `takeLast` with an observable that never completes will result\n * in an observable that never emits a value.\n *\n * ## Example\n *\n * Take the last 3 values of an Observable with many values\n *\n * ```ts\n * import { range, takeLast } from 'rxjs';\n *\n * const many = range(1, 100);\n * const lastThree = many.pipe(takeLast(3));\n * lastThree.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link take}\n * @see {@link takeUntil}\n * @see {@link takeWhile}\n * @see {@link skip}\n *\n * @param count The maximum number of values to emit from the end of\n * the sequence of values emitted by the source Observable.\n * @return A function that returns an Observable that emits at most the last\n * `count` values emitted by the source Observable.\n */\nexport function takeLast<T>(count: number): MonoTypeOperatorFunction<T> {\n  return count <= 0\n    ? () => EMPTY\n    : operate((source, subscriber) => {\n        // This buffer will hold the values we are going to emit\n        // when the source completes. 
Since we only want to take the\n        // last N values, we can't emit until we're sure we're not getting\n        // any more values.\n        let buffer: T[] = [];\n        source.subscribe(\n          createOperatorSubscriber(\n            subscriber,\n            (value) => {\n              // Add the most recent value onto the end of our buffer.\n              buffer.push(value);\n              // If our buffer is now larger than the number of values we\n              // want to take, we remove the oldest value from the buffer.\n              count < buffer.length && buffer.shift();\n            },\n            () => {\n              // The source completed, we now know what are last values\n              // are, emit them in the order they were received.\n              for (const value of buffer) {\n                subscriber.next(value);\n              }\n              subscriber.complete();\n            },\n            // Errors are passed through to the consumer\n            undefined,\n            () => {\n              // During finalization release the values in our buffer.\n              buffer = null!;\n            }\n          )\n        );\n      });\n}\n", "import { ObservableInput, ObservableInputTuple, OperatorFunction, SchedulerLike } from '../types';\nimport { operate } from '../util/lift';\nimport { argsOrArgArray } from '../util/argsOrArgArray';\nimport { mergeAll } from './mergeAll';\nimport { popNumber, popScheduler } from '../util/args';\nimport { from } from '../observable/from';\n\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(...sources: [...ObservableInputTuple<A>]): OperatorFunction<T, T | A[number]>;\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(\n  ...sourcesAndConcurrency: [...ObservableInputTuple<A>, number]\n): OperatorFunction<T, T | A[number]>;\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(\n  ...sourcesAndScheduler: [...ObservableInputTuple<A>, SchedulerLike]\n): OperatorFunction<T, T | A[number]>;\n/** @deprecated Replaced with {@link mergeWith}. Will be removed in v8. */\nexport function merge<T, A extends readonly unknown[]>(\n  ...sourcesAndConcurrencyAndScheduler: [...ObservableInputTuple<A>, number, SchedulerLike]\n): OperatorFunction<T, T | A[number]>;\n\nexport function merge<T>(...args: unknown[]): OperatorFunction<T, unknown> {\n  const scheduler = popScheduler(args);\n  const concurrent = popNumber(args, Infinity);\n  args = argsOrArgArray(args);\n\n  return operate((source, subscriber) => {\n    mergeAll(concurrent)(from([source, ...(args as ObservableInput<T>[])], scheduler)).subscribe(subscriber);\n  });\n}\n", "import { ObservableInputTuple, OperatorFunction } from '../types';\nimport { merge } from './merge';\n\n/**\n * Merge the values from all observables to a single observable result.\n *\n * Creates an observable, that when subscribed to, subscribes to the source\n * observable, and all other sources provided as arguments. 
All values from\n * every source are emitted from the resulting subscription.\n *\n * When all sources complete, the resulting observable will complete.\n *\n * When any source errors, the resulting observable will error.\n *\n * ## Example\n *\n * Joining all outputs from multiple user input event streams\n *\n * ```ts\n * import { fromEvent, map, mergeWith } from 'rxjs';\n *\n * const clicks$ = fromEvent(document, 'click').pipe(map(() => 'click'));\n * const mousemoves$ = fromEvent(document, 'mousemove').pipe(map(() => 'mousemove'));\n * const dblclicks$ = fromEvent(document, 'dblclick').pipe(map(() => 'dblclick'));\n *\n * mousemoves$\n *   .pipe(mergeWith(clicks$, dblclicks$))\n *   .subscribe(x => console.log(x));\n *\n * // result (assuming user interactions)\n * // 'mousemove'\n * // 'mousemove'\n * // 'mousemove'\n * // 'click'\n * // 'click'\n * // 'dblclick'\n * ```\n *\n * @see {@link merge}\n *\n * @param otherSources the sources to combine the current source with.\n * @return A function that returns an Observable that merges the values from\n * all given Observables.\n */\nexport function mergeWith<T, A extends readonly unknown[]>(\n  ...otherSources: [...ObservableInputTuple<A>]\n): OperatorFunction<T, T | A[number]> {\n  return merge(...otherSources);\n}\n", "import { Subscription } from '../Subscription';\nimport { EMPTY } from '../observable/empty';\nimport { operate } from '../util/lift';\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { timer } from '../observable/timer';\n\nexport interface RepeatConfig {\n  /**\n   * The number of times to repeat the source. Defaults to `Infinity`.\n   */\n  count?: number;\n\n  /**\n   * If a `number`, will delay the repeat of the source by that number of milliseconds.\n   * If a function, it will provide the number of times the source has been subscribed to,\n   * and the return value should be a valid observable input that will notify when the source\n   * should be repeated. If the notifier observable is empty, the result will complete.\n   */\n  delay?: number | ((count: number) => ObservableInput<any>);\n}\n\n/**\n * Returns an Observable that will resubscribe to the source stream when the source stream completes.\n *\n * <span class=\"informal\">Repeats all values emitted on the source. It's like {@link retry}, but for non error cases.</span>\n *\n * ![](repeat.png)\n *\n * Repeat will output values from a source until the source completes, then it will resubscribe to the\n * source a specified number of times, with a specified delay. Repeat can be particularly useful in\n * combination with closing operators like {@link take}, {@link takeUntil}, {@link first}, or {@link takeWhile},\n * as it can be used to restart a source again from scratch.\n *\n * Repeat is very similar to {@link retry}, where {@link retry} will resubscribe to the source in the error case, but\n * `repeat` will resubscribe if the source completes.\n *\n * Note that `repeat` will _not_ catch errors. 
Use {@link retry} for that.\n *\n * - `repeat(0)` returns an empty observable\n * - `repeat()` will repeat forever\n * - `repeat({ delay: 200 })` will repeat forever, with a delay of 200ms between repetitions.\n * - `repeat({ count: 2, delay: 400 })` will repeat twice, with a delay of 400ms between repetitions.\n * - `repeat({ delay: (count) => timer(count * 1000) })` will repeat forever, but will have a delay that grows by one second for each repetition.\n *\n * ## Example\n *\n * Repeat a message stream\n *\n * ```ts\n * import { of, repeat } from 'rxjs';\n *\n * const source = of('Repeat message');\n * const result = source.pipe(repeat(3));\n *\n * result.subscribe(x => console.log(x));\n *\n * // Results\n * // 'Repeat message'\n * // 'Repeat message'\n * // 'Repeat message'\n * ```\n *\n * Repeat 3 values, 2 times\n *\n * ```ts\n * import { interval, take, repeat } from 'rxjs';\n *\n * const source = interval(1000);\n * const result = source.pipe(take(3), repeat(2));\n *\n * result.subscribe(x => console.log(x));\n *\n * // Results every second\n * // 0\n * // 1\n * // 2\n * // 0\n * // 1\n * // 2\n * ```\n *\n * Defining two complex repeats with delays on the same source.\n * Note that the second repeat cannot be called until the first\n * repeat as exhausted it's count.\n *\n * ```ts\n * import { defer, of, repeat } from 'rxjs';\n *\n * const source = defer(() => {\n *    return of(`Hello, it is ${new Date()}`)\n * });\n *\n * source.pipe(\n *    // Repeat 3 times with a delay of 1 second between repetitions\n *    repeat({\n *      count: 3,\n *      delay: 1000,\n *    }),\n *\n *    // *Then* repeat forever, but with an exponential step-back\n *    // maxing out at 1 minute.\n *    repeat({\n *      delay: (count) => timer(Math.min(60000, 2 ^ count * 1000))\n *    })\n * )\n * ```\n *\n * @see {@link repeatWhen}\n * @see {@link retry}\n *\n * @param count The number of times the source Observable items are repeated, a count of 0 will yield\n * an empty Observable.\n */\nexport function repeat<T>(countOrConfig?: number | RepeatConfig): MonoTypeOperatorFunction<T> {\n  let count = Infinity;\n  let delay: RepeatConfig['delay'];\n\n  if (countOrConfig != null) {\n    if (typeof countOrConfig === 'object') {\n      ({ count = Infinity, delay } = countOrConfig);\n    } else {\n      count = countOrConfig;\n    }\n  }\n\n  return count <= 0\n    ? () => EMPTY\n    : operate((source, subscriber) => {\n        let soFar = 0;\n        let sourceSub: Subscription | null;\n\n        const resubscribe = () => {\n          sourceSub?.unsubscribe();\n          sourceSub = null;\n          if (delay != null) {\n            const notifier = typeof delay === 'number' ? 
timer(delay) : innerFrom(delay(soFar));\n            const notifierSubscriber = createOperatorSubscriber(subscriber, () => {\n              notifierSubscriber.unsubscribe();\n              subscribeToSource();\n            });\n            notifier.subscribe(notifierSubscriber);\n          } else {\n            subscribeToSource();\n          }\n        };\n\n        const subscribeToSource = () => {\n          let syncUnsub = false;\n          sourceSub = source.subscribe(\n            createOperatorSubscriber(subscriber, undefined, () => {\n              if (++soFar < count) {\n                if (sourceSub) {\n                  resubscribe();\n                } else {\n                  syncUnsub = true;\n                }\n              } else {\n                subscriber.complete();\n              }\n            })\n          );\n\n          if (syncUnsub) {\n            resubscribe();\n          }\n        };\n\n        subscribeToSource();\n      });\n}\n", "import { OperatorFunction } from '../types';\nimport { operate } from '../util/lift';\nimport { scanInternals } from './scanInternals';\n\nexport function scan<V, A = V>(accumulator: (acc: A | V, value: V, index: number) => A): OperatorFunction<V, V | A>;\nexport function scan<V, A>(accumulator: (acc: A, value: V, index: number) => A, seed: A): OperatorFunction<V, A>;\nexport function scan<V, A, S>(accumulator: (acc: A | S, value: V, index: number) => A, seed: S): OperatorFunction<V, A>;\n\n// TODO: link to a \"redux pattern\" section in the guide (location TBD)\n\n/**\n * Useful for encapsulating and managing state. Applies an accumulator (or \"reducer function\")\n * to each value from the source after an initial state is established -- either via\n * a `seed` value (second argument), or from the first value from the source.\n *\n * <span class=\"informal\">It's like {@link reduce}, but emits the current\n * accumulation state after each update</span>\n *\n * ![](scan.png)\n *\n * This operator maintains an internal state and emits it after processing each value as follows:\n *\n * 1. First value arrives\n *   - If a `seed` value was supplied (as the second argument to `scan`), let `state = seed` and `value = firstValue`.\n *   - If NO `seed` value was supplied (no second argument), let `state = firstValue` and go to 3.\n * 2. Let `state = accumulator(state, value)`.\n *   - If an error is thrown by `accumulator`, notify the consumer of an error. The process ends.\n * 3. Emit `state`.\n * 4. Next value arrives, let `value = nextValue`, go to 2.\n *\n * ## Examples\n *\n * An average of previous numbers. This example shows how\n * not providing a `seed` can prime the stream with the\n * first value from the source.\n *\n * ```ts\n * import { of, scan, map } from 'rxjs';\n *\n * const numbers$ = of(1, 2, 3);\n *\n * numbers$\n *   .pipe(\n *     // Get the sum of the numbers coming in.\n *     scan((total, n) => total + n),\n *     // Get the average by dividing the sum by the total number\n *     // received so far (which is 1 more than the zero-based index).\n *     map((sum, index) => sum / (index + 1))\n *   )\n *   .subscribe(console.log);\n * ```\n *\n * The Fibonacci sequence. This example shows how you can use\n * a seed to prime accumulation process. Also... you know... Fibonacci.\n * So important to like, computers and stuff that its whiteboarded\n * in job interviews. Now you can show them the Rx version! 
(Please don't, haha)\n *\n * ```ts\n * import { interval, scan, map, startWith } from 'rxjs';\n *\n * const firstTwoFibs = [0, 1];\n * // An endless stream of Fibonacci numbers.\n * const fibonacci$ = interval(1000).pipe(\n *   // Scan to get the fibonacci numbers (after 0, 1)\n *   scan(([a, b]) => [b, a + b], firstTwoFibs),\n *   // Get the second number in the tuple, it's the one you calculated\n *   map(([, n]) => n),\n *   // Start with our first two digits :)\n *   startWith(...firstTwoFibs)\n * );\n *\n * fibonacci$.subscribe(console.log);\n * ```\n *\n * @see {@link expand}\n * @see {@link mergeScan}\n * @see {@link reduce}\n * @see {@link switchScan}\n *\n * @param accumulator A \"reducer function\". This will be called for each value after an initial state is\n * acquired.\n * @param seed The initial state. If this is not provided, the first value from the source will\n * be used as the initial state, and emitted without going through the accumulator. All subsequent values\n * will be processed by the accumulator function. If this is provided, all values will go through\n * the accumulator function.\n * @return A function that returns an Observable of the accumulated values.\n */\nexport function scan<V, A, S>(accumulator: (acc: V | A | S, value: V, index: number) => A, seed?: S): OperatorFunction<V, V | A> {\n  // providing a seed of `undefined` *should* be valid and trigger\n  // hasSeed! so don't use `seed !== undefined` checks!\n  // For this reason, we have to check it here at the original call site\n  // otherwise inside Operator/Subscriber we won't know if `undefined`\n  // means they didn't provide anything or if they literally provided `undefined`\n  return operate(scanInternals(accumulator, seed as S, arguments.length >= 2, true));\n}\n", "import { innerFrom } from '../observable/innerFrom';\nimport { Subject } from '../Subject';\nimport { SafeSubscriber } from '../Subscriber';\nimport { Subscription } from '../Subscription';\nimport { MonoTypeOperatorFunction, SubjectLike, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\n\nexport interface ShareConfig<T> {\n  /**\n   * The factory used to create the subject that will connect the source observable to\n   * multicast consumers.\n   */\n  connector?: () => SubjectLike<T>;\n  /**\n   * If `true`, the resulting observable will reset internal state on error from source and return to a \"cold\" state. This\n   * allows the resulting observable to be \"retried\" in the event of an error.\n   * If `false`, when an error comes from the source it will push the error into the connecting subject, and the subject\n   * will remain the connecting subject, meaning the resulting observable will not go \"cold\" again, and subsequent retries\n   * or resubscriptions will resubscribe to that same subject. In all cases, RxJS subjects will emit the same error again, however\n   * {@link ReplaySubject} will also push its buffered values before pushing the error.\n   * It is also possible to pass a notifier factory returning an `ObservableInput` instead which grants more fine-grained\n   * control over how and when the reset should happen. This allows behaviors like conditional or delayed resets.\n   */\n  resetOnError?: boolean | ((error: any) => ObservableInput<any>);\n  /**\n   * If `true`, the resulting observable will reset internal state on completion from source and return to a \"cold\" state. 
This\n   * allows the resulting observable to be \"repeated\" after it is done.\n   * If `false`, when the source completes, it will push the completion through the connecting subject, and the subject\n   * will remain the connecting subject, meaning the resulting observable will not go \"cold\" again, and subsequent repeats\n   * or resubscriptions will resubscribe to that same subject.\n   * It is also possible to pass a notifier factory returning an `ObservableInput` instead which grants more fine-grained\n   * control over how and when the reset should happen. This allows behaviors like conditional or delayed resets.\n   */\n  resetOnComplete?: boolean | (() => ObservableInput<any>);\n  /**\n   * If `true`, when the number of subscribers to the resulting observable reaches zero due to those subscribers unsubscribing, the\n   * internal state will be reset and the resulting observable will return to a \"cold\" state. This means that the next\n   * time the resulting observable is subscribed to, a new subject will be created and the source will be subscribed to\n   * again.\n   * If `false`, when the number of subscribers to the resulting observable reaches zero due to unsubscription, the subject\n   * will remain connected to the source, and new subscriptions to the result will be connected through that same subject.\n   * It is also possible to pass a notifier factory returning an `ObservableInput` instead which grants more fine-grained\n   * control over how and when the reset should happen. This allows behaviors like conditional or delayed resets.\n   */\n  resetOnRefCountZero?: boolean | (() => ObservableInput<any>);\n}\n\nexport function share<T>(): MonoTypeOperatorFunction<T>;\n\nexport function share<T>(options: ShareConfig<T>): MonoTypeOperatorFunction<T>;\n\n/**\n * Returns a new Observable that multicasts (shares) the original Observable. As long as there is at least one\n * Subscriber this Observable will be subscribed and emitting data. When all subscribers have unsubscribed it will\n * unsubscribe from the source Observable. Because the Observable is multicasting it makes the stream `hot`.\n * This is an alias for `multicast(() => new Subject()), refCount()`.\n *\n * The subscription to the underlying source Observable can be reset (unsubscribe and resubscribe for new subscribers),\n * if the subscriber count to the shared observable drops to 0, or if the source Observable errors or completes. It is\n * possible to use notifier factories for the resets to allow for behaviors like conditional or delayed resets. Please\n * note that resetting on error or complete of the source Observable does not behave like a transparent retry or restart\n * of the source because the error or complete will be forwarded to all subscribers and their subscription will be\n * closed. Only new subscribers after a reset on error or complete happened will cause a fresh subscription to the\n * source. 
To achieve transparent retries or restarts pipe the source through appropriate operators before sharing.\n *\n * ![](share.png)\n *\n * ## Example\n *\n * Generate new multicast Observable from the `source` Observable value\n *\n * ```ts\n * import { interval, tap, map, take, share } from 'rxjs';\n *\n * const source = interval(1000).pipe(\n *   tap(x => console.log('Processing: ', x)),\n *   map(x => x * x),\n *   take(6),\n *   share()\n * );\n *\n * source.subscribe(x => console.log('subscription 1: ', x));\n * source.subscribe(x => console.log('subscription 2: ', x));\n *\n * // Logs:\n * // Processing: 0\n * // subscription 1: 0\n * // subscription 2: 0\n * // Processing: 1\n * // subscription 1: 1\n * // subscription 2: 1\n * // Processing: 2\n * // subscription 1: 4\n * // subscription 2: 4\n * // Processing: 3\n * // subscription 1: 9\n * // subscription 2: 9\n * // Processing: 4\n * // subscription 1: 16\n * // subscription 2: 16\n * // Processing: 5\n * // subscription 1: 25\n * // subscription 2: 25\n * ```\n *\n * ## Example with notifier factory: Delayed reset\n *\n * ```ts\n * import { interval, take, share, timer } from 'rxjs';\n *\n * const source = interval(1000).pipe(\n *   take(3),\n *   share({\n *     resetOnRefCountZero: () => timer(1000)\n *   })\n * );\n *\n * const subscriptionOne = source.subscribe(x => console.log('subscription 1: ', x));\n * setTimeout(() => subscriptionOne.unsubscribe(), 1300);\n *\n * setTimeout(() => source.subscribe(x => console.log('subscription 2: ', x)), 1700);\n *\n * setTimeout(() => source.subscribe(x => console.log('subscription 3: ', x)), 5000);\n *\n * // Logs:\n * // subscription 1:  0\n * // (subscription 1 unsubscribes here)\n * // (subscription 2 subscribes here ~400ms later, source was not reset)\n * // subscription 2:  1\n * // subscription 2:  2\n * // (subscription 2 unsubscribes here)\n * // (subscription 3 subscribes here ~2000ms later, source did reset before)\n * // subscription 3:  0\n * // subscription 3:  1\n * // subscription 3:  2\n * ```\n *\n * @see {@link shareReplay}\n *\n * @return A function that returns an Observable that mirrors the source.\n */\nexport function share<T>(options: ShareConfig<T> = {}): MonoTypeOperatorFunction<T> {\n  const { connector = () => new Subject<T>(), resetOnError = true, resetOnComplete = true, resetOnRefCountZero = true } = options;\n  // It's necessary to use a wrapper here, as the _operator_ must be\n  // referentially transparent. 
Otherwise, it cannot be used in calls to the\n  // static `pipe` function - to create a partial pipeline.\n  //\n  // The _operator function_ - the function returned by the _operator_ - will\n  // not be referentially transparent - as it shares its source - but the\n  // _operator function_ is called when the complete pipeline is composed via a\n  // call to a source observable's `pipe` method - not when the static `pipe`\n  // function is called.\n  return (wrapperSource) => {\n    let connection: SafeSubscriber<T> | undefined;\n    let resetConnection: Subscription | undefined;\n    let subject: SubjectLike<T> | undefined;\n    let refCount = 0;\n    let hasCompleted = false;\n    let hasErrored = false;\n\n    const cancelReset = () => {\n      resetConnection?.unsubscribe();\n      resetConnection = undefined;\n    };\n    // Used to reset the internal state to a \"cold\"\n    // state, as though it had never been subscribed to.\n    const reset = () => {\n      cancelReset();\n      connection = subject = undefined;\n      hasCompleted = hasErrored = false;\n    };\n    const resetAndUnsubscribe = () => {\n      // We need to capture the connection before\n      // we reset (if we need to reset).\n      const conn = connection;\n      reset();\n      conn?.unsubscribe();\n    };\n\n    return operate<T, T>((source, subscriber) => {\n      refCount++;\n      if (!hasErrored && !hasCompleted) {\n        cancelReset();\n      }\n\n      // Create the subject if we don't have one yet. Grab a local reference to\n      // it as well, which avoids non-null assertions when using it and, if we\n      // connect to it now, then error/complete need a reference after it was\n      // reset.\n      const dest = (subject = subject ?? connector());\n\n      // Add the finalization directly to the subscriber - instead of returning it -\n      // so that the handling of the subscriber's unsubscription will be wired\n      // up _before_ the subscription to the source occurs. This is done so that\n      // the assignment to the source connection's `closed` property will be seen\n      // by synchronous firehose sources.\n      subscriber.add(() => {\n        refCount--;\n\n        // If we're resetting on refCount === 0, and it's 0, we only want to do\n        // that on \"unsubscribe\", really. 
Resetting on error or completion is a different\n        // configuration.\n        if (refCount === 0 && !hasErrored && !hasCompleted) {\n          resetConnection = handleReset(resetAndUnsubscribe, resetOnRefCountZero);\n        }\n      });\n\n      // The following line adds the subscription to the subscriber passed.\n      // Basically, `subscriber === dest.subscribe(subscriber)` is `true`.\n      dest.subscribe(subscriber);\n\n      if (\n        !connection &&\n        // Check this shareReplay is still activate - it can be reset to 0\n        // and be \"unsubscribed\" _before_ it actually subscribes.\n        // If we were to subscribe then, it'd leak and get stuck.\n        refCount > 0\n      ) {\n        // We need to create a subscriber here - rather than pass an observer and\n        // assign the returned subscription to connection - because it's possible\n        // for reentrant subscriptions to the shared observable to occur and in\n        // those situations we want connection to be already-assigned so that we\n        // don't create another connection to the source.\n        connection = new SafeSubscriber({\n          next: (value) => dest.next(value),\n          error: (err) => {\n            hasErrored = true;\n            cancelReset();\n            resetConnection = handleReset(reset, resetOnError, err);\n            dest.error(err);\n          },\n          complete: () => {\n            hasCompleted = true;\n            cancelReset();\n            resetConnection = handleReset(reset, resetOnComplete);\n            dest.complete();\n          },\n        });\n        innerFrom(source).subscribe(connection);\n      }\n    })(wrapperSource);\n  };\n}\n\nfunction handleReset<T extends unknown[] = never[]>(\n  reset: () => void,\n  on: boolean | ((...args: T) => ObservableInput<any>),\n  ...args: T\n): Subscription | undefined {\n  if (on === true) {\n    reset();\n    return;\n  }\n\n  if (on === false) {\n    return;\n  }\n\n  const onSubscriber = new SafeSubscriber({\n    next: () => {\n      onSubscriber.unsubscribe();\n      reset();\n    },\n  });\n\n  return innerFrom(on(...args)).subscribe(onSubscriber);\n}\n", "import { ReplaySubject } from '../ReplaySubject';\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { share } from './share';\n\nexport interface ShareReplayConfig {\n  bufferSize?: number;\n  windowTime?: number;\n  refCount: boolean;\n  scheduler?: SchedulerLike;\n}\n\nexport function shareReplay<T>(config: ShareReplayConfig): MonoTypeOperatorFunction<T>;\nexport function shareReplay<T>(bufferSize?: number, windowTime?: number, scheduler?: SchedulerLike): MonoTypeOperatorFunction<T>;\n\n/**\n * Share source and replay specified number of emissions on subscription.\n *\n * This operator is a specialization of `replay` that connects to a source observable\n * and multicasts through a `ReplaySubject` constructed with the specified arguments.\n * A successfully completed source will stay cached in the `shareReplay`ed observable forever,\n * but an errored source can be retried.\n *\n * ## Why use `shareReplay`?\n *\n * You generally want to use `shareReplay` when you have side-effects or taxing computations\n * that you do not wish to be executed amongst multiple subscribers.\n * It may also be valuable in situations where you know you will have late subscribers to\n * a stream that need access to previously emitted values.\n * This ability to replay values on subscription is what differentiates {@link share} and 
`shareReplay`.\n *\n * ## Reference counting\n *\n * By default `shareReplay` will use `refCount` of false, meaning that it will _not_ unsubscribe the\n * source when the reference counter drops to zero, i.e. the inner `ReplaySubject` will _not_ be unsubscribed\n * (and potentially run for ever).\n * This is the default as it is expected that `shareReplay` is often used to keep around expensive to setup\n * observables which we want to keep running instead of having to do the expensive setup again.\n *\n * As of RXJS version 6.4.0 a new overload signature was added to allow for manual control over what\n * happens when the operators internal reference counter drops to zero.\n * If `refCount` is true, the source will be unsubscribed from once the reference count drops to zero, i.e.\n * the inner `ReplaySubject` will be unsubscribed. All new subscribers will receive value emissions from a\n * new `ReplaySubject` which in turn will cause a new subscription to the source observable.\n *\n * ## Examples\n *\n * Example with a third subscriber coming late to the party\n *\n * ```ts\n * import { interval, take, shareReplay } from 'rxjs';\n *\n * const shared$ = interval(2000).pipe(\n *   take(6),\n *   shareReplay(3)\n * );\n *\n * shared$.subscribe(x => console.log('sub A: ', x));\n * shared$.subscribe(y => console.log('sub B: ', y));\n *\n * setTimeout(() => {\n *   shared$.subscribe(y => console.log('sub C: ', y));\n * }, 11000);\n *\n * // Logs:\n * // (after ~2000 ms)\n * // sub A: 0\n * // sub B: 0\n * // (after ~4000 ms)\n * // sub A: 1\n * // sub B: 1\n * // (after ~6000 ms)\n * // sub A: 2\n * // sub B: 2\n * // (after ~8000 ms)\n * // sub A: 3\n * // sub B: 3\n * // (after ~10000 ms)\n * // sub A: 4\n * // sub B: 4\n * // (after ~11000 ms, sub C gets the last 3 values)\n * // sub C: 2\n * // sub C: 3\n * // sub C: 4\n * // (after ~12000 ms)\n * // sub A: 5\n * // sub B: 5\n * // sub C: 5\n * ```\n *\n * Example for `refCount` usage\n *\n * ```ts\n * import { Observable, tap, interval, shareReplay, take } from 'rxjs';\n *\n * const log = <T>(name: string, source: Observable<T>) => source.pipe(\n *   tap({\n *     subscribe: () => console.log(`${ name }: subscribed`),\n *     next: value => console.log(`${ name }: ${ value }`),\n *     complete: () => console.log(`${ name }: completed`),\n *     finalize: () => console.log(`${ name }: unsubscribed`)\n *   })\n * );\n *\n * const obs$ = log('source', interval(1000));\n *\n * const shared$ = log('shared', obs$.pipe(\n *   shareReplay({ bufferSize: 1, refCount: true }),\n *   take(2)\n * ));\n *\n * shared$.subscribe(x => console.log('sub A: ', x));\n * shared$.subscribe(y => console.log('sub B: ', y));\n *\n * // PRINTS:\n * // shared: subscribed <-- reference count = 1\n * // source: subscribed\n * // shared: subscribed <-- reference count = 2\n * // source: 0\n * // shared: 0\n * // sub A: 0\n * // shared: 0\n * // sub B: 0\n * // source: 1\n * // shared: 1\n * // sub A: 1\n * // shared: completed <-- take(2) completes the subscription for sub A\n * // shared: unsubscribed <-- reference count = 1\n * // shared: 1\n * // sub B: 1\n * // shared: completed <-- take(2) completes the subscription for sub B\n * // shared: unsubscribed <-- reference count = 0\n * // source: unsubscribed <-- replaySubject unsubscribes from source observable because the reference count dropped to 0 and refCount is true\n *\n * // In case of refCount being false, the unsubscribe is never called on the source and the source would keep on emitting, even if no 
subscribers\n * // are listening.\n * // source: 2\n * // source: 3\n * // source: 4\n * // ...\n * ```\n *\n * @see {@link publish}\n * @see {@link share}\n * @see {@link publishReplay}\n *\n * @param configOrBufferSize Maximum element count of the replay buffer or {@link ShareReplayConfig configuration}\n * object.\n * @param windowTime Maximum time length of the replay buffer in milliseconds.\n * @param scheduler Scheduler where connected observers within the selector function\n * will be invoked on.\n * @return A function that returns an Observable sequence that contains the\n * elements of a sequence produced by multicasting the source sequence within a\n * selector function.\n */\nexport function shareReplay<T>(\n  configOrBufferSize?: ShareReplayConfig | number,\n  windowTime?: number,\n  scheduler?: SchedulerLike\n): MonoTypeOperatorFunction<T> {\n  let bufferSize: number;\n  let refCount = false;\n  if (configOrBufferSize && typeof configOrBufferSize === 'object') {\n    ({ bufferSize = Infinity, windowTime = Infinity, refCount = false, scheduler } = configOrBufferSize);\n  } else {\n    bufferSize = (configOrBufferSize ?? Infinity) as number;\n  }\n  return share<T>({\n    connector: () => new ReplaySubject(bufferSize, windowTime, scheduler),\n    resetOnError: true,\n    resetOnComplete: false,\n    resetOnRefCountZero: refCount,\n  });\n}\n", "import { MonoTypeOperatorFunction } from '../types';\nimport { filter } from './filter';\n\n/**\n * Returns an Observable that skips the first `count` items emitted by the source Observable.\n *\n * ![](skip.png)\n *\n * Skips the values until the sent notifications are equal or less than provided skip count. It raises\n * an error if skip count is equal or more than the actual number of emits and source raises an error.\n *\n * ## Example\n *\n * Skip the values before the emission\n *\n * ```ts\n * import { interval, skip } from 'rxjs';\n *\n * // emit every half second\n * const source = interval(500);\n * // skip the first 10 emitted values\n * const result = source.pipe(skip(10));\n *\n * result.subscribe(value => console.log(value));\n * // output: 10...11...12...13...\n * ```\n *\n * @see {@link last}\n * @see {@link skipWhile}\n * @see {@link skipUntil}\n * @see {@link skipLast}\n *\n * @param {Number} count - The number of times, items emitted by source Observable should be skipped.\n * @return A function that returns an Observable that skips the first `count`\n * values emitted by the source Observable.\n */\nexport function skip<T>(count: number): MonoTypeOperatorFunction<T> {\n  return filter((_, index) => count <= index);\n}\n", "import { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { noop } from '../util/noop';\n\n/**\n * Returns an Observable that skips items emitted by the source Observable until a second Observable emits an item.\n *\n * The `skipUntil` operator causes the observable stream to skip the emission of values until the passed in observable\n * emits the first value. 
This can be particularly useful in combination with user interactions, responses of HTTP\n * requests or waiting for specific times to pass by.\n *\n * ![](skipUntil.png)\n *\n * Internally, the `skipUntil` operator subscribes to the passed in `notifier` `ObservableInput` (which gets converted\n * to an Observable) in order to recognize the emission of its first value. When `notifier` emits next, the operator\n * unsubscribes from it and starts emitting the values of the *source* observable until it completes or errors. It\n * will never let the *source* observable emit any values if the `notifier` completes or throws an error without\n * emitting a value before.\n *\n * ## Example\n *\n * In the following example, all emitted values of the interval observable are skipped until the user clicks anywhere\n * within the page\n *\n * ```ts\n * import { interval, fromEvent, skipUntil } from 'rxjs';\n *\n * const intervalObservable = interval(1000);\n * const click = fromEvent(document, 'click');\n *\n * const emitAfterClick = intervalObservable.pipe(\n *   skipUntil(click)\n * );\n * // clicked at 4.6s. output: 5...6...7...8........ or\n * // clicked at 7.3s. output: 8...9...10..11.......\n * emitAfterClick.subscribe(value => console.log(value));\n * ```\n *\n * @see {@link last}\n * @see {@link skip}\n * @see {@link skipWhile}\n * @see {@link skipLast}\n *\n * @param notifier An `ObservableInput` that has to emit an item before the source Observable elements begin to\n * be mirrored by the resulting Observable.\n * @return A function that returns an Observable that skips items from the\n * source Observable until the `notifier` Observable emits an item, then emits the\n * remaining items.\n */\nexport function skipUntil<T>(notifier: ObservableInput<any>): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let taking = false;\n\n    const skipSubscriber = createOperatorSubscriber(\n      subscriber,\n      () => {\n        skipSubscriber?.unsubscribe();\n        taking = true;\n      },\n      noop\n    );\n\n    innerFrom(notifier).subscribe(skipSubscriber);\n\n    source.subscribe(createOperatorSubscriber(subscriber, (value) => taking && subscriber.next(value)));\n  });\n}\n", "import { concat } from '../observable/concat';\nimport { OperatorFunction, SchedulerLike, ValueFromArray } from '../types';\nimport { popScheduler } from '../util/args';\nimport { operate } from '../util/lift';\n\n// Devs are more likely to pass null or undefined than they are a scheduler\n// without accompanying values. To make things easier for (naughty) devs who\n// use the `strictNullChecks: false` TypeScript compiler option, these\n// overloads with explicit null and undefined values are included.\n\nexport function startWith<T>(value: null): OperatorFunction<T, T | null>;\nexport function startWith<T>(value: undefined): OperatorFunction<T, T | undefined>;\n\n/** @deprecated The `scheduler` parameter will be removed in v8. Use `scheduled` and `concatAll`. 
Details: https://rxjs.dev/deprecations/scheduler-argument */\nexport function startWith<T, A extends readonly unknown[] = T[]>(\n  ...valuesAndScheduler: [...A, SchedulerLike]\n): OperatorFunction<T, T | ValueFromArray<A>>;\nexport function startWith<T, A extends readonly unknown[] = T[]>(...values: A): OperatorFunction<T, T | ValueFromArray<A>>;\n\n/**\n * Returns an observable that, at the moment of subscription, will synchronously emit all\n * values provided to this operator, then subscribe to the source and mirror all of its emissions\n * to subscribers.\n *\n * This is a useful way to know when subscription has occurred on an existing observable.\n *\n * <span class=\"informal\">First emits its arguments in order, and then any\n * emissions from the source.</span>\n *\n * ![](startWith.png)\n *\n * ## Examples\n *\n * Emit a value when a timer starts.\n *\n * ```ts\n * import { timer, map, startWith } from 'rxjs';\n *\n * timer(1000)\n *   .pipe(\n *     map(() => 'timer emit'),\n *     startWith('timer start')\n *   )\n *   .subscribe(x => console.log(x));\n *\n * // results:\n * // 'timer start'\n * // 'timer emit'\n * ```\n *\n * @param values Items you want the modified Observable to emit first.\n * @return A function that returns an Observable that synchronously emits\n * provided values before subscribing to the source Observable.\n *\n * @see {@link endWith}\n * @see {@link finalize}\n * @see {@link concat}\n */\nexport function startWith<T, D>(...values: D[]): OperatorFunction<T, T | D> {\n  const scheduler = popScheduler(values);\n  return operate((source, subscriber) => {\n    // Here we can't pass `undefined` as a scheduler, because if we did, the\n    // code inside of `concat` would be confused by the `undefined`, and treat it\n    // like an invalid observable. So we have to split it two different ways.\n    (scheduler ? concat(values, source, scheduler) : concat(values, source)).subscribe(subscriber);\n  });\n}\n", "import { Subscriber } from '../Subscriber';\nimport { ObservableInput, OperatorFunction, ObservedValueOf } from '../types';\nimport { innerFrom } from '../observable/innerFrom';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\n/* tslint:disable:max-line-length */\nexport function switchMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. Details: https://rxjs.dev/deprecations/resultSelector */\nexport function switchMap<T, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: undefined\n): OperatorFunction<T, ObservedValueOf<O>>;\n/** @deprecated The `resultSelector` parameter will be removed in v8. Use an inner `map` instead. 
Details: https://rxjs.dev/deprecations/resultSelector */\nexport function switchMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector: (outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R\n): OperatorFunction<T, R>;\n/* tslint:enable:max-line-length */\n\n/**\n * Projects each source value to an Observable which is merged in the output\n * Observable, emitting values only from the most recently projected Observable.\n *\n * <span class=\"informal\">Maps each value to an Observable, then flattens all of\n * these inner Observables using {@link switchAll}.</span>\n *\n * ![](switchMap.png)\n *\n * Returns an Observable that emits items based on applying a function that you\n * supply to each item emitted by the source Observable, where that function\n * returns an (so-called \"inner\") Observable. Each time it observes one of these\n * inner Observables, the output Observable begins emitting the items emitted by\n * that inner Observable. When a new inner Observable is emitted, `switchMap`\n * stops emitting items from the earlier-emitted inner Observable and begins\n * emitting items from the new one. It continues to behave like this for\n * subsequent inner Observables.\n *\n * ## Example\n *\n * Generate new Observable according to source Observable values\n *\n * ```ts\n * import { of, switchMap } from 'rxjs';\n *\n * const switched = of(1, 2, 3).pipe(switchMap(x => of(x, x ** 2, x ** 3)));\n * switched.subscribe(x => console.log(x));\n * // outputs\n * // 1\n * // 1\n * // 1\n * // 2\n * // 4\n * // 8\n * // 3\n * // 9\n * // 27\n * ```\n *\n * Restart an interval Observable on every click event\n *\n * ```ts\n * import { fromEvent, switchMap, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(switchMap(() => interval(1000)));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link concatMap}\n * @see {@link exhaustMap}\n * @see {@link mergeMap}\n * @see {@link switchAll}\n * @see {@link switchMapTo}\n *\n * @param {function(value: T, index: number): ObservableInput} project A function\n * that, when applied to an item emitted by the source Observable, returns an\n * Observable.\n * @return A function that returns an Observable that emits the result of\n * applying the projection function (and the optional deprecated\n * `resultSelector`) to each item emitted by the source Observable and taking\n * only the values from the most recently projected inner Observable.\n */\nexport function switchMap<T, R, O extends ObservableInput<any>>(\n  project: (value: T, index: number) => O,\n  resultSelector?: (outerValue: T, innerValue: ObservedValueOf<O>, outerIndex: number, innerIndex: number) => R\n): OperatorFunction<T, ObservedValueOf<O> | R> {\n  return operate((source, subscriber) => {\n    let innerSubscriber: Subscriber<ObservedValueOf<O>> | null = null;\n    let index = 0;\n    // Whether or not the source subscription has completed\n    let isComplete = false;\n\n    // We only complete the result if the source is complete AND we don't have an active inner subscription.\n    // This is called both when the source completes and when the inners complete.\n    const checkComplete = () => isComplete && !innerSubscriber && subscriber.complete();\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        (value) => {\n          // Cancel the previous inner subscription if there was one\n          
innerSubscriber?.unsubscribe();\n          let innerIndex = 0;\n          const outerIndex = index++;\n          // Start the next inner subscription\n          innerFrom(project(value, outerIndex)).subscribe(\n            (innerSubscriber = createOperatorSubscriber(\n              subscriber,\n              // When we get a new inner value, next it through. Note that this is\n              // handling the deprecate result selector here. This is because with this architecture\n              // it ends up being smaller than using the map operator.\n              (innerValue) => subscriber.next(resultSelector ? resultSelector(value, innerValue, outerIndex, innerIndex++) : innerValue),\n              () => {\n                // The inner has completed. Null out the inner subscriber to\n                // free up memory and to signal that we have no inner subscription\n                // currently.\n                innerSubscriber = null!;\n                checkComplete();\n              }\n            ))\n          );\n        },\n        () => {\n          isComplete = true;\n          checkComplete();\n        }\n      )\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { noop } from '../util/noop';\n\n/**\n * Emits the values emitted by the source Observable until a `notifier`\n * Observable emits a value.\n *\n * <span class=\"informal\">Lets values pass until a second Observable,\n * `notifier`, emits a value. Then, it completes.</span>\n *\n * ![](takeUntil.png)\n *\n * `takeUntil` subscribes and begins mirroring the source Observable. It also\n * monitors a second Observable, `notifier` that you provide. If the `notifier`\n * emits a value, the output Observable stops mirroring the source Observable\n * and completes. 
If the `notifier` doesn't emit any value and completes\n * then `takeUntil` will pass all values.\n *\n * ## Example\n *\n * Tick every second until the first click happens\n *\n * ```ts\n * import { interval, fromEvent, takeUntil } from 'rxjs';\n *\n * const source = interval(1000);\n * const clicks = fromEvent(document, 'click');\n * const result = source.pipe(takeUntil(clicks));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link take}\n * @see {@link takeLast}\n * @see {@link takeWhile}\n * @see {@link skip}\n *\n * @param {Observable} notifier The Observable whose first emitted value will\n * cause the output Observable of `takeUntil` to stop emitting values from the\n * source Observable.\n * @return A function that returns an Observable that emits the values from the\n * source Observable until `notifier` emits its first value.\n */\nexport function takeUntil<T>(notifier: ObservableInput<any>): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    innerFrom(notifier).subscribe(createOperatorSubscriber(subscriber, () => subscriber.complete(), noop));\n    !subscriber.closed && source.subscribe(subscriber);\n  });\n}\n", "import { OperatorFunction, MonoTypeOperatorFunction, TruthyTypesOf } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\n\nexport function takeWhile<T>(predicate: BooleanConstructor, inclusive: true): MonoTypeOperatorFunction<T>;\nexport function takeWhile<T>(predicate: BooleanConstructor, inclusive: false): OperatorFunction<T, TruthyTypesOf<T>>;\nexport function takeWhile<T>(predicate: BooleanConstructor): OperatorFunction<T, TruthyTypesOf<T>>;\nexport function takeWhile<T, S extends T>(predicate: (value: T, index: number) => value is S): OperatorFunction<T, S>;\nexport function takeWhile<T, S extends T>(predicate: (value: T, index: number) => value is S, inclusive: false): OperatorFunction<T, S>;\nexport function takeWhile<T>(predicate: (value: T, index: number) => boolean, inclusive?: boolean): MonoTypeOperatorFunction<T>;\n\n/**\n * Emits values emitted by the source Observable so long as each value satisfies\n * the given `predicate`, and then completes as soon as this `predicate` is not\n * satisfied.\n *\n * <span class=\"informal\">Takes values from the source only while they pass the\n * condition given. When the first value does not satisfy, it completes.</span>\n *\n * ![](takeWhile.png)\n *\n * `takeWhile` subscribes and begins mirroring the source Observable. Each value\n * emitted on the source is given to the `predicate` function which returns a\n * boolean, representing a condition to be satisfied by the source values. 
The\n * output Observable emits the source values until such time as the `predicate`\n * returns false, at which point `takeWhile` stops mirroring the source\n * Observable and completes the output Observable.\n *\n * ## Example\n *\n * Emit click events only while the clientX property is greater than 200\n *\n * ```ts\n * import { fromEvent, takeWhile } from 'rxjs';\n *\n * const clicks = fromEvent<PointerEvent>(document, 'click');\n * const result = clicks.pipe(takeWhile(ev => ev.clientX > 200));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link take}\n * @see {@link takeLast}\n * @see {@link takeUntil}\n * @see {@link skip}\n *\n * @param {function(value: T, index: number): boolean} predicate A function that\n * evaluates a value emitted by the source Observable and returns a boolean.\n * Also takes the (zero-based) index as the second argument.\n * @param {boolean} inclusive When set to `true` the value that caused\n * `predicate` to return `false` will also be emitted.\n * @return A function that returns an Observable that emits values from the\n * source Observable so long as each value satisfies the condition defined by\n * the `predicate`, then completes.\n */\nexport function takeWhile<T>(predicate: (value: T, index: number) => boolean, inclusive = false): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    let index = 0;\n    source.subscribe(\n      createOperatorSubscriber(subscriber, (value) => {\n        const result = predicate(value, index++);\n        (result || inclusive) && subscriber.next(value);\n        !result && subscriber.complete();\n      })\n    );\n  });\n}\n", "import { MonoTypeOperatorFunction, Observer } from '../types';\nimport { isFunction } from '../util/isFunction';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { identity } from '../util/identity';\n\n/**\n * An extension to the {@link Observer} interface used only by the {@link tap} operator.\n *\n * It provides a useful set of callbacks a user can register to do side-effects in\n * cases other than what the usual {@link Observer} callbacks are\n * ({@link guide/glossary-and-semantics#next next},\n * {@link guide/glossary-and-semantics#error error} and/or\n * {@link guide/glossary-and-semantics#complete complete}).\n *\n * ## Example\n *\n * ```ts\n * import { fromEvent, switchMap, tap, interval, take } from 'rxjs';\n *\n * const source$ = fromEvent(document, 'click');\n * const result$ = source$.pipe(\n *   switchMap((_, i) => i % 2 === 0\n *     ? 
fromEvent(document, 'mousemove').pipe(\n *         tap({\n *           subscribe: () => console.log('Subscribed to the mouse move events after click #' + i),\n *           unsubscribe: () => console.log('Mouse move events #' + i + ' unsubscribed'),\n *           finalize: () => console.log('Mouse move events #' + i + ' finalized')\n *         })\n *       )\n *     : interval(1_000).pipe(\n *         take(5),\n *         tap({\n *           subscribe: () => console.log('Subscribed to the 1-second interval events after click #' + i),\n *           unsubscribe: () => console.log('1-second interval events #' + i + ' unsubscribed'),\n *           finalize: () => console.log('1-second interval events #' + i + ' finalized')\n *         })\n *       )\n *   )\n * );\n *\n * const subscription = result$.subscribe({\n *   next: console.log\n * });\n *\n * setTimeout(() => {\n *   console.log('Unsubscribe after 60 seconds');\n *   subscription.unsubscribe();\n * }, 60_000);\n * ```\n */\nexport interface TapObserver<T> extends Observer<T> {\n  /**\n   * The callback that `tap` operator invokes at the moment when the source Observable\n   * gets subscribed to.\n   */\n  subscribe: () => void;\n  /**\n   * The callback that `tap` operator invokes when an explicit\n   * {@link guide/glossary-and-semantics#unsubscription unsubscribe} happens. It won't get invoked on\n   * `error` or `complete` events.\n   */\n  unsubscribe: () => void;\n  /**\n   * The callback that `tap` operator invokes when any kind of\n   * {@link guide/glossary-and-semantics#finalization finalization} happens - either when\n   * the source Observable `error`s or `complete`s or when it gets explicitly unsubscribed\n   * by the user. There is no difference in using this callback or the {@link finalize}\n   * operator, but if you're already using `tap` operator, you can use this callback\n   * instead. You'd get the same result in either case.\n   */\n  finalize: () => void;\n}\nexport function tap<T>(observerOrNext?: Partial<TapObserver<T>> | ((value: T) => void)): MonoTypeOperatorFunction<T>;\n/** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\nexport function tap<T>(\n  next?: ((value: T) => void) | null,\n  error?: ((error: any) => void) | null,\n  complete?: (() => void) | null\n): MonoTypeOperatorFunction<T>;\n\n/**\n * Used to perform side-effects for notifications from the source observable\n *\n * <span class=\"informal\">Used when you want to affect outside state with a notification without altering the notification</span>\n *\n * ![](tap.png)\n *\n * Tap is designed to allow the developer a designated place to perform side effects. While you _could_ perform side-effects\n * inside of a `map` or a `mergeMap`, that would make their mapping functions impure, which isn't always a big deal, but will\n * make it so you can't do things like memoize those functions. 
The `tap` operator is designed solely for such side-effects to\n * help you remove side-effects from other operations.\n *\n * For any notification, next, error, or complete, `tap` will call the appropriate callback you have provided to it, via a function\n * reference, or a partial observer, then pass that notification down the stream.\n *\n * The observable returned by `tap` is an exact mirror of the source, with one exception: Any error that occurs -- synchronously -- in a handler\n * provided to `tap` will be emitted as an error from the returned observable.\n *\n * > Be careful! You can mutate objects as they pass through the `tap` operator's handlers.\n *\n * The most common use of `tap` is actually for debugging. You can place a `tap(console.log)` anywhere\n * in your observable `pipe`, log out the notifications as they are emitted by the source returned by the previous\n * operation.\n *\n * ## Examples\n *\n * Check a random number before it is handled. Below is an observable that will use a random number between 0 and 1,\n * and emit `'big'` or `'small'` depending on the size of that number. But we wanted to log what the original number\n * was, so we have added a `tap(console.log)`.\n *\n * ```ts\n * import { of, tap, map } from 'rxjs';\n *\n * of(Math.random()).pipe(\n *   tap(console.log),\n *   map(n => n > 0.5 ? 'big' : 'small')\n * ).subscribe(console.log);\n * ```\n *\n * Using `tap` to analyze a value and force an error. Below is an observable where in our system we only\n * want to emit numbers 3 or less we get from another source. We can force our observable to error\n * using `tap`.\n *\n * ```ts\n * import { of, tap } from 'rxjs';\n *\n * const source = of(1, 2, 3, 4, 5);\n *\n * source.pipe(\n *   tap(n => {\n *     if (n > 3) {\n *       throw new TypeError(`Value ${ n } is greater than 3`);\n *     }\n *   })\n * )\n * .subscribe({ next: console.log, error: err => console.log(err.message) });\n * ```\n *\n * We want to know when an observable completes before moving on to the next observable. The system\n * below will emit a random series of `'X'` characters from 3 different observables in sequence. The\n * only way we know when one observable completes and moves to the next one, in this case, is because\n * we have added a `tap` with the side effect of logging to console.\n *\n * ```ts\n * import { of, concatMap, interval, take, map, tap } from 'rxjs';\n *\n * of(1, 2, 3).pipe(\n *   concatMap(n => interval(1000).pipe(\n *     take(Math.round(Math.random() * 10)),\n *     map(() => 'X'),\n *     tap({ complete: () => console.log(`Done with ${ n }`) })\n *   ))\n * )\n * .subscribe(console.log);\n * ```\n *\n * @see {@link finalize}\n * @see {@link TapObserver}\n *\n * @param observerOrNext A next handler or partial observer\n * @param error An error handler\n * @param complete A completion handler\n * @return A function that returns an Observable identical to the source, but\n * runs the specified Observer or callback(s) for each item.\n */\nexport function tap<T>(\n  observerOrNext?: Partial<TapObserver<T>> | ((value: T) => void) | null,\n  error?: ((e: any) => void) | null,\n  complete?: (() => void) | null\n): MonoTypeOperatorFunction<T> {\n  // We have to check to see not only if next is a function,\n  // but if error or complete were passed. This is because someone\n  // could technically call tap like `tap(null, fn)` or `tap(null, null, fn)`.\n  const tapObserver =\n    isFunction(observerOrNext) || error || complete\n      ? 
// tslint:disable-next-line: no-object-literal-type-assertion\n        ({ next: observerOrNext as Exclude<typeof observerOrNext, Partial<TapObserver<T>>>, error, complete } as Partial<TapObserver<T>>)\n      : observerOrNext;\n\n  return tapObserver\n    ? operate((source, subscriber) => {\n        tapObserver.subscribe?.();\n        let isUnsub = true;\n        source.subscribe(\n          createOperatorSubscriber(\n            subscriber,\n            (value) => {\n              tapObserver.next?.(value);\n              subscriber.next(value);\n            },\n            () => {\n              isUnsub = false;\n              tapObserver.complete?.();\n              subscriber.complete();\n            },\n            (err) => {\n              isUnsub = false;\n              tapObserver.error?.(err);\n              subscriber.error(err);\n            },\n            () => {\n              if (isUnsub) {\n                tapObserver.unsubscribe?.();\n              }\n              tapObserver.finalize?.();\n            }\n          )\n        );\n      })\n    : // Tap was called with no valid tap observer or handler\n      // (e.g. `tap(null, null, null)` or `tap(null)` or `tap()`)\n      // so we're going to just mirror the source.\n      identity;\n}\n", "import { Subscription } from '../Subscription';\n\nimport { MonoTypeOperatorFunction, ObservableInput } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\n\n/**\n * An object interface used by {@link throttle} or {@link throttleTime} that ensure\n * configuration options of these operators.\n *\n * @see {@link throttle}\n * @see {@link throttleTime}\n */\nexport interface ThrottleConfig {\n  /**\n   * If `true`, the resulting Observable will emit the first value from the source\n   * Observable at the **start** of the \"throttling\" process (when starting an\n   * internal timer that prevents other emissions from the source to pass through).\n   * If `false`, it will not emit the first value from the source Observable at the\n   * start of the \"throttling\" process.\n   *\n   * If not provided, defaults to: `true`.\n   */\n  leading?: boolean;\n  /**\n   * If `true`, the resulting Observable will emit the last value from the source\n   * Observable at the **end** of the \"throttling\" process (when ending an internal\n   * timer that prevents other emissions from the source to pass through).\n   * If `false`, it will not emit the last value from the source Observable at the\n   * end of the \"throttling\" process.\n   *\n   * If not provided, defaults to: `false`.\n   */\n  trailing?: boolean;\n}\n\n/**\n * Emits a value from the source Observable, then ignores subsequent source\n * values for a duration determined by another Observable, then repeats this\n * process.\n *\n * <span class=\"informal\">It's like {@link throttleTime}, but the silencing\n * duration is determined by a second Observable.</span>\n *\n * ![](throttle.svg)\n *\n * `throttle` emits the source Observable values on the output Observable\n * when its internal timer is disabled, and ignores source values when the timer\n * is enabled. Initially, the timer is disabled. As soon as the first source\n * value arrives, it is forwarded to the output Observable, and then the timer\n * is enabled by calling the `durationSelector` function with the source value,\n * which returns the \"duration\" Observable. 
When the duration Observable emits a\n * value, the timer is disabled, and this process repeats for the\n * next source value.\n *\n * ## Example\n *\n * Emit clicks at a rate of at most one click per second\n *\n * ```ts\n * import { fromEvent, throttle, interval } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(throttle(() => interval(1000)));\n *\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link audit}\n * @see {@link debounce}\n * @see {@link delayWhen}\n * @see {@link sample}\n * @see {@link throttleTime}\n *\n * @param durationSelector A function that receives a value from the source\n * Observable, for computing the silencing duration for each source value,\n * returned as an `ObservableInput`.\n * @param config A configuration object to define `leading` and `trailing`\n * behavior. Defaults to `{ leading: true, trailing: false }`.\n * @return A function that returns an Observable that performs the throttle\n * operation to limit the rate of emissions from the source.\n */\nexport function throttle<T>(durationSelector: (value: T) => ObservableInput<any>, config?: ThrottleConfig): MonoTypeOperatorFunction<T> {\n  return operate((source, subscriber) => {\n    const { leading = true, trailing = false } = config ?? {};\n    let hasValue = false;\n    let sendValue: T | null = null;\n    let throttled: Subscription | null = null;\n    let isComplete = false;\n\n    const endThrottling = () => {\n      throttled?.unsubscribe();\n      throttled = null;\n      if (trailing) {\n        send();\n        isComplete && subscriber.complete();\n      }\n    };\n\n    const cleanupThrottling = () => {\n      throttled = null;\n      isComplete && subscriber.complete();\n    };\n\n    const startThrottle = (value: T) =>\n      (throttled = innerFrom(durationSelector(value)).subscribe(createOperatorSubscriber(subscriber, endThrottling, cleanupThrottling)));\n\n    const send = () => {\n      if (hasValue) {\n        // Ensure we clear out our value and hasValue flag\n        // before we emit, otherwise reentrant code can cause\n        // issues here.\n        hasValue = false;\n        const value = sendValue!;\n        sendValue = null;\n        // Emit the value.\n        subscriber.next(value);\n        !isComplete && startThrottle(value);\n      }\n    };\n\n    source.subscribe(\n      createOperatorSubscriber(\n        subscriber,\n        // Regarding the presence of throttled.closed in the following\n        // conditions, if a synchronous duration selector is specified - weird,\n        // but legal - an already-closed subscription will be assigned to\n        // throttled, so the subscription's closed property needs to be checked,\n        // too.\n        (value) => {\n          hasValue = true;\n          sendValue = value;\n          !(throttled && !throttled.closed) && (leading ? 
send() : startThrottle(value));\n        },\n        () => {\n          isComplete = true;\n          !(trailing && hasValue && throttled && !throttled.closed) && subscriber.complete();\n        }\n      )\n    );\n  });\n}\n", "import { asyncScheduler } from '../scheduler/async';\nimport { throttle, ThrottleConfig } from './throttle';\nimport { MonoTypeOperatorFunction, SchedulerLike } from '../types';\nimport { timer } from '../observable/timer';\n\n/**\n * Emits a value from the source Observable, then ignores subsequent source\n * values for `duration` milliseconds, then repeats this process.\n *\n * <span class=\"informal\">Lets a value pass, then ignores source values for the\n * next `duration` milliseconds.</span>\n *\n * ![](throttleTime.png)\n *\n * `throttleTime` emits the source Observable values on the output Observable\n * when its internal timer is disabled, and ignores source values when the timer\n * is enabled. Initially, the timer is disabled. As soon as the first source\n * value arrives, it is forwarded to the output Observable, and then the timer\n * is enabled. After `duration` milliseconds (or the time unit determined\n * internally by the optional `scheduler`) has passed, the timer is disabled,\n * and this process repeats for the next source value. Optionally takes a\n * {@link SchedulerLike} for managing timers.\n *\n * ## Examples\n *\n * ### Limit click rate\n *\n * Emit clicks at a rate of at most one click per second\n *\n * ```ts\n * import { fromEvent, throttleTime } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const result = clicks.pipe(throttleTime(1000));\n *\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link auditTime}\n * @see {@link debounceTime}\n * @see {@link delay}\n * @see {@link sampleTime}\n * @see {@link throttle}\n *\n * @param duration Time to wait before emitting another value after\n * emitting the last value, measured in milliseconds or the time unit determined\n * internally by the optional `scheduler`.\n * @param scheduler The {@link SchedulerLike} to use for\n * managing the timers that handle the throttling. Defaults to {@link asyncScheduler}.\n * @param config A configuration object to define `leading` and\n * `trailing` behavior. 
Defaults to `{ leading: true, trailing: false }`.\n * @return A function that returns an Observable that performs the throttle\n * operation to limit the rate of emissions from the source.\n */\nexport function throttleTime<T>(\n  duration: number,\n  scheduler: SchedulerLike = asyncScheduler,\n  config?: ThrottleConfig\n): MonoTypeOperatorFunction<T> {\n  const duration$ = timer(duration, scheduler);\n  return throttle(() => duration$, config);\n}\n", "import { OperatorFunction, ObservableInputTuple } from '../types';\nimport { operate } from '../util/lift';\nimport { createOperatorSubscriber } from './OperatorSubscriber';\nimport { innerFrom } from '../observable/innerFrom';\nimport { identity } from '../util/identity';\nimport { noop } from '../util/noop';\nimport { popResultSelector } from '../util/args';\n\nexport function withLatestFrom<T, O extends unknown[]>(...inputs: [...ObservableInputTuple<O>]): OperatorFunction<T, [T, ...O]>;\n\nexport function withLatestFrom<T, O extends unknown[], R>(\n  ...inputs: [...ObservableInputTuple<O>, (...value: [T, ...O]) => R]\n): OperatorFunction<T, R>;\n\n/**\n * Combines the source Observable with other Observables to create an Observable\n * whose values are calculated from the latest values of each, only when the\n * source emits.\n *\n * <span class=\"informal\">Whenever the source Observable emits a value, it\n * computes a formula using that value plus the latest values from other input\n * Observables, then emits the output of that formula.</span>\n *\n * ![](withLatestFrom.png)\n *\n * `withLatestFrom` combines each value from the source Observable (the\n * instance) with the latest values from the other input Observables only when\n * the source emits a value, optionally using a `project` function to determine\n * the value to be emitted on the output Observable. All input Observables must\n * emit at least one value before the output Observable will emit a value.\n *\n * ## Example\n *\n * On every click event, emit an array with the latest timer event plus the click event\n *\n * ```ts\n * import { fromEvent, interval, withLatestFrom } from 'rxjs';\n *\n * const clicks = fromEvent(document, 'click');\n * const timer = interval(1000);\n * const result = clicks.pipe(withLatestFrom(timer));\n * result.subscribe(x => console.log(x));\n * ```\n *\n * @see {@link combineLatest}\n *\n * @param {ObservableInput} other An input Observable to combine with the source\n * Observable. More than one input Observables may be given as argument.\n * @param {Function} [project] Projection function for combining values\n * together. Receives all values in order of the Observables passed, where the\n * first parameter is a value from the source Observable. (e.g.\n * `a.pipe(withLatestFrom(b, c), map(([a1, b1, c1]) => a1 + b1 + c1))`). If this is not\n * passed, arrays will be emitted on the output Observable.\n * @return A function that returns an Observable of projected values from the\n * most recent values from each input Observable, or an array of the most\n * recent values from each input Observable.\n */\nexport function withLatestFrom<T, R>(...inputs: any[]): OperatorFunction<T, R | any[]> {\n  const project = popResultSelector(inputs) as ((...args: any[]) => R) | undefined;\n\n  return operate((source, subscriber) => {\n    const len = inputs.length;\n    const otherValues = new Array(len);\n    // An array of whether or not the other sources have emitted. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  fromEvent,\n  map,\n  startWith\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport size\n */\nexport interface ViewportSize {\n  width: number                        /* Viewport width */\n  height: number                       /* Viewport height */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve viewport size\n *\n * @returns Viewport size\n */\nexport function getViewportSize(): ViewportSize {\n  return {\n    width:  innerWidth,\n    height: innerHeight\n  }\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport size\n *\n * @returns Viewport size observable\n */\nexport function watchViewportSize(): Observable<ViewportSize> {\n  return fromEvent(window, \"resize\", { passive: true })\n    .pipe(\n      map(getViewportSize),\n      startWith(getViewportSize())\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  map,\n  shareReplay\n} from \"rxjs\"\n\nimport {\n  ViewportOffset,\n  watchViewportOffset\n} from \"../offset\"\nimport {\n  ViewportSize,\n  watchViewportSize\n} from \"../size\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Viewport\n */\nexport interface Viewport {\n  offset: ViewportOffset               /* Viewport offset */\n  size: ViewportSize                   /* Viewport size */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport\n *\n * @returns Viewport observable\n */\nexport function watchViewport(): Observable<Viewport> {\n  return combineLatest([\n    watchViewportOffset(),\n    watchViewportSize()\n  ])\n    .pipe(\n      map(([offset, size]) => ({ offset, size })),\n      shareReplay(1)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  distinctUntilKeyChanged,\n  map\n} from \"rxjs\"\n\nimport { Header } from \"~/components\"\n\nimport { getElementOffset } from \"../../element\"\nimport { Viewport } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch viewport relative to element\n *\n * @param el - Element\n * @param options - Options\n *\n * @returns Viewport observable\n */\nexport function watchViewportAt(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<Viewport> {\n  const size$ = viewport$\n    .pipe(\n      distinctUntilKeyChanged(\"size\")\n    )\n\n  /* Compute element offset */\n  const offset$ = combineLatest([size$, header$])\n    .pipe(\n      map(() => getElementOffset(el))\n    )\n\n  /* Compute relative viewport, return hot observable */\n  return combineLatest([header$, viewport$, offset$])\n    .pipe(\n      map(([{ height }, { offset, size }, { x, y }]) => ({\n        offset: {\n          x: offset.x - x,\n          y: offset.y - y + height\n        },\n        size\n      }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  endWith,\n  fromEvent,\n  ignoreElements,\n  mergeWith,\n  share,\n  takeUntil\n} from \"rxjs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Worker message\n */\nexport interface WorkerMessage {\n  type: unknown                        /* Message type */\n  data?: unknown                       /* Message data */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create an observable for receiving from a web worker\n *\n * @template T - Data type\n *\n * @param worker - Web worker\n *\n * @returns Message observable\n */\nfunction recv<T>(worker: Worker): Observable<T> {\n  return fromEvent<MessageEvent<T>, T>(worker, \"message\", ev => ev.data)\n}\n\n/**\n * Create a subject for sending to a web worker\n *\n * @template T - Data type\n *\n * @param worker - Web worker\n *\n * @returns Message subject\n */\nfunction send<T>(worker: Worker): Subject<T> {\n  const send$ = new Subject<T>()\n  send$.subscribe(data => worker.postMessage(data))\n\n  /* Return message subject */\n  return send$\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a bidirectional communication channel to a web worker\n *\n * @template T - Data type\n *\n * @param url - Worker URL\n * @param worker - Worker\n *\n * @returns Worker subject\n */\nexport function watchWorker<T extends WorkerMessage>(\n  url: string, worker = new Worker(url)\n): Subject<T> {\n  const recv$ = recv<T>(worker)\n  const send$ = send<T>(worker)\n\n  /* Create worker subject and forward messages */\n  const worker$ = new Subject<T>()\n  worker$.subscribe(send$)\n\n  /* Return worker subject */\n  const done$ = send$.pipe(ignoreElements(), endWith(true))\n  return worker$\n    .pipe(\n      ignoreElements(),\n      mergeWith(recv$.pipe(takeUntil(done$))),\n      share()\n    ) as Subject<T>\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { getElement, getLocation } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Feature flag\n */\nexport type Flag =\n  | \"announce.dismiss\"                 /* Dismissable announcement bar */\n  | \"content.code.annotate\"            /* Code annotations */\n  | \"content.code.copy\"                /* Code copy button */\n  | \"content.lazy\"                     /* Lazy content elements */\n  | \"content.tabs.link\"                /* Link content tabs */\n  | \"content.tooltips\"                 /* Tooltips */\n  | \"header.autohide\"                  /* Hide header */\n  | \"navigation.expand\"                /* Automatic expansion */\n  | \"navigation.indexes\"               /* Section pages */\n  | \"navigation.instant\"               /* Instant navigation */\n  | \"navigation.instant.progress\"      /* Instant navigation progress */\n  | \"navigation.sections\"              /* Section navigation */\n  | \"navigation.tabs\"                  /* Tabs navigation */\n  | \"navigation.tabs.sticky\"           /* Tabs navigation (sticky) */\n  | \"navigation.top\"                   /* Back-to-top button */\n  | \"navigation.tracking\"              /* Anchor tracking */\n  | \"search.highlight\"                 /* Search highlighting */\n  | \"search.share\"                     /* Search sharing */\n  | \"search.suggest\"                   /* Search suggestions */\n  | \"toc.follow\"                       /* Following table of contents */\n  | \"toc.integrate\"                    /* Integrated table of contents */\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Translation\n */\nexport type Translation =\n  | \"clipboard.copy\"                   /* Copy to clipboard */\n  | \"clipboard.copied\"                 /* Copied to clipboard */\n  | \"search.result.placeholder\"        /* Type to start searching */\n  | \"search.result.none\"               /* No matching documents */\n  | \"search.result.one\"                /* 1 matching document */\n  | \"search.result.other\"              /* # matching documents */\n  | \"search.result.more.one\"           /* 1 more on this page */\n  | \"search.result.more.other\"         /* # more on this page */\n  | \"search.result.term.missing\"       /* Missing */\n  | \"select.version\"                   /* Version selector */\n\n/**\n * Translations\n */\nexport type Translations =\n  Record<Translation, string>\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Versioning\n */\nexport interface Versioning {\n  provider: \"mike\"                     /* Version provider */\n  default?: string | string[]          /* Default version */\n  alias?: boolean                      /* Show alias */\n}\n\n/**\n * Configuration\n */\nexport interface Config {\n  base: string                         /* Base URL */\n  features: Flag[]                     /* Feature flags */\n  translations: Translations           /* Translations */\n  search: string                       /* Search worker URL */\n  tags?: Record<string, string>        /* Tags mapping 
*/\n  version?: Versioning                 /* Versioning */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve global configuration and make base URL absolute\n */\nconst script = getElement(\"#__config\")\nconst config: Config = JSON.parse(script.textContent!)\nconfig.base = `${new URL(config.base, getLocation())}`\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve global configuration\n *\n * @returns Global configuration\n */\nexport function configuration(): Config {\n  return config\n}\n\n/**\n * Check whether a feature flag is enabled\n *\n * @param flag - Feature flag\n *\n * @returns Test result\n */\nexport function feature(flag: Flag): boolean {\n  return config.features.includes(flag)\n}\n\n/**\n * Retrieve the translation for the given key\n *\n * @param key - Key to be translated\n * @param value - Positional value, if any\n *\n * @returns Translation\n */\nexport function translation(\n  key: Translation, value?: string | number\n): string {\n  return typeof value !== \"undefined\"\n    ? config.translations[key].replace(\"#\", value.toString())\n    : config.translations[key]\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { getElement, getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Component type\n */\nexport type ComponentType =\n  | \"announce\"                         /* Announcement bar */\n  | \"container\"                        /* Container */\n  | \"consent\"                          /* Consent */\n  | \"content\"                          /* Content */\n  | \"dialog\"                           /* Dialog */\n  | \"header\"                           /* Header */\n  | \"header-title\"                     /* Header title */\n  | \"header-topic\"                     /* Header topic */\n  | \"main\"                             /* Main area */\n  | \"outdated\"                         /* Version warning */\n  | \"palette\"                          /* Color palette */\n  | \"progress\"                         /* Progress indicator */\n  | \"search\"                           /* Search */\n  | \"search-query\"                     /* Search input */\n  | \"search-result\"                    /* Search results */\n  | \"search-share\"                     /* Search sharing */\n  | \"search-suggest\"                   /* Search suggestions */\n  | \"sidebar\"                          /* Sidebar */\n  | \"skip\"                             /* Skip link */\n  | \"source\"                           /* Repository information */\n  | \"tabs\"                             /* Navigation tabs */\n  | \"toc\"                              /* Table of contents */\n  | \"top\"                              /* Back-to-top button */\n\n/**\n * Component\n *\n * @template T - Component type\n * @template U - Reference type\n */\nexport type Component<\n  T extends {} = {},\n  U extends HTMLElement = HTMLElement\n> =\n  T & {\n    ref: U                             /* Component reference */\n  }\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Component type map\n */\ninterface ComponentTypeMap {\n  \"announce\": HTMLElement              /* Announcement bar */\n  \"container\": HTMLElement             /* Container */\n  \"consent\": HTMLElement               /* Consent */\n  \"content\": HTMLElement               /* Content */\n  \"dialog\": HTMLElement                /* Dialog */\n  \"header\": HTMLElement                /* Header */\n  \"header-title\": HTMLElement          /* Header title */\n  \"header-topic\": HTMLElement          /* Header topic */\n  \"main\": HTMLElement                  /* Main area */\n  \"outdated\": HTMLElement              /* Version warning */\n  \"palette\": HTMLElement               /* Color palette */\n  \"progress\": HTMLElement              /* Progress indicator */\n  \"search\": HTMLElement                /* Search */\n  \"search-query\": HTMLInputElement     /* Search input */\n  \"search-result\": HTMLElement         /* Search results */\n  \"search-share\": HTMLAnchorElement    /* Search sharing */\n  \"search-suggest\": HTMLElement        /* Search suggestions */\n  \"sidebar\": HTMLElement         
      /* Sidebar */\n  \"skip\": HTMLAnchorElement            /* Skip link */\n  \"source\": HTMLAnchorElement          /* Repository information */\n  \"tabs\": HTMLElement                  /* Navigation tabs */\n  \"toc\": HTMLElement                   /* Table of contents */\n  \"top\": HTMLAnchorElement             /* Back-to-top button */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Retrieve the element for a given component or throw a reference error\n *\n * @template T - Component type\n *\n * @param type - Component type\n * @param node - Node of reference\n *\n * @returns Element\n */\nexport function getComponentElement<T extends ComponentType>(\n  type: T, node: ParentNode = document\n): ComponentTypeMap[T] {\n  return getElement(`[data-md-component=${type}]`, node)\n}\n\n/**\n * Retrieve all elements for a given component\n *\n * @template T - Component type\n *\n * @param type - Component type\n * @param node - Node of reference\n *\n * @returns Elements\n */\nexport function getComponentElements<T extends ComponentType>(\n  type: T, node: ParentNode = document\n): ComponentTypeMap[T][] {\n  return getElements(`[data-md-component=${type}]`, node)\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  finalize,\n  fromEvent,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { getElement } from \"~/browser\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Announcement bar\n */\nexport interface Announce {\n  hash: number                        /* Content hash */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch announcement bar\n *\n * @param el - Announcement bar element\n *\n * @returns Announcement bar observable\n */\nexport function watchAnnounce(\n  el: HTMLElement\n): Observable<Announce> {\n  const button = getElement(\".md-typeset > :first-child\", el)\n  return fromEvent(button, \"click\", { once: true })\n    .pipe(\n      map(() => getElement(\".md-typeset\", el)),\n      map(content => ({ hash: __md_hash(content.innerHTML) }))\n    )\n}\n\n/**\n * Mount announcement bar\n *\n * @param el - Announcement bar element\n *\n * @returns Announcement bar component observable\n */\nexport function mountAnnounce(\n  el: HTMLElement\n): Observable<Component<Announce>> {\n  if (!feature(\"announce.dismiss\") || !el.childElementCount)\n    return EMPTY\n\n  /* Support instant navigation - see https://t.ly/3FTme */\n  if (!el.hidden) {\n    const content = getElement(\".md-typeset\", el)\n    if (__md_hash(content.innerHTML) === __md_get(\"__announce\"))\n      el.hidden = true\n  }\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<Announce>()\n    push$.subscribe(({ hash }) => {\n      el.hidden = true\n\n      /* Persist preference in local storage */\n      __md_set<number>(\"__announce\", hash)\n    })\n\n    /* Create and return component */\n    return watchAnnounce(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  finalize,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Consent\n */\nexport interface Consent {\n  hidden: boolean                      /* Consent is hidden */\n}\n\n/**\n * Consent defaults\n */\nexport interface ConsentDefaults {\n  analytics?: boolean                  /* Consent for Analytics */\n  github?: boolean                     /* Consent for GitHub */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  target$: Observable<HTMLElement>     /* Target observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch consent\n *\n * @param el - Consent element\n * @param options - Options\n *\n * @returns Consent observable\n */\nexport function watchConsent(\n  el: HTMLElement, { target$ }: WatchOptions\n): Observable<Consent> {\n  return target$\n    .pipe(\n      map(target => ({ hidden: target !== el }))\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount consent\n *\n * @param el - Consent element\n * @param options - Options\n *\n * @returns Consent component observable\n */\nexport function mountConsent(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Consent>> {\n  const internal$ = new Subject<Consent>()\n  internal$.subscribe(({ hidden }) => {\n    el.hidden = hidden\n  })\n\n  /* Create and return component */\n  return watchConsent(el, options)\n    .pipe(\n      tap(state => internal$.next(state)),\n      finalize(() => internal$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ComponentChild } from \"preact\"\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tooltip style\n */\nexport type TooltipStyle =\n  | \"inline\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a tooltip\n *\n * @param id - Tooltip identifier\n * @param style - Tooltip style\n *\n * @returns Element\n */\nexport function renderTooltip(\n  id?: string, style?: TooltipStyle\n): HTMLElement {\n  if (style === \"inline\") { // @todo refactor control flow\n    return (\n      <div class=\"md-tooltip md-tooltip--inline\" id={id} role=\"tooltip\">\n        <div class=\"md-tooltip__inner md-typeset\"></div>\n      </div>\n    )\n  } else {\n    return (\n      <div class=\"md-tooltip\" id={id} role=\"tooltip\">\n        <div class=\"md-tooltip__inner md-typeset\"></div>\n      </div>\n    )\n  }\n}\n\n// @todo: rename\nexport function renderInlineTooltip2(\n  ...children: ComponentChild[]\n): HTMLElement {\n  return (\n    <div class=\"md-tooltip2\" role=\"tooltip\">\n      <div class=\"md-tooltip2__inner md-typeset\">\n        {children}\n      </div>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\nimport { renderTooltip } from \"../tooltip\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render an annotation\n *\n * @param id - Annotation identifier\n * @param prefix - Tooltip identifier prefix\n *\n * @returns Element\n */\nexport function renderAnnotation(\n  id: string | number, prefix?: string\n): HTMLElement {\n  prefix = prefix ? `${prefix}_annotation_${id}` : undefined\n\n  /* Render tooltip with anchor, if given */\n  if (prefix) {\n    const anchor = prefix ? 
`#${prefix}` : undefined\n    return (\n      <aside class=\"md-annotation\" tabIndex={0}>\n        {renderTooltip(prefix)}\n        <a href={anchor} class=\"md-annotation__index\" tabIndex={-1}>\n          <span data-md-annotation-id={id}></span>\n        </a>\n      </aside>\n    )\n  } else {\n    return (\n      <aside class=\"md-annotation\" tabIndex={0}>\n        {renderTooltip(prefix)}\n        <span class=\"md-annotation__index\" tabIndex={-1}>\n          <span data-md-annotation-id={id}></span>\n        </span>\n      </aside>\n    )\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { translation } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a 'copy-to-clipboard' button\n *\n * @param id - Unique identifier\n *\n * @returns Element\n */\nexport function renderClipboardButton(id: string): HTMLElement {\n  return (\n    <button\n      class=\"md-clipboard md-icon\"\n      title={translation(\"clipboard.copy\")}\n      data-clipboard-target={`#${id} > code`}\n    ></button>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\nimport { ComponentChild } from \"preact\"\n\nimport { configuration, feature, translation } from \"~/_\"\nimport { SearchItem } from \"~/integrations/search\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Render flag\n */\nconst enum Flag {\n  TEASER = 1,                          /* Render teaser */\n  PARENT = 2                           /* Render as parent */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper function\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a search document\n *\n * @param document - Search document\n * @param flag - Render flags\n *\n * @returns Element\n */\nfunction renderSearchDocument(\n  document: SearchItem, flag: Flag\n): HTMLElement {\n  const parent = flag & Flag.PARENT\n  const teaser = flag & Flag.TEASER\n\n  /* Render missing query terms */\n  const missing = Object.keys(document.terms)\n    .filter(key => !document.terms[key])\n    .reduce<ComponentChild[]>((list, key) => [\n      ...list, <del>{escapeHTML(key)}</del>, \" \"\n    ], [])\n    .slice(0, -1)\n\n  /* Assemble query string for highlighting */\n  const config = configuration()\n  const url = new URL(document.location, config.base)\n  if (feature(\"search.highlight\"))\n    url.searchParams.set(\"h\", Object.entries(document.terms)\n      .filter(([, match]) => match)\n      .reduce((highlight, [value]) => `${highlight} ${value}`.trim(), \"\")\n    )\n\n  /* Render article or section, depending on flags */\n  const { tags } = configuration()\n  return (\n    <a href={`${url}`} class=\"md-search-result__link\" tabIndex={-1}>\n      <article\n        class=\"md-search-result__article md-typeset\"\n        data-md-score={document.score.toFixed(2)}\n      >\n        {parent > 0 && <div class=\"md-search-result__icon md-icon\"></div>}\n        {parent > 0 && <h1>{document.title}</h1>}\n        {parent <= 0 && <h2>{document.title}</h2>}\n        {teaser > 0 && document.text.length > 0 &&\n          document.text\n        }\n        {document.tags && document.tags.map(tag => {\n          const type = tags\n            ? tag in tags\n              ? 
`md-tag-icon md-tag--${tags[tag]}`\n              : \"md-tag-icon\"\n            : \"\"\n          return (\n            <span class={`md-tag ${type}`}>{tag}</span>\n          )\n        })}\n        {teaser > 0 && missing.length > 0 &&\n          <p class=\"md-search-result__terms\">\n            {translation(\"search.result.term.missing\")}: {...missing}\n          </p>\n        }\n      </article>\n    </a>\n  )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a search result\n *\n * @param result - Search result\n *\n * @returns Element\n */\nexport function renderSearchResultItem(\n  result: SearchItem[]\n): HTMLElement {\n  const threshold = result[0].score\n  const docs = [...result]\n\n  const config = configuration()\n\n  /* Find and extract parent article */\n  const parent = docs.findIndex(doc => {\n    const l = `${new URL(doc.location, config.base)}` // @todo hacky\n    return !l.includes(\"#\")\n  })\n  const [article] = docs.splice(parent, 1)\n\n  /* Determine last index above threshold */\n  let index = docs.findIndex(doc => doc.score < threshold)\n  if (index === -1)\n    index = docs.length\n\n  /* Partition sections */\n  const best = docs.slice(0, index)\n  const more = docs.slice(index)\n\n  /* Render children */\n  const children = [\n    renderSearchDocument(article, Flag.PARENT | +(!parent && index === 0)),\n    ...best.map(section => renderSearchDocument(section, Flag.TEASER)),\n    ...more.length ? [\n      <details class=\"md-search-result__more\">\n        <summary tabIndex={-1}>\n          <div>\n            {more.length > 0 && more.length === 1\n              ? translation(\"search.result.more.one\")\n              : translation(\"search.result.more.other\", more.length)\n            }\n          </div>\n        </summary>\n        {...more.map(section => renderSearchDocument(section, Flag.TEASER))}\n      </details>\n    ] : []\n  ]\n\n  /* Render search result */\n  return (\n    <li class=\"md-search-result__item\">\n      {children}\n    </li>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { SourceFacts } from \"~/components\"\nimport { h, round } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render repository facts\n *\n * @param facts - Repository facts\n *\n * @returns Element\n */\nexport function renderSourceFacts(facts: SourceFacts): HTMLElement {\n  return (\n    <ul class=\"md-source__facts\">\n      {Object.entries(facts).map(([key, value]) => (\n        <li class={`md-source__fact md-source__fact--${key}`}>\n          {typeof value === \"number\" ? round(value) : value}\n        </li>\n      ))}\n    </ul>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tabbed control type\n */\ntype TabbedControlType =\n  | \"prev\"\n  | \"next\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render control for content tabs\n *\n * @param type - Control type\n *\n * @returns Element\n */\nexport function renderTabbedControl(\n  type: TabbedControlType\n): HTMLElement {\n  const classes = `tabbed-control tabbed-control--${type}`\n  return (\n    <div class={classes} hidden>\n      <button class=\"tabbed-button\" tabIndex={-1} aria-hidden=\"true\"></button>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a table inside a wrapper to improve scrolling on mobile\n *\n * @param table - Table element\n *\n * @returns Element\n */\nexport function renderTable(table: HTMLElement): HTMLElement {\n  return (\n    <div class=\"md-typeset__scrollwrap\">\n      <div class=\"md-typeset__table\">\n        {table}\n      </div>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { configuration, translation } from \"~/_\"\nimport { h } from \"~/utilities\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Version properties\n */\nexport interface VersionProperties {\n  hidden?: boolean                     /* Version is hidden */\n}\n\n/**\n * Version\n */\nexport interface Version {\n  version: string                      /* Version identifier */\n  title: string                        /* Version title */\n  aliases: string[]                    /* Version aliases */\n  properties?: VersionProperties       /* Version properties */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a version\n *\n * @param version - Version\n *\n * @returns Element\n */\nfunction renderVersion(version: Version): HTMLElement {\n  const config = configuration()\n\n  /* Ensure trailing slash - see https://bit.ly/3rL5u3f */\n  const url = new URL(`../${version.version}/`, config.base)\n  return (\n    <li class=\"md-version__item\">\n      <a href={`${url}`} class=\"md-version__link\">\n        {version.title}\n        {config.version?.alias && version.aliases.length > 0 && (\n          <span class=\"md-version__alias\">\n            {version.aliases[0]}\n          </span>\n        )}\n      </a>\n    
</li>\n  )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Render a version selector\n *\n * @param versions - Versions\n * @param active - Active version\n *\n * @returns Element\n */\nexport function renderVersionSelector(\n  versions: Version[], active: Version\n): HTMLElement {\n  const config = configuration()\n  versions = versions.filter(version => !version.properties?.hidden)\n  return (\n    <div class=\"md-version\">\n      <button\n        class=\"md-version__current\"\n        aria-label={translation(\"select.version\")}\n      >\n        {active.title}\n        {config.version?.alias && active.aliases.length > 0 && (\n          <span class=\"md-version__alias\">\n            {active.aliases[0]}\n          </span>\n        )}\n      </button>\n      <ul class=\"md-version__list\">\n        {versions.map(renderVersion)}\n      </ul>\n    </div>\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  BehaviorSubject,\n  EMPTY,\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  combineLatest,\n  combineLatestWith,\n  debounce,\n  defer,\n  distinctUntilChanged,\n  endWith,\n  filter,\n  finalize,\n  first,\n  ignoreElements,\n  map,\n  mergeMap,\n  observeOn,\n  queueScheduler,\n  share,\n  startWith,\n  switchMap,\n  tap,\n  throttleTime,\n  timer,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  ElementOffset,\n  Viewport,\n  getElement,\n  getElementContainers,\n  getElementOffsetAbsolute,\n  getElementSize,\n  watchElementContentOffset,\n  watchElementFocus,\n  watchElementHover\n} from \"~/browser\"\nimport { renderInlineTooltip2 } from \"~/templates\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tooltip\n */\nexport interface Tooltip {\n  active: boolean                      // Tooltip is active\n  offset: ElementOffset                // Tooltip offset\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Dependencies\n */\ninterface Dependencies {\n  content$: Observable<HTMLElement>    // Tooltip content observable\n  viewport$: Observable<Viewport>      // Viewport observable\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for tooltips\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch tooltip\n *\n * This function tracks the tooltip host element, and deduces the active state\n * and offset of the tooltip from it. The active state is determined by whether\n * the host element is focused or hovered, and the offset is determined by the\n * host element's absolute position in the document.\n *\n * @param el - Tooltip host element\n *\n * @returns Tooltip observable\n */\nexport function watchTooltip2(\n  el: HTMLElement\n): Observable<Tooltip> {\n\n  // Compute whether tooltip should be shown - we need to watch both focus and\n  // hover events on the host element and emit if one of them is active. In case\n  // of a hover event, we keep the element visible for a short amount of time\n  // after the pointer left the host element for a better user experience.\n  const active$ =\n    combineLatest([\n      watchElementFocus(el),\n      watchElementHover(el)\n    ])\n      .pipe(\n        map(([focus, hover]) => focus || hover),\n        distinctUntilChanged()\n      )\n\n  // We need to determine all parent elements of the host element that are\n  // currently scrollable, as they might affect the position of the tooltip\n  // depending on their horizontal of vertical offset. 
We must track all of\n  // them and recompute the position of the tooltip if they change.\n  const offset$ =\n    defer(() => getElementContainers(el)).pipe(\n      mergeMap(watchElementContentOffset),\n      throttleTime(1),\n      // Note that we need to poll the value again if the active state changes,\n      // as otherwise the tooltip might be misplaced. This particularly happens\n      // when using third-party integrations like tablesort that change the\n      // position of elements \u2013 see https://t.ly/Y-V7X\n      combineLatestWith(active$),\n      map(() => getElementOffsetAbsolute(el)),\n    )\n\n  // Only track parent elements and compute offset of the tooltip host if the\n  // tooltip should be shown - we defer the computation of the offset until the\n  // tooltip becomes active for the first time. This is necessary, because we\n  // must also keep the tooltip active as long as it is focused or hovered.\n  return active$.pipe(\n    first(active => active),\n    switchMap(() => combineLatest([active$, offset$])),\n    map(([active, offset]) => ({ active, offset })),\n    share()\n  )\n}\n\n/**\n * Mount tooltip\n *\n * This function renders a tooltip with the content from the provided `content$`\n * observable as passed via the dependencies. If the returned element has a role\n * of type `dialog`, the tooltip is considered to be interactive, and rendered\n * either above or below the host element, depending on the available space.\n *\n * If the returned element has a role of type `tooltip`, the tooltip is always\n * rendered below the host element and considered to be non-interactive. This\n * allows us to reuse the same positioning logic for both interactive and\n * non-interactive tooltips, as it is largely the same.\n *\n * @param el - Tooltip host element\n * @param dependencies - Dependencies\n *\n * @returns Tooltip component observable\n */\nexport function mountTooltip2(\n  el: HTMLElement, dependencies: Dependencies\n): Observable<Component<Tooltip>> {\n  const { content$, viewport$ } = dependencies\n\n  // Compute unique tooltip id - this is necessary to associate the tooltip host\n  // element with the tooltip element for ARIA purposes\n  const id = `__tooltip2_${sequence++}`\n\n  // Create component on subscription\n  return defer(() => {\n    const push$ = new Subject<Tooltip>()\n\n    // Create subject to track tooltip presence and visibility - we use another\n    // purely internal subject to track the tooltip's presence and visibility,\n    // as the tooltip should be visible if the host element or tooltip itself\n    // is focused or hovered to allow for smooth pointer migration\n    const show$ = new BehaviorSubject(false)\n    push$.pipe(ignoreElements(), endWith(false))\n      .subscribe(show$)\n\n    // Create observable controlling tooltip element - we create and attach the\n    // tooltip only if it is actually present, in order to keep the number of\n    // elements low. We need to keep the tooltip visible for a short time after\n    // the pointer left the host element or tooltip itself. For this, we use an\n    // inner subscription to the tooltip observable, which we terminate when the\n    // tooltip should not be shown, automatically removing the element. 
Moreover\n    // we use the queue scheduler, which will schedule synchronously in case the\n    // tooltip should be shown, and asynchronously if it should be hidden.\n    const node$ = show$.pipe(\n      debounce(active => timer(+!active * 250, queueScheduler)),\n      distinctUntilChanged(),\n      switchMap(active => active ? content$ : EMPTY),\n      tap(node => node.id = id),\n      share()\n    )\n\n    // Compute tooltip presence and visibility - the tooltip should be shown if\n    // the host element or the tooltip itself is focused or hovered\n    combineLatest([\n      push$.pipe(map(({ active }) => active)),\n      node$.pipe(\n        switchMap(node => watchElementHover(node, 250)),\n        startWith(false)\n      )\n    ])\n      .pipe(map(states => states.some(active => active)))\n      .subscribe(show$)\n\n    // Compute tooltip origin - we need to compute the tooltip origin depending\n    // on the position of the host element, the viewport size, as well as the\n    // actual size of the tooltip, if positioned above. The tooltip must about\n    // to be rendered for this to be correct, which is why we do it here.\n    const origin$ = show$.pipe(\n      filter(active => active),\n      withLatestFrom(node$, viewport$),\n      map(([_, node, { size }]) => {\n        const host = el.getBoundingClientRect()\n        const x = host.width / 2\n\n        // If the tooltip is non-interactive, we always render it below the\n        // actual element because all operating systems do it that way\n        if (node.role === \"tooltip\") {\n          return { x, y: 8 + host.height }\n\n        // Otherwise, we determine where there is more space, and render the\n        // tooltip either above or below the host element\n        } else if (host.y >= size.height / 2) {\n          const { height } = getElementSize(node)\n          return { x, y: -16 - height }\n        } else {\n          return { x, y: +16 + host.height }\n        }\n      })\n    )\n\n    // Update tooltip position - we always need to update the position of the\n    // tooltip, as it might change depending on the viewport offset of the host\n    combineLatest([node$, push$, origin$])\n      .subscribe(([node, { offset }, origin]) => {\n        node.style.setProperty(\"--md-tooltip-host-x\", `${offset.x}px`)\n        node.style.setProperty(\"--md-tooltip-host-y\", `${offset.y}px`)\n\n        // Update tooltip origin - this is mainly set to determine the position\n        // of the tooltip tail, to show the direction it is originating from\n        node.style.setProperty(\"--md-tooltip-x\", `${origin.x}px`)\n        node.style.setProperty(\"--md-tooltip-y\", `${origin.y}px`)\n\n        // Update tooltip render location, i.e., whether the tooltip is shown\n        // above or below the host element, depending on the available space\n        node.classList.toggle(\"md-tooltip2--top\",    origin.y <  0)\n        node.classList.toggle(\"md-tooltip2--bottom\", origin.y >= 0)\n      })\n\n    // Update tooltip width - we only explicitly set the width of the tooltip\n    // if it is non-interactive, in case it should always be rendered centered\n    show$.pipe(\n      filter(active => active),\n      withLatestFrom(node$, (_, node) => node),\n      filter(node => node.role === \"tooltip\")\n    )\n      .subscribe(node => {\n        const size = getElementSize(getElement(\":scope > *\", node))\n\n        // Set tooltip width and remove tail by setting it to a width of zero -\n        // if authors want to keep the tail, we can 
move this to CSS later\n        node.style.setProperty(\"--md-tooltip-width\", `${size.width}px`)\n        node.style.setProperty(\"--md-tooltip-tail\",  `${0}px`)\n      })\n\n    // Update tooltip visibility - we defer to the next animation frame, because\n    // the tooltip must first be added to the document before we make it appear,\n    // or it will appear instantly without delay. Additionally, we need to keep\n    // the tooltip visible for a short time after the pointer left the host.\n    show$.pipe(\n      distinctUntilChanged(),\n      observeOn(animationFrameScheduler),\n      withLatestFrom(node$)\n    )\n      .subscribe(([active, node]) => {\n        node.classList.toggle(\"md-tooltip2--active\", active)\n      })\n\n    // Set up ARIA attributes when tooltip is visible\n    combineLatest([\n      show$.pipe(filter(active => active)),\n      node$\n    ])\n      .subscribe(([_, node]) => {\n        if (node.role === \"dialog\") {\n          el.setAttribute(\"aria-controls\", id)\n          el.setAttribute(\"aria-haspopup\", \"dialog\")\n        } else {\n          el.setAttribute(\"aria-describedby\", id)\n        }\n      })\n\n    // Remove ARIA attributes when tooltip is hidden\n    show$.pipe(filter(active => !active))\n      .subscribe(() => {\n        el.removeAttribute(\"aria-controls\")\n        el.removeAttribute(\"aria-describedby\")\n        el.removeAttribute(\"aria-haspopup\")\n      })\n\n    // Create and return component\n    return watchTooltip2(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n\n// ----------------------------------------------------------------------------\n\n/**\n * Mount inline tooltip\n *\n * @todo refactor this function\n *\n * @param el - Tooltip host element\n * @param dependencies - Dependencies\n * @param container - Container\n *\n * @returns Tooltip component observable\n */\nexport function mountInlineTooltip2(\n  el: HTMLElement, { viewport$ }: { viewport$: Observable<Viewport> },\n  container = document.body\n): Observable<Component<Tooltip>> {\n  return mountTooltip2(el, {\n    content$: new Observable<HTMLElement>(observer => {\n      const title = el.title\n      const node = renderInlineTooltip2(title)\n      observer.next(node)\n      el.removeAttribute(\"title\")\n      // Append tooltip and remove on unsubscription\n      container.append(node)\n      return () => {\n        node.remove()\n        el.setAttribute(\"title\", title)\n      }\n    }),\n    viewport$\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  auditTime,\n  combineLatest,\n  debounceTime,\n  defer,\n  delay,\n  endWith,\n  filter,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  switchMap,\n  take,\n  takeUntil,\n  tap,\n  throttleTime,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  ElementOffset,\n  getActiveElement,\n  getElementSize,\n  watchElementContentOffset,\n  watchElementFocus,\n  watchElementOffset,\n  watchElementVisibility\n} from \"~/browser\"\n\nimport { Component } from \"../../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Annotation\n */\nexport interface Annotation {\n  active: boolean                      /* Annotation is active */\n  offset: ElementOffset                /* Annotation offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch annotation\n *\n * @param el - Annotation element\n * @param container - Containing element\n *\n * @returns Annotation observable\n */\nexport function watchAnnotation(\n  el: HTMLElement, container: HTMLElement\n): Observable<Annotation> {\n  const offset$ = defer(() => combineLatest([\n    watchElementOffset(el),\n    watchElementContentOffset(container)\n  ]))\n    .pipe(\n      map(([{ x, y }, scroll]): ElementOffset => {\n        const { width, height } = getElementSize(el)\n        return ({\n          x: x - scroll.x + width  / 2,\n          y: y - scroll.y + height / 2\n        })\n      })\n    )\n\n  /* Actively watch annotation on focus */\n  return watchElementFocus(el)\n    .pipe(\n      switchMap(active => offset$\n        .pipe(\n          map(offset => ({ active, offset })),\n          take(+!active || Infinity)\n        )\n      )\n    )\n}\n\n/**\n * Mount annotation\n *\n * @param el - Annotation element\n * @param container - Containing element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotation(\n  el: HTMLElement, container: HTMLElement, { target$ }: MountOptions\n): Observable<Component<Annotation>> {\n  const [tooltip, index] = Array.from(el.children)\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<Annotation>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ offset }) {\n        el.style.setProperty(\"--md-tooltip-x\", `${offset.x}px`)\n        el.style.setProperty(\"--md-tooltip-y\", `${offset.y}px`)\n      },\n\n      /* Handle complete */\n      complete() {\n        el.style.removeProperty(\"--md-tooltip-x\")\n        el.style.removeProperty(\"--md-tooltip-y\")\n      }\n    })\n\n    /* 
Start animation only when annotation is visible */\n    watchElementVisibility(el)\n      .pipe(\n        takeUntil(done$)\n      )\n        .subscribe(visible => {\n          el.toggleAttribute(\"data-md-visible\", visible)\n        })\n\n    /* Toggle tooltip presence to mitigate empty lines when copying */\n    merge(\n      push$.pipe(filter(({ active }) => active)),\n      push$.pipe(debounceTime(250), filter(({ active }) => !active))\n    )\n      .subscribe({\n\n        /* Handle emission */\n        next({ active }) {\n          if (active)\n            el.prepend(tooltip)\n          else\n            tooltip.remove()\n        },\n\n        /* Handle complete */\n        complete() {\n          el.prepend(tooltip)\n        }\n      })\n\n    /* Toggle tooltip visibility */\n    push$\n      .pipe(\n        auditTime(16, animationFrameScheduler)\n      )\n        .subscribe(({ active }) => {\n          tooltip.classList.toggle(\"md-tooltip--active\", active)\n        })\n\n    /* Track relative origin of tooltip */\n    push$\n      .pipe(\n        throttleTime(125, animationFrameScheduler),\n        filter(() => !!el.offsetParent),\n        map(() => el.offsetParent!.getBoundingClientRect()),\n        map(({ x }) => x)\n      )\n        .subscribe({\n\n          /* Handle emission */\n          next(origin) {\n            if (origin)\n              el.style.setProperty(\"--md-tooltip-0\", `${-origin}px`)\n            else\n              el.style.removeProperty(\"--md-tooltip-0\")\n          },\n\n          /* Handle complete */\n          complete() {\n            el.style.removeProperty(\"--md-tooltip-0\")\n          }\n        })\n\n    /* Allow to copy link without scrolling to anchor */\n    fromEvent<MouseEvent>(index, \"click\")\n      .pipe(\n        takeUntil(done$),\n        filter(ev => !(ev.metaKey || ev.ctrlKey))\n      )\n        .subscribe(ev => {\n          ev.stopPropagation()\n          ev.preventDefault()\n        })\n\n    /* Allow to open link in new tab or blur on close */\n    fromEvent<MouseEvent>(index, \"mousedown\")\n      .pipe(\n        takeUntil(done$),\n        withLatestFrom(push$)\n      )\n        .subscribe(([ev, { active }]) => {\n\n          /* Open in new tab */\n          if (ev.button !== 0 || ev.metaKey || ev.ctrlKey) {\n            ev.preventDefault()\n\n          /* Close annotation */\n          } else if (active) {\n            ev.preventDefault()\n\n            /* Focus parent annotation, if any */\n            const parent = el.parentElement!.closest(\".md-annotation\")\n            if (parent instanceof HTMLElement)\n              parent.focus()\n            else\n              getActiveElement()?.blur()\n          }\n        })\n\n    /* Open and focus annotation on location target */\n    target$\n      .pipe(\n        takeUntil(done$),\n        filter(target => target === tooltip),\n        delay(125)\n      )\n        .subscribe(() => el.focus())\n\n    /* Create and return component */\n    return watchAnnotation(el, container)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, 
modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  endWith,\n  finalize,\n  ignoreElements,\n  merge,\n  share,\n  takeUntil\n} from \"rxjs\"\n\nimport {\n  getElement,\n  getElements,\n  getOptionalElement\n} from \"~/browser\"\nimport { renderAnnotation } from \"~/templates\"\n\nimport { Component } from \"../../../_\"\nimport {\n  Annotation,\n  mountAnnotation\n} from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find all annotation hosts in the containing element\n *\n * @param container - Containing element\n *\n * @returns Annotation hosts\n */\nfunction findHosts(container: HTMLElement): HTMLElement[] {\n  return container.tagName === \"CODE\"\n    ? 
getElements(\".c, .c1, .cm\", container)\n    : [container]\n}\n\n/**\n * Find all annotation markers in the containing element\n *\n * @param container - Containing element\n *\n * @returns Annotation markers\n */\nfunction findMarkers(container: HTMLElement): Text[] {\n  const markers: Text[] = []\n  for (const el of findHosts(container)) {\n    const nodes: Text[] = []\n\n    /* Find all text nodes in current element */\n    const it = document.createNodeIterator(el, NodeFilter.SHOW_TEXT)\n    for (let node = it.nextNode(); node; node = it.nextNode())\n      nodes.push(node as Text)\n\n    /* Find all markers in each text node */\n    for (let text of nodes) {\n      let match: RegExpExecArray | null\n\n      /* Split text at marker and add to list */\n      while ((match = /(\\(\\d+\\))(!)?/.exec(text.textContent!))) {\n        const [, id, force] = match\n        if (typeof force === \"undefined\") {\n          const marker = text.splitText(match.index)\n          text = marker.splitText(id.length)\n          markers.push(marker)\n\n        /* Replace entire text with marker */\n        } else {\n          text.textContent = id\n          markers.push(text)\n          break\n        }\n      }\n    }\n  }\n  return markers\n}\n\n/**\n * Swap the child nodes of two elements\n *\n * @param source - Source element\n * @param target - Target element\n */\nfunction swap(source: HTMLElement, target: HTMLElement): void {\n  target.append(...Array.from(source.childNodes))\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount annotation list\n *\n * This function analyzes the containing code block and checks for markers\n * referring to elements in the given annotation list. If no markers are found,\n * the list is left untouched. 
Otherwise, list elements are rendered as\n * annotations inside the code block.\n *\n * @param el - Annotation list element\n * @param container - Containing element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotationList(\n  el: HTMLElement, container: HTMLElement, { target$, print$ }: MountOptions\n): Observable<Component<Annotation>> {\n\n  /* Compute prefix for tooltip anchors */\n  const parent = container.closest(\"[id]\")\n  const prefix = parent?.id\n\n  /* Find and replace all markers with empty annotations */\n  const annotations = new Map<string, HTMLElement>()\n  for (const marker of findMarkers(container)) {\n    const [, id] = marker.textContent!.match(/\\((\\d+)\\)/)!\n    if (getOptionalElement(`:scope > li:nth-child(${id})`, el)) {\n      annotations.set(id, renderAnnotation(id, prefix))\n      marker.replaceWith(annotations.get(id)!)\n    }\n  }\n\n  /* Keep list if there are no annotations to render */\n  if (annotations.size === 0)\n    return EMPTY\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n\n    /* Retrieve container pairs for swapping */\n    const pairs: [HTMLElement, HTMLElement][] = []\n    for (const [id, annotation] of annotations)\n      pairs.push([\n        getElement(\".md-typeset\", annotation),\n        getElement(`:scope > li:nth-child(${id})`, el)\n      ])\n\n    /* Handle print mode - see https://bit.ly/3rgPdpt */\n    print$.pipe(takeUntil(done$))\n      .subscribe(active => {\n        el.hidden = !active\n\n        /* Add class to discern list element */\n        el.classList.toggle(\"md-annotation-list\", active)\n\n        /* Show annotations in code block or list (print) */\n        for (const [inner, child] of pairs)\n          if (!active)\n            swap(child, inner)\n          else\n            swap(inner, child)\n      })\n\n    /* Create and return component */\n    return merge(...[...annotations]\n      .map(([, annotation]) => (\n        mountAnnotation(annotation, container, { target$ })\n      ))\n    )\n      .pipe(\n        finalize(() => push$.complete()),\n        share()\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { EMPTY, Observable, defer } from \"rxjs\"\n\nimport { Component } from \"../../../_\"\nimport { Annotation } from \"../_\"\nimport { mountAnnotationList } from \"../list\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find list element directly following a block\n *\n * @param el - Annotation block element\n *\n * @returns List element or nothing\n */\nfunction findList(el: HTMLElement): HTMLElement | undefined {\n  if (el.nextElementSibling) {\n    const sibling = el.nextElementSibling as HTMLElement\n    if (sibling.tagName === \"OL\")\n      return sibling\n\n    /* Skip empty paragraphs - see https://bit.ly/3r4ZJ2O */\n    else if (sibling.tagName === \"P\" && !sibling.children.length)\n      return findList(sibling)\n  }\n\n  /* Everything else */\n  return undefined\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount annotation block\n *\n * @param el - Annotation block element\n * @param options - Options\n *\n * @returns Annotation component observable\n */\nexport function mountAnnotationBlock(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Annotation>> {\n  return defer(() => {\n    const list = findList(el)\n    return typeof list !== \"undefined\"\n      ? mountAnnotationList(list, el, options)\n      : EMPTY\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ClipboardJS from \"clipboard\"\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  filter,\n  finalize,\n  map,\n  mergeWith,\n  switchMap,\n  take,\n  takeLast,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  getElementContentSize,\n  getElements,\n  watchElementSize,\n  watchElementVisibility\n} from \"~/browser\"\nimport {\n  Tooltip,\n  mountInlineTooltip2\n} from \"~/components/tooltip2\"\nimport { renderClipboardButton } from \"~/templates\"\n\nimport { Component } from \"../../../_\"\nimport {\n  Annotation,\n  mountAnnotationList\n} from \"../../annotation\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Code block overflow\n */\nexport interface Overflow {\n  scrollable: boolean                  /* Code block overflows */\n}\n\n/**\n * Code block\n */\nexport type CodeBlock =\n  | Overflow\n  | Annotation\n  | Tooltip\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for code blocks\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Find candidate list element directly following a code block\n *\n * @param el - Code block element\n *\n * @returns List element or nothing\n */\nfunction findCandidateList(el: HTMLElement): HTMLElement | undefined {\n  if (el.nextElementSibling) {\n    const sibling = el.nextElementSibling as HTMLElement\n    if (sibling.tagName === \"OL\")\n      return sibling\n\n    /* Skip empty paragraphs - see https://bit.ly/3r4ZJ2O */\n    else if (sibling.tagName === \"P\" && !sibling.children.length)\n      return findCandidateList(sibling)\n  }\n\n  /* Everything else */\n  return undefined\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch code block\n *\n * This function monitors size changes of the viewport, as well as switches of\n * content tabs with embedded code blocks, as both may trigger overflow.\n *\n * @param el - Code block element\n *\n * @returns Code block observable\n */\nexport function watchCodeBlock(\n  el: HTMLElement\n): Observable<Overflow> {\n  return watchElementSize(el)\n    .pipe(\n      map(({ width }) => {\n        const content = getElementContentSize(el)\n        return {\n          scrollable: content.width > width\n        }\n      }),\n      
distinctUntilKeyChanged(\"scrollable\")\n    )\n}\n\n/**\n * Mount code block\n *\n * This function ensures that an overflowing code block is focusable through\n * keyboard, so it can be scrolled without a mouse to improve on accessibility.\n * Furthermore, if code annotations are enabled, they are mounted if and only\n * if the code block is currently visible, e.g., not in a hidden content tab.\n *\n * Note that code blocks may be mounted eagerly or lazily. If they're mounted\n * lazily (on first visibility), code annotation anchor links will not work,\n * as they are evaluated on initial page load, and code annotations in general\n * might feel a little bumpier.\n *\n * @param el - Code block element\n * @param options - Options\n *\n * @returns Code block and annotation component observable\n */\nexport function mountCodeBlock(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<CodeBlock>> {\n  const { matches: hover } = matchMedia(\"(hover)\")\n\n  /* Defer mounting of code block - see https://bit.ly/3vHVoVD */\n  const factory$ = defer(() => {\n    const push$ = new Subject<Overflow>()\n    const done$ = push$.pipe(takeLast(1))\n    push$.subscribe(({ scrollable }) => {\n      if (scrollable && hover)\n        el.setAttribute(\"tabindex\", \"0\")\n      else\n        el.removeAttribute(\"tabindex\")\n    })\n\n    /* Render button for Clipboard.js integration */\n    const content$: Array<Observable<Component<CodeBlock>>> = []\n    if (ClipboardJS.isSupported()) {\n      if (el.closest(\".copy\") || (\n        feature(\"content.code.copy\") && !el.closest(\".no-copy\")\n      )) {\n        const parent = el.closest(\"pre\")!\n        parent.id = `__code_${sequence++}`\n\n        /* Mount tooltip, if enabled */\n        const button = renderClipboardButton(parent.id)\n        parent.insertBefore(button, el)\n        if (feature(\"content.tooltips\"))\n          content$.push(mountInlineTooltip2(button, { viewport$ }))\n      }\n    }\n\n    /* Handle code annotations */\n    const container = el.closest(\".highlight\")\n    if (container instanceof HTMLElement) {\n      const list = findCandidateList(container)\n\n      /* Mount code annotations, if enabled */\n      if (typeof list !== \"undefined\" && (\n        container.classList.contains(\"annotate\") ||\n        feature(\"content.code.annotate\")\n      )) {\n        const annotations$ = mountAnnotationList(list, el, options)\n        content$.push(\n          watchElementSize(container)\n            .pipe(\n              takeUntil(done$),\n              map(({ width, height }) => width && height),\n              distinctUntilChanged(),\n              switchMap(active => active ? 
annotations$ : EMPTY)\n            )\n        )\n      }\n    }\n\n    // If the code block has line spans, we can add this additional class to\n    // the code block element, which fixes the problem for highlighted code\n    // lines not stretching to the entirety of the screen when the code block\n    // overflows, e.g., on mobile - see\n    const spans = getElements(\":scope > span[id]\", el)\n    if (spans.length)\n      el.classList.add(\"md-code__content\")\n\n    /* Create and return component */\n    return watchCodeBlock(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state })),\n        mergeWith(...content$)\n      )\n  })\n\n  /* Mount code block lazily */\n  if (feature(\"content.lazy\"))\n    return watchElementVisibility(el)\n      .pipe(\n        filter(visible => visible),\n        take(1),\n        switchMap(() => factory$)\n      )\n\n  /* Mount code block */\n  return factory$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  filter,\n  finalize,\n  map,\n  merge,\n  tap\n} from \"rxjs\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Details\n */\nexport interface Details {\n  action: \"open\" | \"close\"             /* Details state */\n  reveal?: boolean                     /* Details is revealed */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch details\n *\n * @param el - Details element\n * @param options - Options\n *\n * @returns Details observable\n */\nexport function watchDetails(\n  el: HTMLDetailsElement, { target$, print$ }: WatchOptions\n): Observable<Details> {\n  let open = true\n  return merge(\n\n    /* Open and focus details on location target */\n    target$\n      .pipe(\n        map(target => target.closest(\"details:not([open])\")!),\n        filter(details => el === details),\n        map(() => ({\n          action: \"open\", reveal: true\n        }) as Details)\n      ),\n\n    /* Open details on print and close afterwards */\n    print$\n      .pipe(\n        filter(active => active || !open),\n        tap(() => open = el.open),\n        map(active => ({\n          action: active ? 
\"open\" : \"close\"\n        }) as Details)\n      )\n  )\n}\n\n/**\n * Mount details\n *\n * This function ensures that `details` tags are opened on anchor jumps and\n * prior to printing, so the whole content of the page is visible.\n *\n * @param el - Details element\n * @param options - Options\n *\n * @returns Details component observable\n */\nexport function mountDetails(\n  el: HTMLDetailsElement, options: MountOptions\n): Observable<Component<Details>> {\n  return defer(() => {\n    const push$ = new Subject<Details>()\n    push$.subscribe(({ action, reveal }) => {\n      el.toggleAttribute(\"open\", action === \"open\")\n      if (reveal)\n        el.scrollIntoView()\n    })\n\n    /* Create and return component */\n    return watchDetails(el, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", ".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel 
p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files 
(the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  map,\n  of,\n  shareReplay,\n  tap\n} from \"rxjs\"\n\nimport { watchScript } from \"~/browser\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\nimport themeCSS from \"./index.css\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mermaid diagram\n */\nexport interface Mermaid {}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Mermaid instance observable\n */\nlet mermaid$: Observable<void>\n\n/**\n * Global sequence number for diagrams\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch Mermaid script\n *\n * @returns Mermaid scripts observable\n */\nfunction fetchScripts(): Observable<void> {\n  return typeof mermaid === \"undefined\" || mermaid instanceof Element\n    ? 
watchScript(\"https://unpkg.com/mermaid@10/dist/mermaid.min.js\")\n    : of(undefined)\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount Mermaid diagram\n *\n * @param el - Code block element\n *\n * @returns Mermaid diagram component observable\n */\nexport function mountMermaid(\n  el: HTMLElement\n): Observable<Component<Mermaid>> {\n  el.classList.remove(\"mermaid\") // Hack: mitigate https://bit.ly/3CiN6Du\n  mermaid$ ||= fetchScripts()\n    .pipe(\n      tap(() => mermaid.initialize({\n        startOnLoad: false,\n        themeCSS,\n        sequence: {\n          actorFontSize: \"16px\", // Hack: mitigate https://bit.ly/3y0NEi3\n          messageFontSize: \"16px\",\n          noteFontSize: \"16px\"\n        }\n      })),\n      map(() => undefined),\n      shareReplay(1)\n    )\n\n  /* Render diagram */\n  mermaid$.subscribe(async () => {\n    el.classList.add(\"mermaid\") // Hack: mitigate https://bit.ly/3CiN6Du\n    const id = `__mermaid_${sequence++}`\n\n    /* Create host element to replace code block */\n    const host = h(\"div\", { class: \"mermaid\" })\n    const text = el.textContent\n\n    /* Render and inject diagram */\n    const { svg, fn } = await mermaid.render(id, text)\n\n    /* Create a shadow root and inject diagram */\n    const shadow = host.attachShadow({ mode: \"closed\" })\n    shadow.innerHTML = svg\n\n    /* Replace code block with diagram and bind functions */\n    el.replaceWith(host)\n    fn?.(shadow)\n  })\n\n  /* Create and return component */\n  return mermaid$\n    .pipe(\n      map(() => ({ ref: el }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Observable, of } from \"rxjs\"\n\nimport { renderTable } from \"~/templates\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Data table\n */\nexport interface DataTable {}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Sentinel for replacement\n */\nconst sentinel = h(\"table\")\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount data table\n *\n * This function wraps a data table in another scrollable container, so it can\n * be smoothly scrolled on smaller screen sizes and won't break the layout.\n *\n * @param el - Data table element\n *\n * @returns Data table component observable\n */\nexport function mountDataTable(\n  el: HTMLElement\n): Observable<Component<DataTable>> {\n  el.replaceWith(sentinel)\n  sentinel.replaceWith(renderTable(el))\n\n  /* Create and return component */\n  return of({ ref: el })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  asyncScheduler,\n  auditTime,\n  combineLatest,\n  defer,\n  endWith,\n  filter,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  skip,\n  startWith,\n  subscribeOn,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElement,\n  getElementContentOffset,\n  getElementContentSize,\n  getElementOffset,\n  getElementSize,\n  getElements,\n  watchElementContentOffset,\n  watchElementSize,\n  watchElementVisibility\n} from \"~/browser\"\nimport { renderTabbedControl } from \"~/templates\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Content tabs\n */\nexport interface ContentTabs {\n  active: HTMLLabelElement             /* Active tab label */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch content tabs\n *\n * @param inputs - Content tabs input elements\n *\n * @returns Content tabs observable\n */\nexport function watchContentTabs(\n  inputs: HTMLInputElement[]\n): Observable<ContentTabs> {\n  const initial = inputs.find(input => input.checked) || inputs[0]\n  return merge(...inputs.map(input => fromEvent(input, \"change\")\n    .pipe(\n      map(() => getElement<HTMLLabelElement>(`label[for=\"${input.id}\"]`))\n    )\n  ))\n    .pipe(\n      startWith(getElement<HTMLLabelElement>(`label[for=\"${initial.id}\"]`)),\n      map(active => ({ active }))\n    )\n}\n\n/**\n * Mount content tabs\n *\n * @param el - Content tabs element\n * @param options - Options\n *\n * @returns Content tabs component observable\n */\nexport function mountContentTabs(\n  el: HTMLElement, { viewport$, target$ }: MountOptions\n): Observable<Component<ContentTabs>> {\n  const container = getElement(\".tabbed-labels\", el)\n  const inputs = getElements<HTMLInputElement>(\":scope > input\", el)\n\n  /* Render content tab previous button for pagination */\n  const prev = renderTabbedControl(\"prev\")\n  el.append(prev)\n\n  /* Render content tab next button for pagination */\n  const next = renderTabbedControl(\"next\")\n  el.append(next)\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<ContentTabs>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    combineLatest([push$, watchElementSize(el), watchElementVisibility(el)])\n      .pipe(\n        takeUntil(done$),\n        auditTime(1, animationFrameScheduler)\n      )\n        .subscribe({\n\n          /* Handle emission */\n          next([{ 
active }, size]) {\n            const offset = getElementOffset(active)\n            const { width } = getElementSize(active)\n\n            /* Set tab indicator offset and width */\n            el.style.setProperty(\"--md-indicator-x\", `${offset.x}px`)\n            el.style.setProperty(\"--md-indicator-width\", `${width}px`)\n\n            /* Scroll container to active content tab */\n            const content = getElementContentOffset(container)\n            if (\n              offset.x         < content.x              ||\n              offset.x + width > content.x + size.width\n            )\n              container.scrollTo({\n                left: Math.max(0, offset.x - 16),\n                behavior: \"smooth\"\n              })\n          },\n\n          /* Handle complete */\n          complete() {\n            el.style.removeProperty(\"--md-indicator-x\")\n            el.style.removeProperty(\"--md-indicator-width\")\n          }\n        })\n\n    /* Hide content tab buttons on borders */\n    combineLatest([\n      watchElementContentOffset(container),\n      watchElementSize(container)\n    ])\n      .pipe(\n        takeUntil(done$)\n      )\n        .subscribe(([offset, size]) => {\n          const content = getElementContentSize(container)\n          prev.hidden = offset.x < 16\n          next.hidden = offset.x > content.width - size.width - 16\n        })\n\n    /* Paginate content tab container on click */\n    merge(\n      fromEvent(prev, \"click\").pipe(map(() => -1)),\n      fromEvent(next, \"click\").pipe(map(() => +1))\n    )\n      .pipe(\n        takeUntil(done$)\n      )\n        .subscribe(direction => {\n          const { width } = getElementSize(container)\n          container.scrollBy({\n            left: width * direction,\n            behavior: \"smooth\"\n          })\n        })\n\n    /* Switch to content tab target */\n    target$\n      .pipe(\n        takeUntil(done$),\n        filter(input => inputs.includes(input as HTMLInputElement))\n      )\n        .subscribe(input => input.click())\n\n    /* Add link to each content tab label */\n    container.classList.add(\"tabbed-labels--linked\")\n    for (const input of inputs) {\n      const label = getElement<HTMLLabelElement>(`label[for=\"${input.id}\"]`)\n      label.replaceChildren(h(\"a\", {\n        href: `#${label.htmlFor}`,\n        tabIndex: -1\n      }, ...Array.from(label.childNodes)))\n\n      /* Allow to copy link without scrolling to anchor */\n      fromEvent<MouseEvent>(label.firstElementChild!, \"click\")\n        .pipe(\n          takeUntil(done$),\n          filter(ev => !(ev.metaKey || ev.ctrlKey)),\n          tap(ev => {\n            ev.preventDefault()\n            ev.stopPropagation()\n          })\n        )\n          // @todo we might need to remove the anchor link on complete\n          .subscribe(() => {\n            history.replaceState({}, \"\", `#${label.htmlFor}`)\n            label.click()\n          })\n    }\n\n    /* Set up linking of content tabs, if enabled */\n    if (feature(\"content.tabs.link\"))\n      push$.pipe(\n        skip(1),\n        withLatestFrom(viewport$)\n      )\n        .subscribe(([{ active }, { offset }]) => {\n          const tab = active.innerText.trim()\n          if (active.hasAttribute(\"data-md-switching\")) {\n            active.removeAttribute(\"data-md-switching\")\n\n          /* Determine viewport offset of active tab */\n          } else {\n            const y = el.offsetTop - offset.y\n\n            /* Passively activate other tabs 
*/\n            for (const set of getElements(\"[data-tabs]\"))\n              for (const input of getElements<HTMLInputElement>(\n                \":scope > input\", set\n              )) {\n                const label = getElement(`label[for=\"${input.id}\"]`)\n                if (\n                  label !== active &&\n                  label.innerText.trim() === tab\n                ) {\n                  label.setAttribute(\"data-md-switching\", \"\")\n                  input.click()\n                  break\n                }\n              }\n\n            /* Bring active tab into view */\n            window.scrollTo({\n              top: el.offsetTop - y\n            })\n\n            /* Persist active tabs in local storage */\n            const tabs = __md_get<string[]>(\"__tabs\") || []\n            __md_set(\"__tabs\", [...new Set([tab, ...tabs])])\n          }\n        })\n\n    /* Pause media (audio, video) on switch - see https://bit.ly/3Bk6cel */\n    push$.pipe(takeUntil(done$))\n      .subscribe(() => {\n        for (const media of getElements<HTMLAudioElement>(\"audio, video\", el))\n          media.pause()\n      })\n\n    /* Create and return component */\n    return watchContentTabs(inputs)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n    .pipe(\n      subscribeOn(asyncScheduler)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Observable, merge } from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { Viewport, getElements } from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport {\n  Tooltip,\n  mountInlineTooltip2\n} from \"../../tooltip2\"\nimport {\n  Annotation,\n  mountAnnotationBlock\n} from \"../annotation\"\nimport {\n  CodeBlock,\n  mountCodeBlock\n} from \"../code\"\nimport {\n  Details,\n  mountDetails\n} from \"../details\"\nimport {\n  Mermaid,\n  mountMermaid\n} from \"../mermaid\"\nimport {\n  DataTable,\n  mountDataTable\n} from \"../table\"\nimport {\n  ContentTabs,\n  mountContentTabs\n} from \"../tabs\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Content\n */\nexport type Content =\n  | Annotation\n  | CodeBlock\n  | ContentTabs\n  | DataTable\n  | Details\n  | Mermaid\n  | Tooltip\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n  print$: Observable<boolean>          /* Media print observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount content\n *\n * This function mounts all components that are found in the content of the\n * actual article, including code blocks, data tables and details.\n *\n * @param el - Content element\n * @param options - Options\n *\n * @returns Content component observable\n */\nexport function mountContent(\n  el: HTMLElement, { viewport$, target$, print$ }: MountOptions\n): Observable<Component<Content>> {\n  return merge(\n\n    /* Annotations */\n    ...getElements(\".annotate:not(.highlight)\", el)\n      .map(child => mountAnnotationBlock(child, { target$, print$ })),\n\n    /* Code blocks */\n    ...getElements(\"pre:not(.mermaid) > code\", el)\n      .map(child => mountCodeBlock(child, { target$, print$ })),\n\n    /* Mermaid diagrams */\n    ...getElements(\"pre.mermaid\", el)\n      .map(child => mountMermaid(child)),\n\n    /* Data tables */\n    ...getElements(\"table:not([class])\", el)\n      .map(child => mountDataTable(child)),\n\n    /* Details */\n    ...getElements(\"details\", el)\n      .map(child => mountDetails(child, { target$, print$ })),\n\n    /* Content tabs */\n    ...getElements(\"[data-tabs]\", el)\n      .map(child => mountContentTabs(child, { viewport$, target$ })),\n\n    /* Tooltips */\n    ...getElements(\"[title]\", el)\n      .filter(() => feature(\"content.tooltips\"))\n      .map(child => mountInlineTooltip2(child, { viewport$ }))\n  )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without 
restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  delay,\n  finalize,\n  map,\n  merge,\n  of,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { getElement } from \"~/browser\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Dialog\n */\nexport interface Dialog {\n  message: string                      /* Dialog message */\n  active: boolean                      /* Dialog is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  alert$: Subject<string>              /* Alert subject */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  alert$: Subject<string>              /* Alert subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch dialog\n *\n * @param _el - Dialog element\n * @param options - Options\n *\n * @returns Dialog observable\n */\nexport function watchDialog(\n  _el: HTMLElement, { alert$ }: WatchOptions\n): Observable<Dialog> {\n  return alert$\n    .pipe(\n      switchMap(message => merge(\n        of(true),\n        of(false).pipe(delay(2000))\n      )\n        .pipe(\n          map(active => ({ message, active }))\n        )\n      )\n    )\n}\n\n/**\n * Mount dialog\n *\n * This function reveals the dialog in the right corner when a new alert is\n * emitted through the subject that is passed as part of the options.\n *\n * @param el - Dialog element\n * @param options - Options\n *\n * @returns Dialog component observable\n */\nexport function mountDialog(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Dialog>> {\n  const inner = getElement(\".md-typeset\", el)\n  return defer(() => {\n    const push$ = new Subject<Dialog>()\n    push$.subscribe(({ message, active }) => {\n      el.classList.toggle(\"md-dialog--active\", active)\n      inner.textContent = message\n    })\n\n    /* Create and return component */\n    return watchDialog(el, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of 
charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  asyncScheduler,\n  auditTime,\n  combineLatest,\n  debounceTime,\n  defer,\n  distinctUntilChanged,\n  filter,\n  finalize,\n  map,\n  merge,\n  of,\n  subscribeOn,\n  tap,\n  throttleTime\n} from \"rxjs\"\n\nimport {\n  ElementOffset,\n  getElement,\n  getElementContainer,\n  getElementOffset,\n  getElementSize,\n  watchElementContentOffset,\n  watchElementFocus,\n  watchElementHover\n} from \"~/browser\"\nimport { renderTooltip } from \"~/templates\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Tooltip\n */\nexport interface Tooltip {\n  active: boolean                      /* Tooltip is active */\n  offset: ElementOffset                /* Tooltip offset */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Global sequence number for tooltips\n */\nlet sequence = 0\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch tooltip\n *\n * This function will append the tooltip temporarily to compute its width,\n * which is necessary for correct centering, and then removing it again.\n *\n * @param el - Tooltip element\n * @param host - Host element\n *\n * @returns Tooltip observable\n */\nexport function watchTooltip(\n  el: HTMLElement, host: HTMLElement\n): Observable<Tooltip> {\n  document.body.append(el)\n\n  /* Compute width and remove tooltip immediately */\n  const { width } = getElementSize(el)\n  el.style.setProperty(\"--md-tooltip-width\", `${width}px`)\n  el.remove()\n\n  /* Retrieve and watch containing element */\n  const container = getElementContainer(host)\n  const scroll$ =\n    typeof container !== \"undefined\"\n      ? 
watchElementContentOffset(container)\n      : of({ x: 0, y: 0 })\n\n  /* Compute tooltip visibility */\n  const active$ = merge(\n    watchElementFocus(host),\n    watchElementHover(host)\n  )\n    .pipe(\n      distinctUntilChanged()\n    )\n\n  /* Compute tooltip offset */\n  return combineLatest([active$, scroll$])\n    .pipe(\n      map(([active, scroll]) => {\n        let { x, y } = getElementOffset(host)\n        const size = getElementSize(host)\n\n        /**\n         * Experimental: fix handling of tables - see https://bit.ly/3TQEj5O\n         *\n         * If this proves to be a viable fix, we should refactor tooltip\n         * positioning and somehow streamline the current process. This might\n         * also fix positioning for annotations inside tables, which is another\n         * limitation.\n         */\n        const table = host.closest(\"table\")\n        if (table && host.parentElement) {\n          x += table.offsetLeft + host.parentElement.offsetLeft\n          y += table.offsetTop  + host.parentElement.offsetTop\n        }\n        return {\n          active,\n          offset: {\n            x: x - scroll.x + size.width  / 2 - width / 2,\n            y: y - scroll.y + size.height + 8\n          }\n        }\n      })\n    )\n}\n\n/**\n * Mount tooltip\n *\n * @param el - Host element\n *\n * @returns Tooltip component observable\n */\nexport function mountTooltip(\n  el: HTMLElement\n): Observable<Component<Tooltip>> {\n  const title = el.title\n  if (!title.length)\n    return EMPTY\n\n  /* Render tooltip and set title from host element */\n  const id = `__tooltip_${sequence++}`\n  const tooltip = renderTooltip(id, \"inline\")\n  const typeset = getElement(\".md-typeset\", tooltip)\n  typeset.innerHTML = title\n\n  /* Mount component on subscription */\n  return defer(() => {\n    const push$ = new Subject<Tooltip>()\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ offset }) {\n        tooltip.style.setProperty(\"--md-tooltip-x\", `${offset.x}px`)\n        tooltip.style.setProperty(\"--md-tooltip-y\", `${offset.y}px`)\n      },\n\n      /* Handle complete */\n      complete() {\n        tooltip.style.removeProperty(\"--md-tooltip-x\")\n        tooltip.style.removeProperty(\"--md-tooltip-y\")\n      }\n    })\n\n    /* Toggle tooltip presence to mitigate empty lines when copying */\n    merge(\n      push$.pipe(filter(({ active }) => active)),\n      push$.pipe(debounceTime(250), filter(({ active }) => !active))\n    )\n      .subscribe({\n\n        /* Handle emission */\n        next({ active }) {\n          if (active) {\n            el.insertAdjacentElement(\"afterend\", tooltip)\n            el.setAttribute(\"aria-describedby\", id)\n            el.removeAttribute(\"title\")\n          } else {\n            tooltip.remove()\n            el.removeAttribute(\"aria-describedby\")\n            el.setAttribute(\"title\", title)\n          }\n        },\n\n        /* Handle complete */\n        complete() {\n          tooltip.remove()\n          el.removeAttribute(\"aria-describedby\")\n          el.setAttribute(\"title\", title)\n        }\n      })\n\n    /* Toggle tooltip visibility */\n    push$\n      .pipe(\n        auditTime(16, animationFrameScheduler)\n      )\n        .subscribe(({ active }) => {\n          tooltip.classList.toggle(\"md-tooltip--active\", active)\n        })\n\n    // @todo - refactor positioning together with annotations \u2013 there are\n    // several things that overlap and are identical in handling\n\n    /* Track 
relative origin of tooltip */\n    push$\n      .pipe(\n        throttleTime(125, animationFrameScheduler),\n        filter(() => !!el.offsetParent),\n        map(() => el.offsetParent!.getBoundingClientRect()),\n        map(({ x }) => x)\n      )\n      .subscribe({\n\n        /* Handle emission */\n        next(origin) {\n          if (origin)\n            tooltip.style.setProperty(\"--md-tooltip-0\", `${-origin}px`)\n          else\n            tooltip.style.removeProperty(\"--md-tooltip-0\")\n        },\n\n        /* Handle complete */\n        complete() {\n          tooltip.style.removeProperty(\"--md-tooltip-0\")\n        }\n      })\n\n    /* Create and return component */\n    return watchTooltip(tooltip, el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n    .pipe(\n      subscribeOn(asyncScheduler)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  bufferCount,\n  combineLatest,\n  combineLatestWith,\n  defer,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  filter,\n  from,\n  ignoreElements,\n  map,\n  mergeMap,\n  mergeWith,\n  of,\n  shareReplay,\n  startWith,\n  switchMap,\n  takeUntil\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElements,\n  watchElementSize,\n  watchToggle\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Main } from \"../../main\"\nimport {\n  Tooltip,\n  mountTooltip\n} from \"../../tooltip\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Header\n */\nexport interface Header {\n  height: number                       /* Header visible height */\n  hidden: boolean                      /* Header is hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Compute whether the header is hidden\n *\n * If the user scrolls past a certain threshold, the header can be hidden when\n * scrolling down, and shown when scrolling up.\n *\n * @param options - Options\n *\n * @returns Toggle observable\n */\nfunction isHidden({ viewport$ }: WatchOptions): Observable<boolean> {\n  if (!feature(\"header.autohide\"))\n    return of(false)\n\n  /* Compute direction and turning point */\n  const direction$ = viewport$\n    .pipe(\n      map(({ offset: { y } }) => y),\n      bufferCount(2, 1),\n      map(([a, b]) => [a < b, b] as const),\n      distinctUntilKeyChanged(0)\n    )\n\n  /* Compute whether header should be hidden */\n  const hidden$ = combineLatest([viewport$, direction$])\n    .pipe(\n      filter(([{ offset }, [, y]]) => Math.abs(y - offset.y) > 100),\n      map(([, [direction]]) => direction),\n      distinctUntilChanged()\n    )\n\n  /* Compute threshold for hiding */\n  const search$ = watchToggle(\"search\")\n  return combineLatest([viewport$, search$])\n    .pipe(\n      map(([{ offset }, search]) => offset.y > 400 && !search),\n      distinctUntilChanged(),\n      switchMap(active => active ? 
hidden$ : of(false)),\n      startWith(false)\n    )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch header\n *\n * @param el - Header element\n * @param options - Options\n *\n * @returns Header observable\n */\nexport function watchHeader(\n  el: HTMLElement, options: WatchOptions\n): Observable<Header> {\n  return defer(() => combineLatest([\n    watchElementSize(el),\n    isHidden(options)\n  ]))\n    .pipe(\n      map(([{ height }, hidden]) => ({\n        height,\n        hidden\n      })),\n      distinctUntilChanged((a, b) => (\n        a.height === b.height &&\n        a.hidden === b.hidden\n      )),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount header\n *\n * This function manages the different states of the header, i.e. whether it's\n * hidden or rendered with a shadow. This depends heavily on the main area.\n *\n * @param el - Header element\n * @param options - Options\n *\n * @returns Header component observable\n */\nexport function mountHeader(\n  el: HTMLElement, { header$, main$ }: MountOptions\n): Observable<Component<Header | Tooltip>> {\n  return defer(() => {\n    const push$ = new Subject<Main>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    push$\n      .pipe(\n        distinctUntilKeyChanged(\"active\"),\n        combineLatestWith(header$)\n      )\n        .subscribe(([{ active }, { hidden }]) => {\n          el.classList.toggle(\"md-header--shadow\", active && !hidden)\n          el.hidden = hidden\n        })\n\n    /* Mount tooltips, if enabled */\n    const tooltips = from(getElements(\"[title]\", el))\n      .pipe(\n        filter(() => feature(\"content.tooltips\")),\n        mergeMap(child => mountTooltip(child))\n      )\n\n    /* Link to main area */\n    main$.subscribe(push$)\n\n    /* Create and return component */\n    return header$\n      .pipe(\n        takeUntil(done$),\n        map(state => ({ ref: el, ...state })),\n        mergeWith(tooltips.pipe(takeUntil(done$)))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  defer,\n  distinctUntilKeyChanged,\n  finalize,\n  map,\n  tap\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  getElementSize,\n  getOptionalElement,\n  watchViewportAt\n} from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { Header } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Header\n */\nexport interface HeaderTitle {\n  active: boolean                      /* Header title is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch header title\n *\n * @param el - Heading element\n * @param options - Options\n *\n * @returns Header title observable\n */\nexport function watchHeaderTitle(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<HeaderTitle> {\n  return watchViewportAt(el, { viewport$, header$ })\n    .pipe(\n      map(({ offset: { y } }) => {\n        const { height } = getElementSize(el)\n        return {\n          active: y >= height\n        }\n      }),\n      distinctUntilKeyChanged(\"active\")\n    )\n}\n\n/**\n * Mount header title\n *\n * This function swaps the header title from the site title to the title of the\n * current page when the user scrolls past the first headline.\n *\n * @param el - Header title element\n * @param options - Options\n *\n * @returns Header title component observable\n */\nexport function mountHeaderTitle(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<HeaderTitle>> {\n  return defer(() => {\n    const push$ = new Subject<HeaderTitle>()\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ active }) {\n        el.classList.toggle(\"md-header__title--active\", active)\n      },\n\n      /* Handle complete */\n      complete() {\n        el.classList.remove(\"md-header__title--active\")\n      }\n    })\n\n    /* Obtain headline, if any */\n    const heading = getOptionalElement(\".md-content h1\")\n    if (typeof heading === \"undefined\")\n      return EMPTY\n\n    /* Create and return component */\n    return watchHeaderTitle(heading, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and 
associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  map,\n  switchMap\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  watchElementSize\n} from \"~/browser\"\n\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Main area\n */\nexport interface Main {\n  offset: number                       /* Main area top offset */\n  height: number                       /* Main area visible height */\n  active: boolean                      /* Main area is active */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch main area\n *\n * This function returns an observable that computes the visual parameters of\n * the main area which depends on the viewport vertical offset and height, as\n * well as the height of the header element, if the header is fixed.\n *\n * @param el - Main area element\n * @param options - Options\n *\n * @returns Main area observable\n */\nexport function watchMain(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<Main> {\n\n  /* Compute necessary adjustment for header */\n  const adjust$ = header$\n    .pipe(\n      map(({ height }) => height),\n      distinctUntilChanged()\n    )\n\n  /* Compute the main area's top and bottom borders */\n  const border$ = adjust$\n    .pipe(\n      switchMap(() => watchElementSize(el)\n        .pipe(\n          map(({ height }) => ({\n            top:    el.offsetTop,\n            bottom: el.offsetTop + height\n          })),\n          distinctUntilKeyChanged(\"bottom\")\n        )\n      )\n    )\n\n  /* Compute the main area's offset, visible height and if we scrolled past */\n  return combineLatest([adjust$, border$, viewport$])\n    .pipe(\n      map(([header, { top, bottom }, { offset: { y }, size: { height } }]) => {\n        height = Math.max(0, height\n          - Math.max(0, top    - y,  header)\n          - Math.max(0, 
height + y - bottom)\n        )\n        return {\n          offset: top - header,\n          height,\n          active: top - header <= y\n        }\n      }),\n      distinctUntilChanged((a, b) => (\n        a.offset === b.offset &&\n        a.height === b.height &&\n        a.active === b.active\n      ))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  asyncScheduler,\n  defer,\n  filter,\n  finalize,\n  fromEvent,\n  map,\n  mergeMap,\n  observeOn,\n  of,\n  repeat,\n  shareReplay,\n  skip,\n  startWith,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { getElements, watchMedia } from \"~/browser\"\nimport { h } from \"~/utilities\"\n\nimport {\n  Component,\n  getComponentElement\n} from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Palette colors\n */\nexport interface PaletteColor {\n  media?: string                       /* Media query */\n  scheme?: string                      /* Color scheme */\n  primary?: string                     /* Primary color */\n  accent?: string                      /* Accent color */\n}\n\n/**\n * Palette\n */\nexport interface Palette {\n  index: number                        /* Palette index */\n  color: PaletteColor                  /* Palette colors */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch color palette\n *\n * @param inputs - Color palette element\n *\n * @returns Color palette observable\n */\nexport function watchPalette(\n  inputs: HTMLInputElement[]\n): Observable<Palette> {\n  const current = __md_get<Palette>(\"__palette\") || {\n    index: inputs.findIndex(input => matchMedia(\n      input.getAttribute(\"data-md-color-media\")!\n    ).matches)\n  }\n\n  /* Emit changes in color palette */\n  const index = Math.max(0, Math.min(current.index, inputs.length - 1))\n  return of(...inputs)\n    .pipe(\n      mergeMap(input => fromEvent(input, \"change\").pipe(map(() => input))),\n      startWith(inputs[index]),\n      map(input => ({\n        index: inputs.indexOf(input),\n        color: {\n          media:   input.getAttribute(\"data-md-color-media\"),\n         
 scheme:  input.getAttribute(\"data-md-color-scheme\"),\n          primary: input.getAttribute(\"data-md-color-primary\"),\n          accent:  input.getAttribute(\"data-md-color-accent\")\n        }\n      } as Palette)),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount color palette\n *\n * @param el - Color palette element\n *\n * @returns Color palette component observable\n */\nexport function mountPalette(\n  el: HTMLElement\n): Observable<Component<Palette>> {\n  const inputs = getElements<HTMLInputElement>(\"input\", el)\n  const meta = h(\"meta\", { name: \"theme-color\" })\n  document.head.appendChild(meta)\n\n  // Add color scheme meta tag\n  const scheme = h(\"meta\", { name: \"color-scheme\" })\n  document.head.appendChild(scheme)\n\n  /* Mount component on subscription */\n  const media$ = watchMedia(\"(prefers-color-scheme: light)\")\n  return defer(() => {\n    const push$ = new Subject<Palette>()\n    push$.subscribe(palette => {\n      document.body.setAttribute(\"data-md-color-switching\", \"\")\n\n      /* Retrieve color palette for system preference */\n      if (palette.color.media === \"(prefers-color-scheme)\") {\n        const media = matchMedia(\"(prefers-color-scheme: light)\")\n        const input = document.querySelector(media.matches\n          ? \"[data-md-color-media='(prefers-color-scheme: light)']\"\n          : \"[data-md-color-media='(prefers-color-scheme: dark)']\"\n        )!\n\n        /* Retrieve colors for system preference */\n        palette.color.scheme  = input.getAttribute(\"data-md-color-scheme\")!\n        palette.color.primary = input.getAttribute(\"data-md-color-primary\")!\n        palette.color.accent  = input.getAttribute(\"data-md-color-accent\")!\n      }\n\n      /* Set color palette */\n      for (const [key, value] of Object.entries(palette.color))\n        document.body.setAttribute(`data-md-color-${key}`, value)\n\n      /* Set toggle visibility */\n      for (let index = 0; index < inputs.length; index++) {\n        const label = inputs[index].nextElementSibling\n        if (label instanceof HTMLElement)\n          label.hidden = palette.index !== index\n      }\n\n      /* Persist preference in local storage */\n      __md_set(\"__palette\", palette)\n    })\n\n    // Handle color switch on Enter or Space - see https://t.ly/YIhVj\n    fromEvent<KeyboardEvent>(el, \"keydown\").pipe(\n      filter(ev => ev.key === \"Enter\"),\n      withLatestFrom(push$, (_, palette) => palette)\n    )\n      .subscribe(({ index }) => {\n        index = (index + 1) % inputs.length\n        inputs[index].click()\n        inputs[index].focus()\n      })\n\n    /* Update theme-color meta tag */\n    push$\n      .pipe(\n        map(() => {\n          const header = getComponentElement(\"header\")\n          const style  = window.getComputedStyle(header)\n\n          // Set color scheme\n          scheme.content = style.colorScheme\n\n          /* Return color in hexadecimal format */\n          return style.backgroundColor.match(/\\d+/g)!\n            .map(value => (+value).toString(16).padStart(2, \"0\"))\n            .join(\"\")\n        })\n      )\n        .subscribe(color => meta.content = `#${color}`)\n\n    /* Revert transition durations after color switch */\n    push$.pipe(observeOn(asyncScheduler))\n      .subscribe(() => {\n        document.body.removeAttribute(\"data-md-color-switching\")\n      })\n\n    /* Create and return component */\n    return watchPalette(inputs)\n      .pipe(\n        takeUntil(media$.pipe(skip(1))),\n        
repeat(),\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  finalize,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { Component } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Progress indicator\n */\nexport interface Progress {\n  value: number                        // Progress value\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  progress$: Subject<number>           // Progress subject\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount progress indicator\n *\n * @param el - Progress indicator element\n * @param options - Options\n *\n * @returns Progress indicator component observable\n */\nexport function mountProgress(\n  el: HTMLElement, { progress$ }: MountOptions\n): Observable<Component<Progress>> {\n\n  // Mount component on subscription\n  return defer(() => {\n    const push$ = new Subject<Progress>()\n    push$.subscribe(({ value }) => {\n      el.style.setProperty(\"--md-progress-value\", `${value}`)\n    })\n\n    // Create and return component\n    return progress$\n      .pipe(\n        tap(value => push$.next({ value })),\n        finalize(() => push$.complete()),\n        map(value => ({ ref: el, value }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above 
copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport ClipboardJS from \"clipboard\"\nimport {\n  Observable,\n  Subject,\n  map,\n  tap\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport { getElement } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n  alert$: Subject<string>              /* Alert subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Extract text to copy\n *\n * @param el - HTML element\n *\n * @returns Extracted text\n */\nfunction extract(el: HTMLElement): string {\n  el.setAttribute(\"data-md-copying\", \"\")\n  const copy = el.closest(\"[data-copy]\")\n  const text = copy\n    ? copy.getAttribute(\"data-copy\")!\n    : el.innerText\n  el.removeAttribute(\"data-md-copying\")\n  return text.trimEnd()\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up Clipboard.js integration\n *\n * @param options - Options\n */\nexport function setupClipboardJS(\n  { alert$ }: SetupOptions\n): void {\n  if (ClipboardJS.isSupported()) {\n    new Observable<ClipboardJS.Event>(subscriber => {\n      new ClipboardJS(\"[data-clipboard-target], [data-clipboard-text]\", {\n        text: el => (\n          el.getAttribute(\"data-clipboard-text\")! 
||\n          extract(getElement(\n            el.getAttribute(\"data-clipboard-target\")!\n          ))\n        )\n      })\n        .on(\"success\", ev => subscriber.next(ev))\n    })\n      .pipe(\n        tap(ev => {\n          const trigger = ev.trigger as HTMLElement\n          trigger.focus()\n        }),\n        map(() => translation(\"clipboard.copied\"))\n      )\n        .subscribe(alert$)\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  catchError,\n  map,\n  of\n} from \"rxjs\"\n\nimport {\n  getElement,\n  getElements,\n  requestXML\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Sitemap, i.e. a list of URLs\n */\nexport type Sitemap = Map<string, URL[]>\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Resolve URL to the given base URL\n *\n * When serving the site with instant navigation, MkDocs will set the hostname\n * to the value as specified in `dev_addr`, but the browser allows for several\n * hostnames to be used: `localhost`, `127.0.0.1` or even `0.0.0.0`, depending\n * on configuration. This function resolves the URL to the given hostname.\n *\n * @param url - URL\n * @param base - Base URL\n *\n * @returns Resolved URL\n */\nfunction resolve(url: URL, base: URL) {\n  url.protocol = base.protocol\n  url.hostname = base.hostname\n  return url\n}\n\n/**\n * Extract sitemap from document\n *\n * This function extracts the URLs and alternate links from the document, and\n * associates alternate links to the original URL as found in `loc`, allowing\n * the browser to navigate to the correct page when switching languages. 
The\n * format of the sitemap is expected to adhere to:\n *\n * ``` xml\n * <urlset>\n *   <url>\n *     <loc>...</loc>\n *     <xhtml:link rel=\"alternate\" hreflang=\"en\" href=\"...\"/>\n *     <xhtml:link rel=\"alternate\" hreflang=\"de\" href=\"...\"/>\n *     ...\n *   </url>\n *   ...\n * </urlset>\n * ```\n *\n * @param document - Document\n * @param base - Base URL\n *\n * @returns Sitemap\n */\nfunction extract(document: Document, base: URL): Sitemap {\n  const sitemap: Sitemap = new Map()\n  for (const el of getElements(\"url\", document)) {\n    const url = getElement(\"loc\", el)\n\n    // Create entry for location and add it to the list of links\n    const links = [resolve(new URL(url.textContent!), base)]\n    sitemap.set(`${links[0]}`, links)\n\n    // Attach alternate links to current entry\n    for (const link of getElements(\"[rel=alternate]\", el)) {\n      const href = link.getAttribute(\"href\")\n      if (href != null)\n        links.push(resolve(new URL(href), base))\n    }\n  }\n\n  // Return sitemap\n  return sitemap\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch the sitemap for the given base URL\n *\n * If a network or parsing error occurs, we just default to an empty sitemap,\n * which means the caller should fall back to regular navigation.\n *\n * @param base - Base URL\n *\n * @returns Sitemap observable\n */\nexport function fetchSitemap(base: URL | string): Observable<Sitemap> {\n  return requestXML(new URL(\"sitemap.xml\", base))\n    .pipe(\n      map(document => extract(document, new URL(base))),\n      catchError(() => of(new Map())),\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  catchError,\n  combineLatestWith,\n  concat,\n  debounceTime,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  of,\n  share,\n  switchMap,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { configuration, feature } from \"~/_\"\nimport {\n  Viewport,\n  getElements,\n  getLocation,\n  getOptionalElement,\n  requestHTML,\n  setLocation,\n  setLocationHash\n} from \"~/browser\"\nimport { getComponentElement } from \"~/components\"\n\nimport { Sitemap, fetchSitemap } from \"../sitemap\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n  location$: Subject<URL>              // Location subject\n  viewport$: Observable<Viewport>      // Viewport observable\n  progress$: Subject<number>           // Progress subject\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Handle clicks on internal URLs while skipping external URLs\n *\n * @param ev - Mouse event\n * @param sitemap - Sitemap\n *\n * @returns URL observable\n */\nfunction handle(\n  ev: MouseEvent, sitemap: Sitemap\n): Observable<URL> {\n  if (!(ev.target instanceof Element))\n    return EMPTY\n\n  // Skip, as target is not within a link - clicks on non-link elements are\n  // also captured, which we need to exclude from processing\n  const el = ev.target.closest(\"a\")\n  if (el === null)\n    return EMPTY\n\n  // Skip, as link opens in new window - we now know we have captured a click\n  // on a link, but the link either has a `target` property defined, or the\n  // user pressed the `meta` or `ctrl` key to open it in a new window. Thus,\n  // we need to filter this event as well.\n  if (el.target || ev.metaKey || ev.ctrlKey)\n    return EMPTY\n\n  // Next, we must check if the URL is relevant for us, i.e., if it's an\n  // internal link to a page that is managed by MkDocs. Only then we can be\n  // sure that the structure of the page to be loaded adheres to the current\n  // document structure and can subsequently be injected into it without doing\n  // a full reload. For this reason, we must canonicalize the URL by removing\n  // all search parameters and hash fragments.\n  const url = new URL(el.href)\n  url.search = url.hash = \"\"\n\n  // Skip, if URL is not included in the sitemap - this could be the case when\n  // linking between versions or languages, or to another page that the author\n  // included as part of the build, but that is not managed by MkDocs. In that\n  // case we must not continue with instant navigation.\n  if (!sitemap.has(`${url}`))\n    return EMPTY\n\n  // We now know that we have a link to an internal page, so we prevent the\n  // browser from navigation and emit the URL for instant navigation. Note that\n  // this also includes anchor links, which means we need to implement anchor\n  // positioning ourselves. 
The reason for this is that if we wouldn't manage\n  // anchor links as well, scroll restoration will not work correctly (e.g.\n  // following an anchor link and scrolling).\n  ev.preventDefault()\n  return of(new URL(el.href))\n}\n\n/**\n * Create a map of head elements for lookup and replacement\n *\n * @param document - Document\n *\n * @returns Tag map\n */\nfunction head(document: Document): Map<string, HTMLElement> {\n  const tags = new Map<string, HTMLElement>()\n  for (const el of getElements(\":scope > *\", document.head))\n    tags.set(el.outerHTML, el)\n\n  // Return tag map\n  return tags\n}\n\n/**\n * Resolve relative URLs in the given document\n *\n * This function resolves relative `href` and `src` attributes, which can belong\n * to all sorts of tags, like meta tags, links, images, scripts and more.\n *\n * @param document - Document\n *\n * @returns Document observable\n */\nfunction resolve(document: Document): Observable<Document> {\n  for (const el of getElements(\"[href], [src]\", document))\n    for (const key of [\"href\", \"src\"]) {\n      const value = el.getAttribute(key)\n      if (value && !/^(?:[a-z]+:)?\\/\\//i.test(value)) {\n        // @ts-expect-error - trick: self-assign to resolve URL\n        el[key] = el[key]\n        break\n      }\n    }\n\n  // Return document observable\n  return of(document)\n}\n\n/**\n * Inject the contents of a document into the current one\n *\n * @param next - Next document\n *\n * @returns Document observable\n */\nfunction inject(next: Document): Observable<Document> {\n  for (const selector of [\n    \"[data-md-component=announce]\",\n    \"[data-md-component=container]\",\n    \"[data-md-component=header-topic]\",\n    \"[data-md-component=outdated]\",\n    \"[data-md-component=logo]\",\n    \"[data-md-component=skip]\",\n    ...feature(\"navigation.tabs.sticky\")\n      ? 
[\"[data-md-component=tabs]\"]\n      : []\n  ]) {\n    const source = getOptionalElement(selector)\n    const target = getOptionalElement(selector, next)\n    if (\n      typeof source !== \"undefined\" &&\n      typeof target !== \"undefined\"\n    ) {\n      source.replaceWith(target)\n    }\n  }\n\n  // Update meta tags\n  const tags = head(document)\n  for (const [html, el] of head(next))\n    if (tags.has(html))\n      tags.delete(html)\n    else\n      document.head.appendChild(el)\n\n  // Remove meta tags that are not present in the new document\n  for (const el of tags.values()) {\n    const name = el.getAttribute(\"name\")\n    // @todo - find a better way to handle attributes we add dynamically in\n    // other components without mounting components on every navigation, as\n    // this might impact overall performance - see https://t.ly/ehp_O\n    if (name !== \"theme-color\" && name !== \"color-scheme\")\n      el.remove()\n  }\n\n  // After components and meta tags were replaced, re-evaluate scripts\n  // that were provided by the author as part of Markdown files\n  const container = getComponentElement(\"container\")\n  return concat(getElements(\"script\", container))\n    .pipe(\n      switchMap(el => {\n        const script = next.createElement(\"script\")\n        if (el.src) {\n          for (const name of el.getAttributeNames())\n            script.setAttribute(name, el.getAttribute(name)!)\n          el.replaceWith(script)\n\n          // Complete when script is loaded\n          return new Observable(observer => {\n            script.onload = () => observer.complete()\n          })\n\n        // Complete immediately\n        } else {\n          script.textContent = el.textContent\n          el.replaceWith(script)\n          return EMPTY\n        }\n      }),\n      ignoreElements(),\n      endWith(document)\n    )\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up instant navigation\n *\n * This is a heavily orchestrated operation - see inline comments to learn how\n * this works with Material for MkDocs, and how you can hook into it.\n *\n * @param options - Options\n *\n * @returns Document observable\n */\nexport function setupInstantNavigation(\n  { location$, viewport$, progress$ }: SetupOptions\n): Observable<Document> {\n  const config = configuration()\n  if (location.protocol === \"file:\")\n    return EMPTY\n\n  // Load sitemap immediately, so we have it available when the user initiates\n  // the first navigation request without any perceivable delay\n  const sitemap$ = fetchSitemap(config.base)\n\n  // Since we might be on a slow connection, the user might trigger multiple\n  // instant navigation events that overlap. MkDocs produces relative URLs for\n  // all internal links, which becomes a problem in this case, because we need\n  // to change the base URL the moment the user clicks a link that should be\n  // intercepted in order to be consistent with popstate, which means that the\n  // base URL would now be incorrect when resolving another relative link from\n  // the same site. 
For this reason we always resolve all relative links to\n  // absolute links, so we can be sure this never happens.\n  of(document)\n    .subscribe(resolve)\n\n  // --------------------------------------------------------------------------\n  // Navigation interception\n  // --------------------------------------------------------------------------\n\n  // Intercept navigation - to keep the number of event listeners down we use\n  // the fact that uncaptured events bubble up to the body. This has the nice\n  // property that we don't need to detach and then re-attach event listeners\n  // when the document is replaced after a navigation event.\n  const instant$ =\n    fromEvent<MouseEvent>(document.body, \"click\")\n      .pipe(\n        combineLatestWith(sitemap$),\n        switchMap(([ev, sitemap]) => handle(ev, sitemap)),\n        share()\n      )\n\n  // Intercept history change events, e.g. when the user uses the browser's\n  // back or forward buttons, and emit new location for fetching and parsing\n  const history$ =\n    fromEvent<PopStateEvent>(window, \"popstate\")\n      .pipe(\n        map(getLocation),\n        share()\n      )\n\n  // While it would be better UX to defer navigation events until the document\n  // is fully fetched and parsed, we must schedule it here to synchronize with\n  // popstate events, as they are emitted immediately. Moreover we need to\n  // store the current viewport offset for scroll restoration later on.\n  instant$.pipe(withLatestFrom(viewport$))\n    .subscribe(([url, { offset }]) => {\n      history.replaceState(offset, \"\")\n      history.pushState(null, \"\", url)\n    })\n\n  // Emit URLs that should be fetched via instant navigation on location subject\n  // which was passed into this function. The state of instant navigation can be\n  // intercepted by other parts of the application, which can synchronously back\n  // up or restore state before or after instant navigation happens.\n  merge(instant$, history$)\n    .subscribe(location$)\n\n  // --------------------------------------------------------------------------\n  // Fetching and parsing\n  // --------------------------------------------------------------------------\n\n  // Fetch document - we deduplicate requests to the same location, so we don't\n  // end up with multiple requests for the same page. We use `switchMap`, since\n  // we want to cancel the previous request when a new one is triggered, which\n  // is automatically handled by the observable returned by `request`. This is\n  // essential to ensure a good user experience, as we don't want to load pages\n  // that are not needed anymore, e.g., when the user clicks multiple links in\n  // quick succession or on slow connections. 
If the request fails for some\n  // reason, we fall back and use regular navigation, forcing a reload.\n  const document$ =\n    location$.pipe(\n      distinctUntilKeyChanged(\"pathname\"),\n      switchMap(url => requestHTML(url, { progress$ })\n        .pipe(\n          catchError(() => {\n            setLocation(url, true)\n            return EMPTY\n          })\n        )\n      ),\n\n      // The document was successfully fetched and parsed, so we can inject its\n      // contents into the currently active document\n      switchMap(resolve),\n      switchMap(inject),\n      share()\n    )\n\n  // --------------------------------------------------------------------------\n  // Scroll restoration\n  // --------------------------------------------------------------------------\n\n  // Handle scroll restoration - we must restore the viewport offset after the\n  // document has been fetched and injected, and every time the user clicks an\n  // anchor that leads to an element on the same page, which might also happen\n  // when the user uses the back or forward button.\n  merge(\n    document$.pipe(withLatestFrom(location$, (_, url) => url)),\n\n    // Handle instant navigation events that are triggered by the user clicking\n    // on an anchor link with a hash fragment different from the current one, as\n    // well as from popstate events, which are emitted when the user navigates\n    // back and forth between pages. We use a two-layered subscription to scope\n    // the scroll restoration to the current page, as we don't need to restore\n    // the viewport offset when the user navigates to a different page, as this\n    // is already handled by the previous observable.\n    document$.pipe(\n      switchMap(() => location$),\n      distinctUntilKeyChanged(\"pathname\"),\n      switchMap(() => location$),\n      distinctUntilKeyChanged(\"hash\")\n    ),\n\n    // Handle instant navigation events that are triggered by the user clicking\n    // on an anchor link with the same hash fragment as the current one in the\n    // URL. It is essential that we only intercept those from instant navigation\n    // events and not from history change events, or we'll end up in an endless\n    // loop. The top-level history entry must be removed, as it will be replaced\n    // with a new one, which would otherwise lead to a duplicate entry.\n    location$.pipe(\n      distinctUntilChanged((a, b) => (\n        a.pathname === b.pathname &&\n        a.hash     === b.hash\n      )),\n      switchMap(() => instant$),\n      tap(() => history.back())\n    )\n  )\n    .subscribe(url => {\n\n      // Check if the current history entry has a state, which happens when the\n      // user presses the back or forward button to visit a page we've already\n      // seen. If there's no state, it means a new page was visited and we must\n      // scroll to the top, unless an anchor is given.\n      if (history.state !== null || !url.hash) {\n        window.scrollTo(0, history.state?.y ?? 
0)\n      } else {\n        history.scrollRestoration = \"auto\"\n        setLocationHash(url.hash)\n        history.scrollRestoration = \"manual\"\n      }\n    })\n\n  // Disable scroll restoration when an instant navigation event occurs, so the\n  // browser does not immediately set the viewport offset to the prior history\n  // entry, scrolling to the position on the same page, which would look odd.\n  // Instead, we manually restore the position once the page has loaded.\n  location$.subscribe(() => {\n    history.scrollRestoration = \"manual\"\n  })\n\n  // Enable scroll restoration before window unloads - this is essential to\n  // ensure that full reloads (F5) restore the viewport offset correctly. If\n  // only popstate events wouldn't reset the viewport offset prior to their\n  // emission, we could just reset this in popstate. Meh.\n  fromEvent(window, \"beforeunload\")\n    .subscribe(() => {\n      history.scrollRestoration = \"auto\"\n    })\n\n  // Track viewport offset, so we can restore it when the user navigates back\n  // and forth between pages. Note that this must be debounced and cannot be\n  // done in popstate, as popstate has already removed the entry from the\n  // history, which means it is too late.\n  viewport$.pipe(\n    distinctUntilKeyChanged(\"offset\"),\n    debounceTime(100)\n  )\n    .subscribe(({ offset }) => {\n      history.replaceState(offset, \"\")\n    })\n\n  // Return document observable\n  return document$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport escapeHTML from \"escape-html\"\n\nimport { SearchConfig } from \"../config\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlight function\n *\n * @param value - Value\n *\n * @returns Highlighted value\n */\nexport type SearchHighlightFn = (value: string) => string\n\n/**\n * Search highlight factory function\n *\n * @param query - Query value\n *\n * @returns Search highlight function\n */\nexport type SearchHighlightFactoryFn = (query: string) => SearchHighlightFn\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Create a search highlighter\n *\n * @param config - Search configuration\n *\n * @returns Search highlight factory function\n */\nexport function setupSearchHighlighter(\n  config: SearchConfig\n): SearchHighlightFactoryFn {\n  // Hack: temporarily remove pure lookaheads and lookbehinds\n  const regex = config.separator.split(\"|\").map(term => {\n    const temp = term.replace(/(\\(\\?[!=<][^)]+\\))/g, \"\")\n    return temp.length === 0 ? \"\uFFFD\" : term\n  })\n    .join(\"|\")\n\n  const separator = new RegExp(regex, \"img\")\n  const highlight = (_: unknown, data: string, term: string) => {\n    return `${data}<mark data-md-highlight>${term}</mark>`\n  }\n\n  /* Return factory function */\n  return (query: string) => {\n    query = query\n      .replace(/[\\s*+\\-:~^]+/g, \" \")\n      .trim()\n\n    /* Create search term match expression */\n    const match = new RegExp(`(^|${config.separator}|)(${\n      query\n        .replace(/[|\\\\{}()[\\]^$+*?.-]/g, \"\\\\$&\")\n        .replace(separator, \"|\")\n    })`, \"img\")\n\n    /* Highlight string value */\n    return value => escapeHTML(value)\n      .replace(match, highlight)\n      .replace(/<\\/mark>(\\s+)<mark[^>]*>/img, \"$1\")\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { SearchResult } from \"../../_\"\nimport { SearchIndex } from \"../../config\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search message type\n */\nexport const enum SearchMessageType {\n  SETUP,                               /* Search index setup */\n  READY,                               /* Search index ready */\n  QUERY,                               /* Search query */\n  RESULT                               /* Search results */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Message containing the data necessary to setup the search index\n */\nexport interface SearchSetupMessage {\n  type: SearchMessageType.SETUP        /* Message type */\n  data: SearchIndex                    /* Message data */\n}\n\n/**\n * Message indicating the search index is ready\n */\nexport interface SearchReadyMessage {\n  type: SearchMessageType.READY        /* Message type */\n}\n\n/**\n * Message containing a search query\n */\nexport interface SearchQueryMessage {\n  type: SearchMessageType.QUERY        /* Message type */\n  data: string                         /* Message data */\n}\n\n/**\n * Message containing results for a search query\n */\nexport interface SearchResultMessage {\n  type: SearchMessageType.RESULT       /* Message type */\n  data: SearchResult                   /* Message data */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Message exchanged with the search worker\n */\nexport type SearchMessage =\n  | SearchSetupMessage\n  | SearchReadyMessage\n  | SearchQueryMessage\n  | SearchResultMessage\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Type guard for search ready messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchReadyMessage(\n  message: SearchMessage\n): message is SearchReadyMessage {\n  return message.type === SearchMessageType.READY\n}\n\n/**\n * Type guard for search result messages\n *\n * @param message - Search worker message\n *\n * @returns Test result\n */\nexport function isSearchResultMessage(\n  message: SearchMessage\n): message is SearchResultMessage {\n  return message.type === SearchMessageType.RESULT\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  ObservableInput,\n  Subject,\n  first,\n  merge,\n  of,\n  switchMap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport { watchToggle, watchWorker } from \"~/browser\"\n\nimport { SearchIndex } from \"../../config\"\nimport {\n  SearchMessage,\n  SearchMessageType\n} from \"../message\"\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up search worker\n *\n * This function creates and initializes a web worker that is used for search,\n * so that the user interface doesn't freeze. In general, the application does\n * not care how search is implemented, as long as the web worker conforms to\n * the format expected by the application as defined in `SearchMessage`. This\n * allows the author to implement custom search functionality, by providing a\n * custom web worker via configuration.\n *\n * Material for MkDocs' built-in search implementation makes use of Lunr.js, an\n * efficient and fast implementation for client-side search. Leveraging a tiny\n * iframe-based web worker shim, search is even supported for the `file://`\n * protocol, enabling search for local non-hosted builds.\n *\n * If the protocol is `file://`, search initialization is deferred to mitigate\n * freezing, as it's now synchronous by design - see https://bit.ly/3C521EO\n *\n * @see https://bit.ly/3igvtQv - How to implement custom search\n *\n * @param url - Worker URL\n * @param index$ - Search index observable input\n *\n * @returns Search worker\n */\nexport function setupSearchWorker(\n  url: string, index$: ObservableInput<SearchIndex>\n): Subject<SearchMessage> {\n  const worker$ = watchWorker<SearchMessage>(url)\n  merge(\n    of(location.protocol !== \"file:\"),\n    watchToggle(\"search\")\n  )\n    .pipe(\n      first(active => active),\n      switchMap(() => index$)\n    )\n      .subscribe(({ config, docs }) => worker$.next({\n        type: SearchMessageType.SETUP,\n        data: {\n          config,\n          docs,\n          options: {\n            suggest: feature(\"search.suggest\")\n          }\n        }\n      }))\n\n  /* Return search worker */\n  return worker$\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A 
PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Subject,\n  catchError,\n  combineLatest,\n  filter,\n  fromEvent,\n  map,\n  of,\n  switchMap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { configuration } from \"~/_\"\nimport {\n  getElement,\n  getLocation,\n  requestJSON,\n  setLocation\n} from \"~/browser\"\nimport { getComponentElements } from \"~/components\"\nimport {\n  Version,\n  renderVersionSelector\n} from \"~/templates\"\n\nimport { fetchSitemap } from \"../sitemap\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Setup options\n */\ninterface SetupOptions {\n  document$: Subject<Document>         /* Document subject */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Set up version selector\n *\n * @param options - Options\n */\nexport function setupVersionSelector(\n  { document$ }: SetupOptions\n): void {\n  const config = configuration()\n  const versions$ = requestJSON<Version[]>(\n    new URL(\"../versions.json\", config.base)\n  )\n    .pipe(\n      catchError(() => EMPTY) // @todo refactor instant loading\n    )\n\n  /* Determine current version */\n  const current$ = versions$\n    .pipe(\n      map(versions => {\n        const [, current] = config.base.match(/([^/]+)\\/?$/)!\n        return versions.find(({ version, aliases }) => (\n          version === current || aliases.includes(current)\n        )) || versions[0]\n      })\n    )\n\n  /* Intercept inter-version navigation */\n  versions$\n    .pipe(\n      map(versions => new Map(versions.map(version => [\n        `${new URL(`../${version.version}/`, config.base)}`,\n        version\n      ]))),\n      switchMap(urls => fromEvent<MouseEvent>(document.body, \"click\")\n        .pipe(\n          filter(ev => !ev.metaKey && !ev.ctrlKey),\n          withLatestFrom(current$),\n          switchMap(([ev, current]) => {\n            if (ev.target instanceof Element) {\n              const el = ev.target.closest(\"a\")\n              if (el && !el.target && urls.has(el.href)) {\n                const url = el.href\n                // This is a temporary hack to detect if a version inside the\n                // version selector or on another part of the site was clicked.\n                // If we're inside the version selector, we definitely want to\n                // find the same page, as we might have different deployments\n                // due to aliases. However, if we're outside the version\n                // selector, we must abort here, because we might otherwise\n                // interfere with instant navigation. 
We need to refactor this\n                // at some point together with instant navigation.\n                //\n                // See https://github.com/squidfunk/mkdocs-material/issues/4012\n                if (!ev.target.closest(\".md-version\")) {\n                  const version = urls.get(url)!\n                  if (version === current)\n                    return EMPTY\n                }\n                ev.preventDefault()\n                return of(url)\n              }\n            }\n            return EMPTY\n          }),\n          switchMap(url => {\n            return fetchSitemap(new URL(url))\n              .pipe(\n                map(sitemap => {\n                  const location = getLocation()\n                  const path = location.href.replace(config.base, url)\n                  return sitemap.has(path.split(\"#\")[0])\n                    ? new URL(path)\n                    : new URL(url)\n                })\n              )\n          })\n        )\n      )\n    )\n      .subscribe(url => setLocation(url, true))\n\n  /* Render version selector and warning */\n  combineLatest([versions$, current$])\n    .subscribe(([versions, current]) => {\n      const topic = getElement(\".md-header__topic\")\n      topic.appendChild(renderVersionSelector(versions, current))\n    })\n\n  /* Integrate outdated version banner with instant navigation */\n  document$.pipe(switchMap(() => current$))\n    .subscribe(current => {\n\n      /* Check if version state was already determined */\n      let outdated = __md_get(\"__outdated\", sessionStorage)\n      if (outdated === null) {\n        outdated = true\n\n        /* Obtain and normalize default versions */\n        let ignored = config.version?.default || \"latest\"\n        if (!Array.isArray(ignored))\n          ignored = [ignored]\n\n        /* Check if version is considered a default */\n        main: for (const ignore of ignored)\n          for (const version of current.aliases.concat(current.version))\n            if (new RegExp(ignore, \"i\").test(version)) {\n              outdated = false\n              break main\n            }\n\n        /* Persist version state in session storage */\n        __md_set(\"__outdated\", outdated, sessionStorage)\n      }\n\n      /* Unhide outdated version banner */\n      if (outdated)\n        for (const warning of getComponentElements(\"outdated\"))\n          warning.hidden = false\n    })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  combineLatest,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  finalize,\n  first,\n  fromEvent,\n  ignoreElements,\n  map,\n  merge,\n  shareReplay,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport {\n  getElement,\n  getLocation,\n  setToggle,\n  watchElementFocus,\n  watchToggle\n} from \"~/browser\"\nimport {\n  SearchMessage,\n  SearchMessageType,\n  isSearchReadyMessage\n} from \"~/integrations\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search query\n */\nexport interface SearchQuery {\n  value: string                        /* Query value */\n  focus: boolean                       /* Query focus */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch search query\n *\n * Note that the focus event which triggers re-reading the current query value\n * is delayed by `1ms` so the input's empty state is allowed to propagate.\n *\n * @param el - Search query element\n * @param options - Options\n *\n * @returns Search query observable\n */\nexport function watchSearchQuery(\n  el: HTMLInputElement, { worker$ }: WatchOptions\n): Observable<SearchQuery> {\n\n  /* Support search deep linking */\n  const { searchParams } = getLocation()\n  if (searchParams.has(\"q\")) {\n    setToggle(\"search\", true)\n\n    /* Set query from parameter */\n    el.value = searchParams.get(\"q\")!\n    el.focus()\n\n    /* Remove query parameter on close */\n    watchToggle(\"search\")\n      .pipe(\n        first(active => !active)\n      )\n        .subscribe(() => {\n          const url = getLocation()\n          url.searchParams.delete(\"q\")\n          history.replaceState({}, \"\", `${url}`)\n        })\n  }\n\n  /* Intercept focus and input events */\n  const focus$ = watchElementFocus(el)\n  const value$ = merge(\n    worker$.pipe(first(isSearchReadyMessage)),\n    fromEvent(el, \"keyup\"),\n    focus$\n  )\n    .pipe(\n      map(() => el.value),\n      distinctUntilChanged()\n    )\n\n  /* Combine into single observable */\n  return combineLatest([value$, focus$])\n    .pipe(\n      map(([value, focus]) => ({ value, focus })),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount search query\n *\n * @param el - Search query element\n * @param options - Options\n *\n * @returns Search query component observable\n */\nexport function mountSearchQuery(\n  el: HTMLInputElement, { worker$ }: MountOptions\n): Observable<Component<SearchQuery, HTMLInputElement>> {\n  const push$ = new Subject<SearchQuery>()\n  const done$ = 
push$.pipe(ignoreElements(), endWith(true))\n\n  /* Handle value change */\n  combineLatest([\n    worker$.pipe(first(isSearchReadyMessage)),\n    push$\n  ], (_, query) => query)\n    .pipe(\n      distinctUntilKeyChanged(\"value\")\n    )\n      .subscribe(({ value }) => worker$.next({\n        type: SearchMessageType.QUERY,\n        data: value\n      }))\n\n  /* Handle focus change */\n  push$\n    .pipe(\n      distinctUntilKeyChanged(\"focus\")\n    )\n      .subscribe(({ focus }) => {\n        if (focus)\n          setToggle(\"search\", focus)\n      })\n\n  /* Handle reset */\n  fromEvent(el.form!, \"reset\")\n    .pipe(\n      takeUntil(done$)\n    )\n      .subscribe(() => el.focus())\n\n  // Focus search query on label click - note that this is necessary to bring\n  // up the keyboard on iOS and other mobile platforms, as the search dialog is\n  // not visible at first, and programmatically focusing an input element must\n  // be triggered by a user interaction - see https://t.ly/Cb30n\n  const label = getElement(\"header [for=__search]\")\n  fromEvent(label, \"click\")\n    .subscribe(() => el.focus())\n\n  /* Create and return component */\n  return watchSearchQuery(el, { worker$ })\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state })),\n      shareReplay(1)\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  bufferCount,\n  filter,\n  finalize,\n  first,\n  fromEvent,\n  map,\n  merge,\n  mergeMap,\n  of,\n  share,\n  skipUntil,\n  switchMap,\n  takeUntil,\n  tap,\n  withLatestFrom,\n  zipWith\n} from \"rxjs\"\n\nimport { translation } from \"~/_\"\nimport {\n  getElement,\n  getOptionalElement,\n  watchElementBoundary,\n  watchToggle\n} from \"~/browser\"\nimport {\n  SearchMessage,\n  SearchResult,\n  isSearchReadyMessage,\n  isSearchResultMessage\n} from \"~/integrations\"\nimport { renderSearchResultItem } from \"~/templates\"\nimport { round } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\nimport { SearchQuery } from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  query$: Observable<SearchQuery>      /* Search query observable */\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search result list\n *\n * This function performs a lazy rendering of the search results, depending on\n * the vertical offset of the search result container.\n *\n * @param el - Search result list element\n * @param options - Options\n *\n * @returns Search result list component observable\n */\nexport function mountSearchResult(\n  el: HTMLElement, { worker$, query$ }: MountOptions\n): Observable<Component<SearchResult>> {\n  const push$ = new Subject<SearchResult>()\n  const boundary$ = watchElementBoundary(el.parentElement!)\n    .pipe(\n      filter(Boolean)\n    )\n\n  /* Retrieve container */\n  const container = el.parentElement!\n\n  /* Retrieve nested components */\n  const meta = getElement(\":scope > :first-child\", el)\n  const list = getElement(\":scope > :last-child\", el)\n\n  /* Reveal to accessibility tree \u2013 see https://bit.ly/3iAA7t8 */\n  watchToggle(\"search\")\n    .subscribe(active => list.setAttribute(\n      \"role\", active ? \"list\" : \"presentation\"\n    ))\n\n  /* Update search result metadata */\n  push$\n    .pipe(\n      withLatestFrom(query$),\n      skipUntil(worker$.pipe(first(isSearchReadyMessage)))\n    )\n      .subscribe(([{ items }, { value }]) => {\n        switch (items.length) {\n\n          /* No results */\n          case 0:\n            meta.textContent = value.length\n              ? 
translation(\"search.result.none\")\n              : translation(\"search.result.placeholder\")\n            break\n\n          /* One result */\n          case 1:\n            meta.textContent = translation(\"search.result.one\")\n            break\n\n          /* Multiple result */\n          default:\n            const count = round(items.length)\n            meta.textContent = translation(\"search.result.other\", count)\n        }\n      })\n\n  /* Render search result item */\n  const render$ = push$\n    .pipe(\n      tap(() => list.innerHTML = \"\"),\n      switchMap(({ items }) => merge(\n        of(...items.slice(0, 10)),\n        of(...items.slice(10))\n          .pipe(\n            bufferCount(4),\n            zipWith(boundary$),\n            switchMap(([chunk]) => chunk)\n          )\n      )),\n      map(renderSearchResultItem),\n      share()\n    )\n\n  /* Update search result list */\n  render$.subscribe(item => list.appendChild(item))\n  render$\n    .pipe(\n      mergeMap(item => {\n        const details = getOptionalElement(\"details\", item)\n        if (typeof details === \"undefined\")\n          return EMPTY\n\n        /* Keep position of details element stable */\n        return fromEvent(details, \"toggle\")\n          .pipe(\n            takeUntil(push$),\n            map(() => details)\n          )\n      })\n    )\n      .subscribe(details => {\n        if (\n          details.open === false &&\n          details.offsetTop <= container.scrollTop\n        )\n          container.scrollTo({ top: details.offsetTop })\n      })\n\n  /* Filter search result message */\n  const result$ = worker$\n    .pipe(\n      filter(isSearchResultMessage),\n      map(({ data }) => data)\n    )\n\n  /* Create and return component */\n  return result$\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  endWith,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport { getLocation } from \"~/browser\"\n\nimport { Component } from \"../../_\"\nimport { SearchQuery } from \"../query\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search sharing\n */\nexport interface SearchShare {\n  url: URL                             /* Deep link for sharing */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  query$: Observable<SearchQuery>      /* Search query observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  query$: Observable<SearchQuery>      /* Search query observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search sharing\n *\n * @param _el - Search sharing element\n * @param options - Options\n *\n * @returns Search sharing observable\n */\nexport function watchSearchShare(\n  _el: HTMLElement, { query$ }: WatchOptions\n): Observable<SearchShare> {\n  return query$\n    .pipe(\n      map(({ value }) => {\n        const url = getLocation()\n        url.hash = \"\"\n\n        /* Compute readable query strings */\n        value = value\n          .replace(/\\s+/g, \"+\")        /* Collapse whitespace */\n          .replace(/&/g, \"%26\")        /* Escape '&' character */\n          .replace(/=/g, \"%3D\")        /* Escape '=' character */\n\n        /* Replace query string */\n        url.search = `q=${value}`\n        return { url }\n      })\n    )\n}\n\n/**\n * Mount search sharing\n *\n * @param el - Search sharing element\n * @param options - Options\n *\n * @returns Search sharing component observable\n */\nexport function mountSearchShare(\n  el: HTMLAnchorElement, options: MountOptions\n): Observable<Component<SearchShare>> {\n  const push$ = new Subject<SearchShare>()\n  const done$ = push$.pipe(ignoreElements(), endWith(true))\n  push$.subscribe(({ url }) => {\n    el.setAttribute(\"data-clipboard-text\", el.href)\n    el.href = `${url}`\n  })\n\n  /* Prevent following of link */\n  fromEvent(el, \"click\")\n    .pipe(\n      takeUntil(done$)\n    )\n      .subscribe(ev => ev.preventDefault())\n\n  /* Create and return component */\n  return watchSearchShare(el, options)\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell 
copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  asyncScheduler,\n  combineLatestWith,\n  distinctUntilChanged,\n  filter,\n  finalize,\n  fromEvent,\n  map,\n  merge,\n  observeOn,\n  tap\n} from \"rxjs\"\n\nimport { Keyboard } from \"~/browser\"\nimport {\n  SearchMessage,\n  SearchResult,\n  isSearchResultMessage\n} from \"~/integrations\"\n\nimport { Component, getComponentElement } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search suggestions\n */\nexport interface SearchSuggest {}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  keyboard$: Observable<Keyboard>      /* Keyboard observable */\n  worker$: Subject<SearchMessage>      /* Search worker */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search suggestions\n *\n * This function will perform a lazy rendering of the search results, depending\n * on the vertical offset of the search result container.\n *\n * @param el - Search result list element\n * @param options - Options\n *\n * @returns Search result list component observable\n */\nexport function mountSearchSuggest(\n  el: HTMLElement, { worker$, keyboard$ }: MountOptions\n): Observable<Component<SearchSuggest>> {\n  const push$ = new Subject<SearchResult>()\n\n  /* Retrieve query component and track all changes */\n  const query  = getComponentElement(\"search-query\")\n  const query$ = merge(\n    fromEvent(query, \"keydown\"),\n    fromEvent(query, \"focus\")\n  )\n    .pipe(\n      observeOn(asyncScheduler),\n      map(() => query.value),\n      distinctUntilChanged(),\n    )\n\n  /* Update search suggestions */\n  push$\n    .pipe(\n      combineLatestWith(query$),\n      map(([{ suggest }, value]) => {\n        const words = value.split(/([\\s-]+)/)\n        if (suggest?.length && words[words.length - 1]) {\n          const last = suggest[suggest.length - 1]\n          if (last.startsWith(words[words.length - 1]))\n            words[words.length - 1] = last\n        } else {\n          words.length = 0\n        }\n        return words\n      })\n    )\n      .subscribe(words => el.innerHTML = words\n        .join(\"\")\n        .replace(/\\s/g, \"&nbsp;\")\n      )\n\n  /* Set up search keyboard handlers */\n  keyboard$\n    .pipe(\n      filter(({ mode }) => mode === \"search\")\n    )\n      .subscribe(key => {\n        
switch (key.type) {\n\n          /* Right arrow: accept current suggestion */\n          case \"ArrowRight\":\n            if (\n              el.innerText.length &&\n              query.selectionStart === query.value.length\n            )\n              query.value = el.innerText\n            break\n        }\n      })\n\n  /* Filter search result message */\n  const result$ = worker$\n    .pipe(\n      filter(isSearchResultMessage),\n      map(({ data }) => data)\n    )\n\n  /* Create and return component */\n  return result$\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(() => ({ ref: el }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  NEVER,\n  Observable,\n  ObservableInput,\n  filter,\n  fromEvent,\n  merge,\n  mergeWith\n} from \"rxjs\"\n\nimport { configuration } from \"~/_\"\nimport {\n  Keyboard,\n  getActiveElement,\n  getElements,\n  setToggle\n} from \"~/browser\"\nimport {\n  SearchIndex,\n  SearchResult,\n  setupSearchWorker\n} from \"~/integrations\"\n\nimport {\n  Component,\n  getComponentElement,\n  getComponentElements\n} from \"../../_\"\nimport {\n  SearchQuery,\n  mountSearchQuery\n} from \"../query\"\nimport { mountSearchResult } from \"../result\"\nimport {\n  SearchShare,\n  mountSearchShare\n} from \"../share\"\nimport {\n  SearchSuggest,\n  mountSearchSuggest\n} from \"../suggest\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search\n */\nexport type Search =\n  | SearchQuery\n  | SearchResult\n  | SearchShare\n  | SearchSuggest\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  index$: ObservableInput<SearchIndex> /* Search index observable */\n  keyboard$: Observable<Keyboard>      /* Keyboard observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search\n *\n * This function sets up the search functionality, including the underlying\n * web worker and all keyboard bindings.\n *\n * 
@param el - Search element\n * @param options - Options\n *\n * @returns Search component observable\n */\nexport function mountSearch(\n  el: HTMLElement, { index$, keyboard$ }: MountOptions\n): Observable<Component<Search>> {\n  const config = configuration()\n  try {\n    const worker$ = setupSearchWorker(config.search, index$)\n\n    /* Retrieve query and result components */\n    const query  = getComponentElement(\"search-query\", el)\n    const result = getComponentElement(\"search-result\", el)\n\n    /* Always close search on result selection */\n    fromEvent<PointerEvent>(el, \"click\")\n      .pipe(\n        filter(({ target }) => (\n          target instanceof Element && !!target.closest(\"a\")\n        ))\n      )\n        .subscribe(() => setToggle(\"search\", false))\n\n    /* Set up search keyboard handlers */\n    keyboard$\n      .pipe(\n        filter(({ mode }) => mode === \"search\")\n      )\n        .subscribe(key => {\n          const active = getActiveElement()\n          switch (key.type) {\n\n            /* Enter: go to first (best) result */\n            case \"Enter\":\n              if (active === query) {\n                const anchors = new Map<HTMLAnchorElement, number>()\n                for (const anchor of getElements<HTMLAnchorElement>(\n                  \":first-child [href]\", result\n                )) {\n                  const article = anchor.firstElementChild!\n                  anchors.set(anchor, parseFloat(\n                    article.getAttribute(\"data-md-score\")!\n                  ))\n                }\n\n                /* Go to result with highest score, if any */\n                if (anchors.size) {\n                  const [[best]] = [...anchors].sort(([, a], [, b]) => b - a)\n                  best.click()\n                }\n\n                /* Otherwise omit form submission */\n                key.claim()\n              }\n              break\n\n            /* Escape or Tab: close search */\n            case \"Escape\":\n            case \"Tab\":\n              setToggle(\"search\", false)\n              query.blur()\n              break\n\n            /* Vertical arrows: select previous or next search result */\n            case \"ArrowUp\":\n            case \"ArrowDown\":\n              if (typeof active === \"undefined\") {\n                query.focus()\n              } else {\n                const els = [query, ...getElements(\n                  \":not(details) > [href], summary, details[open] [href]\",\n                  result\n                )]\n                const i = Math.max(0, (\n                  Math.max(0, els.indexOf(active)) + els.length + (\n                    key.type === \"ArrowUp\" ? 
-1 : +1\n                  )\n                ) % els.length)\n                els[i].focus()\n              }\n\n              /* Prevent scrolling of page */\n              key.claim()\n              break\n\n            /* All other keys: hand to search query */\n            default:\n              if (query !== getActiveElement())\n                query.focus()\n          }\n        })\n\n    /* Set up global keyboard handlers */\n    keyboard$\n      .pipe(\n        filter(({ mode }) => mode === \"global\")\n      )\n        .subscribe(key => {\n          switch (key.type) {\n\n            /* Open search and select query */\n            case \"f\":\n            case \"s\":\n            case \"/\":\n              query.focus()\n              query.select()\n\n              /* Prevent scrolling of page */\n              key.claim()\n              break\n          }\n        })\n\n    /* Create and return component */\n    const query$ = mountSearchQuery(query, { worker$ })\n    return merge(\n      query$,\n      mountSearchResult(result, { worker$, query$ })\n    )\n      .pipe(\n        mergeWith(\n\n          /* Search sharing */\n          ...getComponentElements(\"search-share\", el)\n            .map(child => mountSearchShare(child, { query$ })),\n\n          /* Search suggestions */\n          ...getComponentElements(\"search-suggest\", el)\n            .map(child => mountSearchSuggest(child, { worker$, keyboard$ }))\n        )\n      )\n\n  /* Gracefully handle broken search */\n  } catch (err) {\n    el.hidden = true\n    return NEVER\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  ObservableInput,\n  combineLatest,\n  filter,\n  map,\n  startWith\n} from \"rxjs\"\n\nimport { getLocation } from \"~/browser\"\nimport {\n  SearchIndex,\n  setupSearchHighlighter\n} from \"~/integrations\"\nimport { h } from \"~/utilities\"\n\nimport { Component } from \"../../_\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Search highlighting\n */\nexport interface SearchHighlight {\n  nodes: Map<ChildNode, string>        /* Map of replacements */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  index$: ObservableInput<SearchIndex> /* Search index observable */\n  location$: Observable<URL>           /* Location observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Mount search highlighting\n *\n * @param el - Content element\n * @param options - Options\n *\n * @returns Search highlighting component observable\n */\nexport function mountSearchHiglight(\n  el: HTMLElement, { index$, location$ }: MountOptions\n): Observable<Component<SearchHighlight>> {\n  return combineLatest([\n    index$,\n    location$\n      .pipe(\n        startWith(getLocation()),\n        filter(url => !!url.searchParams.get(\"h\"))\n      )\n  ])\n    .pipe(\n      map(([index, url]) => setupSearchHighlighter(index.config)(\n        url.searchParams.get(\"h\")!\n      )),\n      map(fn => {\n        const nodes = new Map<ChildNode, string>()\n\n        /* Traverse text nodes and collect matches */\n        const it = document.createNodeIterator(el, NodeFilter.SHOW_TEXT)\n        for (let node = it.nextNode(); node; node = it.nextNode()) {\n          if (node.parentElement?.offsetHeight) {\n            const original = node.textContent!\n            const replaced = fn(original)\n            if (replaced.length > original.length)\n              nodes.set(node as ChildNode, replaced)\n          }\n        }\n\n        /* Replace original nodes with matches */\n        for (const [node, text] of nodes) {\n          const { childNodes } = h(\"span\", null, text)\n          node.replaceWith(...Array.from(childNodes))\n        }\n\n        /* Return component */\n        return { ref: el, nodes }\n      })\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * 
all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  animationFrameScheduler,\n  asyncScheduler,\n  auditTime,\n  combineLatest,\n  defer,\n  distinctUntilChanged,\n  endWith,\n  finalize,\n  first,\n  from,\n  fromEvent,\n  ignoreElements,\n  map,\n  mergeMap,\n  observeOn,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  getElement,\n  getElementOffset,\n  getElementSize,\n  getElements\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Sidebar\n */\nexport interface Sidebar {\n  height: number                       /* Sidebar height */\n  locked: boolean                      /* Sidebar is locked */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  main$: Observable<Main>              /* Main area observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch sidebar\n *\n * This function returns an observable that computes the visual parameters of\n * the sidebar which depends on the vertical viewport offset, as well as the\n * height of the main area. 
When the page is scrolled beyond the header, the\n * sidebar is locked and fills the remaining space.\n *\n * @param el - Sidebar element\n * @param options - Options\n *\n * @returns Sidebar observable\n */\nexport function watchSidebar(\n  el: HTMLElement, { viewport$, main$ }: WatchOptions\n): Observable<Sidebar> {\n  const parent = el.closest<HTMLElement>(\".md-grid\")!\n  const adjust =\n    parent.offsetTop -\n    parent.parentElement!.offsetTop\n\n  /* Compute the sidebar's available height and if it should be locked */\n  return combineLatest([main$, viewport$])\n    .pipe(\n      map(([{ offset, height }, { offset: { y } }]) => {\n        height = height\n          + Math.min(adjust, Math.max(0, y - offset))\n          - adjust\n        return {\n          height,\n          locked: y >= offset + adjust\n        }\n      }),\n      distinctUntilChanged((a, b) => (\n        a.height === b.height &&\n        a.locked === b.locked\n      ))\n    )\n}\n\n/**\n * Mount sidebar\n *\n * This function doesn't set the height of the actual sidebar, but of its first\n * child \u2013 the `.md-sidebar__scrollwrap` element in order to mitigiate jittery\n * sidebars when the footer is scrolled into view. At some point we switched\n * from `absolute` / `fixed` positioning to `sticky` positioning, significantly\n * reducing jitter in some browsers (respectively Firefox and Safari) when\n * scrolling from the top. However, top-aligned sticky positioning means that\n * the sidebar snaps to the bottom when the end of the container is reached.\n * This is what leads to the mentioned jitter, as the sidebar's height may be\n * updated too slowly.\n *\n * This behaviour can be mitigiated by setting the height of the sidebar to `0`\n * while preserving the padding, and the height on its first element.\n *\n * @param el - Sidebar element\n * @param options - Options\n *\n * @returns Sidebar component observable\n */\nexport function mountSidebar(\n  el: HTMLElement, { header$, ...options }: MountOptions\n): Observable<Component<Sidebar>> {\n  const inner = getElement(\".md-sidebar__scrollwrap\", el)\n  const { y } = getElementOffset(inner)\n  return defer(() => {\n    const push$ = new Subject<Sidebar>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    const next$ = push$\n      .pipe(\n        auditTime(0, animationFrameScheduler)\n      )\n\n    /* Update sidebar height and offset */\n    next$.pipe(withLatestFrom(header$))\n      .subscribe({\n\n        /* Handle emission */\n        next([{ height }, { height: offset }]) {\n          inner.style.height = `${height - 2 * y}px`\n          el.style.top       = `${offset}px`\n        },\n\n        /* Handle complete */\n        complete() {\n          inner.style.height = \"\"\n          el.style.top       = \"\"\n        }\n      })\n\n    /* Bring active item into view on initial load */\n    next$.pipe(first())\n      .subscribe(() => {\n        for (const item of getElements(\".md-nav__link--active[href]\", el)) {\n          if (!item.clientHeight) // skip invisible toc in left sidebar\n            continue\n          const container = item.closest<HTMLElement>(\".md-sidebar__scrollwrap\")!\n          if (typeof container !== \"undefined\") {\n            const offset = item.offsetTop - container.offsetTop\n            const { height } = getElementSize(container)\n            container.scrollTo({\n              top: offset - height / 2\n            })\n          }\n        }\n      })\n\n    /* Handle accessibility for expandable 
items, see https://bit.ly/3jaod9p */\n    from(getElements<HTMLLabelElement>(\"label[tabindex]\", el))\n      .pipe(\n        mergeMap(label => fromEvent(label, \"click\")\n          .pipe(\n            observeOn(asyncScheduler),\n            map(() => label),\n            takeUntil(done$)\n          )\n        )\n      )\n        .subscribe(label => {\n          const input = getElement<HTMLInputElement>(`[id=\"${label.htmlFor}\"]`)\n          const nav = getElement(`[aria-labelledby=\"${label.id}\"]`)\n          nav.setAttribute(\"aria-expanded\", `${input.checked}`)\n        })\n\n    /* Create and return component */\n    return watchSidebar(el, options)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { Repo, User } from \"github-types\"\nimport {\n  EMPTY,\n  Observable,\n  catchError,\n  defaultIfEmpty,\n  map,\n  zip\n} from \"rxjs\"\n\nimport { requestJSON } from \"~/browser\"\n\nimport { SourceFacts } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * GitHub release (partial)\n */\ninterface Release {\n  tag_name: string                     /* Tag name */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch GitHub repository facts\n *\n * @param user - GitHub user or organization\n * @param repo - GitHub repository\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFactsFromGitHub(\n  user: string, repo?: string\n): Observable<SourceFacts> {\n  if (typeof repo !== \"undefined\") {\n    const url = `https://api.github.com/repos/${user}/${repo}`\n    return zip(\n\n      /* Fetch version */\n      requestJSON<Release>(`${url}/releases/latest`)\n        .pipe(\n          catchError(() => EMPTY), // @todo refactor instant loading\n          map(release => ({\n            version: release.tag_name\n          })),\n          defaultIfEmpty({})\n        ),\n\n      /* Fetch stars and forks */\n      requestJSON<Repo>(url)\n        .pipe(\n          catchError(() => EMPTY), // @todo 
refactor instant loading\n          map(info => ({\n            stars: info.stargazers_count,\n            forks: info.forks_count\n          })),\n          defaultIfEmpty({})\n        )\n    )\n      .pipe(\n        map(([release, info]) => ({ ...release, ...info }))\n      )\n\n  /* User or organization */\n  } else {\n    const url = `https://api.github.com/users/${user}`\n    return requestJSON<User>(url)\n      .pipe(\n        map(info => ({\n          repositories: info.public_repos\n        })),\n        defaultIfEmpty({})\n      )\n  }\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { ProjectSchema } from \"gitlab\"\nimport {\n  EMPTY,\n  Observable,\n  catchError,\n  defaultIfEmpty,\n  map,\n  zip\n} from \"rxjs\"\n\nimport { requestJSON } from \"~/browser\"\n\nimport { SourceFacts } from \"../_\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * GitLab release (partial)\n */\ninterface Release { // @todo remove and use the ReleaseSchema type instead after switching from gitlab to @gitbeaker/rest\n  tag_name: string                     /* Tag name */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch GitLab repository facts\n *\n * @param base - GitLab base\n * @param project - GitLab project\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFactsFromGitLab(\n  base: string, project: string\n): Observable<SourceFacts> {\n  const url = `https://${base}/api/v4/projects/${encodeURIComponent(project)}`\n  return zip(\n\n    /* Fetch version */\n    requestJSON<Release>(`${url}/releases/permalink/latest`)\n      .pipe(\n        catchError(() => EMPTY), // @todo refactor instant loading\n        map(({ tag_name }) => ({\n          version: tag_name\n        })),\n        defaultIfEmpty({})\n      ),\n\n    /* Fetch stars and forks */\n    requestJSON<ProjectSchema>(url)\n      .pipe(\n        catchError(() => EMPTY), // @todo refactor instant loading\n        map(({ star_count, forks_count }) => ({\n          stars: star_count,\n          forks: forks_count\n        })),\n        defaultIfEmpty({})\n      )\n  )\n    .pipe(\n      map(([release, 
info]) => ({ ...release, ...info }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport { EMPTY, Observable } from \"rxjs\"\n\nimport { fetchSourceFactsFromGitHub } from \"../github\"\nimport { fetchSourceFactsFromGitLab } from \"../gitlab\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository facts for repositories\n */\nexport interface RepositoryFacts {\n  stars?: number                       /* Number of stars */\n  forks?: number                       /* Number of forks */\n  version?: string                     /* Latest version */\n}\n\n/**\n * Repository facts for organizations\n */\nexport interface OrganizationFacts {\n  repositories?: number                /* Number of repositories */\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Repository facts\n */\nexport type SourceFacts =\n  | RepositoryFacts\n  | OrganizationFacts\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch repository facts\n *\n * @param url - Repository URL\n *\n * @returns Repository facts observable\n */\nexport function fetchSourceFacts(\n  url: string\n): Observable<SourceFacts> {\n\n  /* Try to match GitHub repository */\n  let match = url.match(/^.+github\\.com\\/([^/]+)\\/?([^/]+)?/i)\n  if (match) {\n    const [, user, repo] = match\n    return fetchSourceFactsFromGitHub(user, repo)\n  }\n\n  /* Try to match GitLab repository */\n  match = url.match(/^.+?([^/]*gitlab[^/]+)\\/(.+?)\\/?$/i)\n  if (match) {\n    const [, base, slug] = match\n    return fetchSourceFactsFromGitLab(base, slug)\n  }\n\n  /* Fallback */\n  return EMPTY\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the 
following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  Subject,\n  catchError,\n  defer,\n  filter,\n  finalize,\n  map,\n  of,\n  shareReplay,\n  tap\n} from \"rxjs\"\n\nimport { getElement } from \"~/browser\"\nimport { ConsentDefaults } from \"~/components/consent\"\nimport { renderSourceFacts } from \"~/templates\"\n\nimport {\n  Component,\n  getComponentElements\n} from \"../../_\"\nimport {\n  SourceFacts,\n  fetchSourceFacts\n} from \"../facts\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository information\n */\nexport interface Source {\n  facts: SourceFacts                   /* Repository facts */\n}\n\n/* ----------------------------------------------------------------------------\n * Data\n * ------------------------------------------------------------------------- */\n\n/**\n * Repository information observable\n */\nlet fetch$: Observable<Source>\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch repository information\n *\n * This function tries to read the repository facts from session storage, and\n * if unsuccessful, fetches them from the underlying provider.\n *\n * @param el - Repository information element\n *\n * @returns Repository information observable\n */\nexport function watchSource(\n  el: HTMLAnchorElement\n): Observable<Source> {\n  return fetch$ ||= defer(() => {\n    const cached = __md_get<SourceFacts>(\"__source\", sessionStorage)\n    if (cached) {\n      return of(cached)\n    } else {\n\n      /* Check if consent is configured and was given */\n      const els = getComponentElements(\"consent\")\n      if (els.length) {\n        const consent = __md_get<ConsentDefaults>(\"__consent\")\n        if (!(consent && consent.github))\n          return EMPTY\n      }\n\n      /* Fetch repository facts */\n      return fetchSourceFacts(el.href)\n        .pipe(\n          tap(facts => __md_set(\"__source\", facts, sessionStorage))\n        )\n    }\n  })\n    .pipe(\n      catchError(() => EMPTY),\n      filter(facts => Object.keys(facts).length > 0),\n      map(facts => ({ facts })),\n      shareReplay(1)\n    )\n}\n\n/**\n * Mount repository information\n *\n * @param el - Repository information element\n *\n * @returns Repository information component observable\n */\nexport function mountSource(\n  el: HTMLAnchorElement\n): Observable<Component<Source>> {\n  const inner = getElement(\":scope > :last-child\", el)\n  return defer(() => {\n    const push$ = new Subject<Source>()\n    push$.subscribe(({ facts }) => {\n      inner.appendChild(renderSourceFacts(facts))\n      inner.classList.add(\"md-source__repository--active\")\n    })\n\n    
/* Create and return component */\n    return watchSource(el)\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  defer,\n  distinctUntilKeyChanged,\n  finalize,\n  map,\n  of,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  watchElementSize,\n  watchViewportAt\n} from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Navigation tabs\n */\nexport interface Tabs {\n  hidden: boolean                      /* Navigation tabs are hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch navigation tabs\n *\n * @param el - Navigation tabs element\n * @param options - Options\n *\n * @returns Navigation tabs observable\n */\nexport function watchTabs(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<Tabs> {\n  return watchElementSize(document.body)\n    .pipe(\n      switchMap(() => watchViewportAt(el, { header$, viewport$ })),\n      map(({ offset: { y } }) => {\n        return {\n          hidden: y >= 10\n        }\n      }),\n      distinctUntilKeyChanged(\"hidden\")\n    )\n}\n\n/**\n * Mount navigation tabs\n *\n * This function hides the navigation tabs when scrolling past the threshold\n * and makes them reappear in a nice CSS animation when scrolling back up.\n *\n * @param el - 
Navigation tabs element\n * @param options - Options\n *\n * @returns Navigation tabs component observable\n */\nexport function mountTabs(\n  el: HTMLElement, options: MountOptions\n): Observable<Component<Tabs>> {\n  return defer(() => {\n    const push$ = new Subject<Tabs>()\n    push$.subscribe({\n\n      /* Handle emission */\n      next({ hidden }) {\n        el.hidden = hidden\n      },\n\n      /* Handle complete */\n      complete() {\n        el.hidden = false\n      }\n    })\n\n    /* Create and return component */\n    return (\n      feature(\"navigation.tabs.sticky\")\n        ? of({ hidden: false })\n        : watchTabs(el, options)\n    )\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  asyncScheduler,\n  bufferCount,\n  combineLatestWith,\n  debounceTime,\n  defer,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  filter,\n  finalize,\n  ignoreElements,\n  map,\n  merge,\n  observeOn,\n  of,\n  repeat,\n  scan,\n  share,\n  skip,\n  startWith,\n  switchMap,\n  takeUntil,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElement,\n  getElementContainer,\n  getElementSize,\n  getElements,\n  getLocation,\n  getOptionalElement,\n  watchElementSize\n} from \"~/browser\"\n\nimport {\n  Component,\n  getComponentElement\n} from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Table of contents\n */\nexport interface TableOfContents {\n  prev: HTMLAnchorElement[][]          /* Anchors (previous) */\n  next: HTMLAnchorElement[][]          /* Anchors (next) */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions 
{\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch table of contents\n *\n * This is effectively a scroll spy implementation which will account for the\n * fixed header and automatically re-calculate anchor offsets when the viewport\n * is resized. The returned observable will only emit if the table of contents\n * needs to be repainted.\n *\n * This implementation tracks an anchor element's entire path starting from its\n * level up to the top-most anchor element, e.g. `[h3, h2, h1]`. Although the\n * Material theme currently doesn't make use of this information, it enables\n * the styling of the entire hierarchy through customization.\n *\n * Note that the current anchor is the last item of the `prev` anchor list.\n *\n * @param el - Table of contents element\n * @param options - Options\n *\n * @returns Table of contents observable\n */\nexport function watchTableOfContents(\n  el: HTMLElement, { viewport$, header$ }: WatchOptions\n): Observable<TableOfContents> {\n  const table = new Map<HTMLAnchorElement, HTMLElement>()\n\n  /* Compute anchor-to-target mapping */\n  const anchors = getElements<HTMLAnchorElement>(\".md-nav__link\", el)\n  for (const anchor of anchors) {\n    const id = decodeURIComponent(anchor.hash.substring(1))\n    const target = getOptionalElement(`[id=\"${id}\"]`)\n    if (typeof target !== \"undefined\")\n      table.set(anchor, target)\n  }\n\n  /* Compute necessary adjustment for header */\n  const adjust$ = header$\n    .pipe(\n      distinctUntilKeyChanged(\"height\"),\n      map(({ height }) => {\n        const main = getComponentElement(\"main\")\n        const grid = getElement(\":scope > :first-child\", main)\n        return height + 0.8 * (\n          grid.offsetTop -\n          main.offsetTop\n        )\n      }),\n      share()\n    )\n\n  /* Compute partition of previous and next anchors */\n  const partition$ = watchElementSize(document.body)\n    .pipe(\n      distinctUntilKeyChanged(\"height\"),\n\n      /* Build index to map anchor paths to vertical offsets */\n      switchMap(body => defer(() => {\n        let path: HTMLAnchorElement[] = []\n        return of([...table].reduce((index, [anchor, target]) => {\n          while (path.length) {\n            const last = table.get(path[path.length - 1])!\n            if (last.tagName >= target.tagName) {\n              path.pop()\n            } else {\n              break\n            }\n          }\n\n          /* If the current anchor is hidden, continue with its parent */\n          let offset = target.offsetTop\n          while (!offset && target.parentElement) {\n            target = target.parentElement\n            offset = target.offsetTop\n          }\n\n          /* Fix anchor offsets in tables - see https://bit.ly/3CUFOcn */\n          let parent = target.offsetParent as HTMLElement\n          for (; parent; parent = parent.offsetParent as HTMLElement)\n            offset += parent.offsetTop\n\n          /* Map reversed anchor path to vertical offset */\n          return index.set(\n            [...path = [...path, anchor]].reverse(),\n            offset\n          )\n        }, 
new Map<HTMLAnchorElement[], number>()))\n      })\n        .pipe(\n\n          /* Sort index by vertical offset (see https://bit.ly/30z6QSO) */\n          map(index => new Map([...index].sort(([, a], [, b]) => a - b))),\n          combineLatestWith(adjust$),\n\n          /* Re-compute partition when viewport offset changes */\n          switchMap(([index, adjust]) => viewport$\n            .pipe(\n              scan(([prev, next], { offset: { y }, size }) => {\n                const last = y + size.height >= Math.floor(body.height)\n\n                /* Look forward */\n                while (next.length) {\n                  const [, offset] = next[0]\n                  if (offset - adjust < y || last) {\n                    prev = [...prev, next.shift()!]\n                  } else {\n                    break\n                  }\n                }\n\n                /* Look backward */\n                while (prev.length) {\n                  const [, offset] = prev[prev.length - 1]\n                  if (offset - adjust >= y && !last) {\n                    next = [prev.pop()!, ...next]\n                  } else {\n                    break\n                  }\n                }\n\n                /* Return partition */\n                return [prev, next]\n              }, [[], [...index]]),\n              distinctUntilChanged((a, b) => (\n                a[0] === b[0] &&\n                a[1] === b[1]\n              ))\n            )\n          )\n        )\n      )\n    )\n\n  /* Compute and return anchor list migrations */\n  return partition$\n    .pipe(\n      map(([prev, next]) => ({\n        prev: prev.map(([path]) => path),\n        next: next.map(([path]) => path)\n      })),\n\n      /* Extract anchor list migrations */\n      startWith({ prev: [], next: [] }),\n      bufferCount(2, 1),\n      map(([a, b]) => {\n\n        /* Moving down */\n        if (a.prev.length < b.prev.length) {\n          return {\n            prev: b.prev.slice(Math.max(0, a.prev.length - 1), b.prev.length),\n            next: []\n          }\n\n        /* Moving up */\n        } else {\n          return {\n            prev: b.prev.slice(-1),\n            next: b.next.slice(0, b.next.length - a.next.length)\n          }\n        }\n      })\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount table of contents\n *\n * @param el - Table of contents element\n * @param options - Options\n *\n * @returns Table of contents component observable\n */\nexport function mountTableOfContents(\n  el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions\n): Observable<Component<TableOfContents>> {\n  return defer(() => {\n    const push$ = new Subject<TableOfContents>()\n    const done$ = push$.pipe(ignoreElements(), endWith(true))\n    push$.subscribe(({ prev, next }) => {\n\n      /* Look forward */\n      for (const [anchor] of next) {\n        anchor.classList.remove(\"md-nav__link--passed\")\n        anchor.classList.remove(\"md-nav__link--active\")\n      }\n\n      /* Look backward */\n      for (const [index, [anchor]] of prev.entries()) {\n        anchor.classList.add(\"md-nav__link--passed\")\n        anchor.classList.toggle(\n          \"md-nav__link--active\",\n          index === prev.length - 1\n        )\n      }\n    })\n\n    /* Set up following, if enabled */\n    if (feature(\"toc.follow\")) {\n\n      /* Toggle smooth scrolling only for anchor clicks */\n      const smooth$ = merge(\n        viewport$.pipe(debounceTime(1), 
map(() => undefined)),\n        viewport$.pipe(debounceTime(250), map(() => \"smooth\" as const))\n      )\n\n      /* Bring active anchor into view */ // @todo: refactor\n      push$\n        .pipe(\n          filter(({ prev }) => prev.length > 0),\n          combineLatestWith(main$.pipe(observeOn(asyncScheduler))),\n          withLatestFrom(smooth$)\n        )\n          .subscribe(([[{ prev }], behavior]) => {\n            const [anchor] = prev[prev.length - 1]\n            if (anchor.offsetHeight) {\n\n              /* Retrieve overflowing container and scroll */\n              const container = getElementContainer(anchor)\n              if (typeof container !== \"undefined\") {\n                const offset = anchor.offsetTop - container.offsetTop\n                const { height } = getElementSize(container)\n                container.scrollTo({\n                  top: offset - height / 2,\n                  behavior\n                })\n              }\n            }\n          })\n    }\n\n    /* Set up anchor tracking, if enabled */\n    if (feature(\"navigation.tracking\"))\n      viewport$\n        .pipe(\n          takeUntil(done$),\n          distinctUntilKeyChanged(\"offset\"),\n          debounceTime(250),\n          skip(1),\n          takeUntil(target$.pipe(skip(1))),\n          repeat({ delay: 250 }),\n          withLatestFrom(push$)\n        )\n          .subscribe(([, { prev }]) => {\n            const url = getLocation()\n\n            /* Set hash fragment to active anchor */\n            const anchor = prev[prev.length - 1]\n            if (anchor && anchor.length) {\n              const [active] = anchor\n              const { hash } = new URL(active.href)\n              if (url.hash !== hash) {\n                url.hash = hash\n                history.replaceState({}, \"\", `${url}`)\n              }\n\n            /* Reset anchor when at the top */\n            } else {\n              url.hash = \"\"\n              history.replaceState({}, \"\", `${url}`)\n            }\n          })\n\n    /* Create and return component */\n    return watchTableOfContents(el, { viewport$, header$ })\n      .pipe(\n        tap(state => push$.next(state)),\n        finalize(() => push$.complete()),\n        map(state => ({ ref: el, ...state }))\n      )\n  })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  Subject,\n  bufferCount,\n  combineLatest,\n  distinctUntilChanged,\n  distinctUntilKeyChanged,\n  endWith,\n  finalize,\n  fromEvent,\n  ignoreElements,\n  map,\n  repeat,\n  skip,\n  takeUntil,\n  tap\n} from \"rxjs\"\n\nimport { Viewport } from \"~/browser\"\n\nimport { Component } from \"../_\"\nimport { Header } from \"../header\"\nimport { Main } from \"../main\"\n\n/* ----------------------------------------------------------------------------\n * Types\n * ------------------------------------------------------------------------- */\n\n/**\n * Back-to-top button\n */\nexport interface BackToTop {\n  hidden: boolean                      /* Back-to-top button is hidden */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch options\n */\ninterface WatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  main$: Observable<Main>              /* Main area observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/**\n * Mount options\n */\ninterface MountOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  header$: Observable<Header>          /* Header observable */\n  main$: Observable<Main>              /* Main area observable */\n  target$: Observable<HTMLElement>     /* Location target observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Watch back-to-top\n *\n * @param _el - Back-to-top element\n * @param options - Options\n *\n * @returns Back-to-top observable\n */\nexport function watchBackToTop(\n  _el: HTMLElement, { viewport$, main$, target$ }: WatchOptions\n): Observable<BackToTop> {\n\n  /* Compute direction */\n  const direction$ = viewport$\n    .pipe(\n      map(({ offset: { y } }) => y),\n      bufferCount(2, 1),\n      map(([a, b]) => a > b && b > 0),\n      distinctUntilChanged()\n    )\n\n  /* Compute whether main area is active */\n  const active$ = main$\n    .pipe(\n      map(({ active }) => active)\n    )\n\n  /* Compute threshold for hiding */\n  return combineLatest([active$, direction$])\n    .pipe(\n      map(([active, direction]) => !(active && direction)),\n      distinctUntilChanged(),\n      takeUntil(target$.pipe(skip(1))),\n      endWith(true),\n      repeat({ delay: 250 }),\n      map(hidden => ({ hidden }))\n    )\n}\n\n/* ------------------------------------------------------------------------- */\n\n/**\n * Mount back-to-top\n *\n * @param el - Back-to-top element\n * @param options - Options\n *\n * @returns Back-to-top component observable\n */\nexport function mountBackToTop(\n  el: HTMLElement, { viewport$, header$, main$, target$ }: MountOptions\n): Observable<Component<BackToTop>> {\n  const push$ = new Subject<BackToTop>()\n  const done$ = push$.pipe(ignoreElements(), endWith(true))\n  push$.subscribe({\n\n    /* Handle emission */\n    next({ hidden }) {\n      el.hidden = hidden\n      if (hidden) {\n        el.setAttribute(\"tabindex\", \"-1\")\n        
el.blur()\n      } else {\n        el.removeAttribute(\"tabindex\")\n      }\n    },\n\n    /* Handle complete */\n    complete() {\n      el.style.top = \"\"\n      el.hidden = true\n      el.removeAttribute(\"tabindex\")\n    }\n  })\n\n  /* Watch header height */\n  header$\n    .pipe(\n      takeUntil(done$),\n      distinctUntilKeyChanged(\"height\")\n    )\n      .subscribe(({ height }) => {\n        el.style.top = `${height + 16}px`\n      })\n\n  /* Go back to top */\n  fromEvent(el, \"click\")\n    .subscribe(ev => {\n      ev.preventDefault()\n      window.scrollTo({ top: 0 })\n    })\n\n  /* Create and return component */\n  return watchBackToTop(el, { viewport$, main$, target$ })\n    .pipe(\n      tap(state => push$.next(state)),\n      finalize(() => push$.complete()),\n      map(state => ({ ref: el, ...state }))\n    )\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  EMPTY,\n  Observable,\n  filter,\n  finalize,\n  map,\n  mergeMap,\n  skip,\n  switchMap,\n  take,\n  takeUntil\n} from \"rxjs\"\n\nimport { feature } from \"~/_\"\nimport {\n  Viewport,\n  getElements,\n  watchElementVisibility\n} from \"~/browser\"\nimport { mountInlineTooltip2 } from \"~/components/tooltip2\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  document$: Observable<Document>      /* Document observable */\n  viewport$: Observable<Viewport>      /* Viewport observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch ellipsis\n *\n * This function will fetch all elements that are shortened with ellipsis, and\n * filter those which are visible. Once they become visible, they stay in that\n * state, even though they may be hidden again. 
This optimization is necessary\n * to reduce pressure on the browser, with elements fading in and out of view.\n *\n * @param options - Options\n */\nexport function patchEllipsis(\n  { document$, viewport$ }: PatchOptions\n): void {\n  document$\n    .pipe(\n      switchMap(() => getElements(\".md-ellipsis\")),\n      mergeMap(el => watchElementVisibility(el)\n        .pipe(\n          takeUntil(document$.pipe(skip(1))),\n          filter(visible => visible),\n          map(() => el),\n          take(1)\n        )\n      ),\n      filter(el => el.offsetWidth < el.scrollWidth),\n      mergeMap(el => {\n        const text = el.innerText\n        const host = el.closest(\"a\") || el\n        host.title = text\n\n        // Do not mount improved tooltip if feature is disabled\n        if (!feature(\"content.tooltips\"))\n          return EMPTY\n\n        /* Mount tooltip */\n        return mountInlineTooltip2(host, { viewport$ })\n          .pipe(\n            takeUntil(document$.pipe(skip(1))),\n            finalize(() => host.removeAttribute(\"title\"))\n          )\n      })\n    )\n      .subscribe()\n\n  // @todo move this outside of here and fix memleaks\n  if (feature(\"content.tooltips\"))\n    document$\n      .pipe(\n        switchMap(() => getElements(\".md-status\")),\n        mergeMap(el => mountInlineTooltip2(el, { viewport$ }))\n      )\n        .subscribe()\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  fromEvent,\n  map,\n  mergeMap,\n  switchMap,\n  takeWhile,\n  tap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  document$: Observable<Document>      /* Document observable */\n  tablet$: Observable<boolean>         /* Media tablet observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch indeterminate checkboxes\n *\n * This function replaces the indeterminate \"pseudo state\" with the actual\n * indeterminate state, which is used to keep navigation always expanded.\n *\n * @param options - Options\n */\nexport function patchIndeterminate(\n  { document$, tablet$ }: PatchOptions\n): void {\n  document$\n    .pipe(\n      switchMap(() => getElements<HTMLInputElement>(\n        \".md-toggle--indeterminate\"\n      )),\n      tap(el => {\n        el.indeterminate = true\n        el.checked = false\n      }),\n      mergeMap(el => fromEvent(el, \"change\")\n        .pipe(\n          takeWhile(() => el.classList.contains(\"md-toggle--indeterminate\")),\n          map(() => el)\n        )\n      ),\n      withLatestFrom(tablet$)\n    )\n      .subscribe(([el, tablet]) => {\n        el.classList.remove(\"md-toggle--indeterminate\")\n        if (tablet)\n          el.checked = false\n      })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  filter,\n  fromEvent,\n  map,\n  mergeMap,\n  switchMap,\n  tap\n} from \"rxjs\"\n\nimport { getElements } from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  document$: Observable<Document>      /* Document observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Helper functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Check whether the given device is an Apple device\n *\n * @returns Test result\n */\nfunction isAppleDevice(): boolean {\n  return /(iPad|iPhone|iPod)/.test(navigator.userAgent)\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch all elements with `data-md-scrollfix` attributes\n *\n * This is a year-old patch which ensures that overflow scrolling works at the\n * top and bottom of containers on iOS by ensuring a `1px` scroll offset upon\n * the start of a touch event.\n *\n * @see https://bit.ly/2SCtAOO - Original source\n *\n * @param options - Options\n */\nexport function patchScrollfix(\n  { document$ }: PatchOptions\n): void {\n  document$\n    .pipe(\n      switchMap(() => getElements(\"[data-md-scrollfix]\")),\n      tap(el => el.removeAttribute(\"data-md-scrollfix\")),\n      filter(isAppleDevice),\n      mergeMap(el => fromEvent(el, \"touchstart\")\n        .pipe(\n          map(() => el)\n        )\n      )\n    )\n      .subscribe(el => {\n        const top = el.scrollTop\n\n        /* We're at the top of the container */\n        if (top === 0) {\n          el.scrollTop = 1\n\n        /* We're at the bottom of the container */\n        } else if (top + el.offsetHeight === el.scrollHeight) {\n          el.scrollTop = top - 1\n        }\n      })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport {\n  Observable,\n  combineLatest,\n  delay,\n  map,\n  of,\n  switchMap,\n  withLatestFrom\n} from \"rxjs\"\n\nimport {\n  Viewport,\n  watchToggle\n} from \"~/browser\"\n\n/* ----------------------------------------------------------------------------\n * Helper types\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch options\n */\ninterface PatchOptions {\n  viewport$: Observable<Viewport>      /* Viewport observable */\n  tablet$: Observable<boolean>         /* Media tablet observable */\n}\n\n/* ----------------------------------------------------------------------------\n * Functions\n * ------------------------------------------------------------------------- */\n\n/**\n * Patch the document body to lock when search is open\n *\n * For mobile and tablet viewports, the search is rendered full screen, which\n * leads to scroll leaking when at the top or bottom of the search result. This\n * function locks the body when the search is in full screen mode, and restores\n * the scroll position when leaving.\n *\n * @param options - Options\n */\nexport function patchScrolllock(\n  { viewport$, tablet$ }: PatchOptions\n): void {\n  combineLatest([watchToggle(\"search\"), tablet$])\n    .pipe(\n      map(([active, tablet]) => active && !tablet),\n      switchMap(active => of(active)\n        .pipe(\n          delay(active ? 400 : 100)\n        )\n      ),\n      withLatestFrom(viewport$)\n    )\n      .subscribe(([active, { offset: { y }}]) => {\n        if (active) {\n          document.body.setAttribute(\"data-md-scrolllock\", \"\")\n          document.body.style.top = `-${y}px`\n        } else {\n          const value = -1 * parseInt(document.body.style.top, 10)\n          document.body.removeAttribute(\"data-md-scrolllock\")\n          document.body.style.top = \"\"\n          if (value)\n            window.scrollTo(0, value)\n        }\n      })\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath <martin.donath@squidfunk.com>\n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\n/* ----------------------------------------------------------------------------\n * Polyfills\n * ------------------------------------------------------------------------- */\n\n/* Polyfill `Object.entries` */\nif (!Object.entries)\n  Object.entries = function (obj: object) {\n    const data: [string, string][] = []\n    for (const key of Object.keys(obj))\n      // @ts-expect-error - ignore property access warning\n      data.push([key, obj[key]])\n\n    /* Return entries */\n    return data\n  }\n\n/* Polyfill `Object.values` */\nif (!Object.values)\n  Object.values = function (obj: object) {\n    const data: string[] = []\n    for (const key of Object.keys(obj))\n      // @ts-expect-error - ignore property access warning\n      data.push(obj[key])\n\n    /* Return values */\n    return data\n  }\n\n/* ------------------------------------------------------------------------- */\n\n/* Polyfills for `Element` */\nif (typeof Element !== \"undefined\") {\n\n  /* Polyfill `Element.scrollTo` */\n  if (!Element.prototype.scrollTo)\n    Element.prototype.scrollTo = function (\n      x?: ScrollToOptions | number, y?: number\n    ): void {\n      if (typeof x === \"object\") {\n        this.scrollLeft = x.left!\n        this.scrollTop = x.top!\n      } else {\n        this.scrollLeft = x!\n        this.scrollTop = y!\n      }\n    }\n\n  /* Polyfill `Element.replaceWith` */\n  if (!Element.prototype.replaceWith)\n    Element.prototype.replaceWith = function (\n      ...nodes: Array<string | Node>\n    ): void {\n      const parent = this.parentNode\n      if (parent) {\n        if (nodes.length === 0)\n          parent.removeChild(this)\n\n        /* Replace children and create text nodes */\n        for (let i = nodes.length - 1; i >= 0; i--) {\n          let node = nodes[i]\n          if (typeof node === \"string\")\n            node = document.createTextNode(node)\n          else if (node.parentNode)\n            node.parentNode.removeChild(node)\n\n          /* Replace child or insert before previous sibling */\n          if (!i)\n            parent.replaceChild(node, this)\n          else\n            parent.insertBefore(this.previousSibling!, node)\n        }\n      }\n    }\n}\n"],
+  "mappings": "2rCAAA,IAAAA,GAAAC,GAAA,CAAAC,GAAAC,KAAA,EAAC,SAAUC,EAAQC,EAAS,CAC1B,OAAOH,IAAY,UAAY,OAAOC,IAAW,YAAcE,EAAQ,EACvE,OAAO,QAAW,YAAc,OAAO,IAAM,OAAOA,CAAO,EAC1DA,EAAQ,CACX,GAAEH,GAAO,UAAY,CAAE,aASrB,SAASI,EAA0BC,EAAO,CACxC,IAAIC,EAAmB,GACnBC,EAA0B,GAC1BC,EAAiC,KAEjCC,EAAsB,CACxB,KAAM,GACN,OAAQ,GACR,IAAK,GACL,IAAK,GACL,MAAO,GACP,SAAU,GACV,OAAQ,GACR,KAAM,GACN,MAAO,GACP,KAAM,GACN,KAAM,GACN,SAAU,GACV,iBAAkB,EACpB,EAOA,SAASC,EAAmBC,EAAI,CAC9B,MACE,GAAAA,GACAA,IAAO,UACPA,EAAG,WAAa,QAChBA,EAAG,WAAa,QAChB,cAAeA,GACf,aAAcA,EAAG,UAKrB,CASA,SAASC,EAA8BD,EAAI,CACzC,IAAIE,GAAOF,EAAG,KACVG,GAAUH,EAAG,QAUjB,MARI,GAAAG,KAAY,SAAWL,EAAoBI,EAAI,GAAK,CAACF,EAAG,UAIxDG,KAAY,YAAc,CAACH,EAAG,UAI9BA,EAAG,kBAKT,CAOA,SAASI,EAAqBJ,EAAI,CAC5BA,EAAG,UAAU,SAAS,eAAe,IAGzCA,EAAG,UAAU,IAAI,eAAe,EAChCA,EAAG,aAAa,2BAA4B,EAAE,EAChD,CAOA,SAASK,EAAwBL,EAAI,CAC9BA,EAAG,aAAa,0BAA0B,IAG/CA,EAAG,UAAU,OAAO,eAAe,EACnCA,EAAG,gBAAgB,0BAA0B,EAC/C,CAUA,SAASM,EAAUC,EAAG,CAChBA,EAAE,SAAWA,EAAE,QAAUA,EAAE,UAI3BR,EAAmBL,EAAM,aAAa,GACxCU,EAAqBV,EAAM,aAAa,EAG1CC,EAAmB,GACrB,CAUA,SAASa,EAAcD,EAAG,CACxBZ,EAAmB,EACrB,CASA,SAASc,EAAQF,EAAG,CAEbR,EAAmBQ,EAAE,MAAM,IAI5BZ,GAAoBM,EAA8BM,EAAE,MAAM,IAC5DH,EAAqBG,EAAE,MAAM,CAEjC,CAMA,SAASG,EAAOH,EAAG,CACZR,EAAmBQ,EAAE,MAAM,IAK9BA,EAAE,OAAO,UAAU,SAAS,eAAe,GAC3CA,EAAE,OAAO,aAAa,0BAA0B,KAMhDX,EAA0B,GAC1B,OAAO,aAAaC,CAA8B,EAClDA,EAAiC,OAAO,WAAW,UAAW,CAC5DD,EAA0B,EAC5B,EAAG,GAAG,EACNS,EAAwBE,EAAE,MAAM,EAEpC,CAOA,SAASI,EAAmBJ,EAAG,CACzB,SAAS,kBAAoB,WAK3BX,IACFD,EAAmB,IAErBiB,GAA+B,EAEnC,CAQA,SAASA,IAAiC,CACxC,SAAS,iBAAiB,YAAaC,CAAoB,EAC3D,SAAS,iBAAiB,YAAaA,CAAoB,EAC3D,SAAS,iBAAiB,UAAWA,CAAoB,EACzD,SAAS,iBAAiB,cAAeA,CAAoB,EAC7D,SAAS,iBAAiB,cAAeA,CAAoB,EAC7D,SAAS,iBAAiB,YAAaA,CAAoB,EAC3D,SAAS,iBAAiB,YAAaA,CAAoB,EAC3D,SAAS,iBAAiB,aAAcA,CAAoB,EAC5D,SAAS,iBAAiB,WAAYA,CAAoB,CAC5D,CAEA,SAASC,IAAoC,CAC3C,SAAS,oBAAoB,YAAaD,CAAoB,EAC9D,SAAS,oBAAoB,YAAaA,CAAoB,EAC9D,SAAS,oBAAoB,UAAWA,CAAoB,EAC5D,SAAS,oBAAoB,cAAeA,CAAoB,EAChE,SAAS,oBAAoB,cAAeA,CAAoB,EAChE,SAAS,oBAAoB,YAAaA,CAAoB,EAC9D,SAAS,oBAAoB,YAAaA,CAAoB,EAC9D,SAAS,oBAAoB,aAAcA,CAAoB,EAC/D,SAAS,oBAAoB,WAAYA,CAAoB,CAC/D,CASA,SAASA,EAAqBN,EAAG,CAG3BA,EAAE,OAAO,UAAYA,EAAE,OAAO,SAAS,YAAY,IAAM,SAI7DZ,EAAmB,GACnBmB,GAAkC,EACpC,CAKA,SAAS,iBAAiB,UAAWR,EAAW,EAAI,EACpD,SAAS,iBAAiB,YAAaE,EAAe,EAAI,EAC1D,SAAS,iBAAiB,cAAeA,EAAe,EAAI,EAC5D,SAAS,iBAAiB,aAAcA,EAAe,EAAI,EAC3D,SAAS,iBAAiB,mBAAoBG,EAAoB,EAAI,EAEtEC,GAA+B,EAM/BlB,EAAM,iBAAiB,QAASe,EAAS,EAAI,EAC7Cf,EAAM,iBAAiB,OAAQgB,EAAQ,EAAI,EAOvChB,EAAM,WAAa,KAAK,wBAA0BA,EAAM,KAI1DA,EAAM,KAAK,aAAa,wBAAyB,EAAE,EAC1CA,EAAM,WAAa,KAAK,gBACjC,SAAS,gBAAgB,UAAU,IAAI,kBAAkB,EACzD,SAAS,gBAAgB,aAAa,wBAAyB,EAAE,EAErE,CAKA,GAAI,OAAO,QAAW,aAAe,OAAO,UAAa,YAAa,CAIpE,OAAO,0BAA4BD,EAInC,IAAIsB,EAEJ,GAAI,CACFA,EAAQ,IAAI,YAAY,8BAA8B,CACxD,OAASC,EAAO,CAEdD,EAAQ,SAAS,YAAY,aAAa,EAC1CA,EAAM,gBAAgB,+BAAgC,GAAO,GAAO,CAAC,CAAC,CACxE,CAEA,OAAO,cAAcA,CAAK,CAC5B,CAEI,OAAO,UAAa,aAGtBtB,EAA0B,QAAQ,CAGtC,CAAE,ICvTF,IAAAwB,GAAAC,GAAA,CAAAC,GAAAC,KAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,GAeA,IAAIC,GAAkB,UAOtBD,GAAO,QAAUE,GAUjB,SAASA,GAAWC,EAAQ,CAC1B,IAAIC,EAAM,GAAKD,EACXE,EAAQJ,GAAgB,KAAKG,CAAG,EAEpC,GAAI,CAACC,EACH,OAAOD,EAGT,IAAIE,EACAC,EAAO,GACPC,EAAQ,EACRC,EAAY,EAEhB,IAAKD,EAAQH,EAAM,MAAOG,EAAQJ,EAAI,OAAQI,IAAS,CACrD,OAAQJ,EAAI,WAAWI,CAAK,EAAG,CAC7B,IAAK,IACHF,EAAS,SACT,MACF,IAAK,IACHA,EAAS,QACT,MACF,IAAK,IACHA,EAAS,QACT,MACF,IAAK,IACHA,EAAS,OACT,MACF,IAAK,IACHA,EAAS,OACT,MACF,QACE,QACJ,CAEIG,IAAcD,IAChBD,GAAQH,EAAI,UAAUK,EAAWD,CAAK,GAGxCC,EAAYD,EAAQ,EACpBD,GAAQD,CACV,CAEA,OAAOG,IAAcD,EACjBD,EAAOH,EAAI,UAAUK,EAAWD,CAAK,EACrCD,CACN,IC7EA,IAAAG,GAAAC,GAAA,CAAAC,G
AAAC,KAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAMC,SAA0CC,EAAMC,EAAS,CACtD,OAAOH,IAAY,UAAY,OAAOC,IAAW,SACnDA,GAAO,QAAUE,EAAQ,EAClB,OAAO,QAAW,YAAc,OAAO,IAC9C,OAAO,CAAC,EAAGA,CAAO,EACX,OAAOH,IAAY,SAC1BA,GAAQ,YAAiBG,EAAQ,EAEjCD,EAAK,YAAiBC,EAAQ,CAChC,GAAGH,GAAM,UAAW,CACpB,OAAiB,UAAW,CAClB,IAAII,EAAuB,CAE/B,IACC,SAASC,EAAyBC,EAAqBC,EAAqB,CAEnF,aAGAA,EAAoB,EAAED,EAAqB,CACzC,QAAW,UAAW,CAAE,OAAqBE,EAAW,CAC1D,CAAC,EAGD,IAAIC,EAAeF,EAAoB,GAAG,EACtCG,EAAoCH,EAAoB,EAAEE,CAAY,EAEtEE,EAASJ,EAAoB,GAAG,EAChCK,EAA8BL,EAAoB,EAAEI,CAAM,EAE1DE,EAAaN,EAAoB,GAAG,EACpCO,EAA8BP,EAAoB,EAAEM,CAAU,EAOlE,SAASE,EAAQC,EAAM,CACrB,GAAI,CACF,OAAO,SAAS,YAAYA,CAAI,CAClC,OAASC,EAAK,CACZ,MAAO,EACT,CACF,CAUA,IAAIC,EAAqB,SAA4BC,EAAQ,CAC3D,IAAIC,EAAeN,EAAe,EAAEK,CAAM,EAC1C,OAAAJ,EAAQ,KAAK,EACNK,CACT,EAEiCC,EAAeH,EAOhD,SAASI,EAAkBC,EAAO,CAChC,IAAIC,EAAQ,SAAS,gBAAgB,aAAa,KAAK,IAAM,MACzDC,EAAc,SAAS,cAAc,UAAU,EAEnDA,EAAY,MAAM,SAAW,OAE7BA,EAAY,MAAM,OAAS,IAC3BA,EAAY,MAAM,QAAU,IAC5BA,EAAY,MAAM,OAAS,IAE3BA,EAAY,MAAM,SAAW,WAC7BA,EAAY,MAAMD,EAAQ,QAAU,MAAM,EAAI,UAE9C,IAAIE,EAAY,OAAO,aAAe,SAAS,gBAAgB,UAC/D,OAAAD,EAAY,MAAM,IAAM,GAAG,OAAOC,EAAW,IAAI,EACjDD,EAAY,aAAa,WAAY,EAAE,EACvCA,EAAY,MAAQF,EACbE,CACT,CAYA,IAAIE,GAAiB,SAAwBJ,EAAOK,EAAS,CAC3D,IAAIH,EAAcH,EAAkBC,CAAK,EACzCK,EAAQ,UAAU,YAAYH,CAAW,EACzC,IAAIL,EAAeN,EAAe,EAAEW,CAAW,EAC/C,OAAAV,EAAQ,MAAM,EACdU,EAAY,OAAO,EACZL,CACT,EASIS,GAAsB,SAA6BV,EAAQ,CAC7D,IAAIS,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAChF,UAAW,SAAS,IACtB,EACIR,EAAe,GAEnB,OAAI,OAAOD,GAAW,SACpBC,EAAeO,GAAeR,EAAQS,CAAO,EACpCT,aAAkB,kBAAoB,CAAC,CAAC,OAAQ,SAAU,MAAO,MAAO,UAAU,EAAE,SAASA,GAAW,KAA4B,OAASA,EAAO,IAAI,EAEjKC,EAAeO,GAAeR,EAAO,MAAOS,CAAO,GAEnDR,EAAeN,EAAe,EAAEK,CAAM,EACtCJ,EAAQ,MAAM,GAGTK,CACT,EAEiCU,EAAgBD,GAEjD,SAASE,EAAQC,EAAK,CAAE,0BAA2B,OAAI,OAAO,QAAW,YAAc,OAAO,OAAO,UAAa,SAAYD,EAAU,SAAiBC,EAAK,CAAE,OAAO,OAAOA,CAAK,EAAYD,EAAU,SAAiBC,EAAK,CAAE,OAAOA,GAAO,OAAO,QAAW,YAAcA,EAAI,cAAgB,QAAUA,IAAQ,OAAO,UAAY,SAAW,OAAOA,CAAK,EAAYD,EAAQC,CAAG,CAAG,CAUzX,IAAIC,GAAyB,UAAkC,CAC7D,IAAIL,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAAC,EAE/EM,EAAkBN,EAAQ,OAC1BO,EAASD,IAAoB,OAAS,OAASA,EAC/CE,EAAYR,EAAQ,UACpBT,EAASS,EAAQ,OACjBS,GAAOT,EAAQ,KAEnB,GAAIO,IAAW,QAAUA,IAAW,MAClC,MAAM,IAAI,MAAM,oDAAoD,EAItE,GAAIhB,IAAW,OACb,GAAIA,GAAUY,EAAQZ,CAAM,IAAM,UAAYA,EAAO,WAAa,EAAG,CACnE,GAAIgB,IAAW,QAAUhB,EAAO,aAAa,UAAU,EACrD,MAAM,IAAI,MAAM,mFAAmF,EAGrG,GAAIgB,IAAW,QAAUhB,EAAO,aAAa,UAAU,GAAKA,EAAO,aAAa,UAAU,GACxF,MAAM,IAAI,MAAM,uGAAwG,CAE5H,KACE,OAAM,IAAI,MAAM,6CAA6C,EAKjE,GAAIkB,GACF,OAAOP,EAAaO,GAAM,CACxB,UAAWD,CACb,CAAC,EAIH,GAAIjB,EACF,OAAOgB,IAAW,MAAQd,EAAYF,CAAM,EAAIW,EAAaX,EAAQ,CACnE,UAAWiB,CACb,CAAC,CAEL,EAEiCE,GAAmBL,GAEpD,SAASM,GAAiBP,EAAK,CAAE,0BAA2B,OAAI,OAAO,QAAW,YAAc,OAAO,OAAO,UAAa,SAAYO,GAAmB,SAAiBP,EAAK,CAAE,OAAO,OAAOA,CAAK,EAAYO,GAAmB,SAAiBP,EAAK,CAAE,OAAOA,GAAO,OAAO,QAAW,YAAcA,EAAI,cAAgB,QAAUA,IAAQ,OAAO,UAAY,SAAW,OAAOA,CAAK,EAAYO,GAAiBP,CAAG,CAAG,CAE7Z,SAASQ,GAAgBC,EAAUC,EAAa,CAAE,GAAI,EAAED,aAAoBC,GAAgB,MAAM,IAAI,UAAU,mCAAmC,CAAK,CAExJ,SAASC,GAAkBxB,EAAQyB,EAAO,CAAE,QAASC,EAAI,EAAGA,EAAID,EAAM,OAAQC,IAAK,CAAE,IAAIC,EAAaF,EAAMC,CAAC,EAAGC,EAAW,WAAaA,EAAW,YAAc,GAAOA,EAAW,aAAe,GAAU,UAAWA,IAAYA,EAAW,SAAW,IAAM,OAAO,eAAe3B,EAAQ2B,EAAW,IAAKA,CAAU,CAAG,CAAE,CAE5T,SAASC,GAAaL,EAAaM,EAAYC,EAAa,CAAE,OAAID,GAAYL,GAAkBD,EAAY,UAAWM,CAAU,EAAOC,GAAaN,GAAkBD,EAAaO,CAAW,EAAUP,CAAa,CAEtN,SAASQ,GAAUC,EAAUC,EAAY,CAAE,GAAI,OAAOA,GAAe,YAAcA,IAAe,KAAQ,MAAM,IAAI,UAAU,oDAAoD,EAAKD,EAAS,UAAY,OAAO,OAAOC,GAAcA,EAAW,UAAW,CAAE,YAAa,CAAE,MAAOD,EAAU,SAAU,GAAM,aAAc,EAAK,CAAE,CAAC,EAAOC,GAAYC,GAAgBF,EAAUC,CAAU,CAAG,CAEhY,SAASC,GAAgBC,EAAGC,EAAG,CAAE,
OAAAF,GAAkB,OAAO,gBAAkB,SAAyBC,EAAGC,EAAG,CAAE,OAAAD,EAAE,UAAYC,EAAUD,CAAG,EAAUD,GAAgBC,EAAGC,CAAC,CAAG,CAEzK,SAASC,GAAaC,EAAS,CAAE,IAAIC,EAA4BC,GAA0B,EAAG,OAAO,UAAgC,CAAE,IAAIC,EAAQC,GAAgBJ,CAAO,EAAGK,EAAQ,GAAIJ,EAA2B,CAAE,IAAIK,EAAYF,GAAgB,IAAI,EAAE,YAAaC,EAAS,QAAQ,UAAUF,EAAO,UAAWG,CAAS,CAAG,MAASD,EAASF,EAAM,MAAM,KAAM,SAAS,EAAK,OAAOI,GAA2B,KAAMF,CAAM,CAAG,CAAG,CAExa,SAASE,GAA2BC,EAAMC,EAAM,CAAE,OAAIA,IAAS3B,GAAiB2B,CAAI,IAAM,UAAY,OAAOA,GAAS,YAAsBA,EAAeC,GAAuBF,CAAI,CAAG,CAEzL,SAASE,GAAuBF,EAAM,CAAE,GAAIA,IAAS,OAAU,MAAM,IAAI,eAAe,2DAA2D,EAAK,OAAOA,CAAM,CAErK,SAASN,IAA4B,CAA0E,GAApE,OAAO,SAAY,aAAe,CAAC,QAAQ,WAA6B,QAAQ,UAAU,KAAM,MAAO,GAAO,GAAI,OAAO,OAAU,WAAY,MAAO,GAAM,GAAI,CAAE,YAAK,UAAU,SAAS,KAAK,QAAQ,UAAU,KAAM,CAAC,EAAG,UAAY,CAAC,CAAC,CAAC,EAAU,EAAM,OAASS,EAAG,CAAE,MAAO,EAAO,CAAE,CAEnU,SAASP,GAAgBP,EAAG,CAAE,OAAAO,GAAkB,OAAO,eAAiB,OAAO,eAAiB,SAAyBP,EAAG,CAAE,OAAOA,EAAE,WAAa,OAAO,eAAeA,CAAC,CAAG,EAAUO,GAAgBP,CAAC,CAAG,CAa5M,SAASe,GAAkBC,EAAQC,EAAS,CAC1C,IAAIC,EAAY,kBAAkB,OAAOF,CAAM,EAE/C,GAAKC,EAAQ,aAAaC,CAAS,EAInC,OAAOD,EAAQ,aAAaC,CAAS,CACvC,CAOA,IAAIC,GAAyB,SAAUC,EAAU,CAC/CxB,GAAUuB,EAAWC,CAAQ,EAE7B,IAAIC,EAASnB,GAAaiB,CAAS,EAMnC,SAASA,EAAUG,EAAShD,EAAS,CACnC,IAAIiD,EAEJ,OAAArC,GAAgB,KAAMiC,CAAS,EAE/BI,EAAQF,EAAO,KAAK,IAAI,EAExBE,EAAM,eAAejD,CAAO,EAE5BiD,EAAM,YAAYD,CAAO,EAElBC,CACT,CAQA,OAAA9B,GAAa0B,EAAW,CAAC,CACvB,IAAK,iBACL,MAAO,UAA0B,CAC/B,IAAI7C,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAAC,EACnF,KAAK,OAAS,OAAOA,EAAQ,QAAW,WAAaA,EAAQ,OAAS,KAAK,cAC3E,KAAK,OAAS,OAAOA,EAAQ,QAAW,WAAaA,EAAQ,OAAS,KAAK,cAC3E,KAAK,KAAO,OAAOA,EAAQ,MAAS,WAAaA,EAAQ,KAAO,KAAK,YACrE,KAAK,UAAYW,GAAiBX,EAAQ,SAAS,IAAM,SAAWA,EAAQ,UAAY,SAAS,IACnG,CAMF,EAAG,CACD,IAAK,cACL,MAAO,SAAqBgD,EAAS,CACnC,IAAIE,EAAS,KAEb,KAAK,SAAWlE,EAAe,EAAEgE,EAAS,QAAS,SAAUR,GAAG,CAC9D,OAAOU,EAAO,QAAQV,EAAC,CACzB,CAAC,CACH,CAMF,EAAG,CACD,IAAK,UACL,MAAO,SAAiBA,EAAG,CACzB,IAAIQ,EAAUR,EAAE,gBAAkBA,EAAE,cAChCjC,GAAS,KAAK,OAAOyC,CAAO,GAAK,OACjCvC,GAAOC,GAAgB,CACzB,OAAQH,GACR,UAAW,KAAK,UAChB,OAAQ,KAAK,OAAOyC,CAAO,EAC3B,KAAM,KAAK,KAAKA,CAAO,CACzB,CAAC,EAED,KAAK,KAAKvC,GAAO,UAAY,QAAS,CACpC,OAAQF,GACR,KAAME,GACN,QAASuC,EACT,eAAgB,UAA0B,CACpCA,GACFA,EAAQ,MAAM,EAGhB,OAAO,aAAa,EAAE,gBAAgB,CACxC,CACF,CAAC,CACH,CAMF,EAAG,CACD,IAAK,gBACL,MAAO,SAAuBA,EAAS,CACrC,OAAOP,GAAkB,SAAUO,CAAO,CAC5C,CAMF,EAAG,CACD,IAAK,gBACL,MAAO,SAAuBA,EAAS,CACrC,IAAIG,EAAWV,GAAkB,SAAUO,CAAO,EAElD,GAAIG,EACF,OAAO,SAAS,cAAcA,CAAQ,CAE1C,CAQF,EAAG,CACD,IAAK,cAML,MAAO,SAAqBH,EAAS,CACnC,OAAOP,GAAkB,OAAQO,CAAO,CAC1C,CAKF,EAAG,CACD,IAAK,UACL,MAAO,UAAmB,CACxB,KAAK,SAAS,QAAQ,CACxB,CACF,CAAC,EAAG,CAAC,CACH,IAAK,OACL,MAAO,SAAczD,EAAQ,CAC3B,IAAIS,EAAU,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAChF,UAAW,SAAS,IACtB,EACA,OAAOE,EAAaX,EAAQS,CAAO,CACrC,CAOF,EAAG,CACD,IAAK,MACL,MAAO,SAAaT,EAAQ,CAC1B,OAAOE,EAAYF,CAAM,CAC3B,CAOF,EAAG,CACD,IAAK,cACL,MAAO,UAAuB,CAC5B,IAAIgB,EAAS,UAAU,OAAS,GAAK,UAAU,CAAC,IAAM,OAAY,UAAU,CAAC,EAAI,CAAC,OAAQ,KAAK,EAC3F6C,EAAU,OAAO7C,GAAW,SAAW,CAACA,CAAM,EAAIA,EAClD8C,GAAU,CAAC,CAAC,SAAS,sBACzB,OAAAD,EAAQ,QAAQ,SAAU7C,GAAQ,CAChC8C,GAAUA,IAAW,CAAC,CAAC,SAAS,sBAAsB9C,EAAM,CAC9D,CAAC,EACM8C,EACT,CACF,CAAC,CAAC,EAEKR,CACT,EAAG/D,EAAqB,CAAE,EAEOF,GAAaiE,EAExC,EAEA,IACC,SAASxE,EAAQ,CAExB,IAAIiF,EAAqB,EAKzB,GAAI,OAAO,SAAY,aAAe,CAAC,QAAQ,UAAU,QAAS,CAC9D,IAAIC,EAAQ,QAAQ,UAEpBA,EAAM,QAAUA,EAAM,iBACNA,EAAM,oBACNA,EAAM,mBACNA,EAAM,kBACNA,EAAM,qBAC1B,CASA,SAASC,EAASb,EAASQ,EAAU,CACjC,KAAOR,GAAWA,EAAQ,WAAaW,GAAoB,CACvD,GAAI,OAAOX,EAAQ,SAAY,YAC3BA,EAAQ,QAAQQ,CAAQ,EAC1B,OAAOR,EAETA,EAAUA,EAAQ,UACtB,CACJ,CAEAtE,EAAO,QAAUmF,CAGX,EAEA,IACC,SAASnF,EAAQoF,EAA0B9E,EAAqB,CAEvE,IAA
I6E,EAAU7E,EAAoB,GAAG,EAYrC,SAAS+E,EAAUf,EAASQ,EAAU/D,EAAMuE,EAAUC,EAAY,CAC9D,IAAIC,EAAaC,EAAS,MAAM,KAAM,SAAS,EAE/C,OAAAnB,EAAQ,iBAAiBvD,EAAMyE,EAAYD,CAAU,EAE9C,CACH,QAAS,UAAW,CAChBjB,EAAQ,oBAAoBvD,EAAMyE,EAAYD,CAAU,CAC5D,CACJ,CACJ,CAYA,SAASG,EAASC,EAAUb,EAAU/D,EAAMuE,EAAUC,EAAY,CAE9D,OAAI,OAAOI,EAAS,kBAAqB,WAC9BN,EAAU,MAAM,KAAM,SAAS,EAItC,OAAOtE,GAAS,WAGTsE,EAAU,KAAK,KAAM,QAAQ,EAAE,MAAM,KAAM,SAAS,GAI3D,OAAOM,GAAa,WACpBA,EAAW,SAAS,iBAAiBA,CAAQ,GAI1C,MAAM,UAAU,IAAI,KAAKA,EAAU,SAAUrB,EAAS,CACzD,OAAOe,EAAUf,EAASQ,EAAU/D,EAAMuE,EAAUC,CAAU,CAClE,CAAC,EACL,CAWA,SAASE,EAASnB,EAASQ,EAAU/D,EAAMuE,EAAU,CACjD,OAAO,SAASnB,EAAG,CACfA,EAAE,eAAiBgB,EAAQhB,EAAE,OAAQW,CAAQ,EAEzCX,EAAE,gBACFmB,EAAS,KAAKhB,EAASH,CAAC,CAEhC,CACJ,CAEAnE,EAAO,QAAU0F,CAGX,EAEA,IACC,SAAStF,EAAyBL,EAAS,CAQlDA,EAAQ,KAAO,SAASuB,EAAO,CAC3B,OAAOA,IAAU,QACVA,aAAiB,aACjBA,EAAM,WAAa,CAC9B,EAQAvB,EAAQ,SAAW,SAASuB,EAAO,CAC/B,IAAIP,EAAO,OAAO,UAAU,SAAS,KAAKO,CAAK,EAE/C,OAAOA,IAAU,SACTP,IAAS,qBAAuBA,IAAS,4BACzC,WAAYO,IACZA,EAAM,SAAW,GAAKvB,EAAQ,KAAKuB,EAAM,CAAC,CAAC,EACvD,EAQAvB,EAAQ,OAAS,SAASuB,EAAO,CAC7B,OAAO,OAAOA,GAAU,UACjBA,aAAiB,MAC5B,EAQAvB,EAAQ,GAAK,SAASuB,EAAO,CACzB,IAAIP,EAAO,OAAO,UAAU,SAAS,KAAKO,CAAK,EAE/C,OAAOP,IAAS,mBACpB,CAGM,EAEA,IACC,SAASf,EAAQoF,EAA0B9E,EAAqB,CAEvE,IAAIsF,EAAKtF,EAAoB,GAAG,EAC5BoF,EAAWpF,EAAoB,GAAG,EAWtC,SAASI,EAAOQ,EAAQH,EAAMuE,EAAU,CACpC,GAAI,CAACpE,GAAU,CAACH,GAAQ,CAACuE,EACrB,MAAM,IAAI,MAAM,4BAA4B,EAGhD,GAAI,CAACM,EAAG,OAAO7E,CAAI,EACf,MAAM,IAAI,UAAU,kCAAkC,EAG1D,GAAI,CAAC6E,EAAG,GAAGN,CAAQ,EACf,MAAM,IAAI,UAAU,mCAAmC,EAG3D,GAAIM,EAAG,KAAK1E,CAAM,EACd,OAAO2E,EAAW3E,EAAQH,EAAMuE,CAAQ,EAEvC,GAAIM,EAAG,SAAS1E,CAAM,EACvB,OAAO4E,EAAe5E,EAAQH,EAAMuE,CAAQ,EAE3C,GAAIM,EAAG,OAAO1E,CAAM,EACrB,OAAO6E,EAAe7E,EAAQH,EAAMuE,CAAQ,EAG5C,MAAM,IAAI,UAAU,2EAA2E,CAEvG,CAWA,SAASO,EAAWG,EAAMjF,EAAMuE,EAAU,CACtC,OAAAU,EAAK,iBAAiBjF,EAAMuE,CAAQ,EAE7B,CACH,QAAS,UAAW,CAChBU,EAAK,oBAAoBjF,EAAMuE,CAAQ,CAC3C,CACJ,CACJ,CAWA,SAASQ,EAAeG,EAAUlF,EAAMuE,EAAU,CAC9C,aAAM,UAAU,QAAQ,KAAKW,EAAU,SAASD,EAAM,CAClDA,EAAK,iBAAiBjF,EAAMuE,CAAQ,CACxC,CAAC,EAEM,CACH,QAAS,UAAW,CAChB,MAAM,UAAU,QAAQ,KAAKW,EAAU,SAASD,EAAM,CAClDA,EAAK,oBAAoBjF,EAAMuE,CAAQ,CAC3C,CAAC,CACL,CACJ,CACJ,CAWA,SAASS,EAAejB,EAAU/D,EAAMuE,EAAU,CAC9C,OAAOI,EAAS,SAAS,KAAMZ,EAAU/D,EAAMuE,CAAQ,CAC3D,CAEAtF,EAAO,QAAUU,CAGX,EAEA,IACC,SAASV,EAAQ,CAExB,SAASkG,EAAO5B,EAAS,CACrB,IAAInD,EAEJ,GAAImD,EAAQ,WAAa,SACrBA,EAAQ,MAAM,EAEdnD,EAAemD,EAAQ,cAElBA,EAAQ,WAAa,SAAWA,EAAQ,WAAa,WAAY,CACtE,IAAI6B,EAAa7B,EAAQ,aAAa,UAAU,EAE3C6B,GACD7B,EAAQ,aAAa,WAAY,EAAE,EAGvCA,EAAQ,OAAO,EACfA,EAAQ,kBAAkB,EAAGA,EAAQ,MAAM,MAAM,EAE5C6B,GACD7B,EAAQ,gBAAgB,UAAU,EAGtCnD,EAAemD,EAAQ,KAC3B,KACK,CACGA,EAAQ,aAAa,iBAAiB,GACtCA,EAAQ,MAAM,EAGlB,IAAI8B,EAAY,OAAO,aAAa,EAChCC,EAAQ,SAAS,YAAY,EAEjCA,EAAM,mBAAmB/B,CAAO,EAChC8B,EAAU,gBAAgB,EAC1BA,EAAU,SAASC,CAAK,EAExBlF,EAAeiF,EAAU,SAAS,CACtC,CAEA,OAAOjF,CACX,CAEAnB,EAAO,QAAUkG,CAGX,EAEA,IACC,SAASlG,EAAQ,CAExB,SAASsG,GAAK,CAGd,CAEAA,EAAE,UAAY,CACZ,GAAI,SAAUC,EAAMjB,EAAUkB,EAAK,CACjC,IAAIrC,EAAI,KAAK,IAAM,KAAK,EAAI,CAAC,GAE7B,OAACA,EAAEoC,CAAI,IAAMpC,EAAEoC,CAAI,EAAI,CAAC,IAAI,KAAK,CAC/B,GAAIjB,EACJ,IAAKkB,CACP,CAAC,EAEM,IACT,EAEA,KAAM,SAAUD,EAAMjB,EAAUkB,EAAK,CACnC,IAAIxC,EAAO,KACX,SAASyB,GAAY,CACnBzB,EAAK,IAAIuC,EAAMd,CAAQ,EACvBH,EAAS,MAAMkB,EAAK,SAAS,CAC/B,CAEA,OAAAf,EAAS,EAAIH,EACN,KAAK,GAAGiB,EAAMd,EAAUe,CAAG,CACpC,EAEA,KAAM,SAAUD,EAAM,CACpB,IAAIE,EAAO,CAAC,EAAE,MAAM,KAAK,UAAW,CAAC,EACjCC,IAAW,KAAK,IAAM,KAAK,EAAI,CAAC,IAAIH,CAAI,GAAK,CAAC,GAAG,MAAM,EACvD3D,EAAI,EACJ+D,EAAMD,EAAO,OAEjB,IAAK9D,EAAGA,EAAI+D,EAAK/D,IACf8D,EAAO9D,CAAC,EAAE,GAAG,MAAM8D,EAAO9D,CAAC,EAAE,IAAK6D,CAAI,EAGxC,O
AAO,IACT,EAEA,IAAK,SAAUF,EAAMjB,EAAU,CAC7B,IAAInB,EAAI,KAAK,IAAM,KAAK,EAAI,CAAC,GACzByC,EAAOzC,EAAEoC,CAAI,EACbM,EAAa,CAAC,EAElB,GAAID,GAAQtB,EACV,QAAS1C,EAAI,EAAG+D,EAAMC,EAAK,OAAQhE,EAAI+D,EAAK/D,IACtCgE,EAAKhE,CAAC,EAAE,KAAO0C,GAAYsB,EAAKhE,CAAC,EAAE,GAAG,IAAM0C,GAC9CuB,EAAW,KAAKD,EAAKhE,CAAC,CAAC,EAQ7B,OAACiE,EAAW,OACR1C,EAAEoC,CAAI,EAAIM,EACV,OAAO1C,EAAEoC,CAAI,EAEV,IACT,CACF,EAEAvG,EAAO,QAAUsG,EACjBtG,EAAO,QAAQ,YAAcsG,CAGvB,CAEI,EAGIQ,EAA2B,CAAC,EAGhC,SAASxG,EAAoByG,EAAU,CAEtC,GAAGD,EAAyBC,CAAQ,EACnC,OAAOD,EAAyBC,CAAQ,EAAE,QAG3C,IAAI/G,EAAS8G,EAAyBC,CAAQ,EAAI,CAGjD,QAAS,CAAC,CACX,EAGA,OAAA5G,EAAoB4G,CAAQ,EAAE/G,EAAQA,EAAO,QAASM,CAAmB,EAGlEN,EAAO,OACf,CAIA,OAAC,UAAW,CAEXM,EAAoB,EAAI,SAASN,EAAQ,CACxC,IAAIgH,EAAShH,GAAUA,EAAO,WAC7B,UAAW,CAAE,OAAOA,EAAO,OAAY,EACvC,UAAW,CAAE,OAAOA,CAAQ,EAC7B,OAAAM,EAAoB,EAAE0G,EAAQ,CAAE,EAAGA,CAAO,CAAC,EACpCA,CACR,CACD,EAAE,EAGD,UAAW,CAEX1G,EAAoB,EAAI,SAASP,EAASkH,EAAY,CACrD,QAAQC,KAAOD,EACX3G,EAAoB,EAAE2G,EAAYC,CAAG,GAAK,CAAC5G,EAAoB,EAAEP,EAASmH,CAAG,GAC/E,OAAO,eAAenH,EAASmH,EAAK,CAAE,WAAY,GAAM,IAAKD,EAAWC,CAAG,CAAE,CAAC,CAGjF,CACD,EAAE,EAGD,UAAW,CACX5G,EAAoB,EAAI,SAASyB,EAAKoF,EAAM,CAAE,OAAO,OAAO,UAAU,eAAe,KAAKpF,EAAKoF,CAAI,CAAG,CACvG,EAAE,EAMK7G,EAAoB,GAAG,CAC/B,EAAG,EACX,OACD,CAAC,ICn2BD,IAAA8G,GAAO,SCtBP;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gFAgBA,IAAIC,GAAgB,SAASC,EAAGC,EAAG,CAC/B,OAAAF,GAAgB,OAAO,gBAClB,CAAE,UAAW,CAAC,CAAE,YAAa,OAAS,SAAUC,EAAGC,EAAG,CAAED,EAAE,UAAYC,CAAG,GAC1E,SAAUD,EAAGC,EAAG,CAAE,QAASC,KAAKD,EAAO,OAAO,UAAU,eAAe,KAAKA,EAAGC,CAAC,IAAGF,EAAEE,CAAC,EAAID,EAAEC,CAAC,EAAG,EAC7FH,GAAcC,EAAGC,CAAC,CAC7B,EAEO,SAASE,GAAUH,EAAGC,EAAG,CAC5B,GAAI,OAAOA,GAAM,YAAcA,IAAM,KACjC,MAAM,IAAI,UAAU,uBAAyB,OAAOA,CAAC,EAAI,+BAA+B,EAC5FF,GAAcC,EAAGC,CAAC,EAClB,SAASG,GAAK,CAAE,KAAK,YAAcJ,CAAG,CACtCA,EAAE,UAAYC,IAAM,KAAO,OAAO,OAAOA,CAAC,GAAKG,EAAG,UAAYH,EAAE,UAAW,IAAIG,EACnF,CAwCO,SAASC,GAAUC,EAASC,EAAYC,EAAGC,EAAW,CACzD,SAASC,EAAMC,EAAO,CAAE,OAAOA,aAAiBH,EAAIG,EAAQ,IAAIH,EAAE,SAAUI,EAAS,CAAEA,EAAQD,CAAK,CAAG,CAAC,CAAG,CAC3G,OAAO,IAAKH,IAAMA,EAAI,UAAU,SAAUI,EAASC,EAAQ,CACvD,SAASC,EAAUH,EAAO,CAAE,GAAI,CAAEI,EAAKN,EAAU,KAAKE,CAAK,CAAC,CAAG,OAASK,EAAG,CAAEH,EAAOG,CAAC,CAAG,CAAE,CAC1F,SAASC,EAASN,EAAO,CAAE,GAAI,CAAEI,EAAKN,EAAU,MAASE,CAAK,CAAC,CAAG,OAASK,EAAG,CAAEH,EAAOG,CAAC,CAAG,CAAE,CAC7F,SAASD,EAAKG,EAAQ,CAAEA,EAAO,KAAON,EAAQM,EAAO,KAAK,EAAIR,EAAMQ,EAAO,KAAK,EAAE,KAAKJ,EAAWG,CAAQ,CAAG,CAC7GF,GAAMN,EAAYA,EAAU,MAAMH,EAASC,GAAc,CAAC,CAAC,GAAG,KAAK,CAAC,CACxE,CAAC,CACL,CAEO,SAASY,GAAYb,EAASc,EAAM,CACvC,IAAIC,EAAI,CAAE,MAAO,EAAG,KAAM,UAAW,CAAE,GAAIC,EAAE,CAAC,EAAI,EAAG,MAAMA,EAAE,CAAC,EAAG,OAAOA,EAAE,CAAC,CAAG,EAAG,KAAM,CAAC,EAAG,IAAK,CAAC,CAAE,EAAGC,EAAGC,EAAGF,EAAGG,EAC/G,OAAOA,EAAI,CAAE,KAAMC,EAAK,CAAC,EAAG,MAASA,EAAK,CAAC,EAAG,OAAUA,EAAK,CAAC,CAAE,EAAG,OAAO,QAAW,aAAeD,EAAE,OAAO,QAAQ,EAAI,UAAW,CAAE,OAAO,IAAM,GAAIA,EACvJ,SAASC,EAAKC,EAAG,CAAE,OAAO,SAAUC,EAAG,CAAE,OAAOb,EAAK,CAACY,EAAGC,CAAC,CAAC,CAAG,CAAG,CACjE,SAASb,EAAKc,EAAI,CACd,GAAIN,EAAG,MAAM,IAAI,UAAU,iCAAiC,EAC5D,KAAOF,GAAG,GAAI,CACV,GAAIE,EAAI,EAAGC,IAAMF,EAAIO,EAAG,CAAC,EAAI,EAAIL,EAAE,OAAYK,EAAG,CAAC,EAAIL,EAAE,SAAcF,EAAIE,EAAE,SAAcF,EAAE,KAAKE,CAAC,EAAG,GAAKA,EAAE,OAAS,EAAEF,EAAIA,EAAE,KAAKE,EAAGK,EAAG,CAAC,CAAC,GAAG,KAAM,OAAOP,EAE3J,OADIE,EAAI,EAAGF,IAAGO,EAAK,CAACA,EAAG,CAAC,EAAI,EAAGP,EAAE,KAAK,GAC9BO,EAAG,CAAC,EAAG,CACX,IAAK,GAAG,IAAK,GAAGP,EAAIO,EAAI,MACxB,IAAK,GAAG,OAAAR,EAAE,QAAgB,CAAE,MAAOQ,EAAG,CAAC,EAAG,KAAM,EAAM,EACtD,IAAK,GAAGR,EAAE,QAASG,EAAIK,EAAG,CAAC,EAAGA,EAAK,CAAC,CAAC,EAAG,SACxC,IAAK,GAAGA,EAAKR,EAAE,IAAI,IAAI,EAAGA,EAAE,KAAK,IAAI,EAAG,SACxC,QACI,GAAM
C,EAAID,EAAE,KAAM,EAAAC,EAAIA,EAAE,OAAS,GAAKA,EAAEA,EAAE,OAAS,CAAC,KAAOO,EAAG,CAAC,IAAM,GAAKA,EAAG,CAAC,IAAM,GAAI,CAAER,EAAI,EAAG,QAAU,CAC3G,GAAIQ,EAAG,CAAC,IAAM,IAAM,CAACP,GAAMO,EAAG,CAAC,EAAIP,EAAE,CAAC,GAAKO,EAAG,CAAC,EAAIP,EAAE,CAAC,GAAK,CAAED,EAAE,MAAQQ,EAAG,CAAC,EAAG,KAAO,CACrF,GAAIA,EAAG,CAAC,IAAM,GAAKR,EAAE,MAAQC,EAAE,CAAC,EAAG,CAAED,EAAE,MAAQC,EAAE,CAAC,EAAGA,EAAIO,EAAI,KAAO,CACpE,GAAIP,GAAKD,EAAE,MAAQC,EAAE,CAAC,EAAG,CAAED,EAAE,MAAQC,EAAE,CAAC,EAAGD,EAAE,IAAI,KAAKQ,CAAE,EAAG,KAAO,CAC9DP,EAAE,CAAC,GAAGD,EAAE,IAAI,IAAI,EACpBA,EAAE,KAAK,IAAI,EAAG,QACtB,CACAQ,EAAKT,EAAK,KAAKd,EAASe,CAAC,CAC7B,OAASL,EAAG,CAAEa,EAAK,CAAC,EAAGb,CAAC,EAAGQ,EAAI,CAAG,QAAE,CAAUD,EAAID,EAAI,CAAG,CACzD,GAAIO,EAAG,CAAC,EAAI,EAAG,MAAMA,EAAG,CAAC,EAAG,MAAO,CAAE,MAAOA,EAAG,CAAC,EAAIA,EAAG,CAAC,EAAI,OAAQ,KAAM,EAAK,CACnF,CACJ,CAcO,SAASC,GAASC,EAAG,CACxB,IAAIC,EAAI,OAAO,QAAW,YAAc,OAAO,SAAUC,EAAID,GAAKD,EAAEC,CAAC,EAAGE,EAAI,EAC5E,GAAID,EAAG,OAAOA,EAAE,KAAKF,CAAC,EACtB,GAAIA,GAAK,OAAOA,EAAE,QAAW,SAAU,MAAO,CAC1C,KAAM,UAAY,CACd,OAAIA,GAAKG,GAAKH,EAAE,SAAQA,EAAI,QACrB,CAAE,MAAOA,GAAKA,EAAEG,GAAG,EAAG,KAAM,CAACH,CAAE,CAC1C,CACJ,EACA,MAAM,IAAI,UAAUC,EAAI,0BAA4B,iCAAiC,CACzF,CAEO,SAASG,EAAOJ,EAAGK,EAAG,CACzB,IAAIH,EAAI,OAAO,QAAW,YAAcF,EAAE,OAAO,QAAQ,EACzD,GAAI,CAACE,EAAG,OAAOF,EACf,IAAIG,EAAID,EAAE,KAAKF,CAAC,EAAGM,EAAGC,EAAK,CAAC,EAAGC,EAC/B,GAAI,CACA,MAAQH,IAAM,QAAUA,KAAM,IAAM,EAAEC,EAAIH,EAAE,KAAK,GAAG,MAAMI,EAAG,KAAKD,EAAE,KAAK,CAC7E,OACOG,EAAO,CAAED,EAAI,CAAE,MAAOC,CAAM,CAAG,QACtC,CACI,GAAI,CACIH,GAAK,CAACA,EAAE,OAASJ,EAAIC,EAAE,SAAYD,EAAE,KAAKC,CAAC,CACnD,QACA,CAAU,GAAIK,EAAG,MAAMA,EAAE,KAAO,CACpC,CACA,OAAOD,CACX,CAkBO,SAASG,EAAcC,EAAIC,EAAMC,EAAM,CAC1C,GAAIA,GAAQ,UAAU,SAAW,EAAG,QAASC,EAAI,EAAGC,EAAIH,EAAK,OAAQI,EAAIF,EAAIC,EAAGD,KACxEE,GAAM,EAAEF,KAAKF,MACRI,IAAIA,EAAK,MAAM,UAAU,MAAM,KAAKJ,EAAM,EAAGE,CAAC,GACnDE,EAAGF,CAAC,EAAIF,EAAKE,CAAC,GAGtB,OAAOH,EAAG,OAAOK,GAAM,MAAM,UAAU,MAAM,KAAKJ,CAAI,CAAC,CAC3D,CAEO,SAASK,GAAQC,EAAG,CACvB,OAAO,gBAAgBD,IAAW,KAAK,EAAIC,EAAG,MAAQ,IAAID,GAAQC,CAAC,CACvE,CAEO,SAASC,GAAiBC,EAASC,EAAYC,EAAW,CAC7D,GAAI,CAAC,OAAO,cAAe,MAAM,IAAI,UAAU,sCAAsC,EACrF,IAAIC,EAAID,EAAU,MAAMF,EAASC,GAAc,CAAC,CAAC,EAAGP,EAAGU,EAAI,CAAC,EAC5D,OAAOV,EAAI,CAAC,EAAGW,EAAK,MAAM,EAAGA,EAAK,OAAO,EAAGA,EAAK,QAAQ,EAAGX,EAAE,OAAO,aAAa,EAAI,UAAY,CAAE,OAAO,IAAM,EAAGA,EACpH,SAASW,EAAKC,EAAG,CAAMH,EAAEG,CAAC,IAAGZ,EAAEY,CAAC,EAAI,SAAUR,EAAG,CAAE,OAAO,IAAI,QAAQ,SAAUS,EAAGC,EAAG,CAAEJ,EAAE,KAAK,CAACE,EAAGR,EAAGS,EAAGC,CAAC,CAAC,EAAI,GAAKC,EAAOH,EAAGR,CAAC,CAAG,CAAC,CAAG,EAAG,CACzI,SAASW,EAAOH,EAAGR,EAAG,CAAE,GAAI,CAAEY,EAAKP,EAAEG,CAAC,EAAER,CAAC,CAAC,CAAG,OAASa,EAAG,CAAEC,EAAOR,EAAE,CAAC,EAAE,CAAC,EAAGO,CAAC,CAAG,CAAE,CACjF,SAASD,EAAKG,EAAG,CAAEA,EAAE,iBAAiBhB,GAAU,QAAQ,QAAQgB,EAAE,MAAM,CAAC,EAAE,KAAKC,EAASC,CAAM,EAAIH,EAAOR,EAAE,CAAC,EAAE,CAAC,EAAGS,CAAC,CAAG,CACvH,SAASC,EAAQE,EAAO,CAAEP,EAAO,OAAQO,CAAK,CAAG,CACjD,SAASD,EAAOC,EAAO,CAAEP,EAAO,QAASO,CAAK,CAAG,CACjD,SAASJ,EAAOK,EAAGnB,EAAG,CAAMmB,EAAEnB,CAAC,EAAGM,EAAE,MAAM,EAAGA,EAAE,QAAQK,EAAOL,EAAE,CAAC,EAAE,CAAC,EAAGA,EAAE,CAAC,EAAE,CAAC,CAAC,CAAG,CACrF,CAQO,SAASc,GAAcC,EAAG,CAC7B,GAAI,CAAC,OAAO,cAAe,MAAM,IAAI,UAAU,sCAAsC,EACrF,IAAIC,EAAID,EAAE,OAAO,aAAa,EAAGE,EACjC,OAAOD,EAAIA,EAAE,KAAKD,CAAC,GAAKA,EAAI,OAAOG,IAAa,WAAaA,GAASH,CAAC,EAAIA,EAAE,OAAO,QAAQ,EAAE,EAAGE,EAAI,CAAC,EAAGE,EAAK,MAAM,EAAGA,EAAK,OAAO,EAAGA,EAAK,QAAQ,EAAGF,EAAE,OAAO,aAAa,EAAI,UAAY,CAAE,OAAO,IAAM,EAAGA,GAC9M,SAASE,EAAKC,EAAG,CAAEH,EAAEG,CAAC,EAAIL,EAAEK,CAAC,GAAK,SAAUC,EAAG,CAAE,OAAO,IAAI,QAAQ,SAAUC,EAASC,EAAQ,CAAEF,EAAIN,EAAEK,CAAC,EAAEC,CAAC,EAAGG,EAAOF,EAASC,EAAQF,EAAE,KAAMA,EAAE,KAAK,CAAG,CAAC,CAAG
,CAAG,CAC/J,SAASG,EAAOF,EAASC,EAAQE,EAAGJ,EAAG,CAAE,QAAQ,QAAQA,CAAC,EAAE,KAAK,SAASA,EAAG,CAAEC,EAAQ,CAAE,MAAOD,EAAG,KAAMI,CAAE,CAAC,CAAG,EAAGF,CAAM,CAAG,CAC/H,CCtMM,SAAUG,EAAWC,EAAU,CACnC,OAAO,OAAOA,GAAU,UAC1B,CCGM,SAAUC,GAAoBC,EAAgC,CAClE,IAAMC,EAAS,SAACC,EAAa,CAC3B,MAAM,KAAKA,CAAQ,EACnBA,EAAS,MAAQ,IAAI,MAAK,EAAG,KAC/B,EAEMC,EAAWH,EAAWC,CAAM,EAClC,OAAAE,EAAS,UAAY,OAAO,OAAO,MAAM,SAAS,EAClDA,EAAS,UAAU,YAAcA,EAC1BA,CACT,CCDO,IAAMC,GAA+CC,GAC1D,SAACC,EAAM,CACL,OAAA,SAA4CC,EAA0B,CACpED,EAAO,IAAI,EACX,KAAK,QAAUC,EACRA,EAAO,OAAM;EACxBA,EAAO,IAAI,SAACC,EAAKC,EAAC,CAAK,OAAGA,EAAI,EAAC,KAAKD,EAAI,SAAQ,CAAzB,CAA6B,EAAE,KAAK;GAAM,EACzD,GACJ,KAAK,KAAO,sBACZ,KAAK,OAASD,CAChB,CARA,CAQC,ECvBC,SAAUG,GAAaC,EAA6BC,EAAO,CAC/D,GAAID,EAAK,CACP,IAAME,EAAQF,EAAI,QAAQC,CAAI,EAC9B,GAAKC,GAASF,EAAI,OAAOE,EAAO,CAAC,EAErC,CCOA,IAAAC,GAAA,UAAA,CAyBE,SAAAA,EAAoBC,EAA4B,CAA5B,KAAA,gBAAAA,EAdb,KAAA,OAAS,GAER,KAAA,WAAmD,KAMnD,KAAA,YAAqD,IAMV,CAQnD,OAAAD,EAAA,UAAA,YAAA,UAAA,aACME,EAEJ,GAAI,CAAC,KAAK,OAAQ,CAChB,KAAK,OAAS,GAGN,IAAAC,EAAe,KAAI,WAC3B,GAAIA,EAEF,GADA,KAAK,WAAa,KACd,MAAM,QAAQA,CAAU,MAC1B,QAAqBC,EAAAC,GAAAF,CAAU,EAAAG,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAA5B,IAAMG,EAAMD,EAAA,MACfC,EAAO,OAAO,IAAI,yGAGpBJ,EAAW,OAAO,IAAI,EAIlB,IAAiBK,EAAqB,KAAI,gBAClD,GAAIC,EAAWD,CAAgB,EAC7B,GAAI,CACFA,EAAgB,QACTE,EAAG,CACVR,EAASQ,aAAaC,GAAsBD,EAAE,OAAS,CAACA,CAAC,EAIrD,IAAAE,EAAgB,KAAI,YAC5B,GAAIA,EAAa,CACf,KAAK,YAAc,SACnB,QAAwBC,EAAAR,GAAAO,CAAW,EAAAE,EAAAD,EAAA,KAAA,EAAA,CAAAC,EAAA,KAAAA,EAAAD,EAAA,KAAA,EAAE,CAAhC,IAAME,EAASD,EAAA,MAClB,GAAI,CACFE,GAAcD,CAAS,QAChBE,EAAK,CACZf,EAASA,GAAM,KAANA,EAAU,CAAA,EACfe,aAAeN,GACjBT,EAAMgB,EAAAA,EAAA,CAAA,EAAAC,EAAOjB,CAAM,CAAA,EAAAiB,EAAKF,EAAI,MAAM,CAAA,EAElCf,EAAO,KAAKe,CAAG,sGAMvB,GAAIf,EACF,MAAM,IAAIS,GAAoBT,CAAM,EAG1C,EAoBAF,EAAA,UAAA,IAAA,SAAIoB,EAAuB,OAGzB,GAAIA,GAAYA,IAAa,KAC3B,GAAI,KAAK,OAGPJ,GAAcI,CAAQ,MACjB,CACL,GAAIA,aAAoBpB,EAAc,CAGpC,GAAIoB,EAAS,QAAUA,EAAS,WAAW,IAAI,EAC7C,OAEFA,EAAS,WAAW,IAAI,GAEzB,KAAK,aAAcC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAAA,EAAI,CAAA,GAAI,KAAKD,CAAQ,EAG/D,EAOQpB,EAAA,UAAA,WAAR,SAAmBsB,EAAoB,CAC7B,IAAAnB,EAAe,KAAI,WAC3B,OAAOA,IAAemB,GAAW,MAAM,QAAQnB,CAAU,GAAKA,EAAW,SAASmB,CAAM,CAC1F,EASQtB,EAAA,UAAA,WAAR,SAAmBsB,EAAoB,CAC7B,IAAAnB,EAAe,KAAI,WAC3B,KAAK,WAAa,MAAM,QAAQA,CAAU,GAAKA,EAAW,KAAKmB,CAAM,EAAGnB,GAAcA,EAAa,CAACA,EAAYmB,CAAM,EAAIA,CAC5H,EAMQtB,EAAA,UAAA,cAAR,SAAsBsB,EAAoB,CAChC,IAAAnB,EAAe,KAAI,WACvBA,IAAemB,EACjB,KAAK,WAAa,KACT,MAAM,QAAQnB,CAAU,GACjCoB,GAAUpB,EAAYmB,CAAM,CAEhC,EAgBAtB,EAAA,UAAA,OAAA,SAAOoB,EAAsC,CACnC,IAAAR,EAAgB,KAAI,YAC5BA,GAAeW,GAAUX,EAAaQ,CAAQ,EAE1CA,aAAoBpB,GACtBoB,EAAS,cAAc,IAAI,CAE/B,EAlLcpB,EAAA,MAAS,UAAA,CACrB,IAAMwB,EAAQ,IAAIxB,EAClB,OAAAwB,EAAM,OAAS,GACRA,CACT,EAAE,EA+KJxB,GArLA,EAuLO,IAAMyB,GAAqBC,GAAa,MAEzC,SAAUC,GAAeC,EAAU,CACvC,OACEA,aAAiBF,IAChBE,GAAS,WAAYA,GAASC,EAAWD,EAAM,MAAM,GAAKC,EAAWD,EAAM,GAAG,GAAKC,EAAWD,EAAM,WAAW,CAEpH,CAEA,SAASE,GAAcC,EAAwC,CACzDF,EAAWE,CAAS,EACtBA,EAAS,EAETA,EAAU,YAAW,CAEzB,CChNO,IAAMC,GAAuB,CAClC,iBAAkB,KAClB,sBAAuB,KACvB,QAAS,OACT,sCAAuC,GACvC,yBAA0B,ICGrB,IAAMC,GAAmC,CAG9C,WAAA,SAAWC,EAAqBC,EAAgB,SAAEC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,EAAA,CAAA,EAAA,UAAAA,CAAA,EACxC,IAAAC,EAAaL,GAAe,SACpC,OAAIK,GAAQ,MAARA,EAAU,WACLA,EAAS,WAAU,MAAnBA,EAAQC,EAAA,CAAYL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,EAE/C,WAAU,MAAA,OAAAG,EAAA,CAACL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,CAC7C,EACA,aAAA,SAAaK,EAAM,CACT,IAAAH,EAAaL,GAAe,SACpC,QAAQK,GAAQ,KAAA,OAARA,EAAU,eAAgB,cAAcG,CAAa,CAC/D,EACA,SAAU,QCjBN,SAAUC,GAAqBC,EAAQ,CAC3CC,GAAgB,WAAW,UAAA,CACjB,IAAAC,EAAqBC,GAAM,iBACnC,GA
AID,EAEFA,EAAiBF,CAAG,MAGpB,OAAMA,CAEV,CAAC,CACH,CCtBM,SAAUI,IAAI,CAAK,CCMlB,IAAMC,GAAyB,UAAA,CAAM,OAAAC,GAAmB,IAAK,OAAW,MAAS,CAA5C,EAAsE,EAO5G,SAAUC,GAAkBC,EAAU,CAC1C,OAAOF,GAAmB,IAAK,OAAWE,CAAK,CACjD,CAOM,SAAUC,GAAoBC,EAAQ,CAC1C,OAAOJ,GAAmB,IAAKI,EAAO,MAAS,CACjD,CAQM,SAAUJ,GAAmBK,EAAuBD,EAAYF,EAAU,CAC9E,MAAO,CACL,KAAIG,EACJ,MAAKD,EACL,MAAKF,EAET,CCrCA,IAAII,GAAuD,KASrD,SAAUC,GAAaC,EAAc,CACzC,GAAIC,GAAO,sCAAuC,CAChD,IAAMC,EAAS,CAACJ,GAKhB,GAJII,IACFJ,GAAU,CAAE,YAAa,GAAO,MAAO,IAAI,GAE7CE,EAAE,EACEE,EAAQ,CACJ,IAAAC,EAAyBL,GAAvBM,EAAWD,EAAA,YAAEE,EAAKF,EAAA,MAE1B,GADAL,GAAU,KACNM,EACF,MAAMC,QAMVL,EAAE,CAEN,CAMM,SAAUM,GAAaC,EAAQ,CAC/BN,GAAO,uCAAyCH,KAClDA,GAAQ,YAAc,GACtBA,GAAQ,MAAQS,EAEpB,CCrBA,IAAAC,GAAA,SAAAC,EAAA,CAAmCC,GAAAF,EAAAC,CAAA,EA6BjC,SAAAD,EAAYG,EAA6C,CAAzD,IAAAC,EACEH,EAAA,KAAA,IAAA,GAAO,KATC,OAAAG,EAAA,UAAqB,GAUzBD,GACFC,EAAK,YAAcD,EAGfE,GAAeF,CAAW,GAC5BA,EAAY,IAAIC,CAAI,GAGtBA,EAAK,YAAcE,IAEvB,CAzBO,OAAAN,EAAA,OAAP,SAAiBO,EAAwBC,EAA2BC,EAAqB,CACvF,OAAO,IAAIC,GAAeH,EAAMC,EAAOC,CAAQ,CACjD,EAgCAT,EAAA,UAAA,KAAA,SAAKW,EAAS,CACR,KAAK,UACPC,GAA0BC,GAAiBF,CAAK,EAAG,IAAI,EAEvD,KAAK,MAAMA,CAAM,CAErB,EASAX,EAAA,UAAA,MAAA,SAAMc,EAAS,CACT,KAAK,UACPF,GAA0BG,GAAkBD,CAAG,EAAG,IAAI,GAEtD,KAAK,UAAY,GACjB,KAAK,OAAOA,CAAG,EAEnB,EAQAd,EAAA,UAAA,SAAA,UAAA,CACM,KAAK,UACPY,GAA0BI,GAAuB,IAAI,GAErD,KAAK,UAAY,GACjB,KAAK,UAAS,EAElB,EAEAhB,EAAA,UAAA,YAAA,UAAA,CACO,KAAK,SACR,KAAK,UAAY,GACjBC,EAAA,UAAM,YAAW,KAAA,IAAA,EACjB,KAAK,YAAc,KAEvB,EAEUD,EAAA,UAAA,MAAV,SAAgBW,EAAQ,CACtB,KAAK,YAAY,KAAKA,CAAK,CAC7B,EAEUX,EAAA,UAAA,OAAV,SAAiBc,EAAQ,CACvB,GAAI,CACF,KAAK,YAAY,MAAMA,CAAG,UAE1B,KAAK,YAAW,EAEpB,EAEUd,EAAA,UAAA,UAAV,UAAA,CACE,GAAI,CACF,KAAK,YAAY,SAAQ,UAEzB,KAAK,YAAW,EAEpB,EACFA,CAAA,EApHmCiB,EAAY,EA2H/C,IAAMC,GAAQ,SAAS,UAAU,KAEjC,SAASC,GAAyCC,EAAQC,EAAY,CACpE,OAAOH,GAAM,KAAKE,EAAIC,CAAO,CAC/B,CAMA,IAAAC,GAAA,UAAA,CACE,SAAAA,EAAoBC,EAAqC,CAArC,KAAA,gBAAAA,CAAwC,CAE5D,OAAAD,EAAA,UAAA,KAAA,SAAKE,EAAQ,CACH,IAAAD,EAAoB,KAAI,gBAChC,GAAIA,EAAgB,KAClB,GAAI,CACFA,EAAgB,KAAKC,CAAK,QACnBC,EAAO,CACdC,GAAqBD,CAAK,EAGhC,EAEAH,EAAA,UAAA,MAAA,SAAMK,EAAQ,CACJ,IAAAJ,EAAoB,KAAI,gBAChC,GAAIA,EAAgB,MAClB,GAAI,CACFA,EAAgB,MAAMI,CAAG,QAClBF,EAAO,CACdC,GAAqBD,CAAK,OAG5BC,GAAqBC,CAAG,CAE5B,EAEAL,EAAA,UAAA,SAAA,UAAA,CACU,IAAAC,EAAoB,KAAI,gBAChC,GAAIA,EAAgB,SAClB,GAAI,CACFA,EAAgB,SAAQ,QACjBE,EAAO,CACdC,GAAqBD,CAAK,EAGhC,EACFH,CAAA,EArCA,EAuCAM,GAAA,SAAAC,EAAA,CAAuCC,GAAAF,EAAAC,CAAA,EACrC,SAAAD,EACEG,EACAN,EACAO,EAA8B,CAHhC,IAAAC,EAKEJ,EAAA,KAAA,IAAA,GAAO,KAEHN,EACJ,GAAIW,EAAWH,CAAc,GAAK,CAACA,EAGjCR,EAAkB,CAChB,KAAOQ,GAAc,KAAdA,EAAkB,OACzB,MAAON,GAAK,KAALA,EAAS,OAChB,SAAUO,GAAQ,KAARA,EAAY,YAEnB,CAEL,IAAIG,EACAF,GAAQG,GAAO,0BAIjBD,EAAU,OAAO,OAAOJ,CAAc,EACtCI,EAAQ,YAAc,UAAA,CAAM,OAAAF,EAAK,YAAW,CAAhB,EAC5BV,EAAkB,CAChB,KAAMQ,EAAe,MAAQZ,GAAKY,EAAe,KAAMI,CAAO,EAC9D,MAAOJ,EAAe,OAASZ,GAAKY,EAAe,MAAOI,CAAO,EACjE,SAAUJ,EAAe,UAAYZ,GAAKY,EAAe,SAAUI,CAAO,IAI5EZ,EAAkBQ,EAMtB,OAAAE,EAAK,YAAc,IAAIX,GAAiBC,CAAe,GACzD,CACF,OAAAK,CAAA,EAzCuCS,EAAU,EA2CjD,SAASC,GAAqBC,EAAU,CAClCC,GAAO,sCACTC,GAAaF,CAAK,EAIlBG,GAAqBH,CAAK,CAE9B,CAQA,SAASI,GAAoBC,EAAQ,CACnC,MAAMA,CACR,CAOA,SAASC,GAA0BC,EAA2CC,EAA2B,CAC/F,IAAAC,EAA0BR,GAAM,sBACxCQ,GAAyBC,GAAgB,WAAW,UAAA,CAAM,OAAAD,EAAsBF,EAAcC,CAAU,CAA9C,CAA+C,CAC3G,CAOO,IAAMG,GAA6D,CACxE,OAAQ,GACR,KAAMC,GACN,MAAOR,GACP,SAAUQ,IC5QL,IAAMC,GAA+B,UAAA,CAAM,OAAC,OAAO,QAAW,YAAc,OAAO,YAAe,cAAvD,EAAsE,ECoClH,SAAUC,GAAYC,EAAI,CAC9B,OAAOA,CACT,CCiCM,SAAUC,IAAI,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACnB,OAAOC,GAAcF,CAAG,CAC1B,CAGM,SAAUE,GAAoBF,EAA+B,CACjE,OAAIA,EAAI,SAAW,EACVG,GAGLH,EAAI
,SAAW,EACVA,EAAI,CAAC,EAGP,SAAeI,EAAQ,CAC5B,OAAOJ,EAAI,OAAO,SAACK,EAAWC,EAAuB,CAAK,OAAAA,EAAGD,CAAI,CAAP,EAAUD,CAAY,CAClF,CACF,CC9EA,IAAAG,EAAA,UAAA,CAkBE,SAAAA,EAAYC,EAA6E,CACnFA,IACF,KAAK,WAAaA,EAEtB,CA4BA,OAAAD,EAAA,UAAA,KAAA,SAAQE,EAAyB,CAC/B,IAAMC,EAAa,IAAIH,EACvB,OAAAG,EAAW,OAAS,KACpBA,EAAW,SAAWD,EACfC,CACT,EA6IAH,EAAA,UAAA,UAAA,SACEI,EACAC,EACAC,EAA8B,CAHhC,IAAAC,EAAA,KAKQC,EAAaC,GAAaL,CAAc,EAAIA,EAAiB,IAAIM,GAAeN,EAAgBC,EAAOC,CAAQ,EAErH,OAAAK,GAAa,UAAA,CACL,IAAAC,EAAuBL,EAArBL,EAAQU,EAAA,SAAEC,EAAMD,EAAA,OACxBJ,EAAW,IACTN,EAGIA,EAAS,KAAKM,EAAYK,CAAM,EAChCA,EAIAN,EAAK,WAAWC,CAAU,EAG1BD,EAAK,cAAcC,CAAU,CAAC,CAEtC,CAAC,EAEMA,CACT,EAGUR,EAAA,UAAA,cAAV,SAAwBc,EAAmB,CACzC,GAAI,CACF,OAAO,KAAK,WAAWA,CAAI,QACpBC,EAAK,CAIZD,EAAK,MAAMC,CAAG,EAElB,EA6DAf,EAAA,UAAA,QAAA,SAAQgB,EAA0BC,EAAoC,CAAtE,IAAAV,EAAA,KACE,OAAAU,EAAcC,GAAeD,CAAW,EAEjC,IAAIA,EAAkB,SAACE,EAASC,EAAM,CAC3C,IAAMZ,EAAa,IAAIE,GAAkB,CACvC,KAAM,SAACW,EAAK,CACV,GAAI,CACFL,EAAKK,CAAK,QACHN,EAAK,CACZK,EAAOL,CAAG,EACVP,EAAW,YAAW,EAE1B,EACA,MAAOY,EACP,SAAUD,EACX,EACDZ,EAAK,UAAUC,CAAU,CAC3B,CAAC,CACH,EAGUR,EAAA,UAAA,WAAV,SAAqBQ,EAA2B,OAC9C,OAAOI,EAAA,KAAK,UAAM,MAAAA,IAAA,OAAA,OAAAA,EAAE,UAAUJ,CAAU,CAC1C,EAOAR,EAAA,UAACG,EAAiB,EAAlB,UAAA,CACE,OAAO,IACT,EA4FAH,EAAA,UAAA,KAAA,UAAA,SAAKsB,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACH,OAAOC,GAAcF,CAAU,EAAE,IAAI,CACvC,EA6BAtB,EAAA,UAAA,UAAA,SAAUiB,EAAoC,CAA9C,IAAAV,EAAA,KACE,OAAAU,EAAcC,GAAeD,CAAW,EAEjC,IAAIA,EAAY,SAACE,EAASC,EAAM,CACrC,IAAIC,EACJd,EAAK,UACH,SAACkB,EAAI,CAAK,OAACJ,EAAQI,CAAT,EACV,SAACV,EAAQ,CAAK,OAAAK,EAAOL,CAAG,CAAV,EACd,UAAA,CAAM,OAAAI,EAAQE,CAAK,CAAb,CAAc,CAExB,CAAC,CACH,EA1aOrB,EAAA,OAAkC,SAAIC,EAAwD,CACnG,OAAO,IAAID,EAAcC,CAAS,CACpC,EAyaFD,GA9cA,EAudA,SAAS0B,GAAeC,EAA+C,OACrE,OAAOC,EAAAD,GAAW,KAAXA,EAAeE,GAAO,WAAO,MAAAD,IAAA,OAAAA,EAAI,OAC1C,CAEA,SAASE,GAAcC,EAAU,CAC/B,OAAOA,GAASC,EAAWD,EAAM,IAAI,GAAKC,EAAWD,EAAM,KAAK,GAAKC,EAAWD,EAAM,QAAQ,CAChG,CAEA,SAASE,GAAgBF,EAAU,CACjC,OAAQA,GAASA,aAAiBG,IAAgBJ,GAAWC,CAAK,GAAKI,GAAeJ,CAAK,CAC7F,CCzeM,SAAUK,GAAQC,EAAW,CACjC,OAAOC,EAAWD,GAAM,KAAA,OAANA,EAAQ,IAAI,CAChC,CAMM,SAAUE,EACdC,EAAqF,CAErF,OAAO,SAACH,EAAqB,CAC3B,GAAID,GAAQC,CAAM,EAChB,OAAOA,EAAO,KAAK,SAA+BI,EAA2B,CAC3E,GAAI,CACF,OAAOD,EAAKC,EAAc,IAAI,QACvBC,EAAK,CACZ,KAAK,MAAMA,CAAG,EAElB,CAAC,EAEH,MAAM,IAAI,UAAU,wCAAwC,CAC9D,CACF,CCjBM,SAAUC,EACdC,EACAC,EACAC,EACAC,EACAC,EAAuB,CAEvB,OAAO,IAAIC,GAAmBL,EAAaC,EAAQC,EAAYC,EAASC,CAAU,CACpF,CAMA,IAAAC,GAAA,SAAAC,EAAA,CAA2CC,GAAAF,EAAAC,CAAA,EAiBzC,SAAAD,EACEL,EACAC,EACAC,EACAC,EACQC,EACAI,EAAiC,CAN3C,IAAAC,EAoBEH,EAAA,KAAA,KAAMN,CAAW,GAAC,KAfV,OAAAS,EAAA,WAAAL,EACAK,EAAA,kBAAAD,EAeRC,EAAK,MAAQR,EACT,SAAuCS,EAAQ,CAC7C,GAAI,CACFT,EAAOS,CAAK,QACLC,EAAK,CACZX,EAAY,MAAMW,CAAG,EAEzB,EACAL,EAAA,UAAM,MACVG,EAAK,OAASN,EACV,SAAuCQ,EAAQ,CAC7C,GAAI,CACFR,EAAQQ,CAAG,QACJA,EAAK,CAEZX,EAAY,MAAMW,CAAG,UAGrB,KAAK,YAAW,EAEpB,EACAL,EAAA,UAAM,OACVG,EAAK,UAAYP,EACb,UAAA,CACE,GAAI,CACFA,EAAU,QACHS,EAAK,CAEZX,EAAY,MAAMW,CAAG,UAGrB,KAAK,YAAW,EAEpB,EACAL,EAAA,UAAM,WACZ,CAEA,OAAAD,EAAA,UAAA,YAAA,UAAA,OACE,GAAI,CAAC,KAAK,mBAAqB,KAAK,kBAAiB,EAAI,CAC/C,IAAAO,EAAW,KAAI,OACvBN,EAAA,UAAM,YAAW,KAAA,IAAA,EAEjB,CAACM,KAAUC,EAAA,KAAK,cAAU,MAAAA,IAAA,QAAAA,EAAA,KAAf,IAAI,GAEnB,EACFR,CAAA,EAnF2CS,EAAU,ECd9C,IAAMC,GAAiD,CAG5D,SAAA,SAASC,EAAQ,CACf,IAAIC,EAAU,sBACVC,EAAkD,qBAC9CC,EAAaJ,GAAsB,SACvCI,IACFF,EAAUE,EAAS,sBACnBD,EAASC,EAAS,sBAEpB,IAAMC,EAASH,EAAQ,SAACI,EAAS,CAI/BH,EAAS,OACTF,EAASK,CAAS,CACpB,CAAC,EACD,OAAO,IAAIC,GAAa,UAAA,CAAM,OAAAJ,GAAM,KAAA,OAANA,EAASE,CAAM,CAAf,CAAgB,CAChD,EACA,sBAAqB,UAAA,SAACG,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA
,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACZ,IAAAL,EAAaJ,GAAsB,SAC3C,QAAQI,GAAQ,KAAA,OAARA,EAAU,wBAAyB,uBAAsB,MAAA,OAAAM,EAAA,CAAA,EAAAC,EAAIH,CAAI,CAAA,CAAA,CAC3E,EACA,qBAAoB,UAAA,SAACA,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACX,IAAAL,EAAaJ,GAAsB,SAC3C,QAAQI,GAAQ,KAAA,OAARA,EAAU,uBAAwB,sBAAqB,MAAA,OAAAM,EAAA,CAAA,EAAAC,EAAIH,CAAI,CAAA,CAAA,CACzE,EACA,SAAU,QCrBL,IAAMI,GAAuDC,GAClE,SAACC,EAAM,CACL,OAAA,UAAoC,CAClCA,EAAO,IAAI,EACX,KAAK,KAAO,0BACZ,KAAK,QAAU,qBACjB,CAJA,CAIC,ECXL,IAAAC,EAAA,SAAAC,EAAA,CAAgCC,GAAAF,EAAAC,CAAA,EAwB9B,SAAAD,GAAA,CAAA,IAAAG,EAEEF,EAAA,KAAA,IAAA,GAAO,KAzBT,OAAAE,EAAA,OAAS,GAEDA,EAAA,iBAAyC,KAGjDA,EAAA,UAA2B,CAAA,EAE3BA,EAAA,UAAY,GAEZA,EAAA,SAAW,GAEXA,EAAA,YAAmB,MAenB,CAGA,OAAAH,EAAA,UAAA,KAAA,SAAQI,EAAwB,CAC9B,IAAMC,EAAU,IAAIC,GAAiB,KAAM,IAAI,EAC/C,OAAAD,EAAQ,SAAWD,EACZC,CACT,EAGUL,EAAA,UAAA,eAAV,UAAA,CACE,GAAI,KAAK,OACP,MAAM,IAAIO,EAEd,EAEAP,EAAA,UAAA,KAAA,SAAKQ,EAAQ,CAAb,IAAAL,EAAA,KACEM,GAAa,UAAA,SAEX,GADAN,EAAK,eAAc,EACf,CAACA,EAAK,UAAW,CACdA,EAAK,mBACRA,EAAK,iBAAmB,MAAM,KAAKA,EAAK,SAAS,OAEnD,QAAuBO,EAAAC,GAAAR,EAAK,gBAAgB,EAAAS,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAzC,IAAMG,EAAQD,EAAA,MACjBC,EAAS,KAAKL,CAAK,qGAGzB,CAAC,CACH,EAEAR,EAAA,UAAA,MAAA,SAAMc,EAAQ,CAAd,IAAAX,EAAA,KACEM,GAAa,UAAA,CAEX,GADAN,EAAK,eAAc,EACf,CAACA,EAAK,UAAW,CACnBA,EAAK,SAAWA,EAAK,UAAY,GACjCA,EAAK,YAAcW,EAEnB,QADQC,EAAcZ,EAAI,UACnBY,EAAU,QACfA,EAAU,MAAK,EAAI,MAAMD,CAAG,EAGlC,CAAC,CACH,EAEAd,EAAA,UAAA,SAAA,UAAA,CAAA,IAAAG,EAAA,KACEM,GAAa,UAAA,CAEX,GADAN,EAAK,eAAc,EACf,CAACA,EAAK,UAAW,CACnBA,EAAK,UAAY,GAEjB,QADQY,EAAcZ,EAAI,UACnBY,EAAU,QACfA,EAAU,MAAK,EAAI,SAAQ,EAGjC,CAAC,CACH,EAEAf,EAAA,UAAA,YAAA,UAAA,CACE,KAAK,UAAY,KAAK,OAAS,GAC/B,KAAK,UAAY,KAAK,iBAAmB,IAC3C,EAEA,OAAA,eAAIA,EAAA,UAAA,WAAQ,KAAZ,UAAA,OACE,QAAOgB,EAAA,KAAK,aAAS,MAAAA,IAAA,OAAA,OAAAA,EAAE,QAAS,CAClC,kCAGUhB,EAAA,UAAA,cAAV,SAAwBiB,EAAyB,CAC/C,YAAK,eAAc,EACZhB,EAAA,UAAM,cAAa,KAAA,KAACgB,CAAU,CACvC,EAGUjB,EAAA,UAAA,WAAV,SAAqBiB,EAAyB,CAC5C,YAAK,eAAc,EACnB,KAAK,wBAAwBA,CAAU,EAChC,KAAK,gBAAgBA,CAAU,CACxC,EAGUjB,EAAA,UAAA,gBAAV,SAA0BiB,EAA2B,CAArD,IAAAd,EAAA,KACQa,EAAqC,KAAnCE,EAAQF,EAAA,SAAEG,EAASH,EAAA,UAAED,EAASC,EAAA,UACtC,OAAIE,GAAYC,EACPC,IAET,KAAK,iBAAmB,KACxBL,EAAU,KAAKE,CAAU,EAClB,IAAII,GAAa,UAAA,CACtBlB,EAAK,iBAAmB,KACxBmB,GAAUP,EAAWE,CAAU,CACjC,CAAC,EACH,EAGUjB,EAAA,UAAA,wBAAV,SAAkCiB,EAA2B,CACrD,IAAAD,EAAuC,KAArCE,EAAQF,EAAA,SAAEO,EAAWP,EAAA,YAAEG,EAASH,EAAA,UACpCE,EACFD,EAAW,MAAMM,CAAW,EACnBJ,GACTF,EAAW,SAAQ,CAEvB,EAQAjB,EAAA,UAAA,aAAA,UAAA,CACE,IAAMwB,EAAkB,IAAIC,EAC5B,OAAAD,EAAW,OAAS,KACbA,CACT,EAxHOxB,EAAA,OAAkC,SAAI0B,EAA0BC,EAAqB,CAC1F,OAAO,IAAIrB,GAAoBoB,EAAaC,CAAM,CACpD,EAuHF3B,GA7IgCyB,CAAU,EAkJ1C,IAAAG,GAAA,SAAAC,EAAA,CAAyCC,GAAAF,EAAAC,CAAA,EACvC,SAAAD,EAESG,EACPC,EAAsB,CAHxB,IAAAC,EAKEJ,EAAA,KAAA,IAAA,GAAO,KAHA,OAAAI,EAAA,YAAAF,EAIPE,EAAK,OAASD,GAChB,CAEA,OAAAJ,EAAA,UAAA,KAAA,SAAKM,EAAQ,UACXC,GAAAC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAA,OAAAA,EAAE,QAAI,MAAAD,IAAA,QAAAA,EAAA,KAAAC,EAAGF,CAAK,CAChC,EAEAN,EAAA,UAAA,MAAA,SAAMS,EAAQ,UACZF,GAAAC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAA,OAAAA,EAAE,SAAK,MAAAD,IAAA,QAAAA,EAAA,KAAAC,EAAGC,CAAG,CAC/B,EAEAT,EAAA,UAAA,SAAA,UAAA,UACEO,GAAAC,EAAA,KAAK,eAAW,MAAAA,IAAA,OAAA,OAAAA,EAAE,YAAQ,MAAAD,IAAA,QAAAA,EAAA,KAAAC,CAAA,CAC5B,EAGUR,EAAA,UAAA,WAAV,SAAqBU,EAAyB,SAC5C,OAAOH,GAAAC,EAAA,KAAK,UAAM,MAAAA,IAAA,OAAA,OAAAA,EAAE,UAAUE,CAAU,KAAC,MAAAH,IAAA,OAAAA,EAAII,EAC/C,EACFX,CAAA,EA1ByCY,CAAO,ECxJhD,IAAAC,GAAA,SAAAC,EAAA,CAAwCC,GAAAF,EAAAC,CAAA,EACtC,SAAAD,EAAoBG,EAAS,CAA7B,IAAAC,EACEH,EAAA,KAAA,IAAA,GAAO,KADW,OAAAG,EAAA,OAAAD,GAEpB,C
AEA,cAAA,eAAIH,EAAA,UAAA,QAAK,KAAT,UAAA,CACE,OAAO,KAAK,SAAQ,CACtB,kCAGUA,EAAA,UAAA,WAAV,SAAqBK,EAAyB,CAC5C,IAAMC,EAAeL,EAAA,UAAM,WAAU,KAAA,KAACI,CAAU,EAChD,OAACC,EAAa,QAAUD,EAAW,KAAK,KAAK,MAAM,EAC5CC,CACT,EAEAN,EAAA,UAAA,SAAA,UAAA,CACQ,IAAAO,EAAoC,KAAlCC,EAAQD,EAAA,SAAEE,EAAWF,EAAA,YAAEJ,EAAMI,EAAA,OACrC,GAAIC,EACF,MAAMC,EAER,YAAK,eAAc,EACZN,CACT,EAEAH,EAAA,UAAA,KAAA,SAAKU,EAAQ,CACXT,EAAA,UAAM,KAAI,KAAA,KAAE,KAAK,OAASS,CAAM,CAClC,EACFV,CAAA,EA5BwCW,CAAO,ECJxC,IAAMC,GAA+C,CAC1D,IAAG,UAAA,CAGD,OAAQA,GAAsB,UAAY,MAAM,IAAG,CACrD,EACA,SAAU,QCwBZ,IAAAC,GAAA,SAAAC,EAAA,CAAsCC,GAAAF,EAAAC,CAAA,EAUpC,SAAAD,EACUG,EACAC,EACAC,EAA6D,CAF7DF,IAAA,SAAAA,EAAA,KACAC,IAAA,SAAAA,EAAA,KACAC,IAAA,SAAAA,EAAAC,IAHV,IAAAC,EAKEN,EAAA,KAAA,IAAA,GAAO,KAJC,OAAAM,EAAA,YAAAJ,EACAI,EAAA,YAAAH,EACAG,EAAA,mBAAAF,EAZFE,EAAA,QAA0B,CAAA,EAC1BA,EAAA,oBAAsB,GAc5BA,EAAK,oBAAsBH,IAAgB,IAC3CG,EAAK,YAAc,KAAK,IAAI,EAAGJ,CAAW,EAC1CI,EAAK,YAAc,KAAK,IAAI,EAAGH,CAAW,GAC5C,CAEA,OAAAJ,EAAA,UAAA,KAAA,SAAKQ,EAAQ,CACL,IAAAC,EAA+E,KAA7EC,EAASD,EAAA,UAAEE,EAAOF,EAAA,QAAEG,EAAmBH,EAAA,oBAAEJ,EAAkBI,EAAA,mBAAEL,EAAWK,EAAA,YAC3EC,IACHC,EAAQ,KAAKH,CAAK,EAClB,CAACI,GAAuBD,EAAQ,KAAKN,EAAmB,IAAG,EAAKD,CAAW,GAE7E,KAAK,YAAW,EAChBH,EAAA,UAAM,KAAI,KAAA,KAACO,CAAK,CAClB,EAGUR,EAAA,UAAA,WAAV,SAAqBa,EAAyB,CAC5C,KAAK,eAAc,EACnB,KAAK,YAAW,EAQhB,QANMC,EAAe,KAAK,gBAAgBD,CAAU,EAE9CJ,EAAmC,KAAjCG,EAAmBH,EAAA,oBAAEE,EAAOF,EAAA,QAG9BM,EAAOJ,EAAQ,MAAK,EACjBK,EAAI,EAAGA,EAAID,EAAK,QAAU,CAACF,EAAW,OAAQG,GAAKJ,EAAsB,EAAI,EACpFC,EAAW,KAAKE,EAAKC,CAAC,CAAM,EAG9B,YAAK,wBAAwBH,CAAU,EAEhCC,CACT,EAEQd,EAAA,UAAA,YAAR,UAAA,CACQ,IAAAS,EAAoE,KAAlEN,EAAWM,EAAA,YAAEJ,EAAkBI,EAAA,mBAAEE,EAAOF,EAAA,QAAEG,EAAmBH,EAAA,oBAK/DQ,GAAsBL,EAAsB,EAAI,GAAKT,EAK3D,GAJAA,EAAc,KAAYc,EAAqBN,EAAQ,QAAUA,EAAQ,OAAO,EAAGA,EAAQ,OAASM,CAAkB,EAIlH,CAACL,EAAqB,CAKxB,QAJMM,EAAMb,EAAmB,IAAG,EAC9Bc,EAAO,EAGFH,EAAI,EAAGA,EAAIL,EAAQ,QAAWA,EAAQK,CAAC,GAAgBE,EAAKF,GAAK,EACxEG,EAAOH,EAETG,GAAQR,EAAQ,OAAO,EAAGQ,EAAO,CAAC,EAEtC,EACFnB,CAAA,EAzEsCoB,CAAO,EClB7C,IAAAC,GAAA,SAAAC,EAAA,CAA+BC,GAAAF,EAAAC,CAAA,EAC7B,SAAAD,EAAYG,EAAsBC,EAAmD,QACnFH,EAAA,KAAA,IAAA,GAAO,IACT,CAWO,OAAAD,EAAA,UAAA,SAAP,SAAgBK,EAAWC,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,GAClB,IACT,EACFN,CAAA,EAjB+BO,EAAY,ECHpC,IAAMC,GAAqC,CAGhD,YAAA,SAAYC,EAAqBC,EAAgB,SAAEC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,EAAA,CAAA,EAAA,UAAAA,CAAA,EACzC,IAAAC,EAAaL,GAAgB,SACrC,OAAIK,GAAQ,MAARA,EAAU,YACLA,EAAS,YAAW,MAApBA,EAAQC,EAAA,CAAaL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,EAEhD,YAAW,MAAA,OAAAG,EAAA,CAACL,EAASC,CAAO,EAAAK,EAAKJ,CAAI,CAAA,CAAA,CAC9C,EACA,cAAA,SAAcK,EAAM,CACV,IAAAH,EAAaL,GAAgB,SACrC,QAAQK,GAAQ,KAAA,OAARA,EAAU,gBAAiB,eAAeG,CAAa,CACjE,EACA,SAAU,QCrBZ,IAAAC,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAOlC,SAAAD,EAAsBG,EAAqCC,EAAmD,CAA9G,IAAAC,EACEJ,EAAA,KAAA,KAAME,EAAWC,CAAI,GAAC,KADF,OAAAC,EAAA,UAAAF,EAAqCE,EAAA,KAAAD,EAFjDC,EAAA,QAAmB,IAI7B,CAEO,OAAAL,EAAA,UAAA,SAAP,SAAgBM,EAAWC,EAAiB,OAC1C,GADyBA,IAAA,SAAAA,EAAA,GACrB,KAAK,OACP,OAAO,KAIT,KAAK,MAAQD,EAEb,IAAME,EAAK,KAAK,GACVL,EAAY,KAAK,UAuBvB,OAAIK,GAAM,OACR,KAAK,GAAK,KAAK,eAAeL,EAAWK,EAAID,CAAK,GAKpD,KAAK,QAAU,GAEf,KAAK,MAAQA,EAEb,KAAK,IAAKE,EAAA,KAAK,MAAE,MAAAA,IAAA,OAAAA,EAAI,KAAK,eAAeN,EAAW,KAAK,GAAII,CAAK,EAE3D,IACT,EAEUP,EAAA,UAAA,eAAV,SAAyBG,EAA2BO,EAAmBH,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,GAC9DI,GAAiB,YAAYR,EAAU,MAAM,KAAKA,EAAW,IAAI,EAAGI,CAAK,CAClF,EAEUP,EAAA,UAAA,eAAV,SAAyBY,EAA4BJ,EAAkBD,EAAwB,CAE7F,GAFqEA,IAAA,SAAAA,EAAA,GAEjEA,GAAS,MAAQ,KAAK,QAAUA,GAAS,KAAK,UAAY,GAC5D,OAAOC,EAILA,GAAM,MACRG,GAAiB,cAAcH,CAAE,CAIrC,EAMOR,EAAA,UAAA,QAAP,SAAeM,EAAUC,EAAa,CACpC,GAAI,KAAK,OACP,OAAO,IAAI,MA
AM,8BAA8B,EAGjD,KAAK,QAAU,GACf,IAAMM,EAAQ,KAAK,SAASP,EAAOC,CAAK,EACxC,GAAIM,EACF,OAAOA,EACE,KAAK,UAAY,IAAS,KAAK,IAAM,OAc9C,KAAK,GAAK,KAAK,eAAe,KAAK,UAAW,KAAK,GAAI,IAAI,EAE/D,EAEUb,EAAA,UAAA,SAAV,SAAmBM,EAAUQ,EAAc,CACzC,IAAIC,EAAmB,GACnBC,EACJ,GAAI,CACF,KAAK,KAAKV,CAAK,QACRW,EAAG,CACVF,EAAU,GAIVC,EAAaC,GAAQ,IAAI,MAAM,oCAAoC,EAErE,GAAIF,EACF,YAAK,YAAW,EACTC,CAEX,EAEAhB,EAAA,UAAA,YAAA,UAAA,CACE,GAAI,CAAC,KAAK,OAAQ,CACV,IAAAS,EAAoB,KAAlBD,EAAEC,EAAA,GAAEN,EAASM,EAAA,UACbS,EAAYf,EAAS,QAE7B,KAAK,KAAO,KAAK,MAAQ,KAAK,UAAY,KAC1C,KAAK,QAAU,GAEfgB,GAAUD,EAAS,IAAI,EACnBV,GAAM,OACR,KAAK,GAAK,KAAK,eAAeL,EAAWK,EAAI,IAAI,GAGnD,KAAK,MAAQ,KACbP,EAAA,UAAM,YAAW,KAAA,IAAA,EAErB,EACFD,CAAA,EA9IoCoB,EAAM,ECgB1C,IAAAC,GAAA,UAAA,CAGE,SAAAA,EAAoBC,EAAoCC,EAAiC,CAAjCA,IAAA,SAAAA,EAAoBF,EAAU,KAAlE,KAAA,oBAAAC,EAClB,KAAK,IAAMC,CACb,CA6BO,OAAAF,EAAA,UAAA,SAAP,SAAmBG,EAAqDC,EAAmBC,EAAS,CAA5B,OAAAD,IAAA,SAAAA,EAAA,GAC/D,IAAI,KAAK,oBAAuB,KAAMD,CAAI,EAAE,SAASE,EAAOD,CAAK,CAC1E,EAnCcJ,EAAA,IAAoBM,GAAsB,IAoC1DN,GArCA,ECnBA,IAAAO,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAkBlC,SAAAD,EAAYG,EAAgCC,EAAiC,CAAjCA,IAAA,SAAAA,EAAoBC,GAAU,KAA1E,IAAAC,EACEL,EAAA,KAAA,KAAME,EAAiBC,CAAG,GAAC,KAlBtB,OAAAE,EAAA,QAAmC,CAAA,EAOnCA,EAAA,QAAmB,IAY1B,CAEO,OAAAN,EAAA,UAAA,MAAP,SAAaO,EAAwB,CAC3B,IAAAC,EAAY,KAAI,QAExB,GAAI,KAAK,QAAS,CAChBA,EAAQ,KAAKD,CAAM,EACnB,OAGF,IAAIE,EACJ,KAAK,QAAU,GAEf,EACE,IAAKA,EAAQF,EAAO,QAAQA,EAAO,MAAOA,EAAO,KAAK,EACpD,YAEMA,EAASC,EAAQ,MAAK,GAIhC,GAFA,KAAK,QAAU,GAEXC,EAAO,CACT,KAAQF,EAASC,EAAQ,MAAK,GAC5BD,EAAO,YAAW,EAEpB,MAAME,EAEV,EACFT,CAAA,EAhDoCK,EAAS,EC6CtC,IAAMK,GAAiB,IAAIC,GAAeC,EAAW,EAK/CC,GAAQH,GCjDrB,IAAAI,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAClC,SAAAD,EAAsBG,EAAqCC,EAAmD,CAA9G,IAAAC,EACEJ,EAAA,KAAA,KAAME,EAAWC,CAAI,GAAC,KADF,OAAAC,EAAA,UAAAF,EAAqCE,EAAA,KAAAD,GAE3D,CAEO,OAAAJ,EAAA,UAAA,SAAP,SAAgBM,EAAWC,EAAiB,CAC1C,OADyBA,IAAA,SAAAA,EAAA,GACrBA,EAAQ,EACHN,EAAA,UAAM,SAAQ,KAAA,KAACK,EAAOC,CAAK,GAEpC,KAAK,MAAQA,EACb,KAAK,MAAQD,EACb,KAAK,UAAU,MAAM,IAAI,EAClB,KACT,EAEON,EAAA,UAAA,QAAP,SAAeM,EAAUC,EAAa,CACpC,OAAOA,EAAQ,GAAK,KAAK,OAASN,EAAA,UAAM,QAAO,KAAA,KAACK,EAAOC,CAAK,EAAI,KAAK,SAASD,EAAOC,CAAK,CAC5F,EAEUP,EAAA,UAAA,eAAV,SAAyBG,EAA2BK,EAAkBD,EAAiB,CAKrF,OALoEA,IAAA,SAAAA,EAAA,GAK/DA,GAAS,MAAQA,EAAQ,GAAOA,GAAS,MAAQ,KAAK,MAAQ,EAC1DN,EAAA,UAAM,eAAc,KAAA,KAACE,EAAWK,EAAID,CAAK,GAIlDJ,EAAU,MAAM,IAAI,EAMb,EACT,EACFH,CAAA,EArCoCS,EAAW,ECJ/C,IAAAC,GAAA,SAAAC,EAAA,CAAoCC,GAAAF,EAAAC,CAAA,EAApC,SAAAD,GAAA,+CACA,CAAA,OAAAA,CAAA,EADoCG,EAAc,ECgE3C,IAAMC,GAAiB,IAAIC,GAAeC,EAAW,EC5D5D,IAAAC,GAAA,SAAAC,EAAA,CAA6CC,GAAAF,EAAAC,CAAA,EAC3C,SAAAD,EAAsBG,EAA8CC,EAAmD,CAAvH,IAAAC,EACEJ,EAAA,KAAA,KAAME,EAAWC,CAAI,GAAC,KADF,OAAAC,EAAA,UAAAF,EAA8CE,EAAA,KAAAD,GAEpE,CAEU,OAAAJ,EAAA,UAAA,eAAV,SAAyBG,EAAoCG,EAAkBC,EAAiB,CAE9F,OAF6EA,IAAA,SAAAA,EAAA,GAEzEA,IAAU,MAAQA,EAAQ,EACrBN,EAAA,UAAM,eAAc,KAAA,KAACE,EAAWG,EAAIC,CAAK,GAGlDJ,EAAU,QAAQ,KAAK,IAAI,EAIpBA,EAAU,aAAeA,EAAU,WAAaK,GAAuB,sBAAsB,UAAA,CAAM,OAAAL,EAAU,MAAM,MAAS,CAAzB,CAA0B,GACtI,EAEUH,EAAA,UAAA,eAAV,SAAyBG,EAAoCG,EAAkBC,EAAiB,OAI9F,GAJ6EA,IAAA,SAAAA,EAAA,GAIzEA,GAAS,KAAOA,EAAQ,EAAI,KAAK,MAAQ,EAC3C,OAAON,EAAA,UAAM,eAAc,KAAA,KAACE,EAAWG,EAAIC,CAAK,EAK1C,IAAAE,EAAYN,EAAS,QACzBG,GAAM,QAAQI,EAAAD,EAAQA,EAAQ,OAAS,CAAC,KAAC,MAAAC,IAAA,OAAA,OAAAA,EAAE,MAAOJ,IACpDE,GAAuB,qBAAqBF,CAAY,EACxDH,EAAU,WAAa,OAI3B,EACFH,CAAA,EApC6CW,EAAW,ECHxD,IAAAC,GAAA,SAAAC,EAAA,CAA6CC,GAAAF,EAAAC,CAAA,EAA7C,SAAAD,GAAA,+CAkCA,CAjCS,OAAAA,EAAA,UAAA,MAAP,SAAaG,EAAyB,CACpC,KAAK,QAAU,GAUf,IAAMC,EAAU,KAAK,WACrB,KAAK,WAAa,OAEV,IAAAC,EAAY,KAAI,QACpBC,EACJH,EAASA,GAAUE,EAAQ,MAAK,EAEhC,EACE,IAAKC,EAAQH,EAAO,QAAQA,EAAO
,MAAOA,EAAO,KAAK,EACpD,aAEMA,EAASE,EAAQ,CAAC,IAAMF,EAAO,KAAOC,GAAWC,EAAQ,MAAK,GAIxE,GAFA,KAAK,QAAU,GAEXC,EAAO,CACT,MAAQH,EAASE,EAAQ,CAAC,IAAMF,EAAO,KAAOC,GAAWC,EAAQ,MAAK,GACpEF,EAAO,YAAW,EAEpB,MAAMG,EAEV,EACFN,CAAA,EAlC6CO,EAAc,ECgCpD,IAAMC,GAA0B,IAAIC,GAAwBC,EAAoB,EC8BhF,IAAMC,EAAQ,IAAIC,EAAkB,SAACC,EAAU,CAAK,OAAAA,EAAW,SAAQ,CAAnB,CAAqB,EC9D1E,SAAUC,GAAYC,EAAU,CACpC,OAAOA,GAASC,EAAWD,EAAM,QAAQ,CAC3C,CCDA,SAASE,GAAQC,EAAQ,CACvB,OAAOA,EAAIA,EAAI,OAAS,CAAC,CAC3B,CAEM,SAAUC,GAAkBC,EAAW,CAC3C,OAAOC,EAAWJ,GAAKG,CAAI,CAAC,EAAIA,EAAK,IAAG,EAAK,MAC/C,CAEM,SAAUE,GAAaF,EAAW,CACtC,OAAOG,GAAYN,GAAKG,CAAI,CAAC,EAAIA,EAAK,IAAG,EAAK,MAChD,CAEM,SAAUI,GAAUJ,EAAaK,EAAoB,CACzD,OAAO,OAAOR,GAAKG,CAAI,GAAM,SAAWA,EAAK,IAAG,EAAMK,CACxD,CClBO,IAAMC,GAAe,SAAIC,EAAM,CAAwB,OAAAA,GAAK,OAAOA,EAAE,QAAW,UAAY,OAAOA,GAAM,UAAlD,ECMxD,SAAUC,GAAUC,EAAU,CAClC,OAAOC,EAAWD,GAAK,KAAA,OAALA,EAAO,IAAI,CAC/B,CCHM,SAAUE,GAAoBC,EAAU,CAC5C,OAAOC,EAAWD,EAAME,EAAiB,CAAC,CAC5C,CCLM,SAAUC,GAAmBC,EAAQ,CACzC,OAAO,OAAO,eAAiBC,EAAWD,GAAG,KAAA,OAAHA,EAAM,OAAO,aAAa,CAAC,CACvE,CCAM,SAAUE,GAAiCC,EAAU,CAEzD,OAAO,IAAI,UACT,iBACEA,IAAU,MAAQ,OAAOA,GAAU,SAAW,oBAAsB,IAAIA,EAAK,KAAG,0HACwC,CAE9H,CCXM,SAAUC,IAAiB,CAC/B,OAAI,OAAO,QAAW,YAAc,CAAC,OAAO,SACnC,aAGF,OAAO,QAChB,CAEO,IAAMC,GAAWD,GAAiB,ECJnC,SAAUE,GAAWC,EAAU,CACnC,OAAOC,EAAWD,GAAK,KAAA,OAALA,EAAQE,EAAe,CAAC,CAC5C,CCHM,SAAiBC,GAAsCC,EAAqC,mGAC1FC,EAASD,EAAe,UAAS,2DAGX,MAAA,CAAA,EAAAE,GAAMD,EAAO,KAAI,CAAE,CAAA,gBAArCE,EAAkBC,EAAA,KAAA,EAAhBC,EAAKF,EAAA,MAAEG,EAAIH,EAAA,KACfG,iBAAA,CAAA,EAAA,CAAA,SACF,MAAA,CAAA,EAAAF,EAAA,KAAA,CAAA,qBAEIC,CAAM,CAAA,SAAZ,MAAA,CAAA,EAAAD,EAAA,KAAA,CAAA,SAAA,OAAAA,EAAA,KAAA,mCAGF,OAAAH,EAAO,YAAW,6BAIhB,SAAUM,GAAwBC,EAAQ,CAG9C,OAAOC,EAAWD,GAAG,KAAA,OAAHA,EAAK,SAAS,CAClC,CCPM,SAAUE,EAAaC,EAAyB,CACpD,GAAIA,aAAiBC,EACnB,OAAOD,EAET,GAAIA,GAAS,KAAM,CACjB,GAAIE,GAAoBF,CAAK,EAC3B,OAAOG,GAAsBH,CAAK,EAEpC,GAAII,GAAYJ,CAAK,EACnB,OAAOK,GAAcL,CAAK,EAE5B,GAAIM,GAAUN,CAAK,EACjB,OAAOO,GAAYP,CAAK,EAE1B,GAAIQ,GAAgBR,CAAK,EACvB,OAAOS,GAAkBT,CAAK,EAEhC,GAAIU,GAAWV,CAAK,EAClB,OAAOW,GAAaX,CAAK,EAE3B,GAAIY,GAAqBZ,CAAK,EAC5B,OAAOa,GAAuBb,CAAK,EAIvC,MAAMc,GAAiCd,CAAK,CAC9C,CAMM,SAAUG,GAAyBY,EAAQ,CAC/C,OAAO,IAAId,EAAW,SAACe,EAAyB,CAC9C,IAAMC,EAAMF,EAAIG,EAAiB,EAAC,EAClC,GAAIC,EAAWF,EAAI,SAAS,EAC1B,OAAOA,EAAI,UAAUD,CAAU,EAGjC,MAAM,IAAI,UAAU,gEAAgE,CACtF,CAAC,CACH,CASM,SAAUX,GAAiBe,EAAmB,CAClD,OAAO,IAAInB,EAAW,SAACe,EAAyB,CAU9C,QAASK,EAAI,EAAGA,EAAID,EAAM,QAAU,CAACJ,EAAW,OAAQK,IACtDL,EAAW,KAAKI,EAAMC,CAAC,CAAC,EAE1BL,EAAW,SAAQ,CACrB,CAAC,CACH,CAEM,SAAUT,GAAee,EAAuB,CACpD,OAAO,IAAIrB,EAAW,SAACe,EAAyB,CAC9CM,EACG,KACC,SAACC,EAAK,CACCP,EAAW,SACdA,EAAW,KAAKO,CAAK,EACrBP,EAAW,SAAQ,EAEvB,EACA,SAACQ,EAAQ,CAAK,OAAAR,EAAW,MAAMQ,CAAG,CAApB,CAAqB,EAEpC,KAAK,KAAMC,EAAoB,CACpC,CAAC,CACH,CAEM,SAAUd,GAAgBe,EAAqB,CACnD,OAAO,IAAIzB,EAAW,SAACe,EAAyB,aAC9C,QAAoBW,EAAAC,GAAAF,CAAQ,EAAAG,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAzB,IAAMJ,EAAKM,EAAA,MAEd,GADAb,EAAW,KAAKO,CAAK,EACjBP,EAAW,OACb,yGAGJA,EAAW,SAAQ,CACrB,CAAC,CACH,CAEM,SAAUP,GAAqBqB,EAA+B,CAClE,OAAO,IAAI7B,EAAW,SAACe,EAAyB,CAC9Ce,GAAQD,EAAed,CAAU,EAAE,MAAM,SAACQ,EAAG,CAAK,OAAAR,EAAW,MAAMQ,CAAG,CAApB,CAAqB,CACzE,CAAC,CACH,CAEM,SAAUX,GAA0BmB,EAAqC,CAC7E,OAAOvB,GAAkBwB,GAAmCD,CAAc,CAAC,CAC7E,CAEA,SAAeD,GAAWD,EAAiCd,EAAyB,uIACxDkB,EAAAC,GAAAL,CAAa,gFAIrC,GAJeP,EAAKa,EAAA,MACpBpB,EAAW,KAAKO,CAAK,EAGjBP,EAAW,OACb,MAAA,CAAA,CAAA,6RAGJ,OAAAA,EAAW,SAAQ,WChHf,SAAUqB,GACdC,EACAC,EACAC,EACAC,EACAC,EAAc,CADdD,IAAA,SAAAA,EAAA,GACAC,IAAA,SAAAA,EAAA,IAEA,IAAMC,EAAuBJ,EAAU,SAAS,UAAA,CAC9CC,EAAI,EACAE,EACFJ,EAAmB,IAAI,KAAK,SAAS,KAAMG,CAAK,CAAC,EAEjD,KAAK,YAA
W,CAEpB,EAAGA,CAAK,EAIR,GAFAH,EAAmB,IAAIK,CAAoB,EAEvC,CAACD,EAKH,OAAOC,CAEX,CCeM,SAAUC,GAAaC,EAA0BC,EAAS,CAAT,OAAAA,IAAA,SAAAA,EAAA,GAC9CC,EAAQ,SAACC,EAAQC,EAAU,CAChCD,EAAO,UACLE,EACED,EACA,SAACE,EAAK,CAAK,OAAAC,GAAgBH,EAAYJ,EAAW,UAAA,CAAM,OAAAI,EAAW,KAAKE,CAAK,CAArB,EAAwBL,CAAK,CAA1E,EACX,UAAA,CAAM,OAAAM,GAAgBH,EAAYJ,EAAW,UAAA,CAAM,OAAAI,EAAW,SAAQ,CAAnB,EAAuBH,CAAK,CAAzE,EACN,SAACO,EAAG,CAAK,OAAAD,GAAgBH,EAAYJ,EAAW,UAAA,CAAM,OAAAI,EAAW,MAAMI,CAAG,CAApB,EAAuBP,CAAK,CAAzE,CAA0E,CACpF,CAEL,CAAC,CACH,CCPM,SAAUQ,GAAeC,EAA0BC,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,GAChDC,EAAQ,SAACC,EAAQC,EAAU,CAChCA,EAAW,IAAIJ,EAAU,SAAS,UAAA,CAAM,OAAAG,EAAO,UAAUC,CAAU,CAA3B,EAA8BH,CAAK,CAAC,CAC9E,CAAC,CACH,CC7DM,SAAUI,GAAsBC,EAA6BC,EAAwB,CACzF,OAAOC,EAAUF,CAAK,EAAE,KAAKG,GAAYF,CAAS,EAAGG,GAAUH,CAAS,CAAC,CAC3E,CCFM,SAAUI,GAAmBC,EAAuBC,EAAwB,CAChF,OAAOC,EAAUF,CAAK,EAAE,KAAKG,GAAYF,CAAS,EAAGG,GAAUH,CAAS,CAAC,CAC3E,CCJM,SAAUI,GAAiBC,EAAqBC,EAAwB,CAC5E,OAAO,IAAIC,EAAc,SAACC,EAAU,CAElC,IAAIC,EAAI,EAER,OAAOH,EAAU,SAAS,UAAA,CACpBG,IAAMJ,EAAM,OAGdG,EAAW,SAAQ,GAInBA,EAAW,KAAKH,EAAMI,GAAG,CAAC,EAIrBD,EAAW,QACd,KAAK,SAAQ,EAGnB,CAAC,CACH,CAAC,CACH,CCfM,SAAUE,GAAoBC,EAAoBC,EAAwB,CAC9E,OAAO,IAAIC,EAAc,SAACC,EAAU,CAClC,IAAIC,EAKJ,OAAAC,GAAgBF,EAAYF,EAAW,UAAA,CAErCG,EAAYJ,EAAcI,EAAe,EAAC,EAE1CC,GACEF,EACAF,EACA,UAAA,OACMK,EACAC,EACJ,GAAI,CAEDC,EAAkBJ,EAAS,KAAI,EAA7BE,EAAKE,EAAA,MAAED,EAAIC,EAAA,WACPC,EAAK,CAEZN,EAAW,MAAMM,CAAG,EACpB,OAGEF,EAKFJ,EAAW,SAAQ,EAGnBA,EAAW,KAAKG,CAAK,CAEzB,EACA,EACA,EAAI,CAER,CAAC,EAMM,UAAA,CAAM,OAAAI,EAAWN,GAAQ,KAAA,OAARA,EAAU,MAAM,GAAKA,EAAS,OAAM,CAA/C,CACf,CAAC,CACH,CCvDM,SAAUO,GAAyBC,EAAyBC,EAAwB,CACxF,GAAI,CAACD,EACH,MAAM,IAAI,MAAM,yBAAyB,EAE3C,OAAO,IAAIE,EAAc,SAACC,EAAU,CAClCC,GAAgBD,EAAYF,EAAW,UAAA,CACrC,IAAMI,EAAWL,EAAM,OAAO,aAAa,EAAC,EAC5CI,GACED,EACAF,EACA,UAAA,CACEI,EAAS,KAAI,EAAG,KAAK,SAACC,EAAM,CACtBA,EAAO,KAGTH,EAAW,SAAQ,EAEnBA,EAAW,KAAKG,EAAO,KAAK,CAEhC,CAAC,CACH,EACA,EACA,EAAI,CAER,CAAC,CACH,CAAC,CACH,CCzBM,SAAUC,GAA8BC,EAA8BC,EAAwB,CAClG,OAAOC,GAAsBC,GAAmCH,CAAK,EAAGC,CAAS,CACnF,CCoBM,SAAUG,GAAaC,EAA2BC,EAAwB,CAC9E,GAAID,GAAS,KAAM,CACjB,GAAIE,GAAoBF,CAAK,EAC3B,OAAOG,GAAmBH,EAAOC,CAAS,EAE5C,GAAIG,GAAYJ,CAAK,EACnB,OAAOK,GAAcL,EAAOC,CAAS,EAEvC,GAAIK,GAAUN,CAAK,EACjB,OAAOO,GAAgBP,EAAOC,CAAS,EAEzC,GAAIO,GAAgBR,CAAK,EACvB,OAAOS,GAAsBT,EAAOC,CAAS,EAE/C,GAAIS,GAAWV,CAAK,EAClB,OAAOW,GAAiBX,EAAOC,CAAS,EAE1C,GAAIW,GAAqBZ,CAAK,EAC5B,OAAOa,GAA2Bb,EAAOC,CAAS,EAGtD,MAAMa,GAAiCd,CAAK,CAC9C,CCoDM,SAAUe,GAAQC,EAA2BC,EAAyB,CAC1E,OAAOA,EAAYC,GAAUF,EAAOC,CAAS,EAAIE,EAAUH,CAAK,CAClE,CCxBM,SAAUI,GAAE,SAAIC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACpB,IAAMC,EAAYC,GAAaH,CAAI,EACnC,OAAOI,GAAKJ,EAAaE,CAAS,CACpC,CCsCM,SAAUG,GAAWC,EAA0BC,EAAyB,CAC5E,IAAMC,EAAeC,EAAWH,CAAmB,EAAIA,EAAsB,UAAA,CAAM,OAAAA,CAAA,EAC7EI,EAAO,SAACC,EAA6B,CAAK,OAAAA,EAAW,MAAMH,EAAY,CAAE,CAA/B,EAChD,OAAO,IAAII,EAAWL,EAAY,SAACI,EAAU,CAAK,OAAAJ,EAAU,SAASG,EAAa,EAAGC,CAAU,CAA7C,EAAiDD,CAAI,CACzG,CCpGO,IAAMG,GAA6BC,GAAiB,SAACC,EAAM,CAAK,OAAA,UAAuB,CAC5FA,EAAO,IAAI,EACX,KAAK,KAAO,aACZ,KAAK,QAAU,yBACjB,CAJuE,CAItE,ECrBK,SAAUC,GAAYC,EAAU,CACpC,OAAOA,aAAiB,MAAQ,CAAC,MAAMA,CAAY,CACrD,CCsCM,SAAUC,EAAUC,EAAyCC,EAAa,CAC9E,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAEhC,IAAIC,EAAQ,EAGZF,EAAO,UACLG,EAAyBF,EAAY,SAACG,EAAQ,CAG5CH,EAAW,KAAKJ,EAAQ,KAAKC,EAASM,EAAOF,GAAO,CAAC,CACvD,CAAC,CAAC,CAEN,CAAC,CACH,CC1DQ,IAAAG,GAAY,MAAK,QAEzB,SAASC,GAAkBC,EAA6BC,EAAW,CAC/D,OAAOH,GAAQG,CAAI,EAAID,EAAE,MAAA,OAAAE,EAAA,CAAA,EAAAC,EAAIF,CAAI,CAAA,CAAA,EAAID,EAAGC,CAAI,CAChD,CAMM,SAAUG,GAAuBJ,EAA2B,CAC9D,OAAOK,EAAI,SAAAJ,EAAI,CAAI,OAAAF,GAAYC,EAAIC,CAAI,CA
ApB,CAAqB,CAC5C,CCfQ,IAAAK,GAAY,MAAK,QACjBC,GAA0D,OAAM,eAArCC,GAA+B,OAAM,UAAlBC,GAAY,OAAM,KAQlE,SAAUC,GAAqDC,EAAuB,CAC1F,GAAIA,EAAK,SAAW,EAAG,CACrB,IAAMC,EAAQD,EAAK,CAAC,EACpB,GAAIL,GAAQM,CAAK,EACf,MAAO,CAAE,KAAMA,EAAO,KAAM,IAAI,EAElC,GAAIC,GAAOD,CAAK,EAAG,CACjB,IAAME,EAAOL,GAAQG,CAAK,EAC1B,MAAO,CACL,KAAME,EAAK,IAAI,SAACC,EAAG,CAAK,OAAAH,EAAMG,CAAG,CAAT,CAAU,EAClC,KAAID,IAKV,MAAO,CAAE,KAAMH,EAAa,KAAM,IAAI,CACxC,CAEA,SAASE,GAAOG,EAAQ,CACtB,OAAOA,GAAO,OAAOA,GAAQ,UAAYT,GAAeS,CAAG,IAAMR,EACnE,CC7BM,SAAUS,GAAaC,EAAgBC,EAAa,CACxD,OAAOD,EAAK,OAAO,SAACE,EAAQC,EAAKC,EAAC,CAAK,OAAEF,EAAOC,CAAG,EAAIF,EAAOG,CAAC,EAAIF,CAA5B,EAAqC,CAAA,CAAS,CACvF,CCsMM,SAAUG,GAAa,SAAoCC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAC/D,IAAMC,EAAYC,GAAaH,CAAI,EAC7BI,EAAiBC,GAAkBL,CAAI,EAEvCM,EAA8BC,GAAqBP,CAAI,EAA/CQ,EAAWF,EAAA,KAAEG,EAAIH,EAAA,KAE/B,GAAIE,EAAY,SAAW,EAIzB,OAAOE,GAAK,CAAA,EAAIR,CAAgB,EAGlC,IAAMS,EAAS,IAAIC,EACjBC,GACEL,EACAN,EACAO,EAEI,SAACK,EAAM,CAAK,OAAAC,GAAaN,EAAMK,CAAM,CAAzB,EAEZE,EAAQ,CACb,EAGH,OAAOZ,EAAkBO,EAAO,KAAKM,GAAiBb,CAAc,CAAC,EAAsBO,CAC7F,CAEM,SAAUE,GACdL,EACAN,EACAgB,EAAiD,CAAjD,OAAAA,IAAA,SAAAA,EAAAF,IAEO,SAACG,EAA2B,CAGjCC,GACElB,EACA,UAAA,CAaE,QAZQmB,EAAWb,EAAW,OAExBM,EAAS,IAAI,MAAMO,CAAM,EAG3BC,EAASD,EAITE,EAAuBF,aAGlBG,EAAC,CACRJ,GACElB,EACA,UAAA,CACE,IAAMuB,EAASf,GAAKF,EAAYgB,CAAC,EAAGtB,CAAgB,EAChDwB,EAAgB,GACpBD,EAAO,UACLE,EACER,EACA,SAACS,EAAK,CAEJd,EAAOU,CAAC,EAAII,EACPF,IAEHA,EAAgB,GAChBH,KAEGA,GAGHJ,EAAW,KAAKD,EAAeJ,EAAO,MAAK,CAAE,CAAC,CAElD,EACA,UAAA,CACO,EAAEQ,GAGLH,EAAW,SAAQ,CAEvB,CAAC,CACF,CAEL,EACAA,CAAU,GAjCLK,EAAI,EAAGA,EAAIH,EAAQG,MAAnBA,CAAC,CAoCZ,EACAL,CAAU,CAEd,CACF,CAMA,SAASC,GAAclB,EAAsC2B,EAAqBC,EAA0B,CACtG5B,EACF6B,GAAgBD,EAAc5B,EAAW2B,CAAO,EAEhDA,EAAO,CAEX,CC3RM,SAAUG,GACdC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EACAC,EAAgC,CAGhC,IAAMC,EAAc,CAAA,EAEhBC,EAAS,EAETC,EAAQ,EAERC,EAAa,GAKXC,EAAgB,UAAA,CAIhBD,GAAc,CAACH,EAAO,QAAU,CAACC,GACnCR,EAAW,SAAQ,CAEvB,EAGMY,EAAY,SAACC,EAAQ,CAAK,OAACL,EAASN,EAAaY,EAAWD,CAAK,EAAIN,EAAO,KAAKM,CAAK,CAA5D,EAE1BC,EAAa,SAACD,EAAQ,CAI1BT,GAAUJ,EAAW,KAAKa,CAAY,EAItCL,IAKA,IAAIO,GAAgB,GAGpBC,EAAUf,EAAQY,EAAOJ,GAAO,CAAC,EAAE,UACjCQ,EACEjB,EACA,SAACkB,GAAU,CAGTf,GAAY,MAAZA,EAAee,EAAU,EAErBd,EAGFQ,EAAUM,EAAiB,EAG3BlB,EAAW,KAAKkB,EAAU,CAE9B,EACA,UAAA,CAGEH,GAAgB,EAClB,EAEA,OACA,UAAA,CAIE,GAAIA,GAKF,GAAI,CAIFP,IAKA,sBACE,IAAMW,EAAgBZ,EAAO,MAAK,EAI9BF,EACFe,GAAgBpB,EAAYK,EAAmB,UAAA,CAAM,OAAAS,EAAWK,CAAa,CAAxB,CAAyB,EAE9EL,EAAWK,CAAa,GARrBZ,EAAO,QAAUC,EAASN,QAYjCS,EAAa,QACNU,EAAK,CACZrB,EAAW,MAAMqB,CAAG,EAG1B,CAAC,CACF,CAEL,EAGA,OAAAtB,EAAO,UACLkB,EAAyBjB,EAAYY,EAAW,UAAA,CAE9CF,EAAa,GACbC,EAAa,CACf,CAAC,CAAC,EAKG,UAAA,CACLL,GAAmB,MAAnBA,EAAmB,CACrB,CACF,CClEM,SAAUgB,GACdC,EACAC,EACAC,EAA6B,CAE7B,OAFAA,IAAA,SAAAA,EAAA,KAEIC,EAAWF,CAAc,EAEpBF,GAAS,SAACK,EAAGC,EAAC,CAAK,OAAAC,EAAI,SAACC,EAAQC,EAAU,CAAK,OAAAP,EAAeG,EAAGG,EAAGF,EAAGG,CAAE,CAA1B,CAA2B,EAAEC,EAAUT,EAAQI,EAAGC,CAAC,CAAC,CAAC,CAAjF,EAAoFH,CAAU,GAC/G,OAAOD,GAAmB,WACnCC,EAAaD,GAGRS,EAAQ,SAACC,EAAQC,EAAU,CAAK,OAAAC,GAAeF,EAAQC,EAAYZ,EAASE,CAAU,CAAtD,CAAuD,EAChG,CChCM,SAAUY,GAAyCC,EAA6B,CAA7B,OAAAA,IAAA,SAAAA,EAAA,KAChDC,GAASC,GAAUF,CAAU,CACtC,CCNM,SAAUG,IAAS,CACvB,OAAOC,GAAS,CAAC,CACnB,CCmDM,SAAUC,IAAM,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACrB,OAAOC,GAAS,EAAGC,GAAKH,EAAMI,GAAaJ,CAAI,CAAC,CAAC,CACnD,CC9DM,SAAUK,EAAsCC,EAA0B,CAC9E,OAAO,IAAIC,EAA+B,SAACC,EAAU,CACnDC,EAAUH,EAAiB,CAAE,EAAE,UAAUE,CAAU,CACrD,CAAC,CACH,CChDA,IAAME,GAA0B,CAAC,cAAe,gBAAgB,EAC1DC,GAAqB,CAAC,mBAAoB,qBAAqB,EAC/DC,GAAgB,CAAC,KAAM,KAAK,EAkO5B,SAAUC
,EACdC,EACAC,EACAC,EACAC,EAAsC,CAMtC,GAJIC,EAAWF,CAAO,IACpBC,EAAiBD,EACjBA,EAAU,QAERC,EACF,OAAOJ,EAAaC,EAAQC,EAAWC,CAA+B,EAAE,KAAKG,GAAiBF,CAAc,CAAC,EAUzG,IAAAG,EAAAC,EAEJC,GAAcR,CAAM,EAChBH,GAAmB,IAAI,SAACY,EAAU,CAAK,OAAA,SAACC,EAAY,CAAK,OAAAV,EAAOS,CAAU,EAAER,EAAWS,EAASR,CAA+B,CAAtE,CAAlB,CAAyF,EAElIS,GAAwBX,CAAM,EAC5BJ,GAAwB,IAAIgB,GAAwBZ,EAAQC,CAAS,CAAC,EACtEY,GAA0Bb,CAAM,EAChCF,GAAc,IAAIc,GAAwBZ,EAAQC,CAAS,CAAC,EAC5D,CAAA,EAAE,CAAA,EATDa,EAAGR,EAAA,CAAA,EAAES,EAAMT,EAAA,CAAA,EAgBlB,GAAI,CAACQ,GACCE,GAAYhB,CAAM,EACpB,OAAOiB,GAAS,SAACC,EAAc,CAAK,OAAAnB,EAAUmB,EAAWjB,EAAWC,CAA+B,CAA/D,CAAgE,EAClGiB,EAAUnB,CAAM,CAAC,EAOvB,GAAI,CAACc,EACH,MAAM,IAAI,UAAU,sBAAsB,EAG5C,OAAO,IAAIM,EAAc,SAACC,EAAU,CAIlC,IAAMX,EAAU,UAAA,SAACY,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAAmB,OAAAF,EAAW,KAAK,EAAIC,EAAK,OAASA,EAAOA,EAAK,CAAC,CAAC,CAAhD,EAEpC,OAAAR,EAAIJ,CAAO,EAEJ,UAAA,CAAM,OAAAK,EAAQL,CAAO,CAAf,CACf,CAAC,CACH,CASA,SAASE,GAAwBZ,EAAaC,EAAiB,CAC7D,OAAO,SAACQ,EAAkB,CAAK,OAAA,SAACC,EAAY,CAAK,OAAAV,EAAOS,CAAU,EAAER,EAAWS,CAAO,CAArC,CAAlB,CACjC,CAOA,SAASC,GAAwBX,EAAW,CAC1C,OAAOI,EAAWJ,EAAO,WAAW,GAAKI,EAAWJ,EAAO,cAAc,CAC3E,CAOA,SAASa,GAA0Bb,EAAW,CAC5C,OAAOI,EAAWJ,EAAO,EAAE,GAAKI,EAAWJ,EAAO,GAAG,CACvD,CAOA,SAASQ,GAAcR,EAAW,CAChC,OAAOI,EAAWJ,EAAO,gBAAgB,GAAKI,EAAWJ,EAAO,mBAAmB,CACrF,CCnMM,SAAUwB,GACdC,EACAC,EACAC,EAAsC,CAEtC,OAAIA,EACKH,GAAoBC,EAAYC,CAAa,EAAE,KAAKE,GAAiBD,CAAc,CAAC,EAGtF,IAAIE,EAAoB,SAACC,EAAU,CACxC,IAAMC,EAAU,UAAA,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAAc,OAAAH,EAAW,KAAKE,EAAE,SAAW,EAAIA,EAAE,CAAC,EAAIA,CAAC,CAAzC,EACzBE,EAAWT,EAAWM,CAAO,EACnC,OAAOI,EAAWT,CAAa,EAAI,UAAA,CAAM,OAAAA,EAAcK,EAASG,CAAQ,CAA/B,EAAmC,MAC9E,CAAC,CACH,CCtBM,SAAUE,GACdC,EACAC,EACAC,EAAyC,CAFzCF,IAAA,SAAAA,EAAA,GAEAE,IAAA,SAAAA,EAAAC,IAIA,IAAIC,EAAmB,GAEvB,OAAIH,GAAuB,OAIrBI,GAAYJ,CAAmB,EACjCC,EAAYD,EAIZG,EAAmBH,GAIhB,IAAIK,EAAW,SAACC,EAAU,CAI/B,IAAIC,EAAMC,GAAYT,CAAO,EAAI,CAACA,EAAUE,EAAW,IAAG,EAAKF,EAE3DQ,EAAM,IAERA,EAAM,GAIR,IAAIE,EAAI,EAGR,OAAOR,EAAU,SAAS,UAAA,CACnBK,EAAW,SAEdA,EAAW,KAAKG,GAAG,EAEf,GAAKN,EAGP,KAAK,SAAS,OAAWA,CAAgB,EAGzCG,EAAW,SAAQ,EAGzB,EAAGC,CAAG,CACR,CAAC,CACH,CChGM,SAAUG,GAAK,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACpB,IAAMC,EAAYC,GAAaH,CAAI,EAC7BI,EAAaC,GAAUL,EAAM,GAAQ,EACrCM,EAAUN,EAChB,OAAQM,EAAQ,OAGZA,EAAQ,SAAW,EAEnBC,EAAUD,EAAQ,CAAC,CAAC,EAEpBE,GAASJ,CAAU,EAAEK,GAAKH,EAASJ,CAAS,CAAC,EAL7CQ,CAMN,CCjEO,IAAMC,GAAQ,IAAIC,EAAkBC,EAAI,ECpCvC,IAAAC,GAAY,MAAK,QAMnB,SAAUC,GAAkBC,EAAiB,CACjD,OAAOA,EAAK,SAAW,GAAKF,GAAQE,EAAK,CAAC,CAAC,EAAIA,EAAK,CAAC,EAAKA,CAC5D,CCoDM,SAAUC,EAAUC,EAAiDC,EAAa,CACtF,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAEhC,IAAIC,EAAQ,EAIZF,EAAO,UAILG,EAAyBF,EAAY,SAACG,EAAK,CAAK,OAAAP,EAAU,KAAKC,EAASM,EAAOF,GAAO,GAAKD,EAAW,KAAKG,CAAK,CAAhE,CAAiE,CAAC,CAEtH,CAAC,CACH,CCxBM,SAAUC,IAAG,SAACC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAClB,IAAMC,EAAiBC,GAAkBH,CAAI,EAEvCI,EAAUC,GAAeL,CAAI,EAEnC,OAAOI,EAAQ,OACX,IAAIE,EAAsB,SAACC,EAAU,CAGnC,IAAIC,EAAuBJ,EAAQ,IAAI,UAAA,CAAM,MAAA,CAAA,CAAA,CAAE,EAK3CK,EAAYL,EAAQ,IAAI,UAAA,CAAM,MAAA,EAAA,CAAK,EAGvCG,EAAW,IAAI,UAAA,CACbC,EAAUC,EAAY,IACxB,CAAC,EAKD,mBAASC,EAAW,CAClBC,EAAUP,EAAQM,CAAW,CAAC,EAAE,UAC9BE,EACEL,EACA,SAACM,EAAK,CAKJ,GAJAL,EAAQE,CAAW,EAAE,KAAKG,CAAK,EAI3BL,EAAQ,MAAM,SAACM,EAAM,CAAK,OAAAA,EAAO,MAAP,CAAa,EAAG,CAC5C,IAAMC,EAAcP,EAAQ,IAAI,SAACM,EAAM,CAAK,OAAAA,EAAO,MAAK,CAAZ,CAAe,EAE3DP,EAAW,KAAKL,EAAiBA,EAAc,MAAA,OAAAc,EAAA,CAAA,EAAAC,EAAIF,CAAM,CAAA,CAAA,EAAIA,CAAM,EAI/DP,EAAQ,KAAK,SAACM,EAAQI,EA
AC,CAAK,MAAA,CAACJ,EAAO,QAAUL,EAAUS,CAAC,CAA7B,CAA8B,GAC5DX,EAAW,SAAQ,EAGzB,EACA,UAAA,CAGEE,EAAUC,CAAW,EAAI,GAIzB,CAACF,EAAQE,CAAW,EAAE,QAAUH,EAAW,SAAQ,CACrD,CAAC,CACF,GA9BIG,EAAc,EAAG,CAACH,EAAW,QAAUG,EAAcN,EAAQ,OAAQM,MAArEA,CAAW,EAmCpB,OAAO,UAAA,CACLF,EAAUC,EAAY,IACxB,CACF,CAAC,EACDU,CACN,CC9DM,SAAUC,GAASC,EAAoD,CAC3E,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACXC,EAAsB,KACtBC,EAA6C,KAC7CC,EAAa,GAEXC,EAAc,UAAA,CAGlB,GAFAF,GAAkB,MAAlBA,EAAoB,YAAW,EAC/BA,EAAqB,KACjBF,EAAU,CACZA,EAAW,GACX,IAAMK,EAAQJ,EACdA,EAAY,KACZF,EAAW,KAAKM,CAAK,EAEvBF,GAAcJ,EAAW,SAAQ,CACnC,EAEMO,EAAkB,UAAA,CACtBJ,EAAqB,KACrBC,GAAcJ,EAAW,SAAQ,CACnC,EAEAD,EAAO,UACLS,EACER,EACA,SAACM,EAAK,CACJL,EAAW,GACXC,EAAYI,EACPH,GACHM,EAAUZ,EAAiBS,CAAK,CAAC,EAAE,UAChCH,EAAqBK,EAAyBR,EAAYK,EAAaE,CAAe,CAAE,CAG/F,EACA,UAAA,CACEH,EAAa,IACZ,CAACH,GAAY,CAACE,GAAsBA,EAAmB,SAAWH,EAAW,SAAQ,CACxF,CAAC,CACF,CAEL,CAAC,CACH,CC3CM,SAAUU,GAAaC,EAAkBC,EAAyC,CAAzC,OAAAA,IAAA,SAAAA,EAAAC,IACtCC,GAAM,UAAA,CAAM,OAAAC,GAAMJ,EAAUC,CAAS,CAAzB,CAA0B,CAC/C,CCEM,SAAUI,GAAeC,EAAoBC,EAAsC,CAAtC,OAAAA,IAAA,SAAAA,EAAA,MAGjDA,EAAmBA,GAAgB,KAAhBA,EAAoBD,EAEhCE,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAiB,CAAA,EACjBC,EAAQ,EAEZH,EAAO,UACLI,EACEH,EACA,SAACI,EAAK,aACAC,EAAuB,KAKvBH,IAAUL,IAAsB,GAClCI,EAAQ,KAAK,CAAA,CAAE,MAIjB,QAAqBK,EAAAC,GAAAN,CAAO,EAAAO,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAzB,IAAMG,EAAMD,EAAA,MACfC,EAAO,KAAKL,CAAK,EAMbR,GAAca,EAAO,SACvBJ,EAASA,GAAM,KAANA,EAAU,CAAA,EACnBA,EAAO,KAAKI,CAAM,uGAItB,GAAIJ,MAIF,QAAqBK,EAAAH,GAAAF,CAAM,EAAAM,GAAAD,EAAA,KAAA,EAAA,CAAAC,GAAA,KAAAA,GAAAD,EAAA,KAAA,EAAE,CAAxB,IAAMD,EAAME,GAAA,MACfC,GAAUX,EAASQ,CAAM,EACzBT,EAAW,KAAKS,CAAM,wGAG5B,EACA,UAAA,aAGE,QAAqBI,EAAAN,GAAAN,CAAO,EAAAa,EAAAD,EAAA,KAAA,EAAA,CAAAC,EAAA,KAAAA,EAAAD,EAAA,KAAA,EAAE,CAAzB,IAAMJ,EAAMK,EAAA,MACfd,EAAW,KAAKS,CAAM,oGAExBT,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEC,EAAU,IACZ,CAAC,CACF,CAEL,CAAC,CACH,CCbM,SAAUc,GACdC,EAAgD,CAEhD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAgC,KAChCC,EAAY,GACZC,EAEJF,EAAWF,EAAO,UAChBK,EAAyBJ,EAAY,OAAW,OAAW,SAACK,EAAG,CAC7DF,EAAgBG,EAAUT,EAASQ,EAAKT,GAAWC,CAAQ,EAAEE,CAAM,CAAC,CAAC,EACjEE,GACFA,EAAS,YAAW,EACpBA,EAAW,KACXE,EAAc,UAAUH,CAAU,GAIlCE,EAAY,EAEhB,CAAC,CAAC,EAGAA,IAMFD,EAAS,YAAW,EACpBA,EAAW,KACXE,EAAe,UAAUH,CAAU,EAEvC,CAAC,CACH,CC/HM,SAAUO,GACdC,EACAC,EACAC,EACAC,EACAC,EAAqC,CAErC,OAAO,SAACC,EAAuBC,EAA2B,CAIxD,IAAIC,EAAWL,EAIXM,EAAaP,EAEbQ,EAAQ,EAGZJ,EAAO,UACLK,EACEJ,EACA,SAACK,EAAK,CAEJ,IAAMC,EAAIH,IAEVD,EAAQD,EAEJP,EAAYQ,EAAOG,EAAOC,CAAC,GAIzBL,EAAW,GAAOI,GAGxBR,GAAcG,EAAW,KAAKE,CAAK,CACrC,EAGAJ,GACG,UAAA,CACCG,GAAYD,EAAW,KAAKE,CAAK,EACjCF,EAAW,SAAQ,CACrB,CAAE,CACL,CAEL,CACF,CCnCM,SAAUO,IAAa,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAClC,IAAMC,EAAiBC,GAAkBH,CAAI,EAC7C,OAAOE,EACHE,GAAKL,GAAa,MAAA,OAAAM,EAAA,CAAA,EAAAC,EAAKN,CAAoC,CAAA,CAAA,EAAGO,GAAiBL,CAAc,CAAC,EAC9FM,EAAQ,SAACC,EAAQC,EAAU,CACzBC,GAAiBN,EAAA,CAAEI,CAAM,EAAAH,EAAKM,GAAeZ,CAAI,CAAC,CAAA,CAAA,EAAGU,CAAU,CACjE,CAAC,CACP,CCUM,SAAUG,IAAiB,SAC/BC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAEA,OAAOC,GAAa,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIJ,CAAY,CAAA,CAAA,CACtC,CCkBM,SAAUK,GAAYC,EAAoD,CAC9E,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACXC,EAAsB,KAEtBC,EAA6C,KAE3CC,EAAO,UAAA,CAMX,GAFAD,GAAkB,MAAlBA,EAAoB,YAAW,EAC/BA,EAAqB,KACjBF,EAAU,CAEZA,EAAW,GACX,IAAMI,EAAQH,EACdA,EAAY,KACZF,EAAW,KAAKK,CAAK,EAEzB,EAEAN,EAAO,UACLO,EACEN,EACA,SAACK,EAAQ,CAIPF,GAAkB,MAAlBA,EAAoB,YAAW,EAC/BF,EAAW,GACXC,EAAYG,EAGZF,EAAqBG,EAAyBN,EAAYI,EAAMG,EAAI,EAEpEC,EAAUX,EAAiBQ,CAAK,CAAC,EAAE,UAAUF,CA
AkB,CACjE,EACA,UAAA,CAGEC,EAAI,EACJJ,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEE,EAAYC,EAAqB,IACnC,CAAC,CACF,CAEL,CAAC,CACH,CCvDM,SAAUM,GAAgBC,EAAiBC,EAAyC,CAAzC,OAAAA,IAAA,SAAAA,EAAAC,IACxCC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAkC,KAClCC,EAAsB,KACtBC,EAA0B,KAExBC,EAAO,UAAA,CACX,GAAIH,EAAY,CAEdA,EAAW,YAAW,EACtBA,EAAa,KACb,IAAMI,EAAQH,EACdA,EAAY,KACZF,EAAW,KAAKK,CAAK,EAEzB,EACA,SAASC,GAAY,CAInB,IAAMC,EAAaJ,EAAYR,EACzBa,EAAMZ,EAAU,IAAG,EACzB,GAAIY,EAAMD,EAAY,CAEpBN,EAAa,KAAK,SAAS,OAAWM,EAAaC,CAAG,EACtDR,EAAW,IAAIC,CAAU,EACzB,OAGFG,EAAI,CACN,CAEAL,EAAO,UACLU,EACET,EACA,SAACK,EAAQ,CACPH,EAAYG,EACZF,EAAWP,EAAU,IAAG,EAGnBK,IACHA,EAAaL,EAAU,SAASU,EAAcX,CAAO,EACrDK,EAAW,IAAIC,CAAU,EAE7B,EACA,UAAA,CAGEG,EAAI,EACJJ,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEE,EAAYD,EAAa,IAC3B,CAAC,CACF,CAEL,CAAC,CACH,CCpFM,SAAUS,GAAqBC,EAAe,CAClD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACfF,EAAO,UACLG,EACEF,EACA,SAACG,EAAK,CACJF,EAAW,GACXD,EAAW,KAAKG,CAAK,CACvB,EACA,UAAA,CACOF,GACHD,EAAW,KAAKH,CAAa,EAE/BG,EAAW,SAAQ,CACrB,CAAC,CACF,CAEL,CAAC,CACH,CCXM,SAAUI,GAAQC,EAAa,CACnC,OAAOA,GAAS,EAEZ,UAAA,CAAM,OAAAC,CAAA,EACNC,EAAQ,SAACC,EAAQC,EAAU,CACzB,IAAIC,EAAO,EACXF,EAAO,UACLG,EAAyBF,EAAY,SAACG,EAAK,CAIrC,EAAEF,GAAQL,IACZI,EAAW,KAAKG,CAAK,EAIjBP,GAASK,GACXD,EAAW,SAAQ,EAGzB,CAAC,CAAC,CAEN,CAAC,CACP,CC9BM,SAAUI,GAAc,CAC5B,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChCD,EAAO,UAAUE,EAAyBD,EAAYE,EAAI,CAAC,CAC7D,CAAC,CACH,CCCM,SAAUC,GAASC,EAAQ,CAC/B,OAAOC,EAAI,UAAA,CAAM,OAAAD,CAAA,CAAK,CACxB,CC4CM,SAAUE,GACdC,EACAC,EAAmC,CAEnC,OAAIA,EAEK,SAACC,EAAqB,CAC3B,OAAAC,GAAOF,EAAkB,KAAKG,GAAK,CAAC,EAAGC,EAAc,CAAE,EAAGH,EAAO,KAAKH,GAAUC,CAAqB,CAAC,CAAC,CAAvG,EAGGM,GAAS,SAACC,EAAOC,EAAK,CAAK,OAAAC,EAAUT,EAAsBO,EAAOC,CAAK,CAAC,EAAE,KAAKJ,GAAK,CAAC,EAAGM,GAAMH,CAAK,CAAC,CAAzE,CAA0E,CAC9G,CCzCM,SAAUI,GAASC,EAAoBC,EAAyC,CAAzCA,IAAA,SAAAA,EAAAC,IAC3C,IAAMC,EAAWC,GAAMJ,EAAKC,CAAS,EACrC,OAAOI,GAAU,UAAA,CAAM,OAAAF,CAAA,CAAQ,CACjC,CC0EM,SAAUG,EACdC,EACAC,EAA0D,CAA1D,OAAAA,IAAA,SAAAA,EAA+BC,IAK/BF,EAAaA,GAAU,KAAVA,EAAcG,GAEpBC,EAAQ,SAACC,EAAQC,EAAU,CAGhC,IAAIC,EAEAC,EAAQ,GAEZH,EAAO,UACLI,EAAyBH,EAAY,SAACI,EAAK,CAEzC,IAAMC,EAAaV,EAAYS,CAAK,GAKhCF,GAAS,CAACR,EAAYO,EAAaI,CAAU,KAM/CH,EAAQ,GACRD,EAAcI,EAGdL,EAAW,KAAKI,CAAK,EAEzB,CAAC,CAAC,CAEN,CAAC,CACH,CAEA,SAASP,GAAeS,EAAQC,EAAM,CACpC,OAAOD,IAAMC,CACf,CCjHM,SAAUC,EAA8CC,EAAQC,EAAuC,CAC3G,OAAOC,EAAqB,SAACC,EAAMC,EAAI,CAAK,OAAAH,EAAUA,EAAQE,EAAEH,CAAG,EAAGI,EAAEJ,CAAG,CAAC,EAAIG,EAAEH,CAAG,IAAMI,EAAEJ,CAAG,CAApD,CAAqD,CACnG,CC7BM,SAAUK,GAAgBC,EAA6C,CAA7C,OAAAA,IAAA,SAAAA,EAAAC,IACvBC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAW,GACfF,EAAO,UACLG,EACEF,EACA,SAACG,EAAK,CACJF,EAAW,GACXD,EAAW,KAAKG,CAAK,CACvB,EACA,UAAA,CAAM,OAACF,EAAWD,EAAW,SAAQ,EAAKA,EAAW,MAAMJ,EAAY,CAAE,CAAnE,CAAqE,CAC5E,CAEL,CAAC,CACH,CAEA,SAASC,IAAmB,CAC1B,OAAO,IAAIO,EACb,CCMM,SAAUC,IAAO,SAAIC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACzB,OAAO,SAACC,EAAqB,CAAK,OAAAC,GAAOD,EAAQE,EAAE,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIN,CAAM,CAAA,CAAA,CAAA,CAA3B,CACpC,CCHM,SAAUO,EAAYC,EAAoB,CAC9C,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAGhC,GAAI,CACFD,EAAO,UAAUC,CAAU,UAE3BA,EAAW,IAAIH,CAAQ,EAE3B,CAAC,CACH,CCMM,SAAUI,GACdC,EACAC,EAAgB,CAEhB,IAAMC,EAAkB,UAAU,QAAU,EAC5C,OAAO,SAACC,EAAqB,CAC3B,OAAAA,EAAO,KACLH,EAAYI,EAAO,SAACC,EAAG,EAAC,CAAK,OAAAL,EAAUK,EAAG,EAAGF,CAAM,CAAtB,CAAuB,EAAIG,GACxDC,GAAK,CAAC,EACNL,EAAkBM,GAAeP,CAAa,EAAIQ,GAAa,UAAA,CAAM,OAAA,IAAIC,EAAJ,CAAgB,CAAC,CAHxF,CAKJ,CC/CM,SAAUC,GAAYC,EAAa,CACvC,OAAOA,GAAS,EACZ,UAAA,CAAM,OAAAC,CAAA,EACNC,EAAQ,SAACC,EAAQC,EAAU,CAKzB,IAAIC,EAAc,CAAA,EAClBF,EAAO,UACLG,EACEF,EACA,SAACG,EAAK,CAEJF,EAAO,KAAKE,CAAK,EAGjBP,E
AAQK,EAAO,QAAUA,EAAO,MAAK,CACvC,EACA,UAAA,aAGE,QAAoBG,EAAAC,GAAAJ,CAAM,EAAAK,EAAAF,EAAA,KAAA,EAAA,CAAAE,EAAA,KAAAA,EAAAF,EAAA,KAAA,EAAE,CAAvB,IAAMD,EAAKG,EAAA,MACdN,EAAW,KAAKG,CAAK,oGAEvBH,EAAW,SAAQ,CACrB,EAEA,OACA,UAAA,CAEEC,EAAS,IACX,CAAC,CACF,CAEL,CAAC,CACP,CC1DM,SAAUM,IAAK,SAAIC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACvB,IAAMC,EAAYC,GAAaH,CAAI,EAC7BI,EAAaC,GAAUL,EAAM,GAAQ,EAC3C,OAAAA,EAAOM,GAAeN,CAAI,EAEnBO,EAAQ,SAACC,EAAQC,EAAU,CAChCC,GAASN,CAAU,EAAEO,GAAIC,EAAA,CAAEJ,CAAM,EAAAK,EAAMb,CAA6B,CAAA,EAAGE,CAAS,CAAC,EAAE,UAAUO,CAAU,CACzG,CAAC,CACH,CCcM,SAAUK,IAAS,SACvBC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAEA,OAAOC,GAAK,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIJ,CAAY,CAAA,CAAA,CAC9B,CCmEM,SAAUK,GAAUC,EAAqC,OACzDC,EAAQ,IACRC,EAEJ,OAAIF,GAAiB,OACf,OAAOA,GAAkB,UACxBG,EAA4BH,EAAa,MAAzCC,EAAKE,IAAA,OAAG,IAAQA,EAAED,EAAUF,EAAa,OAE5CC,EAAQD,GAILC,GAAS,EACZ,UAAA,CAAM,OAAAG,CAAA,EACNC,EAAQ,SAACC,EAAQC,EAAU,CACzB,IAAIC,EAAQ,EACRC,EAEEC,EAAc,UAAA,CAGlB,GAFAD,GAAS,MAATA,EAAW,YAAW,EACtBA,EAAY,KACRP,GAAS,KAAM,CACjB,IAAMS,EAAW,OAAOT,GAAU,SAAWU,GAAMV,CAAK,EAAIW,EAAUX,EAAMM,CAAK,CAAC,EAC5EM,EAAqBC,EAAyBR,EAAY,UAAA,CAC9DO,EAAmB,YAAW,EAC9BE,EAAiB,CACnB,CAAC,EACDL,EAAS,UAAUG,CAAkB,OAErCE,EAAiB,CAErB,EAEMA,EAAoB,UAAA,CACxB,IAAIC,EAAY,GAChBR,EAAYH,EAAO,UACjBS,EAAyBR,EAAY,OAAW,UAAA,CAC1C,EAAEC,EAAQP,EACRQ,EACFC,EAAW,EAEXO,EAAY,GAGdV,EAAW,SAAQ,CAEvB,CAAC,CAAC,EAGAU,GACFP,EAAW,CAEf,EAEAM,EAAiB,CACnB,CAAC,CACP,CCpFM,SAAUE,GAAcC,EAA6DC,EAAQ,CAMjG,OAAOC,EAAQC,GAAcH,EAAaC,EAAW,UAAU,QAAU,EAAG,EAAI,CAAC,CACnF,CC+CM,SAAUG,GAASC,EAA4B,CAA5BA,IAAA,SAAAA,EAAA,CAAA,GACf,IAAAC,EAAgHD,EAAO,UAAvHE,EAASD,IAAA,OAAG,UAAA,CAAM,OAAA,IAAIE,CAAJ,EAAgBF,EAAEG,EAA4EJ,EAAO,aAAnFK,EAAYD,IAAA,OAAG,GAAIA,EAAEE,EAAuDN,EAAO,gBAA9DO,EAAeD,IAAA,OAAG,GAAIA,EAAEE,EAA+BR,EAAO,oBAAtCS,EAAmBD,IAAA,OAAG,GAAIA,EAUnH,OAAO,SAACE,EAAa,CACnB,IAAIC,EACAC,EACAC,EACAC,EAAW,EACXC,EAAe,GACfC,EAAa,GAEXC,GAAc,UAAA,CAClBL,GAAe,MAAfA,EAAiB,YAAW,EAC5BA,EAAkB,MACpB,EAGMM,GAAQ,UAAA,CACZD,GAAW,EACXN,EAAaE,EAAU,OACvBE,EAAeC,EAAa,EAC9B,EACMG,EAAsB,UAAA,CAG1B,IAAMC,EAAOT,EACbO,GAAK,EACLE,GAAI,MAAJA,EAAM,YAAW,CACnB,EAEA,OAAOC,EAAc,SAACC,EAAQC,GAAU,CACtCT,IACI,CAACE,GAAc,CAACD,GAClBE,GAAW,EAOb,IAAMO,GAAQX,EAAUA,GAAO,KAAPA,EAAWX,EAAS,EAO5CqB,GAAW,IAAI,UAAA,CACbT,IAKIA,IAAa,GAAK,CAACE,GAAc,CAACD,IACpCH,EAAkBa,GAAYN,EAAqBV,CAAmB,EAE1E,CAAC,EAIDe,GAAK,UAAUD,EAAU,EAGvB,CAACZ,GAIDG,EAAW,IAOXH,EAAa,IAAIe,GAAe,CAC9B,KAAM,SAACC,GAAK,CAAK,OAAAH,GAAK,KAAKG,EAAK,CAAf,EACjB,MAAO,SAACC,GAAG,CACTZ,EAAa,GACbC,GAAW,EACXL,EAAkBa,GAAYP,GAAOb,EAAcuB,EAAG,EACtDJ,GAAK,MAAMI,EAAG,CAChB,EACA,SAAU,UAAA,CACRb,EAAe,GACfE,GAAW,EACXL,EAAkBa,GAAYP,GAAOX,CAAe,EACpDiB,GAAK,SAAQ,CACf,EACD,EACDK,EAAUP,CAAM,EAAE,UAAUX,CAAU,EAE1C,CAAC,EAAED,CAAa,CAClB,CACF,CAEA,SAASe,GACPP,EACAY,EAAoD,SACpDC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,EAAA,CAAA,EAAA,UAAAA,CAAA,EAEA,GAAIF,IAAO,GAAM,CACfZ,EAAK,EACL,OAGF,GAAIY,IAAO,GAIX,KAAMG,EAAe,IAAIP,GAAe,CACtC,KAAM,UAAA,CACJO,EAAa,YAAW,EACxBf,EAAK,CACP,EACD,EAED,OAAOW,EAAUC,EAAE,MAAA,OAAAI,EAAA,CAAA,EAAAC,EAAIJ,CAAI,CAAA,CAAA,CAAA,EAAG,UAAUE,CAAY,EACtD,CChHM,SAAUG,EACdC,EACAC,EACAC,EAAyB,WAErBC,EACAC,EAAW,GACf,OAAIJ,GAAsB,OAAOA,GAAuB,UACnDK,EAA8EL,EAAkB,WAAhGG,EAAUE,IAAA,OAAG,IAAQA,EAAEC,EAAuDN,EAAkB,WAAzEC,EAAUK,IAAA,OAAG,IAAQA,EAAEC,EAAgCP,EAAkB,SAAlDI,EAAQG,IAAA,OAAG,GAAKA,EAAEL,EAAcF,EAAkB,WAEnGG,EAAcH,GAAkB,KAAlBA,EAAsB,IAE/BQ,GAAS,CACd,UAAW,UAAA,CAAM,OAAA,IAAIC,GAAcN,EAAYF,EAAYC,CAAS,CAAnD,EACjB,aAAc,GACd,gBAAiB,GACjB,oBAAqBE,EACtB,CACH,CCxIM,SAAUM,GAAQC,EAAa,CACnC,OAAOC,EAAO,SAACC,EAAGC,E
AAK,CAAK,OAAAH,GAASG,CAAT,CAAc,CAC5C,CCaM,SAAUC,GAAaC,EAA8B,CACzD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAS,GAEPC,EAAiBC,EACrBH,EACA,UAAA,CACEE,GAAc,MAAdA,EAAgB,YAAW,EAC3BD,EAAS,EACX,EACAG,EAAI,EAGNC,EAAUR,CAAQ,EAAE,UAAUK,CAAc,EAE5CH,EAAO,UAAUI,EAAyBH,EAAY,SAACM,EAAK,CAAK,OAAAL,GAAUD,EAAW,KAAKM,CAAK,CAA/B,CAAgC,CAAC,CACpG,CAAC,CACH,CCVM,SAAUC,GAAS,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EAC9B,IAAMC,EAAYC,GAAaH,CAAM,EACrC,OAAOI,EAAQ,SAACC,EAAQC,EAAU,EAI/BJ,EAAYK,GAAOP,EAAQK,EAAQH,CAAS,EAAIK,GAAOP,EAAQK,CAAM,GAAG,UAAUC,CAAU,CAC/F,CAAC,CACH,CCmBM,SAAUE,EACdC,EACAC,EAA6G,CAE7G,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAyD,KACzDC,EAAQ,EAERC,EAAa,GAIXC,EAAgB,UAAA,CAAM,OAAAD,GAAc,CAACF,GAAmBD,EAAW,SAAQ,CAArD,EAE5BD,EAAO,UACLM,EACEL,EACA,SAACM,EAAK,CAEJL,GAAe,MAAfA,EAAiB,YAAW,EAC5B,IAAIM,EAAa,EACXC,EAAaN,IAEnBO,EAAUb,EAAQU,EAAOE,CAAU,CAAC,EAAE,UACnCP,EAAkBI,EACjBL,EAIA,SAACU,EAAU,CAAK,OAAAV,EAAW,KAAKH,EAAiBA,EAAeS,EAAOI,EAAYF,EAAYD,GAAY,EAAIG,CAAU,CAAzG,EAChB,UAAA,CAIET,EAAkB,KAClBG,EAAa,CACf,CAAC,CACD,CAEN,EACA,UAAA,CACED,EAAa,GACbC,EAAa,CACf,CAAC,CACF,CAEL,CAAC,CACH,CCvFM,SAAUO,EAAaC,EAA8B,CACzD,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChCC,EAAUJ,CAAQ,EAAE,UAAUK,EAAyBF,EAAY,UAAA,CAAM,OAAAA,EAAW,SAAQ,CAAnB,EAAuBG,EAAI,CAAC,EACrG,CAACH,EAAW,QAAUD,EAAO,UAAUC,CAAU,CACnD,CAAC,CACH,CCIM,SAAUI,GAAaC,EAAiDC,EAAiB,CAAjB,OAAAA,IAAA,SAAAA,EAAA,IACrEC,EAAQ,SAACC,EAAQC,EAAU,CAChC,IAAIC,EAAQ,EACZF,EAAO,UACLG,EAAyBF,EAAY,SAACG,EAAK,CACzC,IAAMC,EAASR,EAAUO,EAAOF,GAAO,GACtCG,GAAUP,IAAcG,EAAW,KAAKG,CAAK,EAC9C,CAACC,GAAUJ,EAAW,SAAQ,CAChC,CAAC,CAAC,CAEN,CAAC,CACH,CCqGM,SAAUK,EACdC,EACAC,EACAC,EAA8B,CAK9B,IAAMC,EACJC,EAAWJ,CAAc,GAAKC,GAASC,EAElC,CAAE,KAAMF,EAA2E,MAAKC,EAAE,SAAQC,CAAA,EACnGF,EAEN,OAAOG,EACHE,EAAQ,SAACC,EAAQC,EAAU,QACzBC,EAAAL,EAAY,aAAS,MAAAK,IAAA,QAAAA,EAAA,KAArBL,CAAW,EACX,IAAIM,EAAU,GACdH,EAAO,UACLI,EACEH,EACA,SAACI,EAAK,QACJH,EAAAL,EAAY,QAAI,MAAAK,IAAA,QAAAA,EAAA,KAAhBL,EAAmBQ,CAAK,EACxBJ,EAAW,KAAKI,CAAK,CACvB,EACA,UAAA,OACEF,EAAU,IACVD,EAAAL,EAAY,YAAQ,MAAAK,IAAA,QAAAA,EAAA,KAApBL,CAAW,EACXI,EAAW,SAAQ,CACrB,EACA,SAACK,EAAG,OACFH,EAAU,IACVD,EAAAL,EAAY,SAAK,MAAAK,IAAA,QAAAA,EAAA,KAAjBL,EAAoBS,CAAG,EACvBL,EAAW,MAAMK,CAAG,CACtB,EACA,UAAA,SACMH,KACFD,EAAAL,EAAY,eAAW,MAAAK,IAAA,QAAAA,EAAA,KAAvBL,CAAW,IAEbU,EAAAV,EAAY,YAAQ,MAAAU,IAAA,QAAAA,EAAA,KAApBV,CAAW,CACb,CAAC,CACF,CAEL,CAAC,EAIDW,EACN,CCnIM,SAAUC,GAAYC,EAAsDC,EAAuB,CACvG,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAC1B,IAAAC,EAAuCJ,GAAM,KAANA,EAAU,CAAA,EAA/CK,EAAAD,EAAA,QAAAE,EAAOD,IAAA,OAAG,GAAIA,EAAEE,EAAAH,EAAA,SAAAI,EAAQD,IAAA,OAAG,GAAKA,EACpCE,EAAW,GACXC,EAAsB,KACtBC,EAAiC,KACjCC,EAAa,GAEXC,EAAgB,UAAA,CACpBF,GAAS,MAATA,EAAW,YAAW,EACtBA,EAAY,KACRH,IACFM,GAAI,EACJF,GAAcT,EAAW,SAAQ,EAErC,EAEMY,EAAoB,UAAA,CACxBJ,EAAY,KACZC,GAAcT,EAAW,SAAQ,CACnC,EAEMa,EAAgB,SAACC,GAAQ,CAC7B,OAACN,EAAYO,EAAUnB,EAAiBkB,EAAK,CAAC,EAAE,UAAUE,EAAyBhB,EAAYU,EAAeE,CAAiB,CAAC,CAAhI,EAEID,GAAO,UAAA,CACX,GAAIL,EAAU,CAIZA,EAAW,GACX,IAAMQ,GAAQP,EACdA,EAAY,KAEZP,EAAW,KAAKc,EAAK,EACrB,CAACL,GAAcI,EAAcC,EAAK,EAEtC,EAEAf,EAAO,UACLiB,EACEhB,EAMA,SAACc,GAAK,CACJR,EAAW,GACXC,EAAYO,GACZ,EAAEN,GAAa,CAACA,EAAU,UAAYL,EAAUQ,GAAI,EAAKE,EAAcC,EAAK,EAC9E,EACA,UAAA,CACEL,EAAa,GACb,EAAEJ,GAAYC,GAAYE,GAAa,CAACA,EAAU,SAAWR,EAAW,SAAQ,CAClF,CAAC,CACF,CAEL,CAAC,CACH,CCxFM,SAAUiB,GACdC,EACAC,EACAC,EAAuB,CADvBD,IAAA,SAAAA,EAAAE,IAGA,IAAMC,EAAYC,GAAML,EAAUC,CAAS,EAC3C,OAAOK,GAAS,UAAA,CAAM,OAAAF,CAAA,EAAWF,CAAM,CACzC,CCJM,SAAUK,IAAc,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACnC,IAAMC,EAAUC,GAAkBH,CAAM,EAExC,OAAOI,EAAQ,SAACC,EAAQC,EAAU,CAehC,Q
AdMC,EAAMP,EAAO,OACbQ,EAAc,IAAI,MAAMD,CAAG,EAI7BE,EAAWT,EAAO,IAAI,UAAA,CAAM,MAAA,EAAA,CAAK,EAGjCU,EAAQ,cAMHC,EAAC,CACRC,EAAUZ,EAAOW,CAAC,CAAC,EAAE,UACnBE,EACEP,EACA,SAACQ,EAAK,CACJN,EAAYG,CAAC,EAAIG,EACb,CAACJ,GAAS,CAACD,EAASE,CAAC,IAEvBF,EAASE,CAAC,EAAI,IAKbD,EAAQD,EAAS,MAAMM,EAAQ,KAAON,EAAW,MAEtD,EAGAO,EAAI,CACL,GAnBIL,EAAI,EAAGA,EAAIJ,EAAKI,MAAhBA,CAAC,EAwBVN,EAAO,UACLQ,EAAyBP,EAAY,SAACQ,EAAK,CACzC,GAAIJ,EAAO,CAET,IAAMO,EAAMC,EAAA,CAAIJ,CAAK,EAAAK,EAAKX,CAAW,CAAA,EACrCF,EAAW,KAAKJ,EAAUA,EAAO,MAAA,OAAAgB,EAAA,CAAA,EAAAC,EAAIF,CAAM,CAAA,CAAA,EAAIA,CAAM,EAEzD,CAAC,CAAC,CAEN,CAAC,CACH,CCxFM,SAAUG,IAAG,SAAOC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACxB,OAAOC,EAAQ,SAACC,EAAQC,EAAU,CAChCL,GAAS,MAAA,OAAAM,EAAA,CAACF,CAA8B,EAAAG,EAAMN,CAAuC,CAAA,CAAA,EAAE,UAAUI,CAAU,CAC7G,CAAC,CACH,CCCM,SAAUG,IAAO,SAAkCC,EAAA,CAAA,EAAAC,EAAA,EAAAA,EAAA,UAAA,OAAAA,IAAAD,EAAAC,CAAA,EAAA,UAAAA,CAAA,EACvD,OAAOC,GAAG,MAAA,OAAAC,EAAA,CAAA,EAAAC,EAAIJ,CAAW,CAAA,CAAA,CAC3B,CCYO,SAASK,IAAmC,CACjD,IAAMC,EAAY,IAAIC,GAAwB,CAAC,EAC/C,OAAAC,EAAU,SAAU,mBAAoB,CAAE,KAAM,EAAK,CAAC,EACnD,UAAU,IAAMF,EAAU,KAAK,QAAQ,CAAC,EAGpCA,CACT,CCHO,SAASG,EACdC,EAAkBC,EAAmB,SAChC,CACL,OAAO,MAAM,KAAKA,EAAK,iBAAoBD,CAAQ,CAAC,CACtD,CAuBO,SAASE,EACdF,EAAkBC,EAAmB,SAClC,CACH,IAAME,EAAKC,GAAsBJ,EAAUC,CAAI,EAC/C,GAAI,OAAOE,GAAO,YAChB,MAAM,IAAI,eACR,8BAA8BH,CAAQ,iBACxC,EAGF,OAAOG,CACT,CAsBO,SAASC,GACdJ,EAAkBC,EAAmB,SACtB,CACf,OAAOA,EAAK,cAAiBD,CAAQ,GAAK,MAC5C,CAOO,SAASK,IAA4C,CAnH5D,IAAAC,EAAAC,EAAAC,EAAAC,EAoHE,OACEA,GAAAD,GAAAD,GAAAD,EAAA,SAAS,gBAAT,YAAAA,EAAwB,aAAxB,YAAAC,EAAoC,gBAApC,KAAAC,EACA,SAAS,gBADT,KAAAC,EAEA,MAEJ,CCvEA,IAAMC,GAAYC,EAChBC,EAAU,SAAS,KAAM,SAAS,EAClCA,EAAU,SAAS,KAAM,UAAU,CACrC,EACG,KACCC,GAAa,CAAC,EACdC,EAAU,MAAS,EACnBC,EAAI,IAAMC,GAAiB,GAAK,SAAS,IAAI,EAC7CC,EAAY,CAAC,CACf,EAaK,SAASC,GACdC,EACqB,CACrB,OAAOT,GACJ,KACCK,EAAIK,GAAUD,EAAG,SAASC,CAAM,CAAC,EACjCC,EAAqB,CACvB,CACJ,CC7BO,SAASC,GACdC,EAAiBC,EACI,CACrB,OAAOC,EAAM,IAAMC,EACjBC,EAAUJ,EAAI,YAAY,EAAE,KAAKK,EAAI,IAAM,EAAI,CAAC,EAChDD,EAAUJ,EAAI,YAAY,EAAE,KAAKK,EAAI,IAAM,EAAK,CAAC,CACnD,EACG,KACCJ,EAAUK,GAASC,GAAUC,GAAM,CAAC,CAACD,EAASN,CAAO,CAAC,EAAIQ,GAC1DC,EAAUV,EAAG,QAAQ,QAAQ,CAAC,CAChC,CACF,CACF,CCPA,SAASW,GAAYC,EAAiBC,EAA8B,CAGlE,GAAI,OAAOA,GAAU,UAAY,OAAOA,GAAU,SAChDD,EAAG,WAAaC,EAAM,SAAS,UAGtBA,aAAiB,KAC1BD,EAAG,YAAYC,CAAK,UAGX,MAAM,QAAQA,CAAK,EAC5B,QAAWC,KAAQD,EACjBF,GAAYC,EAAIE,CAAI,CAE1B,CAyBO,SAASC,EACdC,EAAaC,KAAmCC,EAC7C,CACH,IAAMN,EAAK,SAAS,cAAcI,CAAG,EAGrC,GAAIC,EACF,QAAWE,KAAQ,OAAO,KAAKF,CAAU,EACnC,OAAOA,EAAWE,CAAI,GAAM,cAI5B,OAAOF,EAAWE,CAAI,GAAM,UAC9BP,EAAG,aAAaO,EAAMF,EAAWE,CAAI,CAAC,EAEtCP,EAAG,aAAaO,EAAM,EAAE,GAI9B,QAAWN,KAASK,EAClBP,GAAYC,EAAIC,CAAK,EAGvB,OAAOD,CACT,CC9EO,SAASQ,GAAMC,EAAuB,CAC3C,GAAIA,EAAQ,IAAK,CACf,IAAMC,EAAS,GAAGD,EAAQ,KAAO,IAAO,IACxC,MAAO,KAAKA,EAAQ,MAAY,KAAM,QAAQC,CAAM,CAAC,GACvD,KACE,QAAOD,EAAM,SAAS,CAE1B,CCCO,SAASE,GAAYC,EAA+B,CACzD,IAAMC,EAASC,EAAE,SAAU,CAAE,IAAAF,CAAI,CAAC,EAClC,OAAOG,EAAM,KACX,SAAS,KAAK,YAAYF,CAAM,EACzBG,EACLC,EAAUJ,EAAQ,MAAM,EACxBI,EAAUJ,EAAQ,OAAO,EACtB,KACCK,EAAU,IACRC,GAAW,IAAM,IAAI,eAAe,mBAAmBP,CAAG,EAAE,CAAC,CAC9D,CACH,CACJ,EACG,KACCQ,EAAI,IAAG,EAAY,EACnBC,EAAS,IAAM,SAAS,KAAK,YAAYR,CAAM,CAAC,EAChDS,GAAK,CAAC,CACR,EACH,CACH,CCVA,IAAMC,GAAS,IAAIC,EAiBbC,GAAYC,EAAM,IACtB,OAAO,gBAAmB,YACtBC,GAAY,4CAA4C,EACxDC,EAAG,MAAS,CACjB,EACE,KACCC,EAAI,IAAM,IAAI,eAAeC,GAC3BA,EAAQ,QAAQC,GAASR,GAAO,KAAKQ,CAAK,CAAC,CAC5C,CAAC,EACFC,EAAUC,GAAYC,EAAMC,GAAOP,EAAGK,CAAQ,CAAC,EAAE,KAC/CG,EAAS,IAAMH,EAAS,WAAW,CAAC,CACtC,CAAC,EACDI,EAAY,CAAC,CACf,EAaK,SAASC,GACdC,EACa,CACb,MAAO,CACL,MAAQA,EAAG,YACX,
OAAQA,EAAG,YACb,CACF,CAuBO,SAASC,GACdD,EACyB,CAMzB,IAAIE,EAASF,EACb,KAAOE,EAAO,cAAgB,GACxBA,EAAO,eACTA,EAASA,EAAO,cAMpB,OAAOhB,GAAU,KACfiB,EAAIT,GAAYA,EAAS,QAAQQ,CAAM,CAAC,EACxCT,EAAUC,GAAYV,GAAO,KAC3BoB,EAAOZ,GAASA,EAAM,SAAWU,CAAM,EACvCL,EAAS,IAAMH,EAAS,UAAUQ,CAAM,CAAC,CAC3C,CAAC,EACDZ,EAAI,IAAMS,GAAeC,CAAE,CAAC,EAC5BK,EAAUN,GAAeC,CAAE,CAAC,CAC9B,CACF,CC3HO,SAASM,GACdC,EACa,CACb,MAAO,CACL,MAAQA,EAAG,YACX,OAAQA,EAAG,YACb,CACF,CASO,SAASC,GACdD,EACyB,CACzB,IAAIE,EAASF,EAAG,cAChB,KAAOE,IAEHF,EAAG,aAAgBE,EAAO,aAC1BF,EAAG,cAAgBE,EAAO,eAE1BA,GAAUF,EAAKE,GAAQ,cAK3B,OAAOA,EAASF,EAAK,MACvB,CAYO,SAASG,GACdH,EACe,CACf,IAAMI,EAA4B,CAAC,EAG/BF,EAASF,EAAG,cAChB,KAAOE,IAEHF,EAAG,YAAeE,EAAO,aACzBF,EAAG,aAAeE,EAAO,eAEzBE,EAAW,KAAKF,CAAM,EAGxBA,GAAUF,EAAKE,GAAQ,cAKzB,OAAIE,EAAW,SAAW,GACxBA,EAAW,KAAK,SAAS,eAAe,EAGnCA,CACT,CC9CO,SAASC,GACdC,EACe,CACf,MAAO,CACL,EAAGA,EAAG,WACN,EAAGA,EAAG,SACR,CACF,CASO,SAASC,GACdD,EACe,CACf,IAAME,EAAOF,EAAG,sBAAsB,EACtC,MAAO,CACL,EAAGE,EAAK,EAAI,OAAO,QACnB,EAAGA,EAAK,EAAI,OAAO,OACrB,CACF,CAWO,SAASC,GACdH,EAC2B,CAC3B,OAAOI,EACLC,EAAU,OAAQ,MAAM,EACxBA,EAAU,OAAQ,QAAQ,CAC5B,EACG,KACCC,GAAU,EAAGC,EAAuB,EACpCC,EAAI,IAAMT,GAAiBC,CAAE,CAAC,EAC9BS,EAAUV,GAAiBC,CAAE,CAAC,CAChC,CACJ,CC3DO,SAASU,GACdC,EACe,CACf,MAAO,CACL,EAAGA,EAAG,WACN,EAAGA,EAAG,SACR,CACF,CAWO,SAASC,GACdD,EAC2B,CAC3B,OAAOE,EACLC,EAAUH,EAAI,QAAQ,EACtBG,EAAU,OAAQ,QAAQ,EAC1BA,EAAU,OAAQ,QAAQ,CAC5B,EACG,KACCC,GAAU,EAAGC,EAAuB,EACpCC,EAAI,IAAMP,GAAwBC,CAAE,CAAC,EACrCO,EAAUR,GAAwBC,CAAE,CAAC,CACvC,CACJ,CCzBA,IAAMQ,GAAS,IAAIC,EAUbC,GAAYC,EAAM,IAAMC,EAC5B,IAAI,qBAAqBC,GAAW,CAClC,QAAWC,KAASD,EAClBL,GAAO,KAAKM,CAAK,CACrB,EAAG,CACD,UAAW,CACb,CAAC,CACH,CAAC,EACE,KACCC,EAAUC,GAAYC,EAAMC,GAAON,EAAGI,CAAQ,CAAC,EAC5C,KACCG,EAAS,IAAMH,EAAS,WAAW,CAAC,CACtC,CACF,EACAI,EAAY,CAAC,CACf,EAaK,SAASC,GACdC,EACqB,CACrB,OAAOZ,GACJ,KACCa,EAAIP,GAAYA,EAAS,QAAQM,CAAE,CAAC,EACpCP,EAAUC,GAAYR,GACnB,KACCgB,EAAO,CAAC,CAAE,OAAAC,CAAO,IAAMA,IAAWH,CAAE,EACpCH,EAAS,IAAMH,EAAS,UAAUM,CAAE,CAAC,EACrCI,EAAI,CAAC,CAAE,eAAAC,CAAe,IAAMA,CAAc,CAC5C,CACF,CACF,CACJ,CAaO,SAASC,GACdN,EAAiBO,EAAY,GACR,CACrB,OAAOC,GAA0BR,CAAE,EAChC,KACCI,EAAI,CAAC,CAAE,EAAAK,CAAE,IAAM,CACb,IAAMC,EAAUC,GAAeX,CAAE,EAC3BY,EAAUC,GAAsBb,CAAE,EACxC,OAAOS,GACLG,EAAQ,OAASF,EAAQ,OAASH,CAEtC,CAAC,EACDO,EAAqB,CACvB,CACJ,CCjFA,IAAMC,GAA4C,CAChD,OAAQC,EAAW,yBAAyB,EAC5C,OAAQA,EAAW,yBAAyB,CAC9C,EAaO,SAASC,GAAUC,EAAuB,CAC/C,OAAOH,GAAQG,CAAI,EAAE,OACvB,CAaO,SAASC,GAAUD,EAAcE,EAAsB,CACxDL,GAAQG,CAAI,EAAE,UAAYE,GAC5BL,GAAQG,CAAI,EAAE,MAAM,CACxB,CAWO,SAASG,GAAYH,EAAmC,CAC7D,IAAMI,EAAKP,GAAQG,CAAI,EACvB,OAAOK,EAAUD,EAAI,QAAQ,EAC1B,KACCE,EAAI,IAAMF,EAAG,OAAO,EACpBG,EAAUH,EAAG,OAAO,CACtB,CACJ,CC9BA,SAASI,GACPC,EAAiBC,EACR,CACT,OAAQD,EAAG,YAAa,CAGtB,KAAK,iBAEH,OAAIA,EAAG,OAAS,QACP,SAAS,KAAKC,CAAI,EAElB,GAGX,KAAK,kBACL,KAAK,oBACH,MAAO,GAGT,QACE,OAAOD,EAAG,iBACd,CACF,CAWO,SAASE,IAAwC,CACtD,OAAOC,EACLC,EAAU,OAAQ,kBAAkB,EAAE,KAAKC,EAAI,IAAM,EAAI,CAAC,EAC1DD,EAAU,OAAQ,gBAAgB,EAAE,KAAKC,EAAI,IAAM,EAAK,CAAC,CAC3D,EACG,KACCC,EAAU,EAAK,CACjB,CACJ,CAOO,SAASC,IAAsC,CACpD,IAAMC,EAAYJ,EAAyB,OAAQ,SAAS,EACzD,KACCK,EAAOC,GAAM,EAAEA,EAAG,SAAWA,EAAG,QAAQ,EACxCL,EAAIK,IAAO,CACT,KAAMC,GAAU,QAAQ,EAAI,SAAW,SACvC,KAAMD,EAAG,IACT,OAAQ,CACNA,EAAG,eAAe,EAClBA,EAAG,gBAAgB,CACrB,CACF,EAAc,EACdD,EAAO,CAAC,CAAE,KAAAG,EAAM,KAAAX,CAAK,IAAM,CACzB,GAAIW,IAAS,SAAU,CACrB,IAAMC,EAASC,GAAiB,EAChC,GAAI,OAAOD,GAAW,YACpB,MAAO,CAACd,GAAwBc,EAAQZ,CAAI,CAChD,CACA,MAAO,EACT,CAAC,EACDc,GAAM,CACR,EAGF,OAAOb,GAAiB,EACrB,KACCc,EAAUH,GAAWA,EAAqBI,EAAZT,CAAiB,CACjD,CACJ,CC1GO,SAASU,IAAmB,CACjC,OAAO,IAAI,IAAI,SAAS,IAAI,CAC9B,CAgBO,SAASC,GACdC,EAA4BC,EAAW,GACjC,CACN
,GAAIC,EAAQ,oBAAoB,GAAK,CAACD,EAAU,CAC9C,IAAME,EAAKC,EAAE,IAAK,CAAE,KAAMJ,EAAI,IAAK,CAAC,EACpC,SAAS,KAAK,YAAYG,CAAE,EAC5BA,EAAG,MAAM,EACTA,EAAG,OAAO,CAIZ,MACE,SAAS,KAAOH,EAAI,IAExB,CASO,SAASK,IAA8B,CAC5C,OAAO,IAAIC,CACb,CCxCO,SAASC,IAA0B,CACxC,OAAO,SAAS,KAAK,MAAM,CAAC,CAC9B,CAYO,SAASC,GAAgBC,EAAoB,CAClD,IAAMC,EAAKC,EAAE,IAAK,CAAE,KAAMF,CAAK,CAAC,EAChCC,EAAG,iBAAiB,QAASE,GAAMA,EAAG,gBAAgB,CAAC,EACvDF,EAAG,MAAM,CACX,CAWO,SAASG,GACdC,EACoB,CACpB,OAAOC,EACLC,EAA2B,OAAQ,YAAY,EAC/CF,CACF,EACG,KACCG,EAAIV,EAAe,EACnBW,EAAUX,GAAgB,CAAC,EAC3BY,EAAOV,GAAQA,EAAK,OAAS,CAAC,EAC9BW,EAAY,CAAC,CACf,CACJ,CASO,SAASC,GACdP,EACyB,CACzB,OAAOD,GAAkBC,CAAS,EAC/B,KACCG,EAAIK,GAAMC,GAAmB,QAAQD,CAAE,IAAI,CAAE,EAC7CH,EAAOT,GAAM,OAAOA,GAAO,WAAW,CACxC,CACJ,CCtDO,SAASc,GAAWC,EAAoC,CAC7D,IAAMC,EAAQ,WAAWD,CAAK,EAC9B,OAAOE,GAA0BC,GAC/BF,EAAM,YAAY,IAAME,EAAKF,EAAM,OAAO,CAAC,CAC5C,EACE,KACCG,EAAUH,EAAM,OAAO,CACzB,CACJ,CAOO,SAASI,IAAkC,CAChD,IAAMJ,EAAQ,WAAW,OAAO,EAChC,OAAOK,EACLC,EAAU,OAAQ,aAAa,EAAE,KAAKC,EAAI,IAAM,EAAI,CAAC,EACrDD,EAAU,OAAQ,YAAY,EAAE,KAAKC,EAAI,IAAM,EAAK,CAAC,CACvD,EACG,KACCJ,EAAUH,EAAM,OAAO,CACzB,CACJ,CAcO,SAASQ,GACdC,EAA6BC,EACd,CACf,OAAOD,EACJ,KACCE,EAAUC,GAAUA,EAASF,EAAQ,EAAIG,CAAK,CAChD,CACJ,CC/BO,SAASC,GACdC,EAAmBC,EACD,CAClB,OAAO,IAAIC,EAAiBC,GAAY,CACtC,IAAMC,EAAM,IAAI,eAChB,OAAAA,EAAI,KAAK,MAAO,GAAGJ,CAAG,EAAE,EACxBI,EAAI,aAAe,OAGnBA,EAAI,iBAAiB,OAAQ,IAAM,CAC7BA,EAAI,QAAU,KAAOA,EAAI,OAAS,KACpCD,EAAS,KAAKC,EAAI,QAAQ,EAC1BD,EAAS,SAAS,GAIlBA,EAAS,MAAM,IAAI,MAAMC,EAAI,UAAU,CAAC,CAE5C,CAAC,EAGDA,EAAI,iBAAiB,QAAS,IAAM,CAClCD,EAAS,MAAM,IAAI,MAAM,eAAe,CAAC,CAC3C,CAAC,EAGDC,EAAI,iBAAiB,QAAS,IAAM,CAClCD,EAAS,SAAS,CACpB,CAAC,EAGG,OAAOF,GAAA,YAAAA,EAAS,YAAc,cAChCG,EAAI,iBAAiB,WAAYC,GAAS,CA/FhD,IAAAC,EAgGQ,GAAID,EAAM,iBACRJ,EAAQ,UAAW,KAAMI,EAAM,OAASA,EAAM,MAAS,GAAG,MAIrD,CACL,IAAME,GAASD,EAAAF,EAAI,kBAAkB,gBAAgB,IAAtC,KAAAE,EAA2C,EAC1DL,EAAQ,UAAW,KAAMI,EAAM,OAAS,CAACE,EAAU,GAAG,CACxD,CACF,CAAC,EAGDN,EAAQ,UAAU,KAAK,CAAC,GAI1BG,EAAI,KAAK,EACF,IAAMA,EAAI,MAAM,CACzB,CAAC,CACH,CAcO,SAASI,GACdR,EAAmBC,EACJ,CACf,OAAOF,GAAQC,EAAKC,CAAO,EACxB,KACCQ,EAAUC,GAAOA,EAAI,KAAK,CAAC,EAC3BC,EAAIC,GAAQ,KAAK,MAAMA,CAAI,CAAM,EACjCC,EAAY,CAAC,CACf,CACJ,CAUO,SAASC,GACdd,EAAmBC,EACG,CACtB,IAAMc,EAAM,IAAI,UAChB,OAAOhB,GAAQC,EAAKC,CAAO,EACxB,KACCQ,EAAUC,GAAOA,EAAI,KAAK,CAAC,EAC3BC,EAAID,GAAOK,EAAI,gBAAgBL,EAAK,WAAW,CAAC,EAChDG,EAAY,CAAC,CACf,CACJ,CAUO,SAASG,GACdhB,EAAmBC,EACG,CACtB,IAAMc,EAAM,IAAI,UAChB,OAAOhB,GAAQC,EAAKC,CAAO,EACxB,KACCQ,EAAUC,GAAOA,EAAI,KAAK,CAAC,EAC3BC,EAAID,GAAOK,EAAI,gBAAgBL,EAAK,UAAU,CAAC,EAC/CG,EAAY,CAAC,CACf,CACJ,CC5HO,SAASI,IAAoC,CAClD,MAAO,CACL,EAAG,KAAK,IAAI,EAAG,OAAO,EACtB,EAAG,KAAK,IAAI,EAAG,OAAO,CACxB,CACF,CASO,SAASC,IAAkD,CAChE,OAAOC,EACLC,EAAU,OAAQ,SAAU,CAAE,QAAS,EAAK,CAAC,EAC7CA,EAAU,OAAQ,SAAU,CAAE,QAAS,EAAK,CAAC,CAC/C,EACG,KACCC,EAAIJ,EAAiB,EACrBK,EAAUL,GAAkB,CAAC,CAC/B,CACJ,CC3BO,SAASM,IAAgC,CAC9C,MAAO,CACL,MAAQ,WACR,OAAQ,WACV,CACF,CASO,SAASC,IAA8C,CAC5D,OAAOC,EAAU,OAAQ,SAAU,CAAE,QAAS,EAAK,CAAC,EACjD,KACCC,EAAIH,EAAe,EACnBI,EAAUJ,GAAgB,CAAC,CAC7B,CACJ,CCXO,SAASK,IAAsC,CACpD,OAAOC,EAAc,CACnBC,GAAoB,EACpBC,GAAkB,CACpB,CAAC,EACE,KACCC,EAAI,CAAC,CAACC,EAAQC,CAAI,KAAO,CAAE,OAAAD,EAAQ,KAAAC,CAAK,EAAE,EAC1CC,EAAY,CAAC,CACf,CACJ,CCVO,SAASC,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EAChB,CACtB,IAAMC,EAAQF,EACX,KACCG,EAAwB,MAAM,CAChC,EAGIC,EAAUC,EAAc,CAACH,EAAOD,CAAO,CAAC,EAC3C,KACCK,EAAI,IAAMC,GAAiBR,CAAE,CAAC,CAChC,EAGF,OAAOM,EAAc,CAACJ,EAASD,EAAWI,CAAO,CAAC,EAC/C,KACCE,EAAI,CAAC,CAAC,CAAE,OAAAE,CAAO,EAAG,CAAE,OAAAC,EAAQ,KAAAC,CAAK,EAAG,CAAE,EAAAC,EAAG,EAAAC,CAAE,CAAC,KAAO,CACjD,OAAQ,CACN,EAAGH,EAAO,EAAIE,EACd,EAAG
F,EAAO,EAAIG,EAAIJ,CACpB,EACA,KAAAE,CACF,EAAE,CACJ,CACJ,CCzBA,SAASG,GAAQC,EAA+B,CAC9C,OAAOC,EAA8BD,EAAQ,UAAWE,GAAMA,EAAG,IAAI,CACvE,CAWA,SAASC,GAAQH,EAA4B,CAC3C,IAAMI,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAUE,GAAQN,EAAO,YAAYM,CAAI,CAAC,EAGzCF,CACT,CAgBO,SAASG,GACdC,EAAaR,EAAS,IAAI,OAAOQ,CAAG,EACxB,CACZ,IAAMC,EAAQV,GAAQC,CAAM,EACtBI,EAAQD,GAAQH,CAAM,EAGtBU,EAAU,IAAIL,EACpBK,EAAQ,UAAUN,CAAK,EAGvB,IAAMO,EAAQP,EAAM,KAAKQ,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxD,OAAOH,EACJ,KACCE,EAAe,EACfE,GAAUL,EAAM,KAAKM,EAAUJ,CAAK,CAAC,CAAC,EACtCK,GAAM,CACR,CACJ,CCJA,IAAMC,GAASC,EAAW,WAAW,EAC/BC,GAAiB,KAAK,MAAMF,GAAO,WAAY,EACrDE,GAAO,KAAO,GAAG,IAAI,IAAIA,GAAO,KAAMC,GAAY,CAAC,CAAC,GAW7C,SAASC,IAAwB,CACtC,OAAOF,EACT,CASO,SAASG,EAAQC,EAAqB,CAC3C,OAAOJ,GAAO,SAAS,SAASI,CAAI,CACtC,CAUO,SAASC,GACdC,EAAkBC,EACV,CACR,OAAO,OAAOA,GAAU,YACpBP,GAAO,aAAaM,CAAG,EAAE,QAAQ,IAAKC,EAAM,SAAS,CAAC,EACtDP,GAAO,aAAaM,CAAG,CAC7B,CChCO,SAASE,GACdC,EAASC,EAAmB,SACP,CACrB,OAAOC,EAAW,sBAAsBF,CAAI,IAAKC,CAAI,CACvD,CAYO,SAASE,GACdH,EAASC,EAAmB,SACL,CACvB,OAAOG,EAAY,sBAAsBJ,CAAI,IAAKC,CAAI,CACxD,CC7EO,SAASI,GACdC,EACsB,CACtB,IAAMC,EAASC,EAAW,6BAA8BF,CAAE,EAC1D,OAAOG,EAAUF,EAAQ,QAAS,CAAE,KAAM,EAAK,CAAC,EAC7C,KACCG,EAAI,IAAMF,EAAW,cAAeF,CAAE,CAAC,EACvCI,EAAIC,IAAY,CAAE,KAAM,UAAUA,EAAQ,SAAS,CAAE,EAAE,CACzD,CACJ,CASO,SAASC,GACdN,EACiC,CACjC,GAAI,CAACO,EAAQ,kBAAkB,GAAK,CAACP,EAAG,kBACtC,OAAOQ,EAGT,GAAI,CAACR,EAAG,OAAQ,CACd,IAAMK,EAAUH,EAAW,cAAeF,CAAE,EACxC,UAAUK,EAAQ,SAAS,IAAM,SAAS,YAAY,IACxDL,EAAG,OAAS,GAChB,CAGA,OAAOS,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,KAAAE,CAAK,IAAM,CAC5BZ,EAAG,OAAS,GAGZ,SAAiB,aAAcY,CAAI,CACrC,CAAC,EAGMb,GAAcC,CAAE,EACpB,KACCa,EAAIC,GAASJ,EAAM,KAAKI,CAAK,CAAC,EAC9BC,EAAS,IAAML,EAAM,SAAS,CAAC,EAC/BN,EAAIU,GAAUE,EAAA,CAAE,IAAKhB,GAAOc,EAAQ,CACtC,CACJ,CAAC,CACH,CC5BO,SAASG,GACdC,EAAiB,CAAE,QAAAC,CAAQ,EACN,CACrB,OAAOA,EACJ,KACCC,EAAIC,IAAW,CAAE,OAAQA,IAAWH,CAAG,EAAE,CAC3C,CACJ,CAYO,SAASI,GACdJ,EAAiBK,EACe,CAChC,IAAMC,EAAY,IAAIC,EACtB,OAAAD,EAAU,UAAU,CAAC,CAAE,OAAAE,CAAO,IAAM,CAClCR,EAAG,OAASQ,CACd,CAAC,EAGMT,GAAaC,EAAIK,CAAO,EAC5B,KACCI,EAAIC,GAASJ,EAAU,KAAKI,CAAK,CAAC,EAClCC,EAAS,IAAML,EAAU,SAAS,CAAC,EACnCJ,EAAIQ,GAAUE,EAAA,CAAE,IAAKZ,GAAOU,EAAQ,CACtC,CACJ,CCnEO,SAASG,GACdC,EAAaC,EACA,CACb,OAAIA,IAAU,SAEVC,EAAC,OAAI,MAAM,gCAAgC,GAAIF,EAAI,KAAK,WACtDE,EAAC,OAAI,MAAM,+BAA+B,CAC5C,EAIAA,EAAC,OAAI,MAAM,aAAa,GAAIF,EAAI,KAAK,WACnCE,EAAC,OAAI,MAAM,+BAA+B,CAC5C,CAGN,CAGO,SAASC,MACXC,EACU,CACb,OACEF,EAAC,OAAI,MAAM,cAAc,KAAK,WAC5BA,EAAC,OAAI,MAAM,iCACRE,CACH,CACF,CAEJ,CCvCO,SAASC,GACdC,EAAqBC,EACR,CAIb,GAHAA,EAASA,EAAS,GAAGA,CAAM,eAAeD,CAAE,GAAK,OAG7CC,EAAQ,CACV,IAAMC,EAASD,EAAS,IAAIA,CAAM,GAAK,OACvC,OACEE,EAAC,SAAM,MAAM,gBAAgB,SAAU,GACpCC,GAAcH,CAAM,EACrBE,EAAC,KAAE,KAAMD,EAAQ,MAAM,uBAAuB,SAAU,IACtDC,EAAC,QAAK,wBAAuBH,EAAI,CACnC,CACF,CAEJ,KACE,QACEG,EAAC,SAAM,MAAM,gBAAgB,SAAU,GACpCC,GAAcH,CAAM,EACrBE,EAAC,QAAK,MAAM,uBAAuB,SAAU,IAC3CA,EAAC,QAAK,wBAAuBH,EAAI,CACnC,CACF,CAGN,CC5BO,SAASK,GAAsBC,EAAyB,CAC7D,OACEC,EAAC,UACC,MAAM,uBACN,MAAOC,GAAY,gBAAgB,EACnC,wBAAuB,IAAIF,CAAE,UAC9B,CAEL,CCtBA,IAAAG,GAAuB,SA+BvB,SAASC,GACPC,EAAsBC,EACT,CACb,IAAMC,EAASD,EAAO,EAChBE,EAASF,EAAO,EAGhBG,EAAU,OAAO,KAAKJ,EAAS,KAAK,EACvC,OAAOK,GAAO,CAACL,EAAS,MAAMK,CAAG,CAAC,EAClC,OAAyB,CAACC,EAAMD,IAAQ,CACvC,GAAGC,EAAMC,EAAC,cAAK,GAAAC,SAAWH,CAAG,CAAE,EAAQ,GACzC,EAAG,CAAC,CAAC,EACJ,MAAM,EAAG,EAAE,EAGRI,EAASC,GAAc,EACvBC,EAAM,IAAI,IAAIX,EAAS,SAAUS,EAAO,IAAI,EAC9CG,EAAQ,kBAAkB,GAC5BD,EAAI,aAAa,IAAI,IAAK,OAAO,QAAQX,EAAS,KAAK,EACpD,OAAO,CAAC,CAAC,CAAEa,CAAK,IAAMA,CAAK,EAC3B,OAAO,CAACC,EAAW,CAACC,CAAK,IAAM,GAAGD,CAAS,IAAIC,CAAK,GAAG,KAAK,EAAG,EAAE,CACpE,EAGF
,GAAM,CAAE,KAAAC,CAAK,EAAIN,GAAc,EAC/B,OACEH,EAAC,KAAE,KAAM,GAAGI,CAAG,GAAI,MAAM,yBAAyB,SAAU,IAC1DJ,EAAC,WACC,MAAM,uCACN,gBAAeP,EAAS,MAAM,QAAQ,CAAC,GAEtCE,EAAS,GAAKK,EAAC,OAAI,MAAM,iCAAiC,EAC1DL,EAAS,GAAKK,EAAC,UAAIP,EAAS,KAAM,EAClCE,GAAU,GAAKK,EAAC,UAAIP,EAAS,KAAM,EACnCG,EAAS,GAAKH,EAAS,KAAK,OAAS,GACpCA,EAAS,KAEVA,EAAS,MAAQA,EAAS,KAAK,IAAIiB,GAAO,CACzC,IAAMC,EAAOF,EACTC,KAAOD,EACL,uBAAuBA,EAAKC,CAAG,CAAC,GAChC,cACF,GACJ,OACEV,EAAC,QAAK,MAAO,UAAUW,CAAI,IAAKD,CAAI,CAExC,CAAC,EACAd,EAAS,GAAKC,EAAQ,OAAS,GAC9BG,EAAC,KAAE,MAAM,2BACNY,GAAY,4BAA4B,EAAE,KAAG,GAAGf,CACnD,CAEJ,CACF,CAEJ,CAaO,SAASgB,GACdC,EACa,CACb,IAAMC,EAAYD,EAAO,CAAC,EAAE,MACtBE,EAAO,CAAC,GAAGF,CAAM,EAEjBZ,EAASC,GAAc,EAGvBR,EAASqB,EAAK,UAAUC,GAErB,CADG,GAAG,IAAI,IAAIA,EAAI,SAAUf,EAAO,IAAI,CAAC,GACrC,SAAS,GAAG,CACvB,EACK,CAACgB,CAAO,EAAIF,EAAK,OAAOrB,EAAQ,CAAC,EAGnCwB,EAAQH,EAAK,UAAUC,GAAOA,EAAI,MAAQF,CAAS,EACnDI,IAAU,KACZA,EAAQH,EAAK,QAGf,IAAMI,EAAOJ,EAAK,MAAM,EAAGG,CAAK,EAC1BE,EAAOL,EAAK,MAAMG,CAAK,EAGvBG,EAAW,CACf9B,GAAqB0B,EAAS,EAAc,EAAE,CAACvB,GAAUwB,IAAU,EAAE,EACrE,GAAGC,EAAK,IAAIG,GAAW/B,GAAqB+B,EAAS,CAAW,CAAC,EACjE,GAAGF,EAAK,OAAS,CACfrB,EAAC,WAAQ,MAAM,0BACbA,EAAC,WAAQ,SAAU,IACjBA,EAAC,WACEqB,EAAK,OAAS,GAAKA,EAAK,SAAW,EAChCT,GAAY,wBAAwB,EACpCA,GAAY,2BAA4BS,EAAK,MAAM,CAEzD,CACF,EACC,GAAGA,EAAK,IAAIE,GAAW/B,GAAqB+B,EAAS,CAAW,CAAC,CACpE,CACF,EAAI,CAAC,CACP,EAGA,OACEvB,EAAC,MAAG,MAAM,0BACPsB,CACH,CAEJ,CCtIO,SAASE,GAAkBC,EAAiC,CACjE,OACEC,EAAC,MAAG,MAAM,oBACP,OAAO,QAAQD,CAAK,EAAE,IAAI,CAAC,CAACE,EAAKC,CAAK,IACrCF,EAAC,MAAG,MAAO,oCAAoCC,CAAG,IAC/C,OAAOC,GAAU,SAAWC,GAAMD,CAAK,EAAIA,CAC9C,CACD,CACH,CAEJ,CCAO,SAASE,GACdC,EACa,CACb,IAAMC,EAAU,kCAAkCD,CAAI,GACtD,OACEE,EAAC,OAAI,MAAOD,EAAS,OAAM,IACzBC,EAAC,UAAO,MAAM,gBAAgB,SAAU,GAAI,cAAY,OAAO,CACjE,CAEJ,CCpBO,SAASC,GAAYC,EAAiC,CAC3D,OACEC,EAAC,OAAI,MAAM,0BACTA,EAAC,OAAI,MAAM,qBACRD,CACH,CACF,CAEJ,CCcA,SAASE,GAAcC,EAA+B,CAzDtD,IAAAC,EA0DE,IAAMC,EAASC,GAAc,EAGvBC,EAAM,IAAI,IAAI,MAAMJ,EAAQ,OAAO,IAAKE,EAAO,IAAI,EACzD,OACEG,EAAC,MAAG,MAAM,oBACRA,EAAC,KAAE,KAAM,GAAGD,CAAG,GAAI,MAAM,oBACtBJ,EAAQ,QACRC,EAAAC,EAAO,UAAP,YAAAD,EAAgB,QAASD,EAAQ,QAAQ,OAAS,GACjDK,EAAC,QAAK,MAAM,qBACTL,EAAQ,QAAQ,CAAC,CACpB,CAEJ,CACF,CAEJ,CAcO,SAASM,GACdC,EAAqBC,EACR,CA1Ff,IAAAP,EA2FE,IAAMC,EAASC,GAAc,EAC7B,OAAAI,EAAWA,EAAS,OAAOP,GAAQ,CA5FrC,IAAAC,EA4FwC,SAACA,EAAAD,EAAQ,aAAR,MAAAC,EAAoB,QAAM,EAE/DI,EAAC,OAAI,MAAM,cACTA,EAAC,UACC,MAAM,sBACN,aAAYI,GAAY,gBAAgB,GAEvCD,EAAO,QACPP,EAAAC,EAAO,UAAP,YAAAD,EAAgB,QAASO,EAAO,QAAQ,OAAS,GAChDH,EAAC,QAAK,MAAM,qBACTG,EAAO,QAAQ,CAAC,CACnB,CAEJ,EACAH,EAAC,MAAG,MAAM,oBACPE,EAAS,IAAIR,EAAa,CAC7B,CACF,CAEJ,CCdA,IAAIW,GAAW,EAkBR,SAASC,GACdC,EACqB,CAMrB,IAAMC,EACJC,EAAc,CACZC,GAAkBH,CAAE,EACpBI,GAAkBJ,CAAE,CACtB,CAAC,EACE,KACCK,EAAI,CAAC,CAACC,EAAOC,CAAK,IAAMD,GAASC,CAAK,EACtCC,EAAqB,CACvB,EAMEC,EACJC,EAAM,IAAMC,GAAqBX,CAAE,CAAC,EAAE,KACpCY,GAASC,EAAyB,EAClCC,GAAa,CAAC,EAKdC,GAAkBd,CAAO,EACzBI,EAAI,IAAMW,GAAyBhB,CAAE,CAAC,CACxC,EAMF,OAAOC,EAAQ,KACbgB,GAAMC,GAAUA,CAAM,EACtBC,EAAU,IAAMjB,EAAc,CAACD,EAASQ,CAAO,CAAC,CAAC,EACjDJ,EAAI,CAAC,CAACa,EAAQE,CAAM,KAAO,CAAE,OAAAF,EAAQ,OAAAE,CAAO,EAAE,EAC9CC,GAAM,CACR,CACF,CAoBO,SAASC,GACdtB,EAAiBuB,EACe,CAChC,GAAM,CAAE,SAAAC,EAAU,UAAAC,CAAU,EAAIF,EAI1BG,EAAK,cAAc5B,IAAU,GAGnC,OAAOY,EAAM,IAAM,CACjB,IAAMiB,EAAQ,IAAIC,EAMZC,EAAQ,IAAIC,GAAgB,EAAK,EACvCH,EAAM,KAAKI,EAAe,EAAGC,GAAQ,EAAK,CAAC,EACxC,UAAUH,CAAK,EAUlB,IAAMI,EAAQJ,EAAM,KAClBK,GAAShB,GAAUiB,GAAM,CAAC,CAACjB,EAAS,IAAKkB,EAAc,CAAC,EACxD5B,EAAqB,EACrBW,EAAUD,GAAUA,EAASM,EAAWa,CAAK,EAC7CC,EAAIC,GAAQA,EAAK,GAAKb,CAAE,EACxBL,GAAM,CACR,EAIAnB,EAAc,CACZyB,EAAM,KAAKtB,EAAI,CAAC,CAAE,OAAAa,CAAO,IAAMA,CAAM,CAA
C,EACtCe,EAAM,KACJd,EAAUoB,GAAQnC,GAAkBmC,EAAM,GAAG,CAAC,EAC9CC,EAAU,EAAK,CACjB,CACF,CAAC,EACE,KAAKnC,EAAIoC,GAAUA,EAAO,KAAKvB,GAAUA,CAAM,CAAC,CAAC,EACjD,UAAUW,CAAK,EAMlB,IAAMa,EAAUb,EAAM,KACpBc,EAAOzB,GAAUA,CAAM,EACvB0B,GAAeX,EAAOR,CAAS,EAC/BpB,EAAI,CAAC,CAACwC,EAAGN,EAAM,CAAE,KAAAO,CAAK,CAAC,IAAM,CAC3B,IAAMC,EAAO/C,EAAG,sBAAsB,EAChCgD,EAAID,EAAK,MAAQ,EAIvB,GAAIR,EAAK,OAAS,UAChB,MAAO,CAAE,EAAAS,EAAG,EAAG,EAAID,EAAK,MAAO,EAI1B,GAAIA,EAAK,GAAKD,EAAK,OAAS,EAAG,CACpC,GAAM,CAAE,OAAAG,CAAO,EAAIC,GAAeX,CAAI,EACtC,MAAO,CAAE,EAAAS,EAAG,EAAG,IAAMC,CAAO,CAC9B,KACE,OAAO,CAAE,EAAAD,EAAG,EAAG,GAAMD,EAAK,MAAO,CAErC,CAAC,CACH,EAIA,OAAA7C,EAAc,CAAC+B,EAAON,EAAOe,CAAO,CAAC,EAClC,UAAU,CAAC,CAACH,EAAM,CAAE,OAAAnB,CAAO,EAAG+B,CAAM,IAAM,CACzCZ,EAAK,MAAM,YAAY,sBAAuB,GAAGnB,EAAO,CAAC,IAAI,EAC7DmB,EAAK,MAAM,YAAY,sBAAuB,GAAGnB,EAAO,CAAC,IAAI,EAI7DmB,EAAK,MAAM,YAAY,iBAAkB,GAAGY,EAAO,CAAC,IAAI,EACxDZ,EAAK,MAAM,YAAY,iBAAkB,GAAGY,EAAO,CAAC,IAAI,EAIxDZ,EAAK,UAAU,OAAO,mBAAuBY,EAAO,EAAK,CAAC,EAC1DZ,EAAK,UAAU,OAAO,sBAAuBY,EAAO,GAAK,CAAC,CAC5D,CAAC,EAIHtB,EAAM,KACJc,EAAOzB,GAAUA,CAAM,EACvB0B,GAAeX,EAAO,CAACY,EAAGN,IAASA,CAAI,EACvCI,EAAOJ,GAAQA,EAAK,OAAS,SAAS,CACxC,EACG,UAAUA,GAAQ,CACjB,IAAMO,EAAOI,GAAeE,EAAW,aAAcb,CAAI,CAAC,EAI1DA,EAAK,MAAM,YAAY,qBAAsB,GAAGO,EAAK,KAAK,IAAI,EAC9DP,EAAK,MAAM,YAAY,oBAAsB,KAAQ,CACvD,CAAC,EAMHV,EAAM,KACJrB,EAAqB,EACrB6C,GAAUC,EAAuB,EACjCV,GAAeX,CAAK,CACtB,EACG,UAAU,CAAC,CAACf,EAAQqB,CAAI,IAAM,CAC7BA,EAAK,UAAU,OAAO,sBAAuBrB,CAAM,CACrD,CAAC,EAGHhB,EAAc,CACZ2B,EAAM,KAAKc,EAAOzB,GAAUA,CAAM,CAAC,EACnCe,CACF,CAAC,EACE,UAAU,CAAC,CAACY,EAAGN,CAAI,IAAM,CACpBA,EAAK,OAAS,UAChBvC,EAAG,aAAa,gBAAiB0B,CAAE,EACnC1B,EAAG,aAAa,gBAAiB,QAAQ,GAEzCA,EAAG,aAAa,mBAAoB0B,CAAE,CAE1C,CAAC,EAGHG,EAAM,KAAKc,EAAOzB,GAAU,CAACA,CAAM,CAAC,EACjC,UAAU,IAAM,CACflB,EAAG,gBAAgB,eAAe,EAClCA,EAAG,gBAAgB,kBAAkB,EACrCA,EAAG,gBAAgB,eAAe,CACpC,CAAC,EAGID,GAAcC,CAAE,EACpB,KACCsC,EAAIiB,GAAS5B,EAAM,KAAK4B,CAAK,CAAC,EAC9BC,EAAS,IAAM7B,EAAM,SAAS,CAAC,EAC/BtB,EAAIkD,GAAUE,EAAA,CAAE,IAAKzD,GAAOuD,EAAQ,CACtC,CACJ,CAAC,CACH,CAeO,SAASG,GACd1D,EAAiB,CAAE,UAAAyB,CAAU,EAC7BkC,EAAY,SAAS,KACW,CAChC,OAAOrC,GAActB,EAAI,CACvB,SAAU,IAAI4D,EAAwBC,GAAY,CAChD,IAAMC,EAAQ9D,EAAG,MACXuC,EAAOwB,GAAqBD,CAAK,EACvC,OAAAD,EAAS,KAAKtB,CAAI,EAClBvC,EAAG,gBAAgB,OAAO,EAE1B2D,EAAU,OAAOpB,CAAI,EACd,IAAM,CACXA,EAAK,OAAO,EACZvC,EAAG,aAAa,QAAS8D,CAAK,CAChC,CACF,CAAC,EACD,UAAArC,CACF,CAAC,CACH,CCjRO,SAASuC,GACdC,EAAiBC,EACO,CACxB,IAAMC,EAAUC,EAAM,IAAMC,EAAc,CACxCC,GAAmBL,CAAE,EACrBM,GAA0BL,CAAS,CACrC,CAAC,CAAC,EACC,KACCM,EAAI,CAAC,CAAC,CAAE,EAAAC,EAAG,EAAAC,CAAE,EAAGC,CAAM,IAAqB,CACzC,GAAM,CAAE,MAAAC,EAAO,OAAAC,CAAO,EAAIC,GAAeb,CAAE,EAC3C,MAAQ,CACN,EAAGQ,EAAIE,EAAO,EAAIC,EAAS,EAC3B,EAAGF,EAAIC,EAAO,EAAIE,EAAS,CAC7B,CACF,CAAC,CACH,EAGF,OAAOE,GAAkBd,CAAE,EACxB,KACCe,EAAUC,GAAUd,EACjB,KACCK,EAAIU,IAAW,CAAE,OAAAD,EAAQ,OAAAC,CAAO,EAAE,EAClCC,GAAK,CAAC,CAACF,GAAU,GAAQ,CAC3B,CACF,CACF,CACJ,CAWO,SAASG,GACdnB,EAAiBC,EAAwB,CAAE,QAAAmB,CAAQ,EAChB,CACnC,GAAM,CAACC,EAASC,CAAK,EAAI,MAAM,KAAKtB,EAAG,QAAQ,EAG/C,OAAOG,EAAM,IAAM,CACjB,IAAMoB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxD,OAAAJ,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAN,CAAO,EAAG,CACfjB,EAAG,MAAM,YAAY,iBAAkB,GAAGiB,EAAO,CAAC,IAAI,EACtDjB,EAAG,MAAM,YAAY,iBAAkB,GAAGiB,EAAO,CAAC,IAAI,CACxD,EAGA,UAAW,CACTjB,EAAG,MAAM,eAAe,gBAAgB,EACxCA,EAAG,MAAM,eAAe,gBAAgB,CAC1C,CACF,CAAC,EAGD4B,GAAuB5B,CAAE,EACtB,KACC6B,EAAUJ,CAAK,CACjB,EACG,UAAUK,GAAW,CACpB9B,EAAG,gBAAgB,kBAAmB8B,CAAO,CAC/C,CAAC,EAGLC,EACER,EAAM,KAAKS,EAAO,CAAC,CAAE,OAAAhB,CAAO,IAAMA,CAAM,CAAC,EACzCO,EAAM,KAAKU,GAAa,GAAG,EAAGD,EAAO,CAAC,CAAE,OAAAhB,CAAO,IAAM,CAACA,CAAM,CAA
C,CAC/D,EACG,UAAU,CAGT,KAAK,CAAE,OAAAA,CAAO,EAAG,CACXA,EACFhB,EAAG,QAAQqB,CAAO,EAElBA,EAAQ,OAAO,CACnB,EAGA,UAAW,CACTrB,EAAG,QAAQqB,CAAO,CACpB,CACF,CAAC,EAGHE,EACG,KACCW,GAAU,GAAIC,EAAuB,CACvC,EACG,UAAU,CAAC,CAAE,OAAAnB,CAAO,IAAM,CACzBK,EAAQ,UAAU,OAAO,qBAAsBL,CAAM,CACvD,CAAC,EAGLO,EACG,KACCa,GAAa,IAAKD,EAAuB,EACzCH,EAAO,IAAM,CAAC,CAAChC,EAAG,YAAY,EAC9BO,EAAI,IAAMP,EAAG,aAAc,sBAAsB,CAAC,EAClDO,EAAI,CAAC,CAAE,EAAAC,CAAE,IAAMA,CAAC,CAClB,EACG,UAAU,CAGT,KAAK6B,EAAQ,CACPA,EACFrC,EAAG,MAAM,YAAY,iBAAkB,GAAG,CAACqC,CAAM,IAAI,EAErDrC,EAAG,MAAM,eAAe,gBAAgB,CAC5C,EAGA,UAAW,CACTA,EAAG,MAAM,eAAe,gBAAgB,CAC1C,CACF,CAAC,EAGLsC,EAAsBhB,EAAO,OAAO,EACjC,KACCO,EAAUJ,CAAK,EACfO,EAAOO,GAAM,EAAEA,EAAG,SAAWA,EAAG,QAAQ,CAC1C,EACG,UAAUA,GAAM,CACfA,EAAG,gBAAgB,EACnBA,EAAG,eAAe,CACpB,CAAC,EAGLD,EAAsBhB,EAAO,WAAW,EACrC,KACCO,EAAUJ,CAAK,EACfe,GAAejB,CAAK,CACtB,EACG,UAAU,CAAC,CAACgB,EAAI,CAAE,OAAAvB,CAAO,CAAC,IAAM,CA3OzC,IAAAyB,EA8OU,GAAIF,EAAG,SAAW,GAAKA,EAAG,SAAWA,EAAG,QACtCA,EAAG,eAAe,UAGTvB,EAAQ,CACjBuB,EAAG,eAAe,EAGlB,IAAMG,EAAS1C,EAAG,cAAe,QAAQ,gBAAgB,EACrD0C,aAAkB,YACpBA,EAAO,MAAM,GAEbD,EAAAE,GAAiB,IAAjB,MAAAF,EAAoB,MACxB,CACF,CAAC,EAGLrB,EACG,KACCS,EAAUJ,CAAK,EACfO,EAAOY,GAAUA,IAAWvB,CAAO,EACnCwB,GAAM,GAAG,CACX,EACG,UAAU,IAAM7C,EAAG,MAAM,CAAC,EAGxBD,GAAgBC,EAAIC,CAAS,EACjC,KACC6C,EAAIC,GAASxB,EAAM,KAAKwB,CAAK,CAAC,EAC9BC,EAAS,IAAMzB,EAAM,SAAS,CAAC,EAC/BhB,EAAIwC,GAAUE,EAAA,CAAE,IAAKjD,GAAO+C,EAAQ,CACtC,CACJ,CAAC,CACH,CCxMA,SAASG,GAAUC,EAAuC,CACxD,OAAOA,EAAU,UAAY,OACzBC,EAAY,eAAgBD,CAAS,EACrC,CAACA,CAAS,CAChB,CASA,SAASE,GAAYF,EAAgC,CACnD,IAAMG,EAAkB,CAAC,EACzB,QAAWC,KAAML,GAAUC,CAAS,EAAG,CACrC,IAAMK,EAAgB,CAAC,EAGjBC,EAAK,SAAS,mBAAmBF,EAAI,WAAW,SAAS,EAC/D,QAASG,EAAOD,EAAG,SAAS,EAAGC,EAAMA,EAAOD,EAAG,SAAS,EACtDD,EAAM,KAAKE,CAAY,EAGzB,QAASC,KAAQH,EAAO,CACtB,IAAII,EAGJ,KAAQA,EAAQ,gBAAgB,KAAKD,EAAK,WAAY,GAAI,CACxD,GAAM,CAAC,CAAEE,EAAIC,CAAK,EAAIF,EACtB,GAAI,OAAOE,GAAU,YAAa,CAChC,IAAMC,EAASJ,EAAK,UAAUC,EAAM,KAAK,EACzCD,EAAOI,EAAO,UAAUF,EAAG,MAAM,EACjCP,EAAQ,KAAKS,CAAM,CAGrB,KAAO,CACLJ,EAAK,YAAcE,EACnBP,EAAQ,KAAKK,CAAI,EACjB,KACF,CACF,CACF,CACF,CACA,OAAOL,CACT,CAQA,SAASU,GAAKC,EAAqBC,EAA2B,CAC5DA,EAAO,OAAO,GAAG,MAAM,KAAKD,EAAO,UAAU,CAAC,CAChD,CAoBO,SAASE,GACdZ,EAAiBJ,EAAwB,CAAE,QAAAiB,EAAS,OAAAC,CAAO,EACxB,CAGnC,IAAMC,EAASnB,EAAU,QAAQ,MAAM,EACjCoB,EAASD,GAAA,YAAAA,EAAQ,GAGjBE,EAAc,IAAI,IACxB,QAAWT,KAAUV,GAAYF,CAAS,EAAG,CAC3C,GAAM,CAAC,CAAEU,CAAE,EAAIE,EAAO,YAAa,MAAM,WAAW,EAChDU,GAAmB,yBAAyBZ,CAAE,IAAKN,CAAE,IACvDiB,EAAY,IAAIX,EAAIa,GAAiBb,EAAIU,CAAM,CAAC,EAChDR,EAAO,YAAYS,EAAY,IAAIX,CAAE,CAAE,EAE3C,CAGA,OAAIW,EAAY,OAAS,EAChBG,EAGFC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAGlDC,EAAsC,CAAC,EAC7C,OAAW,CAACrB,EAAIsB,CAAU,IAAKX,EAC7BU,EAAM,KAAK,CACTE,EAAW,cAAeD,CAAU,EACpCC,EAAW,yBAAyBvB,CAAE,IAAKN,CAAE,CAC/C,CAAC,EAGH,OAAAc,EAAO,KAAKgB,EAAUN,CAAK,CAAC,EACzB,UAAUO,GAAU,CACnB/B,EAAG,OAAS,CAAC+B,EAGb/B,EAAG,UAAU,OAAO,qBAAsB+B,CAAM,EAGhD,OAAW,CAACC,EAAOC,CAAK,IAAKN,EACtBI,EAGHtB,GAAKuB,EAAOC,CAAK,EAFjBxB,GAAKwB,EAAOD,CAAK,CAGvB,CAAC,EAGIE,EAAM,GAAG,CAAC,GAAGjB,CAAW,EAC5B,IAAI,CAAC,CAAC,CAAEW,CAAU,IACjBO,GAAgBP,EAAYhC,EAAW,CAAE,QAAAiB,CAAQ,CAAC,CACnD,CACH,EACG,KACCuB,EAAS,IAAMd,EAAM,SAAS,CAAC,EAC/Be,GAAM,CACR,CACJ,CAAC,CACH,CC7JA,SAASC,GAASC,EAA0C,CAC1D,GAAIA,EAAG,mBAAoB,CACzB,IAAMC,EAAUD,EAAG,mBACnB,GAAIC,EAAQ,UAAY,KACtB,OAAOA,EAGJ,GAAIA,EAAQ,UAAY,KAAO,CAACA,EAAQ,SAAS,OACpD,OAAOF,GAASE,CAAO,CAC3B,CAIF,CAcO,SAASC,GACdF,EAAiBG,EACkB,CACnC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAON,GAASC,CAAE,EACxB,OAAO,OAAOK,GAAS,YACnBC,GAAoBD,EAAML,EAAIG,CAAO,EACrCI,CACN,CAAC,CACH,CCjEA,IAAAC,GAAwB,SA4ExB,IAAIC,G
AAW,EAaf,SAASC,GAAkBC,EAA0C,CACnE,GAAIA,EAAG,mBAAoB,CACzB,IAAMC,EAAUD,EAAG,mBACnB,GAAIC,EAAQ,UAAY,KACtB,OAAOA,EAGJ,GAAIA,EAAQ,UAAY,KAAO,CAACA,EAAQ,SAAS,OACpD,OAAOF,GAAkBE,CAAO,CACpC,CAIF,CAgBO,SAASC,GACdF,EACsB,CACtB,OAAOG,GAAiBH,CAAE,EACvB,KACCI,EAAI,CAAC,CAAE,MAAAC,CAAM,KAEJ,CACL,WAFcC,GAAsBN,CAAE,EAElB,MAAQK,CAC9B,EACD,EACDE,EAAwB,YAAY,CACtC,CACJ,CAoBO,SAASC,GACdR,EAAiBS,EACiB,CAClC,GAAM,CAAE,QAASC,CAAM,EAAI,WAAW,SAAS,EAGzCC,EAAWC,EAAM,IAAM,CAC3B,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,GAAS,CAAC,CAAC,EACpCH,EAAM,UAAU,CAAC,CAAE,WAAAI,CAAW,IAAM,CAC9BA,GAAcP,EAChBV,EAAG,aAAa,WAAY,GAAG,EAE/BA,EAAG,gBAAgB,UAAU,CACjC,CAAC,EAGD,IAAMkB,EAAoD,CAAC,EAC3D,GAAI,GAAAC,QAAY,YAAY,IACtBnB,EAAG,QAAQ,OAAO,GACpBoB,EAAQ,mBAAmB,GAAK,CAACpB,EAAG,QAAQ,UAAU,GACrD,CACD,IAAMqB,EAASrB,EAAG,QAAQ,KAAK,EAC/BqB,EAAO,GAAK,UAAUvB,IAAU,GAGhC,IAAMwB,EAASC,GAAsBF,EAAO,EAAE,EAC9CA,EAAO,aAAaC,EAAQtB,CAAE,EAC1BoB,EAAQ,kBAAkB,GAC5BF,EAAS,KAAKM,GAAoBF,EAAQ,CAAE,SAAU,CAAC,CAAC,CAC5D,CAIF,IAAMG,EAAYzB,EAAG,QAAQ,YAAY,EACzC,GAAIyB,aAAqB,YAAa,CACpC,IAAMC,EAAO3B,GAAkB0B,CAAS,EAGxC,GAAI,OAAOC,GAAS,cAClBD,EAAU,UAAU,SAAS,UAAU,GACvCL,EAAQ,uBAAuB,GAC9B,CACD,IAAMO,EAAeC,GAAoBF,EAAM1B,EAAIS,CAAO,EAC1DS,EAAS,KACPf,GAAiBsB,CAAS,EACvB,KACCI,EAAUd,CAAK,EACfX,EAAI,CAAC,CAAE,MAAAC,EAAO,OAAAyB,CAAO,IAAMzB,GAASyB,CAAM,EAC1CC,EAAqB,EACrBC,EAAUC,GAAUA,EAASN,EAAeO,CAAK,CACnD,CACJ,CACF,CACF,CAOA,OADcC,EAAY,oBAAqBnC,CAAE,EACvC,QACRA,EAAG,UAAU,IAAI,kBAAkB,EAG9BE,GAAeF,CAAE,EACrB,KACCoC,EAAIC,GAASxB,EAAM,KAAKwB,CAAK,CAAC,EAC9BC,EAAS,IAAMzB,EAAM,SAAS,CAAC,EAC/BT,EAAIiC,GAAUE,EAAA,CAAE,IAAKvC,GAAOqC,EAAQ,EACpCG,GAAU,GAAGtB,CAAQ,CACvB,CACJ,CAAC,EAGD,OAAIE,EAAQ,cAAc,EACjBqB,GAAuBzC,CAAE,EAC7B,KACC0C,EAAOC,GAAWA,CAAO,EACzBC,GAAK,CAAC,EACNZ,EAAU,IAAMrB,CAAQ,CAC1B,EAGGA,CACT,CCnLO,SAASkC,GACdC,EAAwB,CAAE,QAAAC,EAAS,OAAAC,CAAO,EACrB,CACrB,IAAIC,EAAO,GACX,OAAOC,EAGLH,EACG,KACCI,EAAIC,GAAUA,EAAO,QAAQ,qBAAqB,CAAE,EACpDC,EAAOC,GAAWR,IAAOQ,CAAO,EAChCH,EAAI,KAAO,CACT,OAAQ,OAAQ,OAAQ,EAC1B,EAAa,CACf,EAGFH,EACG,KACCK,EAAOE,GAAUA,GAAU,CAACN,CAAI,EAChCO,EAAI,IAAMP,EAAOH,EAAG,IAAI,EACxBK,EAAII,IAAW,CACb,OAAQA,EAAS,OAAS,OAC5B,EAAa,CACf,CACJ,CACF,CAaO,SAASE,GACdX,EAAwBY,EACQ,CAChC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,OAAAE,EAAQ,OAAAC,CAAO,IAAM,CACtCjB,EAAG,gBAAgB,OAAQgB,IAAW,MAAM,EACxCC,GACFjB,EAAG,eAAe,CACtB,CAAC,EAGMD,GAAaC,EAAIY,CAAO,EAC5B,KACCF,EAAIQ,GAASJ,EAAM,KAAKI,CAAK,CAAC,EAC9BC,EAAS,IAAML,EAAM,SAAS,CAAC,EAC/BT,EAAIa,GAAUE,EAAA,CAAE,IAAKpB,GAAOkB,EAAQ,CACtC,CACJ,CAAC,CACH,CCzIA,IAAAG,GAAA,yvLCqDA,IAAIC,GAKAC,GAAW,EAWf,SAASC,IAAiC,CACxC,OAAO,OAAO,SAAY,aAAe,mBAAmB,QACxDC,GAAY,kDAAkD,EAC9DC,EAAG,MAAS,CAClB,CAaO,SAASC,GACdC,EACgC,CAChC,OAAAA,EAAG,UAAU,OAAO,SAAS,EAC7BN,QAAaE,GAAa,EACvB,KACCK,EAAI,IAAM,QAAQ,WAAW,CAC3B,YAAa,GACb,SAAAC,GACA,SAAU,CACR,cAAe,OACf,gBAAiB,OACjB,aAAc,MAChB,CACF,CAAC,CAAC,EACFC,EAAI,IAAG,EAAY,EACnBC,EAAY,CAAC,CACf,GAGFV,GAAS,UAAU,IAAYW,GAAA,sBAC7BL,EAAG,UAAU,IAAI,SAAS,EAC1B,IAAMM,EAAK,aAAaX,IAAU,GAG5BY,EAAOC,EAAE,MAAO,CAAE,MAAO,SAAU,CAAC,EACpCC,EAAOT,EAAG,YAGV,CAAE,IAAAU,EAAK,GAAAC,CAAG,EAAI,MAAM,QAAQ,OAAOL,EAAIG,CAAI,EAG3CG,EAASL,EAAK,aAAa,CAAE,KAAM,QAAS,CAAC,EACnDK,EAAO,UAAYF,EAGnBV,EAAG,YAAYO,CAAI,EACnBI,GAAA,MAAAA,EAAKC,EACP,EAAC,EAGMlB,GACJ,KACCS,EAAI,KAAO,CAAE,IAAKH,CAAG,EAAE,CACzB,CACJ,CCtFA,IAAMa,GAAWC,EAAE,OAAO,EAgBnB,SAASC,GACdC,EACkC,CAClC,OAAAA,EAAG,YAAYH,EAAQ,EACvBA,GAAS,YAAYI,GAAYD,CAAE,CAAC,EAG7BE,EAAG,CAAE,IAAKF,CAAG,CAAC,CACvB,CC4BO,SAASG,GACdC,EACyB,CACzB,IAAMC,EAAUD,EAAO,KAAKE,GAASA,EAAM,OAAO,GAAKF,EAAO,CAAC,EAC/D,OAAOG,EAAM,GAAGH,EAAO,IAAIE,GAASE,EAAUF,EAAO,QAAQ,EAC1D,KACCG,EAAI,IAAMC,EAA6B,cAAcJ
,EAAM,EAAE,IAAI,CAAC,CACpE,CACF,CAAC,EACE,KACCK,EAAUD,EAA6B,cAAcL,EAAQ,EAAE,IAAI,CAAC,EACpEI,EAAIG,IAAW,CAAE,OAAAA,CAAO,EAAE,CAC5B,CACJ,CAUO,SAASC,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACF,CACpC,IAAMC,EAAYP,EAAW,iBAAkBI,CAAE,EAC3CV,EAASc,EAA8B,iBAAkBJ,CAAE,EAG3DK,EAAOC,GAAoB,MAAM,EACvCN,EAAG,OAAOK,CAAI,EAGd,IAAME,EAAOD,GAAoB,MAAM,EACvC,OAAAN,EAAG,OAAOO,CAAI,EAGPC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxDC,EAAc,CAACL,EAAOM,GAAiBf,CAAE,EAAGgB,GAAuBhB,CAAE,CAAC,CAAC,EACpE,KACCiB,EAAUN,CAAK,EACfO,GAAU,EAAGC,EAAuB,CACtC,EACG,UAAU,CAGT,KAAK,CAAC,CAAE,OAAArB,CAAO,EAAGsB,CAAI,EAAG,CACvB,IAAMC,EAASC,GAAiBxB,CAAM,EAChC,CAAE,MAAAyB,CAAM,EAAIC,GAAe1B,CAAM,EAGvCE,EAAG,MAAM,YAAY,mBAAoB,GAAGqB,EAAO,CAAC,IAAI,EACxDrB,EAAG,MAAM,YAAY,uBAAwB,GAAGuB,CAAK,IAAI,EAGzD,IAAME,EAAUC,GAAwBvB,CAAS,GAE/CkB,EAAO,EAAYI,EAAQ,GAC3BJ,EAAO,EAAIE,EAAQE,EAAQ,EAAIL,EAAK,QAEpCjB,EAAU,SAAS,CACjB,KAAM,KAAK,IAAI,EAAGkB,EAAO,EAAI,EAAE,EAC/B,SAAU,QACZ,CAAC,CACL,EAGA,UAAW,CACTrB,EAAG,MAAM,eAAe,kBAAkB,EAC1CA,EAAG,MAAM,eAAe,sBAAsB,CAChD,CACF,CAAC,EAGLc,EAAc,CACZa,GAA0BxB,CAAS,EACnCY,GAAiBZ,CAAS,CAC5B,CAAC,EACE,KACCc,EAAUN,CAAK,CACjB,EACG,UAAU,CAAC,CAACU,EAAQD,CAAI,IAAM,CAC7B,IAAMK,EAAUG,GAAsBzB,CAAS,EAC/CE,EAAK,OAASgB,EAAO,EAAI,GACzBd,EAAK,OAASc,EAAO,EAAII,EAAQ,MAAQL,EAAK,MAAQ,EACxD,CAAC,EAGL3B,EACEC,EAAUW,EAAM,OAAO,EAAE,KAAKV,EAAI,IAAM,EAAE,CAAC,EAC3CD,EAAUa,EAAM,OAAO,EAAE,KAAKZ,EAAI,IAAM,CAAE,CAAC,CAC7C,EACG,KACCsB,EAAUN,CAAK,CACjB,EACG,UAAUkB,GAAa,CACtB,GAAM,CAAE,MAAAN,CAAM,EAAIC,GAAerB,CAAS,EAC1CA,EAAU,SAAS,CACjB,KAAMoB,EAAQM,EACd,SAAU,QACZ,CAAC,CACH,CAAC,EAGL3B,EACG,KACCe,EAAUN,CAAK,EACfmB,EAAOtC,GAASF,EAAO,SAASE,CAAyB,CAAC,CAC5D,EACG,UAAUA,GAASA,EAAM,MAAM,CAAC,EAGrCW,EAAU,UAAU,IAAI,uBAAuB,EAC/C,QAAWX,KAASF,EAAQ,CAC1B,IAAMyC,EAAQnC,EAA6B,cAAcJ,EAAM,EAAE,IAAI,EACrEuC,EAAM,gBAAgBC,EAAE,IAAK,CAC3B,KAAM,IAAID,EAAM,OAAO,GACvB,SAAU,EACZ,EAAG,GAAG,MAAM,KAAKA,EAAM,UAAU,CAAC,CAAC,EAGnCrC,EAAsBqC,EAAM,kBAAoB,OAAO,EACpD,KACCd,EAAUN,CAAK,EACfmB,EAAOG,GAAM,EAAEA,EAAG,SAAWA,EAAG,QAAQ,EACxCC,EAAID,GAAM,CACRA,EAAG,eAAe,EAClBA,EAAG,gBAAgB,CACrB,CAAC,CACH,EAEG,UAAU,IAAM,CACf,QAAQ,aAAa,CAAC,EAAG,GAAI,IAAIF,EAAM,OAAO,EAAE,EAChDA,EAAM,MAAM,CACd,CAAC,CACP,CAGA,OAAII,EAAQ,mBAAmB,GAC7B1B,EAAM,KACJ2B,GAAK,CAAC,EACNC,GAAepC,CAAS,CAC1B,EACG,UAAU,CAAC,CAAC,CAAE,OAAAH,CAAO,EAAG,CAAE,OAAAuB,CAAO,CAAC,IAAM,CACvC,IAAMiB,EAAMxC,EAAO,UAAU,KAAK,EAClC,GAAIA,EAAO,aAAa,mBAAmB,EACzCA,EAAO,gBAAgB,mBAAmB,MAGrC,CACL,IAAMyC,EAAIvC,EAAG,UAAYqB,EAAO,EAGhC,QAAWmB,KAAOpC,EAAY,aAAa,EACzC,QAAWZ,KAASY,EAClB,iBAAkBoC,CACpB,EAAG,CACD,IAAMT,GAAQnC,EAAW,cAAcJ,EAAM,EAAE,IAAI,EACnD,GACEuC,KAAUjC,GACViC,GAAM,UAAU,KAAK,IAAMO,EAC3B,CACAP,GAAM,aAAa,oBAAqB,EAAE,EAC1CvC,EAAM,MAAM,EACZ,KACF,CACF,CAGF,OAAO,SAAS,CACd,IAAKQ,EAAG,UAAYuC,CACtB,CAAC,EAGD,IAAME,EAAO,SAAmB,QAAQ,GAAK,CAAC,EAC9C,SAAS,SAAU,CAAC,GAAG,IAAI,IAAI,CAACH,EAAK,GAAGG,CAAI,CAAC,CAAC,CAAC,CACjD,CACF,CAAC,EAGLhC,EAAM,KAAKQ,EAAUN,CAAK,CAAC,EACxB,UAAU,IAAM,CACf,QAAW+B,KAAStC,EAA8B,eAAgBJ,CAAE,EAClE0C,EAAM,MAAM,CAChB,CAAC,EAGIrD,GAAiBC,CAAM,EAC3B,KACC4C,EAAIS,GAASlC,EAAM,KAAKkC,CAAK,CAAC,EAC9BC,EAAS,IAAMnC,EAAM,SAAS,CAAC,EAC/Bd,EAAIgD,GAAUE,EAAA,CAAE,IAAK7C,GAAO2C,EAAQ,CACtC,CACJ,CAAC,EACE,KACCG,GAAYC,EAAc,CAC5B,CACJ,CCpMO,SAASC,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,EAAS,OAAAC,CAAO,EACd,CAChC,OAAOC,EAGL,GAAGC,EAAY,4BAA6BL,CAAE,EAC3C,IAAIM,GAASC,GAAqBD,EAAO,CAAE,QAAAJ,EAAS,OAAAC,CAAO,CAAC,CAAC,EAGhE,GAAGE,EAAY,2BAA4BL,CAAE,EAC1C,IAAIM,GAASE,GAAeF,EAAO,CAAE,QAAAJ,EAAS,OAAAC,CAAO,CAAC,CAAC,EAG1D,GAAGE,EAAY,cAAeL,CAAE,EAC7B,IAAIM,GAASG,GAAaH,CAAK,CAAC,EAGnC,GAAGD,EAAY,qBAAsBL,CAAE,EACpC,IAAIM,GA
ASI,GAAeJ,CAAK,CAAC,EAGrC,GAAGD,EAAY,UAAWL,CAAE,EACzB,IAAIM,GAASK,GAAaL,EAAO,CAAE,QAAAJ,EAAS,OAAAC,CAAO,CAAC,CAAC,EAGxD,GAAGE,EAAY,cAAeL,CAAE,EAC7B,IAAIM,GAASM,GAAiBN,EAAO,CAAE,UAAAL,EAAW,QAAAC,CAAQ,CAAC,CAAC,EAG/D,GAAGG,EAAY,UAAWL,CAAE,EACzB,OAAO,IAAMa,EAAQ,kBAAkB,CAAC,EACxC,IAAIP,GAASQ,GAAoBR,EAAO,CAAE,UAAAL,CAAU,CAAC,CAAC,CAC3D,CACF,CCtDO,SAASc,GACdC,EAAkB,CAAE,OAAAC,CAAO,EACP,CACpB,OAAOA,EACJ,KACCC,EAAUC,GAAWC,EACnBC,EAAG,EAAI,EACPA,EAAG,EAAK,EAAE,KAAKC,GAAM,GAAI,CAAC,CAC5B,EACG,KACCC,EAAIC,IAAW,CAAE,QAAAL,EAAS,OAAAK,CAAO,EAAE,CACrC,CACF,CACF,CACJ,CAaO,SAASC,GACdC,EAAiBC,EACc,CAC/B,IAAMC,EAAQC,EAAW,cAAeH,CAAE,EAC1C,OAAOI,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,QAAAZ,EAAS,OAAAK,CAAO,IAAM,CACvCE,EAAG,UAAU,OAAO,oBAAqBF,CAAM,EAC/CI,EAAM,YAAcT,CACtB,CAAC,EAGMJ,GAAYW,EAAIC,CAAO,EAC3B,KACCM,EAAIC,GAASH,EAAM,KAAKG,CAAK,CAAC,EAC9BC,EAAS,IAAMJ,EAAM,SAAS,CAAC,EAC/BR,EAAIW,GAAUE,EAAA,CAAE,IAAKV,GAAOQ,EAAQ,CACtC,CACJ,CAAC,CACH,CCnDA,IAAIG,GAAW,EAiBR,SAASC,GACdC,EAAiBC,EACI,CACrB,SAAS,KAAK,OAAOD,CAAE,EAGvB,GAAM,CAAE,MAAAE,CAAM,EAAIC,GAAeH,CAAE,EACnCA,EAAG,MAAM,YAAY,qBAAsB,GAAGE,CAAK,IAAI,EACvDF,EAAG,OAAO,EAGV,IAAMI,EAAYC,GAAoBJ,CAAI,EACpCK,EACJ,OAAOF,GAAc,YACjBG,GAA0BH,CAAS,EACnCI,EAAG,CAAE,EAAG,EAAG,EAAG,CAAE,CAAC,EAGjBC,EAAUC,EACdC,GAAkBV,CAAI,EACtBW,GAAkBX,CAAI,CACxB,EACG,KACCY,EAAqB,CACvB,EAGF,OAAOC,EAAc,CAACL,EAASH,CAAO,CAAC,EACpC,KACCS,EAAI,CAAC,CAACC,EAAQC,CAAM,IAAM,CACxB,GAAI,CAAE,EAAAC,EAAG,EAAAC,CAAE,EAAIC,GAAiBnB,CAAI,EAC9BoB,EAAOlB,GAAeF,CAAI,EAU1BqB,EAAQrB,EAAK,QAAQ,OAAO,EAClC,OAAIqB,GAASrB,EAAK,gBAChBiB,GAAKI,EAAM,WAAarB,EAAK,cAAc,WAC3CkB,GAAKG,EAAM,UAAarB,EAAK,cAAc,WAEtC,CACL,OAAAe,EACA,OAAQ,CACN,EAAGE,EAAID,EAAO,EAAII,EAAK,MAAS,EAAInB,EAAQ,EAC5C,EAAGiB,EAAIF,EAAO,EAAII,EAAK,OAAS,CAClC,CACF,CACF,CAAC,CACH,CACJ,CASO,SAASE,GACdvB,EACgC,CAChC,IAAMwB,EAAQxB,EAAG,MACjB,GAAI,CAACwB,EAAM,OACT,OAAOC,EAGT,IAAMC,EAAK,aAAa5B,IAAU,GAC5B6B,EAAUC,GAAcF,EAAI,QAAQ,EACpCG,EAAUC,EAAW,cAAeH,CAAO,EACjD,OAAAE,EAAQ,UAAYL,EAGbO,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAE,CAAO,EAAG,CACfP,EAAQ,MAAM,YAAY,iBAAkB,GAAGO,EAAO,CAAC,IAAI,EAC3DP,EAAQ,MAAM,YAAY,iBAAkB,GAAGO,EAAO,CAAC,IAAI,CAC7D,EAGA,UAAW,CACTP,EAAQ,MAAM,eAAe,gBAAgB,EAC7CA,EAAQ,MAAM,eAAe,gBAAgB,CAC/C,CACF,CAAC,EAGDjB,EACEsB,EAAM,KAAKG,EAAO,CAAC,CAAE,OAAAnB,CAAO,IAAMA,CAAM,CAAC,EACzCgB,EAAM,KAAKI,GAAa,GAAG,EAAGD,EAAO,CAAC,CAAE,OAAAnB,CAAO,IAAM,CAACA,CAAM,CAAC,CAC/D,EACG,UAAU,CAGT,KAAK,CAAE,OAAAA,CAAO,EAAG,CACXA,GACFhB,EAAG,sBAAsB,WAAY2B,CAAO,EAC5C3B,EAAG,aAAa,mBAAoB0B,CAAE,EACtC1B,EAAG,gBAAgB,OAAO,IAE1B2B,EAAQ,OAAO,EACf3B,EAAG,gBAAgB,kBAAkB,EACrCA,EAAG,aAAa,QAASwB,CAAK,EAElC,EAGA,UAAW,CACTG,EAAQ,OAAO,EACf3B,EAAG,gBAAgB,kBAAkB,EACrCA,EAAG,aAAa,QAASwB,CAAK,CAChC,CACF,CAAC,EAGHQ,EACG,KACCK,GAAU,GAAIC,EAAuB,CACvC,EACG,UAAU,CAAC,CAAE,OAAAtB,CAAO,IAAM,CACzBW,EAAQ,UAAU,OAAO,qBAAsBX,CAAM,CACvD,CAAC,EAMLgB,EACG,KACCO,GAAa,IAAKD,EAAuB,EACzCH,EAAO,IAAM,CAAC,CAACnC,EAAG,YAAY,EAC9Be,EAAI,IAAMf,EAAG,aAAc,sBAAsB,CAAC,EAClDe,EAAI,CAAC,CAAE,EAAAG,CAAE,IAAMA,CAAC,CAClB,EACC,UAAU,CAGT,KAAKsB,EAAQ,CACPA,EACFb,EAAQ,MAAM,YAAY,iBAAkB,GAAG,CAACa,CAAM,IAAI,EAE1Db,EAAQ,MAAM,eAAe,gBAAgB,CACjD,EAGA,UAAW,CACTA,EAAQ,MAAM,eAAe,gBAAgB,CAC/C,CACF,CAAC,EAGI5B,GAAa4B,EAAS3B,CAAE,EAC5B,KACCyC,EAAIC,GAASV,EAAM,KAAKU,CAAK,CAAC,EAC9BC,EAAS,IAAMX,EAAM,SAAS,CAAC,EAC/BjB,EAAI2B,GAAUE,EAAA,CAAE,IAAK5C,GAAO0C,EAAQ,CACtC,CACJ,CAAC,EACE,KACCG,GAAYC,EAAc,CAC5B,CACJ,CC7JA,SAASC,GAAS,CAAE,UAAAC,CAAU,EAAsC,CAClE,GAAI,CAACC,EAAQ,iBAAiB,EAC5B,OAAOC,EAAG,EAAK,EAGjB,IAAMC,EAAaH,EAChB,KACCI,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,IAAMA,CAAC,EAC5BC,GAA
Y,EAAG,CAAC,EAChBF,EAAI,CAAC,CAACG,EAAGC,CAAC,IAAM,CAACD,EAAIC,EAAGA,CAAC,CAAU,EACnCC,EAAwB,CAAC,CAC3B,EAGIC,EAAUC,EAAc,CAACX,EAAWG,CAAU,CAAC,EAClD,KACCS,EAAO,CAAC,CAAC,CAAE,OAAAC,CAAO,EAAG,CAAC,CAAER,CAAC,CAAC,IAAM,KAAK,IAAIA,EAAIQ,EAAO,CAAC,EAAI,GAAG,EAC5DT,EAAI,CAAC,CAAC,CAAE,CAACU,CAAS,CAAC,IAAMA,CAAS,EAClCC,EAAqB,CACvB,EAGIC,EAAUC,GAAY,QAAQ,EACpC,OAAON,EAAc,CAACX,EAAWgB,CAAO,CAAC,EACtC,KACCZ,EAAI,CAAC,CAAC,CAAE,OAAAS,CAAO,EAAGK,CAAM,IAAML,EAAO,EAAI,KAAO,CAACK,CAAM,EACvDH,EAAqB,EACrBI,EAAUC,GAAUA,EAASV,EAAUR,EAAG,EAAK,CAAC,EAChDmB,EAAU,EAAK,CACjB,CACJ,CAcO,SAASC,GACdC,EAAiBC,EACG,CACpB,OAAOC,EAAM,IAAMd,EAAc,CAC/Be,GAAiBH,CAAE,EACnBxB,GAASyB,CAAO,CAClB,CAAC,CAAC,EACC,KACCpB,EAAI,CAAC,CAAC,CAAE,OAAAuB,CAAO,EAAGC,CAAM,KAAO,CAC7B,OAAAD,EACA,OAAAC,CACF,EAAE,EACFb,EAAqB,CAACR,EAAGC,IACvBD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,MAChB,EACDqB,EAAY,CAAC,CACf,CACJ,CAaO,SAASC,GACdP,EAAiB,CAAE,QAAAQ,EAAS,MAAAC,CAAM,EACO,CACzC,OAAOP,EAAM,IAAM,CACjB,IAAMQ,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxDJ,EACG,KACCxB,EAAwB,QAAQ,EAChC6B,GAAkBP,CAAO,CAC3B,EACG,UAAU,CAAC,CAAC,CAAE,OAAAX,CAAO,EAAG,CAAE,OAAAQ,CAAO,CAAC,IAAM,CACvCL,EAAG,UAAU,OAAO,oBAAqBH,GAAU,CAACQ,CAAM,EAC1DL,EAAG,OAASK,CACd,CAAC,EAGL,IAAMW,EAAWC,GAAKC,EAAY,UAAWlB,CAAE,CAAC,EAC7C,KACCX,EAAO,IAAMX,EAAQ,kBAAkB,CAAC,EACxCyC,GAASC,GAASC,GAAaD,CAAK,CAAC,CACvC,EAGF,OAAAX,EAAM,UAAUC,CAAK,EAGdF,EACJ,KACCc,EAAUV,CAAK,EACf/B,EAAI0C,GAAUC,EAAA,CAAE,IAAKxB,GAAOuB,EAAQ,EACpCE,GAAUT,EAAS,KAAKM,EAAUV,CAAK,CAAC,CAAC,CAC3C,CACJ,CAAC,CACH,CCjIO,SAASc,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACb,CACzB,OAAOC,GAAgBH,EAAI,CAAE,UAAAC,EAAW,QAAAC,CAAQ,CAAC,EAC9C,KACCE,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,IAAM,CACzB,GAAM,CAAE,OAAAC,CAAO,EAAIC,GAAeP,CAAE,EACpC,MAAO,CACL,OAAQK,GAAKC,CACf,CACF,CAAC,EACDE,EAAwB,QAAQ,CAClC,CACJ,CAaO,SAASC,GACdT,EAAiBU,EACmB,CACpC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClBD,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAE,CAAO,EAAG,CACfd,EAAG,UAAU,OAAO,2BAA4Bc,CAAM,CACxD,EAGA,UAAW,CACTd,EAAG,UAAU,OAAO,0BAA0B,CAChD,CACF,CAAC,EAGD,IAAMe,EAAUC,GAAmB,gBAAgB,EACnD,OAAI,OAAOD,GAAY,YACdE,EAGFlB,GAAiBgB,EAASL,CAAO,EACrC,KACCQ,EAAIC,GAASP,EAAM,KAAKO,CAAK,CAAC,EAC9BC,EAAS,IAAMR,EAAM,SAAS,CAAC,EAC/BR,EAAIe,GAAUE,EAAA,CAAE,IAAKrB,GAAOmB,EAAQ,CACtC,CACJ,CAAC,CACH,CChEO,SAASG,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACpB,CAGlB,IAAMC,EAAUD,EACb,KACCE,EAAI,CAAC,CAAE,OAAAC,CAAO,IAAMA,CAAM,EAC1BC,EAAqB,CACvB,EAGIC,EAAUJ,EACb,KACCK,EAAU,IAAMC,GAAiBT,CAAE,EAChC,KACCI,EAAI,CAAC,CAAE,OAAAC,CAAO,KAAO,CACnB,IAAQL,EAAG,UACX,OAAQA,EAAG,UAAYK,CACzB,EAAE,EACFK,EAAwB,QAAQ,CAClC,CACF,CACF,EAGF,OAAOC,EAAc,CAACR,EAASI,EAASN,CAAS,CAAC,EAC/C,KACCG,EAAI,CAAC,CAACQ,EAAQ,CAAE,IAAAC,EAAK,OAAAC,CAAO,EAAG,CAAE,OAAQ,CAAE,EAAAC,CAAE,EAAG,KAAM,CAAE,OAAAV,CAAO,CAAE,CAAC,KAChEA,EAAS,KAAK,IAAI,EAAGA,EACjB,KAAK,IAAI,EAAGQ,EAASE,EAAIH,CAAM,EAC/B,KAAK,IAAI,EAAGP,EAASU,EAAID,CAAM,CACnC,EACO,CACL,OAAQD,EAAMD,EACd,OAAAP,EACA,OAAQQ,EAAMD,GAAUG,CAC1B,EACD,EACDT,EAAqB,CAACU,EAAGC,IACvBD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,MAChB,CACH,CACJ,CCxCO,SAASC,GACdC,EACqB,CACrB,IAAMC,EAAU,SAAkB,WAAW,GAAK,CAChD,MAAOD,EAAO,UAAUE,GAAS,WAC/BA,EAAM,aAAa,qBAAqB,CAC1C,EAAE,OAAO,CACX,EAGMC,EAAQ,KAAK,IAAI,EAAG,KAAK,IAAIF,EAAQ,MAAOD,EAAO,OAAS,CAAC,CAAC,EACpE,OAAOI,EAAG,GAAGJ,CAAM,EAChB,KACCK,GAASH,GAASI,EAAUJ,EAAO,QAAQ,EAAE,KAAKK,EAAI,IAAML,CAAK,CAAC,CAAC,EACnEM,EAAUR,EAAOG,CAAK,CAAC,EACvBI,EAAIL,IAAU,CACZ,MAAOF,EAAO,QAAQE,CAAK,EAC3B,MAAO,CACL,MAASA,EAAM,aAAa,qBAAqB,EACjD,OAASA,EAAM,aAAa,sBAAsB,EAClD,QAASA,EAAM,aAAa,uBAAuB,EACnD,OAASA,EAAM,aAAa,sBAAsB,CACpD,CACF,EAAa,EACbO,EAAY,CAAC,CACf
,CACJ,CASO,SAASC,GACdC,EACgC,CAChC,IAAMX,EAASY,EAA8B,QAASD,CAAE,EAClDE,EAAOC,EAAE,OAAQ,CAAE,KAAM,aAAc,CAAC,EAC9C,SAAS,KAAK,YAAYD,CAAI,EAG9B,IAAME,EAASD,EAAE,OAAQ,CAAE,KAAM,cAAe,CAAC,EACjD,SAAS,KAAK,YAAYC,CAAM,EAGhC,IAAMC,EAASC,GAAW,+BAA+B,EACzD,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAUE,GAAW,CAIzB,GAHA,SAAS,KAAK,aAAa,0BAA2B,EAAE,EAGpDA,EAAQ,MAAM,QAAU,yBAA0B,CACpD,IAAMC,EAAQ,WAAW,+BAA+B,EAClDpB,EAAQ,SAAS,cAAcoB,EAAM,QACvC,wDACA,sDACJ,EAGAD,EAAQ,MAAM,OAAUnB,EAAM,aAAa,sBAAsB,EACjEmB,EAAQ,MAAM,QAAUnB,EAAM,aAAa,uBAAuB,EAClEmB,EAAQ,MAAM,OAAUnB,EAAM,aAAa,sBAAsB,CACnE,CAGA,OAAW,CAACqB,EAAKC,CAAK,IAAK,OAAO,QAAQH,EAAQ,KAAK,EACrD,SAAS,KAAK,aAAa,iBAAiBE,CAAG,GAAIC,CAAK,EAG1D,QAASrB,EAAQ,EAAGA,EAAQH,EAAO,OAAQG,IAAS,CAClD,IAAMsB,EAAQzB,EAAOG,CAAK,EAAE,mBACxBsB,aAAiB,cACnBA,EAAM,OAASJ,EAAQ,QAAUlB,EACrC,CAGA,SAAS,YAAakB,CAAO,CAC/B,CAAC,EAGDf,EAAyBK,EAAI,SAAS,EAAE,KACtCe,EAAOC,GAAMA,EAAG,MAAQ,OAAO,EAC/BC,GAAeT,EAAO,CAACU,EAAGR,IAAYA,CAAO,CAC/C,EACG,UAAU,CAAC,CAAE,MAAAlB,CAAM,IAAM,CACxBA,GAASA,EAAQ,GAAKH,EAAO,OAC7BA,EAAOG,CAAK,EAAE,MAAM,EACpBH,EAAOG,CAAK,EAAE,MAAM,CACtB,CAAC,EAGHgB,EACG,KACCZ,EAAI,IAAM,CACR,IAAMuB,EAASC,GAAoB,QAAQ,EACrCC,EAAS,OAAO,iBAAiBF,CAAM,EAG7C,OAAAf,EAAO,QAAUiB,EAAM,YAGhBA,EAAM,gBAAgB,MAAM,MAAM,EACtC,IAAIR,IAAU,CAACA,GAAO,SAAS,EAAE,EAAE,SAAS,EAAG,GAAG,CAAC,EACnD,KAAK,EAAE,CACZ,CAAC,CACH,EACG,UAAUS,GAASpB,EAAK,QAAU,IAAIoB,CAAK,EAAE,EAGlDd,EAAM,KAAKe,GAAUC,EAAc,CAAC,EACjC,UAAU,IAAM,CACf,SAAS,KAAK,gBAAgB,yBAAyB,CACzD,CAAC,EAGIpC,GAAaC,CAAM,EACvB,KACCoC,EAAUpB,EAAO,KAAKqB,GAAK,CAAC,CAAC,CAAC,EAC9BC,GAAO,EACPC,EAAIC,GAASrB,EAAM,KAAKqB,CAAK,CAAC,EAC9BC,EAAS,IAAMtB,EAAM,SAAS,CAAC,EAC/BZ,EAAIiC,GAAUE,EAAA,CAAE,IAAK/B,GAAO6B,EAAQ,CACtC,CACJ,CAAC,CACH,CChJO,SAASG,GACdC,EAAiB,CAAE,UAAAC,CAAU,EACI,CAGjC,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,MAAAE,CAAM,IAAM,CAC7BL,EAAG,MAAM,YAAY,sBAAuB,GAAGK,CAAK,EAAE,CACxD,CAAC,EAGMJ,EACJ,KACCK,EAAID,GAASF,EAAM,KAAK,CAAE,MAAAE,CAAM,CAAC,CAAC,EAClCE,EAAS,IAAMJ,EAAM,SAAS,CAAC,EAC/BK,EAAIH,IAAU,CAAE,IAAKL,EAAI,MAAAK,CAAM,EAAE,CACnC,CACJ,CAAC,CACH,CChEA,IAAAI,GAAwB,SAiCxB,SAASC,GAAQC,EAAyB,CACxCA,EAAG,aAAa,kBAAmB,EAAE,EACrC,IAAMC,EAAOD,EAAG,QAAQ,aAAa,EAC/BE,EAAOD,EACTA,EAAK,aAAa,WAAW,EAC7BD,EAAG,UACP,OAAAA,EAAG,gBAAgB,iBAAiB,EAC7BE,EAAK,QAAQ,CACtB,CAWO,SAASC,GACd,CAAE,OAAAC,CAAO,EACH,CACF,GAAAC,QAAY,YAAY,GAC1B,IAAIC,EAA8BC,GAAc,CAC9C,IAAI,GAAAF,QAAY,iDAAkD,CAChE,KAAML,GACJA,EAAG,aAAa,qBAAqB,GACrCD,GAAQS,EACNR,EAAG,aAAa,uBAAuB,CACzC,CAAC,CAEL,CAAC,EACE,GAAG,UAAWS,GAAMF,EAAW,KAAKE,CAAE,CAAC,CAC5C,CAAC,EACE,KACCC,EAAID,GAAM,CACQA,EAAG,QACX,MAAM,CAChB,CAAC,EACDE,EAAI,IAAMC,GAAY,kBAAkB,CAAC,CAC3C,EACG,UAAUR,CAAM,CAEzB,CCrCA,SAASS,GAAQC,EAAUC,EAAW,CACpC,OAAAD,EAAI,SAAWC,EAAK,SACpBD,EAAI,SAAWC,EAAK,SACbD,CACT,CA2BA,SAASE,GAAQC,EAAoBF,EAAoB,CACvD,IAAMG,EAAmB,IAAI,IAC7B,QAAWC,KAAMC,EAAY,MAAOH,CAAQ,EAAG,CAC7C,IAAMH,EAAMO,EAAW,MAAOF,CAAE,EAG1BG,EAAQ,CAACT,GAAQ,IAAI,IAAIC,EAAI,WAAY,EAAGC,CAAI,CAAC,EACvDG,EAAQ,IAAI,GAAGI,EAAM,CAAC,CAAC,GAAIA,CAAK,EAGhC,QAAWC,KAAQH,EAAY,kBAAmBD,CAAE,EAAG,CACrD,IAAMK,EAAOD,EAAK,aAAa,MAAM,EACjCC,GAAQ,MACVF,EAAM,KAAKT,GAAQ,IAAI,IAAIW,CAAI,EAAGT,CAAI,CAAC,CAC3C,CACF,CAGA,OAAOG,CACT,CAgBO,SAASO,GAAaV,EAAyC,CACpE,OAAOW,GAAW,IAAI,IAAI,cAAeX,CAAI,CAAC,EAC3C,KACCY,EAAIV,GAAYD,GAAQC,EAAU,IAAI,IAAIF,CAAI,CAAC,CAAC,EAChDa,GAAW,IAAMC,EAAG,IAAI,GAAK,CAAC,CAChC,CACJ,CClDA,SAASC,GACPC,EAAgBC,EACC,CACjB,GAAI,EAAED,EAAG,kBAAkB,SACzB,OAAOE,EAIT,IAAMC,EAAKH,EAAG,OAAO,QAAQ,GAAG,EAChC,GAAIG,IAAO,KACT,OAAOD,EAMT,GAAIC,EAAG,QAAUH,EAAG,SAAWA,EAAG,QAChC,OAAOE,EAQT,IAAME,EAAM,IAAI,IAAID,EAAG,IAAI,EAO3B,OANAC,EAAI,OAASA,EAAI
,KAAO,GAMnBH,EAAQ,IAAI,GAAGG,CAAG,EAAE,GASzBJ,EAAG,eAAe,EACXK,EAAG,IAAI,IAAIF,EAAG,IAAI,CAAC,GATjBD,CAUX,CASA,SAASI,GAAKC,EAA8C,CAC1D,IAAMC,EAAO,IAAI,IACjB,QAAWL,KAAMM,EAAY,aAAcF,EAAS,IAAI,EACtDC,EAAK,IAAIL,EAAG,UAAWA,CAAE,EAG3B,OAAOK,CACT,CAYA,SAASE,GAAQH,EAA0C,CACzD,QAAWJ,KAAMM,EAAY,gBAAiBF,CAAQ,EACpD,QAAWI,IAAO,CAAC,OAAQ,KAAK,EAAG,CACjC,IAAMC,EAAQT,EAAG,aAAaQ,CAAG,EACjC,GAAIC,GAAS,CAAC,qBAAqB,KAAKA,CAAK,EAAG,CAE9CT,EAAGQ,CAAG,EAAIR,EAAGQ,CAAG,EAChB,KACF,CACF,CAGF,OAAON,EAAGE,CAAQ,CACpB,CASA,SAASM,GAAOC,EAAsC,CACpD,QAAWC,IAAY,CACrB,+BACA,gCACA,mCACA,+BACA,2BACA,2BACA,GAAGC,EAAQ,wBAAwB,EAC/B,CAAC,0BAA0B,EAC3B,CAAC,CACP,EAAG,CACD,IAAMC,EAASC,GAAmBH,CAAQ,EACpCI,EAASD,GAAmBH,EAAUD,CAAI,EAE9C,OAAOG,GAAW,aAClB,OAAOE,GAAW,aAElBF,EAAO,YAAYE,CAAM,CAE7B,CAGA,IAAMX,EAAOF,GAAK,QAAQ,EAC1B,OAAW,CAACc,EAAMjB,CAAE,IAAKG,GAAKQ,CAAI,EAC5BN,EAAK,IAAIY,CAAI,EACfZ,EAAK,OAAOY,CAAI,EAEhB,SAAS,KAAK,YAAYjB,CAAE,EAGhC,QAAWA,KAAMK,EAAK,OAAO,EAAG,CAC9B,IAAMa,EAAOlB,EAAG,aAAa,MAAM,EAI/BkB,IAAS,eAAiBA,IAAS,gBACrClB,EAAG,OAAO,CACd,CAIA,IAAMmB,EAAYC,GAAoB,WAAW,EACjD,OAAOC,GAAOf,EAAY,SAAUa,CAAS,CAAC,EAC3C,KACCG,EAAUtB,GAAM,CACd,IAAMuB,EAASZ,EAAK,cAAc,QAAQ,EAC1C,GAAIX,EAAG,IAAK,CACV,QAAWkB,KAAQlB,EAAG,kBAAkB,EACtCuB,EAAO,aAAaL,EAAMlB,EAAG,aAAakB,CAAI,CAAE,EAClD,OAAAlB,EAAG,YAAYuB,CAAM,EAGd,IAAIC,EAAWC,GAAY,CAChCF,EAAO,OAAS,IAAME,EAAS,SAAS,CAC1C,CAAC,CAGH,KACE,QAAAF,EAAO,YAAcvB,EAAG,YACxBA,EAAG,YAAYuB,CAAM,EACdxB,CAEX,CAAC,EACD2B,EAAe,EACfC,GAAQ,QAAQ,CAClB,CACJ,CAgBO,SAASC,GACd,CAAE,UAAAC,EAAW,UAAAC,EAAW,UAAAC,CAAU,EACZ,CACtB,IAAMC,EAASC,GAAc,EAC7B,GAAI,SAAS,WAAa,QACxB,OAAOlC,EAIT,IAAMmC,EAAWC,GAAaH,EAAO,IAAI,EAUzC9B,EAAG,QAAQ,EACR,UAAUK,EAAO,EAUpB,IAAM6B,EACJC,EAAsB,SAAS,KAAM,OAAO,EACzC,KACCC,GAAkBJ,CAAQ,EAC1BZ,EAAU,CAAC,CAACzB,EAAIC,CAAO,IAAMF,GAAOC,EAAIC,CAAO,CAAC,EAChDyC,GAAM,CACR,EAIEC,EACJH,EAAyB,OAAQ,UAAU,EACxC,KACCI,EAAIC,EAAW,EACfH,GAAM,CACR,EAMJH,EAAS,KAAKO,GAAeb,CAAS,CAAC,EACpC,UAAU,CAAC,CAAC7B,EAAK,CAAE,OAAA2C,CAAO,CAAC,IAAM,CAChC,QAAQ,aAAaA,EAAQ,EAAE,EAC/B,QAAQ,UAAU,KAAM,GAAI3C,CAAG,CACjC,CAAC,EAMH4C,EAAMT,EAAUI,CAAQ,EACrB,UAAUX,CAAS,EActB,IAAMiB,EACJjB,EAAU,KACRkB,EAAwB,UAAU,EAClCzB,EAAUrB,GAAO+C,GAAY/C,EAAK,CAAE,UAAA8B,CAAU,CAAC,EAC5C,KACCkB,GAAW,KACTC,GAAYjD,EAAK,EAAI,EACdF,EACR,CACH,CACF,EAIAuB,EAAUf,EAAO,EACjBe,EAAUZ,EAAM,EAChB6B,GAAM,CACR,EAUF,OAAAM,EACEC,EAAU,KAAKH,GAAed,EAAW,CAACsB,EAAGlD,IAAQA,CAAG,CAAC,EASzD6C,EAAU,KACRxB,EAAU,IAAMO,CAAS,EACzBkB,EAAwB,UAAU,EAClCzB,EAAU,IAAMO,CAAS,EACzBkB,EAAwB,MAAM,CAChC,EAQAlB,EAAU,KACRuB,EAAqB,CAACC,EAAGC,IACvBD,EAAE,WAAaC,EAAE,UACjBD,EAAE,OAAaC,EAAE,IAClB,EACDhC,EAAU,IAAMc,CAAQ,EACxBmB,EAAI,IAAM,QAAQ,KAAK,CAAC,CAC1B,CACF,EACG,UAAUtD,GAAO,CA1YtB,IAAAuD,EAAAC,EAgZU,QAAQ,QAAU,MAAQ,CAACxD,EAAI,KACjC,OAAO,SAAS,GAAGwD,GAAAD,EAAA,QAAQ,QAAR,YAAAA,EAAe,IAAf,KAAAC,EAAoB,CAAC,GAExC,QAAQ,kBAAoB,OAC5BC,GAAgBzD,EAAI,IAAI,EACxB,QAAQ,kBAAoB,SAEhC,CAAC,EAMH4B,EAAU,UAAU,IAAM,CACxB,QAAQ,kBAAoB,QAC9B,CAAC,EAMDQ,EAAU,OAAQ,cAAc,EAC7B,UAAU,IAAM,CACf,QAAQ,kBAAoB,MAC9B,CAAC,EAMHP,EAAU,KACRiB,EAAwB,QAAQ,EAChCY,GAAa,GAAG,CAClB,EACG,UAAU,CAAC,CAAE,OAAAf,CAAO,IAAM,CACzB,QAAQ,aAAaA,EAAQ,EAAE,CACjC,CAAC,EAGIE,CACT,CClaA,IAAAc,GAAuB,SAqChB,SAASC,GACdC,EAC0B,CAE1B,IAAMC,EAAQD,EAAO,UAAU,MAAM,GAAG,EAAE,IAAIE,GAC/BA,EAAK,QAAQ,sBAAuB,EAAE,EACvC,SAAW,EAAI,SAAMA,CAClC,EACE,KAAK,GAAG,EAELC,EAAY,IAAI,OAAOF,EAAO,KAAK,EACnCG,EAAY,CAACC,EAAYC,EAAcJ,IACpC,GAAGI,CAAI,2BAA2BJ,CAAI,UAI/C,OAAQK,GAAkB,CACxBA,EAAQA,EACL,QAAQ,gBAAiB,GAAG,EAC5B,KAAK,EAGR,IAAMC,EAAQ,IAAI,OAAO,MAAMR,EAAO,SAAS,MAC7CO,EACG,QAAQ,uBAAwB,MAAM,EACtC,QAAQJ,EAAW,GAAG,CAC3B,IAAK,KAAK,EAGV,OAAOM,MAAS,GAAAC,SAAWD,CAAK,EAC7B,QAAQD,EAAOJ,CAAS,EACx
B,QAAQ,8BAA+B,IAAI,CAChD,CACF,CCEO,SAASO,GACdC,EAC+B,CAC/B,OAAOA,EAAQ,OAAS,CAC1B,CASO,SAASC,GACdD,EACgC,CAChC,OAAOA,EAAQ,OAAS,CAC1B,CC1CO,SAASE,GACdC,EAAaC,EACW,CACxB,IAAMC,EAAUC,GAA2BH,CAAG,EAC9C,OAAAI,EACEC,EAAG,SAAS,WAAa,OAAO,EAChCC,GAAY,QAAQ,CACtB,EACG,KACCC,GAAMC,GAAUA,CAAM,EACtBC,EAAU,IAAMR,CAAM,CACxB,EACG,UAAU,CAAC,CAAE,OAAAS,EAAQ,KAAAC,CAAK,IAAMT,EAAQ,KAAK,CAC5C,OACA,KAAM,CACJ,OAAAQ,EACA,KAAAC,EACA,QAAS,CACP,QAASC,EAAQ,gBAAgB,CACnC,CACF,CACF,CAAC,CAAC,EAGCV,CACT,CCxBO,SAASW,GACd,CAAE,UAAAC,CAAU,EACN,CACN,IAAMC,EAASC,GAAc,EACvBC,EAAYC,GAChB,IAAI,IAAI,mBAAoBH,EAAO,IAAI,CACzC,EACG,KACCI,GAAW,IAAMC,CAAK,CACxB,EAGIC,EAAWJ,EACd,KACCK,EAAIC,GAAY,CACd,GAAM,CAAC,CAAEC,CAAO,EAAIT,EAAO,KAAK,MAAM,aAAa,EACnD,OAAOQ,EAAS,KAAK,CAAC,CAAE,QAAAE,EAAS,QAAAC,CAAQ,IACvCD,IAAYD,GAAWE,EAAQ,SAASF,CAAO,CAChD,GAAKD,EAAS,CAAC,CAClB,CAAC,CACH,EAGFN,EACG,KACCK,EAAIC,GAAY,IAAI,IAAIA,EAAS,IAAIE,GAAW,CAC9C,GAAG,IAAI,IAAI,MAAMA,EAAQ,OAAO,IAAKV,EAAO,IAAI,CAAC,GACjDU,CACF,CAAC,CAAC,CAAC,EACHE,EAAUC,GAAQC,EAAsB,SAAS,KAAM,OAAO,EAC3D,KACCC,EAAOC,GAAM,CAACA,EAAG,SAAW,CAACA,EAAG,OAAO,EACvCC,GAAeX,CAAQ,EACvBM,EAAU,CAAC,CAACI,EAAIP,CAAO,IAAM,CAC3B,GAAIO,EAAG,kBAAkB,QAAS,CAChC,IAAME,EAAKF,EAAG,OAAO,QAAQ,GAAG,EAChC,GAAIE,GAAM,CAACA,EAAG,QAAUL,EAAK,IAAIK,EAAG,IAAI,EAAG,CACzC,IAAMC,EAAMD,EAAG,KAWf,MAAI,CAACF,EAAG,OAAO,QAAQ,aAAa,GAClBH,EAAK,IAAIM,CAAG,IACZV,EACPJ,GAEXW,EAAG,eAAe,EACXI,EAAGD,CAAG,EACf,CACF,CACA,OAAOd,CACT,CAAC,EACDO,EAAUO,GACDE,GAAa,IAAI,IAAIF,CAAG,CAAC,EAC7B,KACCZ,EAAIe,GAAW,CAEb,IAAMC,EADWC,GAAY,EACP,KAAK,QAAQxB,EAAO,KAAMmB,CAAG,EACnD,OAAOG,EAAQ,IAAIC,EAAK,MAAM,GAAG,EAAE,CAAC,CAAC,EACjC,IAAI,IAAIA,CAAI,EACZ,IAAI,IAAIJ,CAAG,CACjB,CAAC,CACH,CACH,CACH,CACF,CACF,EACG,UAAUA,GAAOM,GAAYN,EAAK,EAAI,CAAC,EAG5CO,EAAc,CAACxB,EAAWI,CAAQ,CAAC,EAChC,UAAU,CAAC,CAACE,EAAUC,CAAO,IAAM,CACpBkB,EAAW,mBAAmB,EACtC,YAAYC,GAAsBpB,EAAUC,CAAO,CAAC,CAC5D,CAAC,EAGHV,EAAU,KAAKa,EAAU,IAAMN,CAAQ,CAAC,EACrC,UAAUG,GAAW,CA3J1B,IAAAoB,EA8JM,IAAIC,EAAW,SAAS,aAAc,cAAc,EACpD,GAAIA,IAAa,KAAM,CACrBA,EAAW,GAGX,IAAIC,IAAUF,EAAA7B,EAAO,UAAP,YAAA6B,EAAgB,UAAW,SACpC,MAAM,QAAQE,CAAO,IACxBA,EAAU,CAACA,CAAO,GAGpBC,EAAM,QAAWC,KAAUF,EACzB,QAAWrB,KAAWD,EAAQ,QAAQ,OAAOA,EAAQ,OAAO,EAC1D,GAAI,IAAI,OAAOwB,EAAQ,GAAG,EAAE,KAAKvB,CAAO,EAAG,CACzCoB,EAAW,GACX,MAAME,CACR,CAGJ,SAAS,aAAcF,EAAU,cAAc,CACjD,CAGA,GAAIA,EACF,QAAWI,KAAWC,GAAqB,UAAU,EACnDD,EAAQ,OAAS,EACvB,CAAC,CACL,CCpFO,SAASE,GACdC,EAAsB,CAAE,QAAAC,CAAQ,EACP,CAGzB,GAAM,CAAE,aAAAC,CAAa,EAAIC,GAAY,EACjCD,EAAa,IAAI,GAAG,IACtBE,GAAU,SAAU,EAAI,EAGxBJ,EAAG,MAAQE,EAAa,IAAI,GAAG,EAC/BF,EAAG,MAAM,EAGTK,GAAY,QAAQ,EACjB,KACCC,GAAMC,GAAU,CAACA,CAAM,CACzB,EACG,UAAU,IAAM,CACf,IAAMC,EAAML,GAAY,EACxBK,EAAI,aAAa,OAAO,GAAG,EAC3B,QAAQ,aAAa,CAAC,EAAG,GAAI,GAAGA,CAAG,EAAE,CACvC,CAAC,GAIP,IAAMC,EAASC,GAAkBV,CAAE,EAC7BW,EAASC,EACbX,EAAQ,KAAKK,GAAMO,EAAoB,CAAC,EACxCC,EAAUd,EAAI,OAAO,EACrBS,CACF,EACG,KACCM,EAAI,IAAMf,EAAG,KAAK,EAClBgB,EAAqB,CACvB,EAGF,OAAOC,EAAc,CAACN,EAAQF,CAAM,CAAC,EAClC,KACCM,EAAI,CAAC,CAACG,EAAOC,CAAK,KAAO,CAAE,MAAAD,EAAO,MAAAC,CAAM,EAAE,EAC1CC,EAAY,CAAC,CACf,CACJ,CAUO,SAASC,GACdrB,EAAsB,CAAE,QAAAC,CAAQ,EACsB,CACtD,IAAMqB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAGxDT,EAAc,CACZhB,EAAQ,KAAKK,GAAMO,EAAoB,CAAC,EACxCS,CACF,EAAG,CAACK,EAAGC,IAAUA,CAAK,EACnB,KACCC,EAAwB,OAAO,CACjC,EACG,UAAU,CAAC,CAAE,MAAAX,CAAM,IAAMjB,EAAQ,KAAK,CACrC,OACA,KAAMiB,CACR,CAAC,CAAC,EAGNI,EACG,KACCO,EAAwB,OAAO,CACjC,EACG,UAAU,CAAC,CAAE,MAAAV,CAAM,IAAM,CACpBA,GACFf,GAAU,SAAUe,CAAK,CAC7B,CAAC,EAGLL,EAAUd,EAAG,KAAO,OAAO,EACxB,KACC8B,EAAUN,CAAK,CACjB,EACG,UAAU,IAAMxB,EAAG,MAAM,CAAC,EAM/B,IAAM+B,EAAQC,EAAW,uBAAuB,EAChD,OAAAlB,EAAUi
B,EAAO,OAAO,EACrB,UAAU,IAAM/B,EAAG,MAAM,CAAC,EAGtBD,GAAiBC,EAAI,CAAE,QAAAC,CAAQ,CAAC,EACpC,KACCgC,EAAIC,GAASZ,EAAM,KAAKY,CAAK,CAAC,EAC9BC,EAAS,IAAMb,EAAM,SAAS,CAAC,EAC/BP,EAAImB,GAAUE,EAAA,CAAE,IAAKpC,GAAOkC,EAAQ,EACpCd,EAAY,CAAC,CACf,CACJ,CCnHO,SAASiB,GACdC,EAAiB,CAAE,QAAAC,EAAS,OAAAC,CAAO,EACE,CACrC,IAAMC,EAAQ,IAAIC,EACZC,EAAYC,GAAqBN,EAAG,aAAc,EACrD,KACCO,EAAO,OAAO,CAChB,EAGIC,EAAYR,EAAG,cAGfS,EAAOC,EAAW,wBAAyBV,CAAE,EAC7CW,EAAOD,EAAW,uBAAwBV,CAAE,EAGlDY,GAAY,QAAQ,EACjB,UAAUC,GAAUF,EAAK,aACxB,OAAQE,EAAS,OAAS,cAC5B,CAAC,EAGHV,EACG,KACCW,GAAeZ,CAAM,EACrBa,GAAUd,EAAQ,KAAKe,GAAMC,EAAoB,CAAC,CAAC,CACrD,EACG,UAAU,CAAC,CAAC,CAAE,MAAAC,CAAM,EAAG,CAAE,MAAAC,CAAM,CAAC,IAAM,CACrC,OAAQD,EAAM,OAAQ,CAGpB,IAAK,GACHT,EAAK,YAAcU,EAAM,OACrBC,GAAY,oBAAoB,EAChCA,GAAY,2BAA2B,EAC3C,MAGF,IAAK,GACHX,EAAK,YAAcW,GAAY,mBAAmB,EAClD,MAGF,QACE,IAAMC,EAAQC,GAAMJ,EAAM,MAAM,EAChCT,EAAK,YAAcW,GAAY,sBAAuBC,CAAK,CAC/D,CACF,CAAC,EAGL,IAAME,EAAUpB,EACb,KACCqB,EAAI,IAAMb,EAAK,UAAY,EAAE,EAC7Bc,EAAU,CAAC,CAAE,MAAAP,CAAM,IAAMQ,EACvBC,EAAG,GAAGT,EAAM,MAAM,EAAG,EAAE,CAAC,EACxBS,EAAG,GAAGT,EAAM,MAAM,EAAE,CAAC,EAClB,KACCU,GAAY,CAAC,EACbC,GAAQxB,CAAS,EACjBoB,EAAU,CAAC,CAACK,CAAK,IAAMA,CAAK,CAC9B,CACJ,CAAC,EACDC,EAAIC,EAAsB,EAC1BC,GAAM,CACR,EAGF,OAAAV,EAAQ,UAAUW,GAAQvB,EAAK,YAAYuB,CAAI,CAAC,EAChDX,EACG,KACCY,GAASD,GAAQ,CACf,IAAME,EAAUC,GAAmB,UAAWH,CAAI,EAClD,OAAI,OAAOE,GAAY,YACdE,EAGFC,EAAUH,EAAS,QAAQ,EAC/B,KACCI,EAAUrC,CAAK,EACf4B,EAAI,IAAMK,CAAO,CACnB,CACJ,CAAC,CACH,EACG,UAAUA,GAAW,CAElBA,EAAQ,OAAS,IACjBA,EAAQ,WAAa5B,EAAU,WAE/BA,EAAU,SAAS,CAAE,IAAK4B,EAAQ,SAAU,CAAC,CACjD,CAAC,EAGWnC,EACb,KACCM,EAAOkC,EAAqB,EAC5BV,EAAI,CAAC,CAAE,KAAAW,CAAK,IAAMA,CAAI,CACxB,EAIC,KACClB,EAAImB,GAASxC,EAAM,KAAKwC,CAAK,CAAC,EAC9BC,EAAS,IAAMzC,EAAM,SAAS,CAAC,EAC/B4B,EAAIY,GAAUE,EAAA,CAAE,IAAK7C,GAAO2C,EAAQ,CACtC,CACJ,CCpHO,SAASG,GACdC,EAAkB,CAAE,OAAAC,CAAO,EACF,CACzB,OAAOA,EACJ,KACCC,EAAI,CAAC,CAAE,MAAAC,CAAM,IAAM,CACjB,IAAMC,EAAMC,GAAY,EACxB,OAAAD,EAAI,KAAO,GAGXD,EAAQA,EACL,QAAQ,OAAQ,GAAG,EACnB,QAAQ,KAAM,KAAK,EACnB,QAAQ,KAAM,KAAK,EAGtBC,EAAI,OAAS,KAAKD,CAAK,GAChB,CAAE,IAAAC,CAAI,CACf,CAAC,CACH,CACJ,CAUO,SAASE,GACdC,EAAuBC,EACa,CACpC,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EACxD,OAAAJ,EAAM,UAAU,CAAC,CAAE,IAAAL,CAAI,IAAM,CAC3BG,EAAG,aAAa,sBAAuBA,EAAG,IAAI,EAC9CA,EAAG,KAAO,GAAGH,CAAG,EAClB,CAAC,EAGDU,EAAUP,EAAI,OAAO,EAClB,KACCQ,EAAUJ,CAAK,CACjB,EACG,UAAUK,GAAMA,EAAG,eAAe,CAAC,EAGjCjB,GAAiBQ,EAAIC,CAAO,EAChC,KACCS,EAAIC,GAAST,EAAM,KAAKS,CAAK,CAAC,EAC9BC,EAAS,IAAMV,EAAM,SAAS,CAAC,EAC/BP,EAAIgB,GAAUE,EAAA,CAAE,IAAKb,GAAOW,EAAQ,CACtC,CACJ,CCpDO,SAASG,GACdC,EAAiB,CAAE,QAAAC,EAAS,UAAAC,CAAU,EACA,CACtC,IAAMC,EAAQ,IAAIC,EAGZC,EAASC,GAAoB,cAAc,EAC3CC,EAASC,EACbC,EAAUJ,EAAO,SAAS,EAC1BI,EAAUJ,EAAO,OAAO,CAC1B,EACG,KACCK,GAAUC,EAAc,EACxBC,EAAI,IAAMP,EAAM,KAAK,EACrBQ,EAAqB,CACvB,EAGF,OAAAV,EACG,KACCW,GAAkBP,CAAM,EACxBK,EAAI,CAAC,CAAC,CAAE,QAAAG,CAAQ,EAAGC,CAAK,IAAM,CAC5B,IAAMC,EAAQD,EAAM,MAAM,UAAU,EACpC,GAAID,GAAA,MAAAA,EAAS,QAAUE,EAAMA,EAAM,OAAS,CAAC,EAAG,CAC9C,IAAMC,EAAOH,EAAQA,EAAQ,OAAS,CAAC,EACnCG,EAAK,WAAWD,EAAMA,EAAM,OAAS,CAAC,CAAC,IACzCA,EAAMA,EAAM,OAAS,CAAC,EAAIC,EAC9B,MACED,EAAM,OAAS,EAEjB,OAAOA,CACT,CAAC,CACH,EACG,UAAUA,GAASjB,EAAG,UAAYiB,EAChC,KAAK,EAAE,EACP,QAAQ,MAAO,QAAQ,CAC1B,EAGJf,EACG,KACCiB,EAAO,CAAC,CAAE,KAAAC,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,OAAQA,EAAI,KAAM,CAGhB,IAAK,aAEDrB,EAAG,UAAU,QACbK,EAAM,iBAAmBA,EAAM,MAAM,SAErCA,EAAM,MAAQL,EAAG,WACnB,KACJ,CACF,CAAC,EAGWC,EACb,KACCkB,EAAOG,EAAqB,EAC5BV,EAAI,CAAC,CAAE,KAAAW,CAAK,IAAMA,CAAI,CACxB,EAIC,KACCC,EAAIC,GAAStB,EAAM,KAAKsB,CAAK,CAAC,EAC9BC,EAAS,IAAMvB,EAAM,SAAS,CA
AC,EAC/BS,EAAI,KAAO,CAAE,IAAKZ,CAAG,EAAE,CACzB,CACJ,CCjDO,SAAS2B,GACdC,EAAiB,CAAE,OAAAC,EAAQ,UAAAC,CAAU,EACN,CAC/B,IAAMC,EAASC,GAAc,EAC7B,GAAI,CACF,IAAMC,EAAUC,GAAkBH,EAAO,OAAQF,CAAM,EAGjDM,EAASC,GAAoB,eAAgBR,CAAE,EAC/CS,EAASD,GAAoB,gBAAiBR,CAAE,EAGtDU,EAAwBV,EAAI,OAAO,EAChC,KACCW,EAAO,CAAC,CAAE,OAAAC,CAAO,IACfA,aAAkB,SAAW,CAAC,CAACA,EAAO,QAAQ,GAAG,CAClD,CACH,EACG,UAAU,IAAMC,GAAU,SAAU,EAAK,CAAC,EAG/CX,EACG,KACCS,EAAO,CAAC,CAAE,KAAAG,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,IAAMC,EAASC,GAAiB,EAChC,OAAQF,EAAI,KAAM,CAGhB,IAAK,QACH,GAAIC,IAAWT,EAAO,CACpB,IAAMW,EAAU,IAAI,IACpB,QAAWC,KAAUC,EACnB,sBAAuBX,CACzB,EAAG,CACD,IAAMY,EAAUF,EAAO,kBACvBD,EAAQ,IAAIC,EAAQ,WAClBE,EAAQ,aAAa,eAAe,CACtC,CAAC,CACH,CAGA,GAAIH,EAAQ,KAAM,CAChB,GAAM,CAAC,CAACI,CAAI,CAAC,EAAI,CAAC,GAAGJ,CAAO,EAAE,KAAK,CAAC,CAAC,CAAEK,CAAC,EAAG,CAAC,CAAEC,CAAC,IAAMA,EAAID,CAAC,EAC1DD,EAAK,MAAM,CACb,CAGAP,EAAI,MAAM,CACZ,CACA,MAGF,IAAK,SACL,IAAK,MACHF,GAAU,SAAU,EAAK,EACzBN,EAAM,KAAK,EACX,MAGF,IAAK,UACL,IAAK,YACH,GAAI,OAAOS,GAAW,YACpBT,EAAM,MAAM,MACP,CACL,IAAMkB,EAAM,CAAClB,EAAO,GAAGa,EACrB,wDACAX,CACF,CAAC,EACKiB,EAAI,KAAK,IAAI,GACjB,KAAK,IAAI,EAAGD,EAAI,QAAQT,CAAM,CAAC,EAAIS,EAAI,QACrCV,EAAI,OAAS,UAAY,GAAK,IAE9BU,EAAI,MAAM,EACdA,EAAIC,CAAC,EAAE,MAAM,CACf,CAGAX,EAAI,MAAM,EACV,MAGF,QACMR,IAAUU,GAAiB,GAC7BV,EAAM,MAAM,CAClB,CACF,CAAC,EAGLL,EACG,KACCS,EAAO,CAAC,CAAE,KAAAG,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,OAAQA,EAAI,KAAM,CAGhB,IAAK,IACL,IAAK,IACL,IAAK,IACHR,EAAM,MAAM,EACZA,EAAM,OAAO,EAGbQ,EAAI,MAAM,EACV,KACJ,CACF,CAAC,EAGL,IAAMY,EAASC,GAAiBrB,EAAO,CAAE,QAAAF,CAAQ,CAAC,EAClD,OAAOwB,EACLF,EACAG,GAAkBrB,EAAQ,CAAE,QAAAJ,EAAS,OAAAsB,CAAO,CAAC,CAC/C,EACG,KACCI,GAGE,GAAGC,GAAqB,eAAgBhC,CAAE,EACvC,IAAIiC,GAASC,GAAiBD,EAAO,CAAE,OAAAN,CAAO,CAAC,CAAC,EAGnD,GAAGK,GAAqB,iBAAkBhC,CAAE,EACzC,IAAIiC,GAASE,GAAmBF,EAAO,CAAE,QAAA5B,EAAS,UAAAH,CAAU,CAAC,CAAC,CACnE,CACF,CAGJ,OAASkC,EAAK,CACZ,OAAApC,EAAG,OAAS,GACLqC,EACT,CACF,CCnKO,SAASC,GACdC,EAAiB,CAAE,OAAAC,EAAQ,UAAAC,CAAU,EACG,CACxC,OAAOC,EAAc,CACnBF,EACAC,EACG,KACCE,EAAUC,GAAY,CAAC,EACvBC,EAAOC,GAAO,CAAC,CAACA,EAAI,aAAa,IAAI,GAAG,CAAC,CAC3C,CACJ,CAAC,EACE,KACCC,EAAI,CAAC,CAACC,EAAOF,CAAG,IAAMG,GAAuBD,EAAM,MAAM,EACvDF,EAAI,aAAa,IAAI,GAAG,CAC1B,CAAC,EACDC,EAAIG,GAAM,CA1FhB,IAAAC,EA2FQ,IAAMC,EAAQ,IAAI,IAGZC,EAAK,SAAS,mBAAmBd,EAAI,WAAW,SAAS,EAC/D,QAASe,EAAOD,EAAG,SAAS,EAAGC,EAAMA,EAAOD,EAAG,SAAS,EACtD,IAAIF,EAAAG,EAAK,gBAAL,MAAAH,EAAoB,aAAc,CACpC,IAAMI,EAAWD,EAAK,YAChBE,EAAWN,EAAGK,CAAQ,EACxBC,EAAS,OAASD,EAAS,QAC7BH,EAAM,IAAIE,EAAmBE,CAAQ,CACzC,CAIF,OAAW,CAACF,EAAMG,CAAI,IAAKL,EAAO,CAChC,GAAM,CAAE,WAAAM,CAAW,EAAIC,EAAE,OAAQ,KAAMF,CAAI,EAC3CH,EAAK,YAAY,GAAG,MAAM,KAAKI,CAAU,CAAC,CAC5C,CAGA,MAAO,CAAE,IAAKnB,EAAI,MAAAa,CAAM,CAC1B,CAAC,CACH,CACJ,CCPO,SAASQ,GACdC,EAAiB,CAAE,UAAAC,EAAW,MAAAC,CAAM,EACf,CACrB,IAAMC,EAASH,EAAG,QAAqB,UAAU,EAC3CI,EACJD,EAAO,UACPA,EAAO,cAAe,UAGxB,OAAOE,EAAc,CAACH,EAAOD,CAAS,CAAC,EACpC,KACCK,EAAI,CAAC,CAAC,CAAE,OAAAC,EAAQ,OAAAC,CAAO,EAAG,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,CAAC,KACzCD,EAASA,EACL,KAAK,IAAIJ,EAAQ,KAAK,IAAI,EAAGK,EAAIF,CAAM,CAAC,EACxCH,EACG,CACL,OAAAI,EACA,OAAQC,GAAKF,EAASH,CACxB,EACD,EACDM,EAAqB,CAACC,EAAGC,IACvBD,EAAE,SAAWC,EAAE,QACfD,EAAE,SAAWC,EAAE,MAChB,CACH,CACJ,CAuBO,SAASC,GACdb,EAAiBc,EACe,CADf,IAAAC,EAAAD,EAAE,SAAAE,CA5JrB,EA4JmBD,EAAcE,EAAAC,GAAdH,EAAc,CAAZ,YAEnB,IAAMI,EAAQC,EAAW,0BAA2BpB,CAAE,EAChD,CAAE,EAAAS,CAAE,EAAIY,GAAiBF,CAAK,EACpC,OAAOG,EAAM,IAAM,CACjB,IAAMC,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAClDC,EAAQL,EACX,KACCM,GAAU,EAAGC,EAAuB,CACtC,EAGF,OAAAF,EAAM,KAAKG,GAAef,CAAO,CAAC,EAC/B,UAAU,CAGT,KAAK,CAAC,CAAE,OAAAR,CAAO,EA
AG,CAAE,OAAQD,CAAO,CAAC,EAAG,CACrCY,EAAM,MAAM,OAAS,GAAGX,EAAS,EAAIC,CAAC,KACtCT,EAAG,MAAM,IAAY,GAAGO,CAAM,IAChC,EAGA,UAAW,CACTY,EAAM,MAAM,OAAS,GACrBnB,EAAG,MAAM,IAAY,EACvB,CACF,CAAC,EAGH4B,EAAM,KAAKI,GAAM,CAAC,EACf,UAAU,IAAM,CACf,QAAWC,KAAQC,EAAY,8BAA+BlC,CAAE,EAAG,CACjE,GAAI,CAACiC,EAAK,aACR,SACF,IAAME,EAAYF,EAAK,QAAqB,yBAAyB,EACrE,GAAI,OAAOE,GAAc,YAAa,CACpC,IAAM5B,EAAS0B,EAAK,UAAYE,EAAU,UACpC,CAAE,OAAA3B,CAAO,EAAI4B,GAAeD,CAAS,EAC3CA,EAAU,SAAS,CACjB,IAAK5B,EAASC,EAAS,CACzB,CAAC,CACH,CACF,CACF,CAAC,EAGH6B,GAAKH,EAA8B,kBAAmBlC,CAAE,CAAC,EACtD,KACCsC,GAASC,GAASC,EAAUD,EAAO,OAAO,EACvC,KACCE,GAAUC,EAAc,EACxBpC,EAAI,IAAMiC,CAAK,EACfI,EAAUlB,CAAK,CACjB,CACF,CACF,EACG,UAAUc,GAAS,CAClB,IAAMK,EAAQxB,EAA6B,QAAQmB,EAAM,OAAO,IAAI,EACxDnB,EAAW,qBAAqBmB,EAAM,EAAE,IAAI,EACpD,aAAa,gBAAiB,GAAGK,EAAM,OAAO,EAAE,CACtD,CAAC,EAGE7C,GAAaC,EAAIiB,CAAO,EAC5B,KACC4B,EAAIC,GAASvB,EAAM,KAAKuB,CAAK,CAAC,EAC9BC,EAAS,IAAMxB,EAAM,SAAS,CAAC,EAC/BjB,EAAIwC,GAAUE,EAAA,CAAE,IAAKhD,GAAO8C,EAAQ,CACtC,CACJ,CAAC,CACH,CCxKO,SAASG,GACdC,EAAcC,EACW,CACzB,GAAI,OAAOA,GAAS,YAAa,CAC/B,IAAMC,EAAM,gCAAgCF,CAAI,IAAIC,CAAI,GACxD,OAAOE,GAGLC,GAAqB,GAAGF,CAAG,kBAAkB,EAC1C,KACCG,GAAW,IAAMC,CAAK,EACtBC,EAAIC,IAAY,CACd,QAASA,EAAQ,QACnB,EAAE,EACFC,GAAe,CAAC,CAAC,CACnB,EAGFL,GAAkBF,CAAG,EAClB,KACCG,GAAW,IAAMC,CAAK,EACtBC,EAAIG,IAAS,CACX,MAAOA,EAAK,iBACZ,MAAOA,EAAK,WACd,EAAE,EACFD,GAAe,CAAC,CAAC,CACnB,CACJ,EACG,KACCF,EAAI,CAAC,CAACC,EAASE,CAAI,IAAOC,IAAA,GAAKH,GAAYE,EAAO,CACpD,CAGJ,KAAO,CACL,IAAMR,EAAM,gCAAgCF,CAAI,GAChD,OAAOI,GAAkBF,CAAG,EACzB,KACCK,EAAIG,IAAS,CACX,aAAcA,EAAK,YACrB,EAAE,EACFD,GAAe,CAAC,CAAC,CACnB,CACJ,CACF,CC3CO,SAASG,GACdC,EAAcC,EACW,CACzB,IAAMC,EAAM,WAAWF,CAAI,oBAAoB,mBAAmBC,CAAO,CAAC,GAC1E,OAAOE,GAGLC,GAAqB,GAAGF,CAAG,4BAA4B,EACpD,KACCG,GAAW,IAAMC,CAAK,EACtBC,EAAI,CAAC,CAAE,SAAAC,CAAS,KAAO,CACrB,QAASA,CACX,EAAE,EACFC,GAAe,CAAC,CAAC,CACnB,EAGFL,GAA2BF,CAAG,EAC3B,KACCG,GAAW,IAAMC,CAAK,EACtBC,EAAI,CAAC,CAAE,WAAAG,EAAY,YAAAC,CAAY,KAAO,CACpC,MAAOD,EACP,MAAOC,CACT,EAAE,EACFF,GAAe,CAAC,CAAC,CACnB,CACJ,EACG,KACCF,EAAI,CAAC,CAACK,EAASC,CAAI,IAAOC,IAAA,GAAKF,GAAYC,EAAO,CACpD,CACJ,CCtBO,SAASE,GACdC,EACyB,CAGzB,IAAIC,EAAQD,EAAI,MAAM,qCAAqC,EAC3D,GAAIC,EAAO,CACT,GAAM,CAAC,CAAEC,EAAMC,CAAI,EAAIF,EACvB,OAAOG,GAA2BF,EAAMC,CAAI,CAC9C,CAIA,GADAF,EAAQD,EAAI,MAAM,oCAAoC,EAClDC,EAAO,CACT,GAAM,CAAC,CAAEI,EAAMC,CAAI,EAAIL,EACvB,OAAOM,GAA2BF,EAAMC,CAAI,CAC9C,CAGA,OAAOE,CACT,CCpBA,IAAIC,GAgBG,SAASC,GACdC,EACoB,CACpB,OAAOF,QAAWG,EAAM,IAAM,CAC5B,IAAMC,EAAS,SAAsB,WAAY,cAAc,EAC/D,GAAIA,EACF,OAAOC,EAAGD,CAAM,EAKhB,GADYE,GAAqB,SAAS,EAClC,OAAQ,CACd,IAAMC,EAAU,SAA0B,WAAW,EACrD,GAAI,EAAEA,GAAWA,EAAQ,QACvB,OAAOC,CACX,CAGA,OAAOC,GAAiBP,EAAG,IAAI,EAC5B,KACCQ,EAAIC,GAAS,SAAS,WAAYA,EAAO,cAAc,CAAC,CAC1D,CAEN,CAAC,EACE,KACCC,GAAW,IAAMJ,CAAK,EACtBK,EAAOF,GAAS,OAAO,KAAKA,CAAK,EAAE,OAAS,CAAC,EAC7CG,EAAIH,IAAU,CAAE,MAAAA,CAAM,EAAE,EACxBI,EAAY,CAAC,CACf,EACJ,CASO,SAASC,GACdd,EAC+B,CAC/B,IAAMe,EAAQC,EAAW,uBAAwBhB,CAAE,EACnD,OAAOC,EAAM,IAAM,CACjB,IAAMgB,EAAQ,IAAIC,EAClB,OAAAD,EAAM,UAAU,CAAC,CAAE,MAAAR,CAAM,IAAM,CAC7BM,EAAM,YAAYI,GAAkBV,CAAK,CAAC,EAC1CM,EAAM,UAAU,IAAI,+BAA+B,CACrD,CAAC,EAGMhB,GAAYC,CAAE,EAClB,KACCQ,EAAIY,GAASH,EAAM,KAAKG,CAAK,CAAC,EAC9BC,EAAS,IAAMJ,EAAM,SAAS,CAAC,EAC/BL,EAAIQ,GAAUE,EAAA,CAAE,IAAKtB,GAAOoB,EAAQ,CACtC,CACJ,CAAC,CACH,CCtDO,SAASG,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACpB,CAClB,OAAOC,GAAiB,SAAS,IAAI,EAClC,KACCC,EAAU,IAAMC,GAAgBL,EAAI,CAAE,QAAAE,EAAS,UAAAD,CAAU,CAAC,CAAC,EAC3DK,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,KACZ,CACL,OAAQA,GAAK,EACf,EACD,EACDC,EAAwB,QAAQ,CAClC,CACJ,CAaO,SAASC,GACdT,EAAiBU,EACY,CAC7B,OAAOC,EAAM,IAAM,CACjB,IAAMC,EAAQ,I
AAIC,EAClB,OAAAD,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAE,CAAO,EAAG,CACfd,EAAG,OAASc,CACd,EAGA,UAAW,CACTd,EAAG,OAAS,EACd,CACF,CAAC,GAICe,EAAQ,wBAAwB,EAC5BC,EAAG,CAAE,OAAQ,EAAM,CAAC,EACpBjB,GAAUC,EAAIU,CAAO,GAExB,KACCO,EAAIC,GAASN,EAAM,KAAKM,CAAK,CAAC,EAC9BC,EAAS,IAAMP,EAAM,SAAS,CAAC,EAC/BN,EAAIY,GAAUE,EAAA,CAAE,IAAKpB,GAAOkB,EAAQ,CACtC,CACJ,CAAC,CACH,CCfO,SAASG,GACdC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACT,CAC7B,IAAMC,EAAQ,IAAI,IAGZC,EAAUC,EAA+B,gBAAiBL,CAAE,EAClE,QAAWM,KAAUF,EAAS,CAC5B,IAAMG,EAAK,mBAAmBD,EAAO,KAAK,UAAU,CAAC,CAAC,EAChDE,EAASC,GAAmB,QAAQF,CAAE,IAAI,EAC5C,OAAOC,GAAW,aACpBL,EAAM,IAAIG,EAAQE,CAAM,CAC5B,CAGA,IAAME,EAAUR,EACb,KACCS,EAAwB,QAAQ,EAChCC,EAAI,CAAC,CAAE,OAAAC,CAAO,IAAM,CAClB,IAAMC,EAAOC,GAAoB,MAAM,EACjCC,EAAOC,EAAW,wBAAyBH,CAAI,EACrD,OAAOD,EAAS,IACdG,EAAK,UACLF,EAAK,UAET,CAAC,EACDI,GAAM,CACR,EAqFF,OAlFmBC,GAAiB,SAAS,IAAI,EAC9C,KACCR,EAAwB,QAAQ,EAGhCS,EAAUC,GAAQC,EAAM,IAAM,CAC5B,IAAIC,EAA4B,CAAC,EACjC,OAAOC,EAAG,CAAC,GAAGrB,CAAK,EAAE,OAAO,CAACsB,EAAO,CAACnB,EAAQE,CAAM,IAAM,CACvD,KAAOe,EAAK,QACGpB,EAAM,IAAIoB,EAAKA,EAAK,OAAS,CAAC,CAAC,EACnC,SAAWf,EAAO,SACzBe,EAAK,IAAI,EAOb,IAAIG,EAASlB,EAAO,UACpB,KAAO,CAACkB,GAAUlB,EAAO,eACvBA,EAASA,EAAO,cAChBkB,EAASlB,EAAO,UAIlB,IAAImB,EAASnB,EAAO,aACpB,KAAOmB,EAAQA,EAASA,EAAO,aAC7BD,GAAUC,EAAO,UAGnB,OAAOF,EAAM,IACX,CAAC,GAAGF,EAAO,CAAC,GAAGA,EAAMjB,CAAM,CAAC,EAAE,QAAQ,EACtCoB,CACF,CACF,EAAG,IAAI,GAAkC,CAAC,CAC5C,CAAC,EACE,KAGCd,EAAIa,GAAS,IAAI,IAAI,CAAC,GAAGA,CAAK,EAAE,KAAK,CAAC,CAAC,CAAEG,CAAC,EAAG,CAAC,CAAEC,CAAC,IAAMD,EAAIC,CAAC,CAAC,CAAC,EAC9DC,GAAkBpB,CAAO,EAGzBU,EAAU,CAAC,CAACK,EAAOM,CAAM,IAAM9B,EAC5B,KACC+B,GAAK,CAAC,CAACC,EAAMC,CAAI,EAAG,CAAE,OAAQ,CAAE,EAAAC,CAAE,EAAG,KAAAC,CAAK,IAAM,CAC9C,IAAMC,EAAOF,EAAIC,EAAK,QAAU,KAAK,MAAMf,EAAK,MAAM,EAGtD,KAAOa,EAAK,QAAQ,CAClB,GAAM,CAAC,CAAER,CAAM,EAAIQ,EAAK,CAAC,EACzB,GAAIR,EAASK,EAASI,GAAKE,EACzBJ,EAAO,CAAC,GAAGA,EAAMC,EAAK,MAAM,CAAE,MAE9B,MAEJ,CAGA,KAAOD,EAAK,QAAQ,CAClB,GAAM,CAAC,CAAEP,CAAM,EAAIO,EAAKA,EAAK,OAAS,CAAC,EACvC,GAAIP,EAASK,GAAUI,GAAK,CAACE,EAC3BH,EAAO,CAACD,EAAK,IAAI,EAAI,GAAGC,CAAI,MAE5B,MAEJ,CAGA,MAAO,CAACD,EAAMC,CAAI,CACpB,EAAG,CAAC,CAAC,EAAG,CAAC,GAAGT,CAAK,CAAC,CAAC,EACnBa,EAAqB,CAACV,EAAGC,IACvBD,EAAE,CAAC,IAAMC,EAAE,CAAC,GACZD,EAAE,CAAC,IAAMC,EAAE,CAAC,CACb,CACH,CACF,CACF,CACF,CACF,EAIC,KACCjB,EAAI,CAAC,CAACqB,EAAMC,CAAI,KAAO,CACrB,KAAMD,EAAK,IAAI,CAAC,CAACV,CAAI,IAAMA,CAAI,EAC/B,KAAMW,EAAK,IAAI,CAAC,CAACX,CAAI,IAAMA,CAAI,CACjC,EAAE,EAGFgB,EAAU,CAAE,KAAM,CAAC,EAAG,KAAM,CAAC,CAAE,CAAC,EAChCC,GAAY,EAAG,CAAC,EAChB5B,EAAI,CAAC,CAACgB,EAAGC,CAAC,IAGJD,EAAE,KAAK,OAASC,EAAE,KAAK,OAClB,CACL,KAAMA,EAAE,KAAK,MAAM,KAAK,IAAI,EAAGD,EAAE,KAAK,OAAS,CAAC,EAAGC,EAAE,KAAK,MAAM,EAChE,KAAM,CAAC,CACT,EAIO,CACL,KAAMA,EAAE,KAAK,MAAM,EAAE,EACrB,KAAMA,EAAE,KAAK,MAAM,EAAGA,EAAE,KAAK,OAASD,EAAE,KAAK,MAAM,CACrD,CAEH,CACH,CACJ,CAYO,SAASa,GACdzC,EAAiB,CAAE,UAAAC,EAAW,QAAAC,EAAS,MAAAwC,EAAO,QAAAC,CAAQ,EACd,CACxC,OAAOrB,EAAM,IAAM,CACjB,IAAMsB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGC,GAAQ,EAAI,CAAC,EAoBxD,GAnBAJ,EAAM,UAAU,CAAC,CAAE,KAAAX,EAAM,KAAAC,CAAK,IAAM,CAGlC,OAAW,CAAC5B,CAAM,IAAK4B,EACrB5B,EAAO,UAAU,OAAO,sBAAsB,EAC9CA,EAAO,UAAU,OAAO,sBAAsB,EAIhD,OAAW,CAACmB,EAAO,CAACnB,CAAM,CAAC,IAAK2B,EAAK,QAAQ,EAC3C3B,EAAO,UAAU,IAAI,sBAAsB,EAC3CA,EAAO,UAAU,OACf,uBACAmB,IAAUQ,EAAK,OAAS,CAC1B,CAEJ,CAAC,EAGGgB,EAAQ,YAAY,EAAG,CAGzB,IAAMC,EAAUC,EACdlD,EAAU,KAAKmD,GAAa,CAAC,EAAGxC,EAAI,IAAG,EAAY,CAAC,EACpDX,EAAU,KAAKmD,GAAa,GAAG,EAAGxC,EAAI,IAAM,QAAiB,CAAC,CAChE,EAGAgC,EACG,KACCS,EAAO,CAAC,CAAE,KAAApB,CAAK,IAAMA,EAAK,OAAS,CAAC,EACpCH,GAAkBY,EAAM,KAAKY,GAAUC,EAAc,CAAC,CAAC,EACvDC,GAAeN,CAAO,CACxB,EACG,UAAU,CAAC,CAAC,CAAC,CA
AE,KAAAjB,CAAK,CAAC,EAAGwB,CAAQ,IAAM,CACrC,GAAM,CAACnD,CAAM,EAAI2B,EAAKA,EAAK,OAAS,CAAC,EACrC,GAAI3B,EAAO,aAAc,CAGvB,IAAMoD,EAAYC,GAAoBrD,CAAM,EAC5C,GAAI,OAAOoD,GAAc,YAAa,CACpC,IAAMhC,EAASpB,EAAO,UAAYoD,EAAU,UACtC,CAAE,OAAA7C,CAAO,EAAI+C,GAAeF,CAAS,EAC3CA,EAAU,SAAS,CACjB,IAAKhC,EAASb,EAAS,EACvB,SAAA4C,CACF,CAAC,CACH,CACF,CACF,CAAC,CACP,CAGA,OAAIR,EAAQ,qBAAqB,GAC/BhD,EACG,KACC4D,EAAUf,CAAK,EACfnC,EAAwB,QAAQ,EAChCyC,GAAa,GAAG,EAChBU,GAAK,CAAC,EACND,EAAUlB,EAAQ,KAAKmB,GAAK,CAAC,CAAC,CAAC,EAC/BC,GAAO,CAAE,MAAO,GAAI,CAAC,EACrBP,GAAeZ,CAAK,CACtB,EACG,UAAU,CAAC,CAAC,CAAE,CAAE,KAAAX,CAAK,CAAC,IAAM,CAC3B,IAAM+B,EAAMC,GAAY,EAGlB3D,EAAS2B,EAAKA,EAAK,OAAS,CAAC,EACnC,GAAI3B,GAAUA,EAAO,OAAQ,CAC3B,GAAM,CAAC4D,CAAM,EAAI5D,EACX,CAAE,KAAA6D,CAAK,EAAI,IAAI,IAAID,EAAO,IAAI,EAChCF,EAAI,OAASG,IACfH,EAAI,KAAOG,EACX,QAAQ,aAAa,CAAC,EAAG,GAAI,GAAGH,CAAG,EAAE,EAIzC,MACEA,EAAI,KAAO,GACX,QAAQ,aAAa,CAAC,EAAG,GAAI,GAAGA,CAAG,EAAE,CAEzC,CAAC,EAGAjE,GAAqBC,EAAI,CAAE,UAAAC,EAAW,QAAAC,CAAQ,CAAC,EACnD,KACCkE,EAAIC,GAASzB,EAAM,KAAKyB,CAAK,CAAC,EAC9BC,EAAS,IAAM1B,EAAM,SAAS,CAAC,EAC/BhC,EAAIyD,GAAUE,EAAA,CAAE,IAAKvE,GAAOqE,EAAQ,CACtC,CACJ,CAAC,CACH,CC9RO,SAASG,GACdC,EAAkB,CAAE,UAAAC,EAAW,MAAAC,EAAO,QAAAC,CAAQ,EACvB,CAGvB,IAAMC,EAAaH,EAChB,KACCI,EAAI,CAAC,CAAE,OAAQ,CAAE,EAAAC,CAAE,CAAE,IAAMA,CAAC,EAC5BC,GAAY,EAAG,CAAC,EAChBF,EAAI,CAAC,CAAC,EAAGG,CAAC,IAAM,EAAIA,GAAKA,EAAI,CAAC,EAC9BC,EAAqB,CACvB,EAGIC,EAAUR,EACb,KACCG,EAAI,CAAC,CAAE,OAAAM,CAAO,IAAMA,CAAM,CAC5B,EAGF,OAAOC,EAAc,CAACF,EAASN,CAAU,CAAC,EACvC,KACCC,EAAI,CAAC,CAACM,EAAQE,CAAS,IAAM,EAAEF,GAAUE,EAAU,EACnDJ,EAAqB,EACrBK,EAAUX,EAAQ,KAAKY,GAAK,CAAC,CAAC,CAAC,EAC/BC,GAAQ,EAAI,EACZC,GAAO,CAAE,MAAO,GAAI,CAAC,EACrBZ,EAAIa,IAAW,CAAE,OAAAA,CAAO,EAAE,CAC5B,CACJ,CAYO,SAASC,GACdC,EAAiB,CAAE,UAAAnB,EAAW,QAAAoB,EAAS,MAAAnB,EAAO,QAAAC,CAAQ,EACpB,CAClC,IAAMmB,EAAQ,IAAIC,EACZC,EAAQF,EAAM,KAAKG,EAAe,EAAGT,GAAQ,EAAI,CAAC,EACxD,OAAAM,EAAM,UAAU,CAGd,KAAK,CAAE,OAAAJ,CAAO,EAAG,CACfE,EAAG,OAASF,EACRA,GACFE,EAAG,aAAa,WAAY,IAAI,EAChCA,EAAG,KAAK,GAERA,EAAG,gBAAgB,UAAU,CAEjC,EAGA,UAAW,CACTA,EAAG,MAAM,IAAM,GACfA,EAAG,OAAS,GACZA,EAAG,gBAAgB,UAAU,CAC/B,CACF,CAAC,EAGDC,EACG,KACCP,EAAUU,CAAK,EACfE,EAAwB,QAAQ,CAClC,EACG,UAAU,CAAC,CAAE,OAAAC,CAAO,IAAM,CACzBP,EAAG,MAAM,IAAM,GAAGO,EAAS,EAAE,IAC/B,CAAC,EAGLC,EAAUR,EAAI,OAAO,EAClB,UAAUS,GAAM,CACfA,EAAG,eAAe,EAClB,OAAO,SAAS,CAAE,IAAK,CAAE,CAAC,CAC5B,CAAC,EAGI9B,GAAeqB,EAAI,CAAE,UAAAnB,EAAW,MAAAC,EAAO,QAAAC,CAAQ,CAAC,EACpD,KACC2B,EAAIC,GAAST,EAAM,KAAKS,CAAK,CAAC,EAC9BC,EAAS,IAAMV,EAAM,SAAS,CAAC,EAC/BjB,EAAI0B,GAAUE,EAAA,CAAE,IAAKb,GAAOW,EAAQ,CACtC,CACJ,CClHO,SAASG,GACd,CAAE,UAAAC,EAAW,UAAAC,CAAU,EACjB,CACND,EACG,KACCE,EAAU,IAAMC,EAAY,cAAc,CAAC,EAC3CC,GAASC,GAAMC,GAAuBD,CAAE,EACrC,KACCE,EAAUP,EAAU,KAAKQ,GAAK,CAAC,CAAC,CAAC,EACjCC,EAAOC,GAAWA,CAAO,EACzBC,EAAI,IAAMN,CAAE,EACZO,GAAK,CAAC,CACR,CACF,EACAH,EAAOJ,GAAMA,EAAG,YAAcA,EAAG,WAAW,EAC5CD,GAASC,GAAM,CACb,IAAMQ,EAAOR,EAAG,UACVS,EAAOT,EAAG,QAAQ,GAAG,GAAKA,EAIhC,OAHAS,EAAK,MAAQD,EAGRE,EAAQ,kBAAkB,EAIxBC,GAAoBF,EAAM,CAAE,UAAAb,CAAU,CAAC,EAC3C,KACCM,EAAUP,EAAU,KAAKQ,GAAK,CAAC,CAAC,CAAC,EACjCS,EAAS,IAAMH,EAAK,gBAAgB,OAAO,CAAC,CAC9C,EAPOI,CAQX,CAAC,CACH,EACG,UAAU,EAGXH,EAAQ,kBAAkB,GAC5Bf,EACG,KACCE,EAAU,IAAMC,EAAY,YAAY,CAAC,EACzCC,GAASC,GAAMW,GAAoBX,EAAI,CAAE,UAAAJ,CAAU,CAAC,CAAC,CACvD,EACG,UAAU,CACnB,CCpDO,SAASkB,GACd,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACf,CACND,EACG,KACCE,EAAU,IAAMC,EACd,2BACF,CAAC,EACDC,EAAIC,GAAM,CACRA,EAAG,cAAgB,GACnBA,EAAG,QAAU,EACf,CAAC,EACDC,GAASD,GAAME,EAAUF,EAAI,QAAQ,EAClC,KACCG,GAAU,IAAMH,EAAG,UAAU,SAAS,0BAA0B,CAAC,EACjEI,EAAI,IAAMJ,CAAE,CACd,CACF,EACAK,GAAeT,CAAO,CACxB,EACG,UAAU,CAAC,CAACI,EAAIM,C
AAM,IAAM,CAC3BN,EAAG,UAAU,OAAO,0BAA0B,EAC1CM,IACFN,EAAG,QAAU,GACjB,CAAC,CACP,CC9BA,SAASO,IAAyB,CAChC,MAAO,qBAAqB,KAAK,UAAU,SAAS,CACtD,CAiBO,SAASC,GACd,CAAE,UAAAC,CAAU,EACN,CACNA,EACG,KACCC,EAAU,IAAMC,EAAY,qBAAqB,CAAC,EAClDC,EAAIC,GAAMA,EAAG,gBAAgB,mBAAmB,CAAC,EACjDC,EAAOP,EAAa,EACpBQ,GAASF,GAAMG,EAAUH,EAAI,YAAY,EACtC,KACCI,EAAI,IAAMJ,CAAE,CACd,CACF,CACF,EACG,UAAUA,GAAM,CACf,IAAMK,EAAML,EAAG,UAGXK,IAAQ,EACVL,EAAG,UAAY,EAGNK,EAAML,EAAG,eAAiBA,EAAG,eACtCA,EAAG,UAAYK,EAAM,EAEzB,CAAC,CACP,CCpCO,SAASC,GACd,CAAE,UAAAC,EAAW,QAAAC,CAAQ,EACf,CACNC,EAAc,CAACC,GAAY,QAAQ,EAAGF,CAAO,CAAC,EAC3C,KACCG,EAAI,CAAC,CAACC,EAAQC,CAAM,IAAMD,GAAU,CAACC,CAAM,EAC3CC,EAAUF,GAAUG,EAAGH,CAAM,EAC1B,KACCI,GAAMJ,EAAS,IAAM,GAAG,CAC1B,CACF,EACAK,GAAeV,CAAS,CAC1B,EACG,UAAU,CAAC,CAACK,EAAQ,CAAE,OAAQ,CAAE,EAAAM,CAAE,CAAC,CAAC,IAAM,CACzC,GAAIN,EACF,SAAS,KAAK,aAAa,qBAAsB,EAAE,EACnD,SAAS,KAAK,MAAM,IAAM,IAAIM,CAAC,SAC1B,CACL,IAAMC,EAAQ,GAAK,SAAS,SAAS,KAAK,MAAM,IAAK,EAAE,EACvD,SAAS,KAAK,gBAAgB,oBAAoB,EAClD,SAAS,KAAK,MAAM,IAAM,GACtBA,GACF,OAAO,SAAS,EAAGA,CAAK,CAC5B,CACF,CAAC,CACP,CC7DK,OAAO,UACV,OAAO,QAAU,SAAUC,EAAa,CACtC,IAAMC,EAA2B,CAAC,EAClC,QAAWC,KAAO,OAAO,KAAKF,CAAG,EAE/BC,EAAK,KAAK,CAACC,EAAKF,EAAIE,CAAG,CAAC,CAAC,EAG3B,OAAOD,CACT,GAGG,OAAO,SACV,OAAO,OAAS,SAAUD,EAAa,CACrC,IAAMC,EAAiB,CAAC,EACxB,QAAWC,KAAO,OAAO,KAAKF,CAAG,EAE/BC,EAAK,KAAKD,EAAIE,CAAG,CAAC,EAGpB,OAAOD,CACT,GAKE,OAAO,SAAY,cAGhB,QAAQ,UAAU,WACrB,QAAQ,UAAU,SAAW,SAC3BE,EAA8BC,EACxB,CACF,OAAOD,GAAM,UACf,KAAK,WAAaA,EAAE,KACpB,KAAK,UAAYA,EAAE,MAEnB,KAAK,WAAaA,EAClB,KAAK,UAAYC,EAErB,GAGG,QAAQ,UAAU,cACrB,QAAQ,UAAU,YAAc,YAC3BC,EACG,CACN,IAAMC,EAAS,KAAK,WACpB,GAAIA,EAAQ,CACND,EAAM,SAAW,GACnBC,EAAO,YAAY,IAAI,EAGzB,QAASC,EAAIF,EAAM,OAAS,EAAGE,GAAK,EAAGA,IAAK,CAC1C,IAAIC,EAAOH,EAAME,CAAC,EACd,OAAOC,GAAS,SAClBA,EAAO,SAAS,eAAeA,CAAI,EAC5BA,EAAK,YACZA,EAAK,WAAW,YAAYA,CAAI,EAG7BD,EAGHD,EAAO,aAAa,KAAK,gBAAkBE,CAAI,EAF/CF,EAAO,aAAaE,EAAM,IAAI,CAGlC,CACF,CACF,I1MMJ,SAASC,IAA4C,CACnD,OAAI,SAAS,WAAa,QACjBC,GACL,GAAG,IAAI,IAAI,yBAA0BC,GAAO,IAAI,CAAC,EACnD,EACG,KAECC,EAAI,IAAM,OAAO,EACjBC,EAAY,CAAC,CACf,EAEKC,GACL,IAAI,IAAI,2BAA4BH,GAAO,IAAI,CACjD,CAEJ,CAOA,SAAS,gBAAgB,UAAU,OAAO,OAAO,EACjD,SAAS,gBAAgB,UAAU,IAAI,IAAI,EAG3C,IAAMI,GAAYC,GAAc,EAC1BC,GAAYC,GAAc,EAC1BC,GAAYC,GAAoBH,EAAS,EACzCI,GAAYC,GAAc,EAG1BC,GAAYC,GAAc,EAC1BC,GAAYC,GAAW,oBAAoB,EAC3CC,GAAYD,GAAW,qBAAqB,EAC5CE,GAAYC,GAAW,EAGvBlB,GAASmB,GAAc,EACvBC,GAAS,SAAS,MAAM,UAAU,QAAQ,EAC5CtB,GAAiB,EACjBuB,GAGEC,GAAS,IAAIC,EACnBC,GAAiB,CAAE,OAAAF,EAAO,CAAC,EAG3B,IAAMG,GAAY,IAAIF,EAGlBG,EAAQ,oBAAoB,GAC9BC,GAAuB,CAAE,UAAArB,GAAW,UAAAM,GAAW,UAAAa,EAAU,CAAC,EACvD,UAAUrB,EAAS,EAzJxB,IAAAwB,KA4JIA,GAAA5B,GAAO,UAAP,YAAA4B,GAAgB,YAAa,QAC/BC,GAAqB,CAAE,UAAAzB,EAAU,CAAC,EAGpC0B,EAAMxB,GAAWE,EAAO,EACrB,KACCuB,GAAM,GAAG,CACX,EACG,UAAU,IAAM,CACfC,GAAU,SAAU,EAAK,EACzBA,GAAU,SAAU,EAAK,CAC3B,CAAC,EAGLtB,GACG,KACCuB,EAAO,CAAC,CAAE,KAAAC,CAAK,IAAMA,IAAS,QAAQ,CACxC,EACG,UAAUC,GAAO,CAChB,OAAQA,EAAI,KAAM,CAGhB,IAAK,IACL,IAAK,IACH,IAAMC,EAAOC,GAAoC,gBAAgB,EAC7D,OAAOD,GAAS,aAClBE,GAAYF,CAAI,EAClB,MAGF,IAAK,IACL,IAAK,IACH,IAAMG,EAAOF,GAAoC,gBAAgB,EAC7D,OAAOE,GAAS,aAClBD,GAAYC,CAAI,EAClB,MAGF,IAAK,QACH,IAAMC,EAASC,GAAiB,EAC5BD,aAAkB,kBACpBA,EAAO,MAAM,CACnB,CACF,CAAC,EAGLE,GAAc,CAAE,UAAA9B,GAAW,UAAAR,EAAU,CAAC,EACtCuC,GAAmB,CAAE,UAAAvC,GAAW,QAAAU,EAAQ,CAAC,EACzC8B,GAAe,CAAE,UAAAxC,EAAU,CAAC,EAC5ByC,GAAgB,CAAE,UAAAjC,GAAW,QAAAE,EAAQ,CAAC,EAGtC,IAAMgC,GAAUC,GAAYC,GAAoB,QAAQ,EAAG,CAAE,UAAApC,EAAU,CAAC,EAClEqC,GAAQ7C,GACX,KACCH,EAAI,IAAM+C,GAAoB,MAAM,CAAC,EACrCE,EAAUC,GAAMC,GAAUD,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,EAAQ,CAAC,CAAC,EACrD5C,EAAY,CAAC,CACf,EAGImD,GAAWvB,EAGf,GAA
GwB,GAAqB,SAAS,EAC9B,IAAIH,GAAMI,GAAaJ,EAAI,CAAE,QAAA3C,EAAQ,CAAC,CAAC,EAG1C,GAAG8C,GAAqB,QAAQ,EAC7B,IAAIH,GAAMK,GAAYL,EAAI,CAAE,OAAA7B,EAAO,CAAC,CAAC,EAGxC,GAAGgC,GAAqB,QAAQ,EAC7B,IAAIH,GAAMM,GAAYN,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,EAAM,CAAC,CAAC,EAG3D,GAAGK,GAAqB,SAAS,EAC9B,IAAIH,GAAMO,GAAaP,CAAE,CAAC,EAG7B,GAAGG,GAAqB,UAAU,EAC/B,IAAIH,GAAMQ,GAAcR,EAAI,CAAE,UAAA1B,EAAU,CAAC,CAAC,EAG7C,GAAG6B,GAAqB,QAAQ,EAC7B,IAAIH,GAAMS,GAAYT,EAAI,CAAE,OAAA/B,GAAQ,UAAAV,EAAU,CAAC,CAAC,EAGnD,GAAG4C,GAAqB,QAAQ,EAC7B,IAAIH,GAAMU,GAAYV,CAAE,CAAC,CAC9B,EAGMW,GAAWC,EAAM,IAAMjC,EAG3B,GAAGwB,GAAqB,UAAU,EAC/B,IAAIH,GAAMa,GAAcb,CAAE,CAAC,EAG9B,GAAGG,GAAqB,SAAS,EAC9B,IAAIH,GAAMc,GAAad,EAAI,CAAE,UAAAvC,GAAW,QAAAJ,GAAS,OAAAS,EAAO,CAAC,CAAC,EAG7D,GAAGqC,GAAqB,SAAS,EAC9B,IAAIH,GAAMzB,EAAQ,kBAAkB,EACjCwC,GAAoBf,EAAI,CAAE,OAAA/B,GAAQ,UAAAd,EAAU,CAAC,EAC7C6D,CACJ,EAGF,GAAGb,GAAqB,cAAc,EACnC,IAAIH,GAAMiB,GAAiBjB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,EAAQ,CAAC,CAAC,EAGzD,GAAGQ,GAAqB,SAAS,EAC9B,IAAIH,GAAMA,EAAG,aAAa,cAAc,IAAM,aAC3CkB,GAAGrD,GAAS,IAAMsD,GAAanB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,EAAM,CAAC,CAAC,EACjEoB,GAAGvD,GAAS,IAAMwD,GAAanB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,EAAM,CAAC,CAAC,CACrE,EAGF,GAAGK,GAAqB,MAAM,EAC3B,IAAIH,GAAMoB,GAAUpB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,EAAQ,CAAC,CAAC,EAGlD,GAAGQ,GAAqB,KAAK,EAC1B,IAAIH,GAAMqB,GAAqBrB,EAAI,CAClC,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,GAAO,QAAAzC,EAC7B,CAAC,CAAC,EAGJ,GAAG8C,GAAqB,KAAK,EAC1B,IAAIH,GAAMsB,GAAetB,EAAI,CAAE,UAAAvC,GAAW,QAAAkC,GAAS,MAAAG,GAAO,QAAAzC,EAAQ,CAAC,CAAC,CACzE,CAAC,EAGKkE,GAAatE,GAChB,KACC8C,EAAU,IAAMY,EAAQ,EACxBa,GAAUtB,EAAQ,EAClBnD,EAAY,CAAC,CACf,EAGFwE,GAAW,UAAU,EAMrB,OAAO,UAAatE,GACpB,OAAO,UAAaE,GACpB,OAAO,QAAaE,GACpB,OAAO,UAAaE,GACpB,OAAO,UAAaE,GACpB,OAAO,QAAaE,GACpB,OAAO,QAAaE,GACpB,OAAO,OAAaC,GACpB,OAAO,OAAaK,GACpB,OAAO,UAAaG,GACpB,OAAO,WAAaiD",
+  "names": ["require_focus_visible", "__commonJSMin", "exports", "module", "global", "factory", "applyFocusVisiblePolyfill", "scope", "hadKeyboardEvent", "hadFocusVisibleRecently", "hadFocusVisibleRecentlyTimeout", "inputTypesAllowlist", "isValidFocusTarget", "el", "focusTriggersKeyboardModality", "type", "tagName", "addFocusVisibleClass", "removeFocusVisibleClass", "onKeyDown", "e", "onPointerDown", "onFocus", "onBlur", "onVisibilityChange", "addInitialPointerMoveListeners", "onInitialPointerMove", "removeInitialPointerMoveListeners", "event", "error", "require_escape_html", "__commonJSMin", "exports", "module", "matchHtmlRegExp", "escapeHtml", "string", "str", "match", "escape", "html", "index", "lastIndex", "require_clipboard", "__commonJSMin", "exports", "module", "root", "factory", "__webpack_modules__", "__unused_webpack_module", "__webpack_exports__", "__webpack_require__", "clipboard", "tiny_emitter", "tiny_emitter_default", "listen", "listen_default", "src_select", "select_default", "command", "type", "err", "ClipboardActionCut", "target", "selectedText", "actions_cut", "createFakeElement", "value", "isRTL", "fakeElement", "yPosition", "fakeCopyAction", "options", "ClipboardActionCopy", "actions_copy", "_typeof", "obj", "ClipboardActionDefault", "_options$action", "action", "container", "text", "actions_default", "clipboard_typeof", "_classCallCheck", "instance", "Constructor", "_defineProperties", "props", "i", "descriptor", "_createClass", "protoProps", "staticProps", "_inherits", "subClass", "superClass", "_setPrototypeOf", "o", "p", "_createSuper", "Derived", "hasNativeReflectConstruct", "_isNativeReflectConstruct", "Super", "_getPrototypeOf", "result", "NewTarget", "_possibleConstructorReturn", "self", "call", "_assertThisInitialized", "e", "getAttributeValue", "suffix", "element", "attribute", "Clipboard", "_Emitter", "_super", "trigger", "_this", "_this2", "selector", "actions", "support", "DOCUMENT_NODE_TYPE", "proto", "closest", "__unused_webpack_exports", "_delegate", "callback", "useCapture", "listenerFn", "listener", "delegate", "elements", "is", "listenNode", "listenNodeList", "listenSelector", "node", "nodeList", "select", "isReadOnly", "selection", "range", "E", "name", "ctx", "data", "evtArr", "len", "evts", "liveEvents", "__webpack_module_cache__", "moduleId", "getter", "definition", "key", "prop", "import_focus_visible", "extendStatics", "d", "b", "p", "__extends", "__", "__awaiter", "thisArg", "_arguments", "P", "generator", "adopt", "value", "resolve", "reject", "fulfilled", "step", "e", "rejected", "result", "__generator", "body", "_", "t", "f", "y", "g", "verb", "n", "v", "op", "__values", "o", "s", "m", "i", "__read", "n", "r", "ar", "e", "error", "__spreadArray", "to", "from", "pack", "i", "l", "ar", "__await", "v", "__asyncGenerator", "thisArg", "_arguments", "generator", "g", "q", "verb", "n", "a", "b", "resume", "step", "e", "settle", "r", "fulfill", "reject", "value", "f", "__asyncValues", "o", "m", "i", "__values", "verb", "n", "v", "resolve", "reject", "settle", "d", "isFunction", "value", "createErrorClass", "createImpl", "_super", "instance", "ctorFunc", "UnsubscriptionError", "createErrorClass", "_super", "errors", "err", "i", "arrRemove", "arr", "item", "index", "Subscription", "initialTeardown", "errors", "_parentage", "_parentage_1", "__values", "_parentage_1_1", "parent_1", "initialFinalizer", "isFunction", "e", "UnsubscriptionError", "_finalizers", "_finalizers_1", "_finalizers_1_1", "finalizer", "execFinalizer", "err", "__spreadArray", 
"__read", "teardown", "_a", "parent", "arrRemove", "empty", "EMPTY_SUBSCRIPTION", "Subscription", "isSubscription", "value", "isFunction", "execFinalizer", "finalizer", "config", "timeoutProvider", "handler", "timeout", "args", "_i", "delegate", "__spreadArray", "__read", "handle", "reportUnhandledError", "err", "timeoutProvider", "onUnhandledError", "config", "noop", "COMPLETE_NOTIFICATION", "createNotification", "errorNotification", "error", "nextNotification", "value", "kind", "context", "errorContext", "cb", "config", "isRoot", "_a", "errorThrown", "error", "captureError", "err", "Subscriber", "_super", "__extends", "destination", "_this", "isSubscription", "EMPTY_OBSERVER", "next", "error", "complete", "SafeSubscriber", "value", "handleStoppedNotification", "nextNotification", "err", "errorNotification", "COMPLETE_NOTIFICATION", "Subscription", "_bind", "bind", "fn", "thisArg", "ConsumerObserver", "partialObserver", "value", "error", "handleUnhandledError", "err", "SafeSubscriber", "_super", "__extends", "observerOrNext", "complete", "_this", "isFunction", "context_1", "config", "Subscriber", "handleUnhandledError", "error", "config", "captureError", "reportUnhandledError", "defaultErrorHandler", "err", "handleStoppedNotification", "notification", "subscriber", "onStoppedNotification", "timeoutProvider", "EMPTY_OBSERVER", "noop", "observable", "identity", "x", "pipe", "fns", "_i", "pipeFromArray", "identity", "input", "prev", "fn", "Observable", "subscribe", "operator", "observable", "observerOrNext", "error", "complete", "_this", "subscriber", "isSubscriber", "SafeSubscriber", "errorContext", "_a", "source", "sink", "err", "next", "promiseCtor", "getPromiseCtor", "resolve", "reject", "value", "operations", "_i", "pipeFromArray", "x", "getPromiseCtor", "promiseCtor", "_a", "config", "isObserver", "value", "isFunction", "isSubscriber", "Subscriber", "isSubscription", "hasLift", "source", "isFunction", "operate", "init", "liftedSource", "err", "createOperatorSubscriber", "destination", "onNext", "onComplete", "onError", "onFinalize", "OperatorSubscriber", "_super", "__extends", "shouldUnsubscribe", "_this", "value", "err", "closed_1", "_a", "Subscriber", "animationFrameProvider", "callback", "request", "cancel", "delegate", "handle", "timestamp", "Subscription", "args", "_i", "__spreadArray", "__read", "ObjectUnsubscribedError", "createErrorClass", "_super", "Subject", "_super", "__extends", "_this", "operator", "subject", "AnonymousSubject", "ObjectUnsubscribedError", "value", "errorContext", "_b", "__values", "_c", "observer", "err", "observers", "_a", "subscriber", "hasError", "isStopped", "EMPTY_SUBSCRIPTION", "Subscription", "arrRemove", "thrownError", "observable", "Observable", "destination", "source", "AnonymousSubject", "_super", "__extends", "destination", "source", "_this", "value", "_b", "_a", "err", "subscriber", "EMPTY_SUBSCRIPTION", "Subject", "BehaviorSubject", "_super", "__extends", "_value", "_this", "subscriber", "subscription", "_a", "hasError", "thrownError", "value", "Subject", "dateTimestampProvider", "ReplaySubject", "_super", "__extends", "_bufferSize", "_windowTime", "_timestampProvider", "dateTimestampProvider", "_this", "value", "_a", "isStopped", "_buffer", "_infiniteTimeWindow", "subscriber", "subscription", "copy", "i", "adjustedBufferSize", "now", "last", "Subject", "Action", "_super", "__extends", "scheduler", "work", "state", "delay", "Subscription", "intervalProvider", "handler", "timeout", "args", "_i", "delegate", "__spreadArray", "__read", "handle", 
"AsyncAction", "_super", "__extends", "scheduler", "work", "_this", "state", "delay", "id", "_a", "_id", "intervalProvider", "_scheduler", "error", "_delay", "errored", "errorValue", "e", "actions", "arrRemove", "Action", "Scheduler", "schedulerActionCtor", "now", "work", "delay", "state", "dateTimestampProvider", "AsyncScheduler", "_super", "__extends", "SchedulerAction", "now", "Scheduler", "_this", "action", "actions", "error", "asyncScheduler", "AsyncScheduler", "AsyncAction", "async", "QueueAction", "_super", "__extends", "scheduler", "work", "_this", "state", "delay", "id", "AsyncAction", "QueueScheduler", "_super", "__extends", "AsyncScheduler", "queueScheduler", "QueueScheduler", "QueueAction", "AnimationFrameAction", "_super", "__extends", "scheduler", "work", "_this", "id", "delay", "animationFrameProvider", "actions", "_a", "AsyncAction", "AnimationFrameScheduler", "_super", "__extends", "action", "flushId", "actions", "error", "AsyncScheduler", "animationFrameScheduler", "AnimationFrameScheduler", "AnimationFrameAction", "EMPTY", "Observable", "subscriber", "isScheduler", "value", "isFunction", "last", "arr", "popResultSelector", "args", "isFunction", "popScheduler", "isScheduler", "popNumber", "defaultValue", "isArrayLike", "x", "isPromise", "value", "isFunction", "isInteropObservable", "input", "isFunction", "observable", "isAsyncIterable", "obj", "isFunction", "createInvalidObservableTypeError", "input", "getSymbolIterator", "iterator", "isIterable", "input", "isFunction", "iterator", "readableStreamLikeToAsyncGenerator", "readableStream", "reader", "__await", "_a", "_b", "value", "done", "isReadableStreamLike", "obj", "isFunction", "innerFrom", "input", "Observable", "isInteropObservable", "fromInteropObservable", "isArrayLike", "fromArrayLike", "isPromise", "fromPromise", "isAsyncIterable", "fromAsyncIterable", "isIterable", "fromIterable", "isReadableStreamLike", "fromReadableStreamLike", "createInvalidObservableTypeError", "obj", "subscriber", "obs", "observable", "isFunction", "array", "i", "promise", "value", "err", "reportUnhandledError", "iterable", "iterable_1", "__values", "iterable_1_1", "asyncIterable", "process", "readableStream", "readableStreamLikeToAsyncGenerator", "asyncIterable_1", "__asyncValues", "asyncIterable_1_1", "executeSchedule", "parentSubscription", "scheduler", "work", "delay", "repeat", "scheduleSubscription", "observeOn", "scheduler", "delay", "operate", "source", "subscriber", "createOperatorSubscriber", "value", "executeSchedule", "err", "subscribeOn", "scheduler", "delay", "operate", "source", "subscriber", "scheduleObservable", "input", "scheduler", "innerFrom", "subscribeOn", "observeOn", "schedulePromise", "input", "scheduler", "innerFrom", "subscribeOn", "observeOn", "scheduleArray", "input", "scheduler", "Observable", "subscriber", "i", "scheduleIterable", "input", "scheduler", "Observable", "subscriber", "iterator", "executeSchedule", "value", "done", "_a", "err", "isFunction", "scheduleAsyncIterable", "input", "scheduler", "Observable", "subscriber", "executeSchedule", "iterator", "result", "scheduleReadableStreamLike", "input", "scheduler", "scheduleAsyncIterable", "readableStreamLikeToAsyncGenerator", "scheduled", "input", "scheduler", "isInteropObservable", "scheduleObservable", "isArrayLike", "scheduleArray", "isPromise", "schedulePromise", "isAsyncIterable", "scheduleAsyncIterable", "isIterable", "scheduleIterable", "isReadableStreamLike", "scheduleReadableStreamLike", "createInvalidObservableTypeError", "from", "input", 
"scheduler", "scheduled", "innerFrom", "of", "args", "_i", "scheduler", "popScheduler", "from", "throwError", "errorOrErrorFactory", "scheduler", "errorFactory", "isFunction", "init", "subscriber", "Observable", "EmptyError", "createErrorClass", "_super", "isValidDate", "value", "map", "project", "thisArg", "operate", "source", "subscriber", "index", "createOperatorSubscriber", "value", "isArray", "callOrApply", "fn", "args", "__spreadArray", "__read", "mapOneOrManyArgs", "map", "isArray", "getPrototypeOf", "objectProto", "getKeys", "argsArgArrayOrObject", "args", "first_1", "isPOJO", "keys", "key", "obj", "createObject", "keys", "values", "result", "key", "i", "combineLatest", "args", "_i", "scheduler", "popScheduler", "resultSelector", "popResultSelector", "_a", "argsArgArrayOrObject", "observables", "keys", "from", "result", "Observable", "combineLatestInit", "values", "createObject", "identity", "mapOneOrManyArgs", "valueTransform", "subscriber", "maybeSchedule", "length", "active", "remainingFirstValues", "i", "source", "hasFirstValue", "createOperatorSubscriber", "value", "execute", "subscription", "executeSchedule", "mergeInternals", "source", "subscriber", "project", "concurrent", "onBeforeNext", "expand", "innerSubScheduler", "additionalFinalizer", "buffer", "active", "index", "isComplete", "checkComplete", "outerNext", "value", "doInnerSub", "innerComplete", "innerFrom", "createOperatorSubscriber", "innerValue", "bufferedValue", "executeSchedule", "err", "mergeMap", "project", "resultSelector", "concurrent", "isFunction", "a", "i", "map", "b", "ii", "innerFrom", "operate", "source", "subscriber", "mergeInternals", "mergeAll", "concurrent", "mergeMap", "identity", "concatAll", "mergeAll", "concat", "args", "_i", "concatAll", "from", "popScheduler", "defer", "observableFactory", "Observable", "subscriber", "innerFrom", "nodeEventEmitterMethods", "eventTargetMethods", "jqueryMethods", "fromEvent", "target", "eventName", "options", "resultSelector", "isFunction", "mapOneOrManyArgs", "_a", "__read", "isEventTarget", "methodName", "handler", "isNodeStyleEventEmitter", "toCommonHandlerRegistry", "isJQueryStyleEventEmitter", "add", "remove", "isArrayLike", "mergeMap", "subTarget", "innerFrom", "Observable", "subscriber", "args", "_i", "fromEventPattern", "addHandler", "removeHandler", "resultSelector", "mapOneOrManyArgs", "Observable", "subscriber", "handler", "e", "_i", "retValue", "isFunction", "timer", "dueTime", "intervalOrScheduler", "scheduler", "async", "intervalDuration", "isScheduler", "Observable", "subscriber", "due", "isValidDate", "n", "merge", "args", "_i", "scheduler", "popScheduler", "concurrent", "popNumber", "sources", "innerFrom", "mergeAll", "from", "EMPTY", "NEVER", "Observable", "noop", "isArray", "argsOrArgArray", "args", "filter", "predicate", "thisArg", "operate", "source", "subscriber", "index", "createOperatorSubscriber", "value", "zip", "args", "_i", "resultSelector", "popResultSelector", "sources", "argsOrArgArray", "Observable", "subscriber", "buffers", "completed", "sourceIndex", "innerFrom", "createOperatorSubscriber", "value", "buffer", "result", "__spreadArray", "__read", "i", "EMPTY", "audit", "durationSelector", "operate", "source", "subscriber", "hasValue", "lastValue", "durationSubscriber", "isComplete", "endDuration", "value", "cleanupDuration", "createOperatorSubscriber", "innerFrom", "auditTime", "duration", "scheduler", "asyncScheduler", "audit", "timer", "bufferCount", "bufferSize", "startBufferEvery", "operate", "source", "subscriber", 
"buffers", "count", "createOperatorSubscriber", "value", "toEmit", "buffers_1", "__values", "buffers_1_1", "buffer", "toEmit_1", "toEmit_1_1", "arrRemove", "buffers_2", "buffers_2_1", "catchError", "selector", "operate", "source", "subscriber", "innerSub", "syncUnsub", "handledResult", "createOperatorSubscriber", "err", "innerFrom", "scanInternals", "accumulator", "seed", "hasSeed", "emitOnNext", "emitBeforeComplete", "source", "subscriber", "hasState", "state", "index", "createOperatorSubscriber", "value", "i", "combineLatest", "args", "_i", "resultSelector", "popResultSelector", "pipe", "__spreadArray", "__read", "mapOneOrManyArgs", "operate", "source", "subscriber", "combineLatestInit", "argsOrArgArray", "combineLatestWith", "otherSources", "_i", "combineLatest", "__spreadArray", "__read", "debounce", "durationSelector", "operate", "source", "subscriber", "hasValue", "lastValue", "durationSubscriber", "emit", "value", "createOperatorSubscriber", "noop", "innerFrom", "debounceTime", "dueTime", "scheduler", "asyncScheduler", "operate", "source", "subscriber", "activeTask", "lastValue", "lastTime", "emit", "value", "emitWhenIdle", "targetTime", "now", "createOperatorSubscriber", "defaultIfEmpty", "defaultValue", "operate", "source", "subscriber", "hasValue", "createOperatorSubscriber", "value", "take", "count", "EMPTY", "operate", "source", "subscriber", "seen", "createOperatorSubscriber", "value", "ignoreElements", "operate", "source", "subscriber", "createOperatorSubscriber", "noop", "mapTo", "value", "map", "delayWhen", "delayDurationSelector", "subscriptionDelay", "source", "concat", "take", "ignoreElements", "mergeMap", "value", "index", "innerFrom", "mapTo", "delay", "due", "scheduler", "asyncScheduler", "duration", "timer", "delayWhen", "distinctUntilChanged", "comparator", "keySelector", "identity", "defaultCompare", "operate", "source", "subscriber", "previousKey", "first", "createOperatorSubscriber", "value", "currentKey", "a", "b", "distinctUntilKeyChanged", "key", "compare", "distinctUntilChanged", "x", "y", "throwIfEmpty", "errorFactory", "defaultErrorFactory", "operate", "source", "subscriber", "hasValue", "createOperatorSubscriber", "value", "EmptyError", "endWith", "values", "_i", "source", "concat", "of", "__spreadArray", "__read", "finalize", "callback", "operate", "source", "subscriber", "first", "predicate", "defaultValue", "hasDefaultValue", "source", "filter", "v", "identity", "take", "defaultIfEmpty", "throwIfEmpty", "EmptyError", "takeLast", "count", "EMPTY", "operate", "source", "subscriber", "buffer", "createOperatorSubscriber", "value", "buffer_1", "__values", "buffer_1_1", "merge", "args", "_i", "scheduler", "popScheduler", "concurrent", "popNumber", "argsOrArgArray", "operate", "source", "subscriber", "mergeAll", "from", "__spreadArray", "__read", "mergeWith", "otherSources", "_i", "merge", "__spreadArray", "__read", "repeat", "countOrConfig", "count", "delay", "_a", "EMPTY", "operate", "source", "subscriber", "soFar", "sourceSub", "resubscribe", "notifier", "timer", "innerFrom", "notifierSubscriber_1", "createOperatorSubscriber", "subscribeToSource", "syncUnsub", "scan", "accumulator", "seed", "operate", "scanInternals", "share", "options", "_a", "connector", "Subject", "_b", "resetOnError", "_c", "resetOnComplete", "_d", "resetOnRefCountZero", "wrapperSource", "connection", "resetConnection", "subject", "refCount", "hasCompleted", "hasErrored", "cancelReset", "reset", "resetAndUnsubscribe", "conn", "operate", "source", "subscriber", "dest", "handleReset", 
"SafeSubscriber", "value", "err", "innerFrom", "on", "args", "_i", "onSubscriber", "__spreadArray", "__read", "shareReplay", "configOrBufferSize", "windowTime", "scheduler", "bufferSize", "refCount", "_a", "_b", "_c", "share", "ReplaySubject", "skip", "count", "filter", "_", "index", "skipUntil", "notifier", "operate", "source", "subscriber", "taking", "skipSubscriber", "createOperatorSubscriber", "noop", "innerFrom", "value", "startWith", "values", "_i", "scheduler", "popScheduler", "operate", "source", "subscriber", "concat", "switchMap", "project", "resultSelector", "operate", "source", "subscriber", "innerSubscriber", "index", "isComplete", "checkComplete", "createOperatorSubscriber", "value", "innerIndex", "outerIndex", "innerFrom", "innerValue", "takeUntil", "notifier", "operate", "source", "subscriber", "innerFrom", "createOperatorSubscriber", "noop", "takeWhile", "predicate", "inclusive", "operate", "source", "subscriber", "index", "createOperatorSubscriber", "value", "result", "tap", "observerOrNext", "error", "complete", "tapObserver", "isFunction", "operate", "source", "subscriber", "_a", "isUnsub", "createOperatorSubscriber", "value", "err", "_b", "identity", "throttle", "durationSelector", "config", "operate", "source", "subscriber", "_a", "_b", "leading", "_c", "trailing", "hasValue", "sendValue", "throttled", "isComplete", "endThrottling", "send", "cleanupThrottling", "startThrottle", "value", "innerFrom", "createOperatorSubscriber", "throttleTime", "duration", "scheduler", "config", "asyncScheduler", "duration$", "timer", "throttle", "withLatestFrom", "inputs", "_i", "project", "popResultSelector", "operate", "source", "subscriber", "len", "otherValues", "hasValue", "ready", "i", "innerFrom", "createOperatorSubscriber", "value", "identity", "noop", "values", "__spreadArray", "__read", "zip", "sources", "_i", "operate", "source", "subscriber", "__spreadArray", "__read", "zipWith", "otherInputs", "_i", "zip", "__spreadArray", "__read", "watchDocument", "document$", "ReplaySubject", "fromEvent", "getElements", "selector", "node", "getElement", "el", "getOptionalElement", "getActiveElement", "_a", "_b", "_c", "_d", "observer$", "merge", "fromEvent", "debounceTime", "startWith", "map", "getActiveElement", "shareReplay", "watchElementFocus", "el", "active", "distinctUntilChanged", "watchElementHover", "el", "timeout", "defer", "merge", "fromEvent", "map", "debounce", "active", "timer", "identity", "startWith", "appendChild", "el", "child", "node", "h", "tag", "attributes", "children", "attr", "round", "value", "digits", "watchScript", "src", "script", "h", "defer", "merge", "fromEvent", "switchMap", "throwError", "map", "finalize", "take", "entry$", "Subject", "observer$", "defer", "watchScript", "of", "map", "entries", "entry", "switchMap", "observer", "merge", "NEVER", "finalize", "shareReplay", "getElementSize", "el", "watchElementSize", "target", "tap", "filter", "startWith", "getElementContentSize", "el", "getElementContainer", "parent", "getElementContainers", "containers", "getElementOffset", "el", "getElementOffsetAbsolute", "rect", "watchElementOffset", "merge", "fromEvent", "auditTime", "animationFrameScheduler", "map", "startWith", "getElementContentOffset", "el", "watchElementContentOffset", "merge", "fromEvent", "auditTime", "animationFrameScheduler", "map", "startWith", "entry$", "Subject", "observer$", "defer", "of", "entries", "entry", "switchMap", "observer", "merge", "NEVER", "finalize", "shareReplay", "watchElementVisibility", "el", "tap", "filter", "target", 
"map", "isIntersecting", "watchElementBoundary", "threshold", "watchElementContentOffset", "y", "visible", "getElementSize", "content", "getElementContentSize", "distinctUntilChanged", "toggles", "getElement", "getToggle", "name", "setToggle", "value", "watchToggle", "el", "fromEvent", "map", "startWith", "isSusceptibleToKeyboard", "el", "type", "watchComposition", "merge", "fromEvent", "map", "startWith", "watchKeyboard", "keyboard$", "filter", "ev", "getToggle", "mode", "active", "getActiveElement", "share", "switchMap", "EMPTY", "getLocation", "setLocation", "url", "navigate", "feature", "el", "h", "watchLocation", "Subject", "getLocationHash", "setLocationHash", "hash", "el", "h", "ev", "watchLocationHash", "location$", "merge", "fromEvent", "map", "startWith", "filter", "shareReplay", "watchLocationTarget", "id", "getOptionalElement", "watchMedia", "query", "media", "fromEventPattern", "next", "startWith", "watchPrint", "merge", "fromEvent", "map", "at", "query$", "factory", "switchMap", "active", "EMPTY", "request", "url", "options", "Observable", "observer", "req", "event", "_a", "length", "requestJSON", "switchMap", "res", "map", "body", "shareReplay", "requestHTML", "dom", "requestXML", "getViewportOffset", "watchViewportOffset", "merge", "fromEvent", "map", "startWith", "getViewportSize", "watchViewportSize", "fromEvent", "map", "startWith", "watchViewport", "combineLatest", "watchViewportOffset", "watchViewportSize", "map", "offset", "size", "shareReplay", "watchViewportAt", "el", "viewport$", "header$", "size$", "distinctUntilKeyChanged", "offset$", "combineLatest", "map", "getElementOffset", "height", "offset", "size", "x", "y", "recv", "worker", "fromEvent", "ev", "send", "send$", "Subject", "data", "watchWorker", "url", "recv$", "worker$", "done$", "ignoreElements", "endWith", "mergeWith", "takeUntil", "share", "script", "getElement", "config", "getLocation", "configuration", "feature", "flag", "translation", "key", "value", "getComponentElement", "type", "node", "getElement", "getComponentElements", "getElements", "watchAnnounce", "el", "button", "getElement", "fromEvent", "map", "content", "mountAnnounce", "feature", "EMPTY", "defer", "push$", "Subject", "hash", "tap", "state", "finalize", "__spreadValues", "watchConsent", "el", "target$", "map", "target", "mountConsent", "options", "internal$", "Subject", "hidden", "tap", "state", "finalize", "__spreadValues", "renderTooltip", "id", "style", "h", "renderInlineTooltip2", "children", "renderAnnotation", "id", "prefix", "anchor", "h", "renderTooltip", "renderClipboardButton", "id", "h", "translation", "import_escape_html", "renderSearchDocument", "document", "flag", "parent", "teaser", "missing", "key", "list", "h", "escapeHTML", "config", "configuration", "url", "feature", "match", "highlight", "value", "tags", "tag", "type", "translation", "renderSearchResultItem", "result", "threshold", "docs", "doc", "article", "index", "best", "more", "children", "section", "renderSourceFacts", "facts", "h", "key", "value", "round", "renderTabbedControl", "type", "classes", "h", "renderTable", "table", "h", "renderVersion", "version", "_a", "config", "configuration", "url", "h", "renderVersionSelector", "versions", "active", "translation", "sequence", "watchTooltip2", "el", "active$", "combineLatest", "watchElementFocus", "watchElementHover", "map", "focus", "hover", "distinctUntilChanged", "offset$", "defer", "getElementContainers", "mergeMap", "watchElementContentOffset", "throttleTime", "combineLatestWith", 
"getElementOffsetAbsolute", "first", "active", "switchMap", "offset", "share", "mountTooltip2", "dependencies", "content$", "viewport$", "id", "push$", "Subject", "show$", "BehaviorSubject", "ignoreElements", "endWith", "node$", "debounce", "timer", "queueScheduler", "EMPTY", "tap", "node", "startWith", "states", "origin$", "filter", "withLatestFrom", "_", "size", "host", "x", "height", "getElementSize", "origin", "getElement", "observeOn", "animationFrameScheduler", "state", "finalize", "__spreadValues", "mountInlineTooltip2", "container", "Observable", "observer", "title", "renderInlineTooltip2", "watchAnnotation", "el", "container", "offset$", "defer", "combineLatest", "watchElementOffset", "watchElementContentOffset", "map", "x", "y", "scroll", "width", "height", "getElementSize", "watchElementFocus", "switchMap", "active", "offset", "take", "mountAnnotation", "target$", "tooltip", "index", "push$", "Subject", "done$", "ignoreElements", "endWith", "watchElementVisibility", "takeUntil", "visible", "merge", "filter", "debounceTime", "auditTime", "animationFrameScheduler", "throttleTime", "origin", "fromEvent", "ev", "withLatestFrom", "_a", "parent", "getActiveElement", "target", "delay", "tap", "state", "finalize", "__spreadValues", "findHosts", "container", "getElements", "findMarkers", "markers", "el", "nodes", "it", "node", "text", "match", "id", "force", "marker", "swap", "source", "target", "mountAnnotationList", "target$", "print$", "parent", "prefix", "annotations", "getOptionalElement", "renderAnnotation", "EMPTY", "defer", "push$", "Subject", "done$", "ignoreElements", "endWith", "pairs", "annotation", "getElement", "takeUntil", "active", "inner", "child", "merge", "mountAnnotation", "finalize", "share", "findList", "el", "sibling", "mountAnnotationBlock", "options", "defer", "list", "mountAnnotationList", "EMPTY", "import_clipboard", "sequence", "findCandidateList", "el", "sibling", "watchCodeBlock", "watchElementSize", "map", "width", "getElementContentSize", "distinctUntilKeyChanged", "mountCodeBlock", "options", "hover", "factory$", "defer", "push$", "Subject", "done$", "takeLast", "scrollable", "content$", "ClipboardJS", "feature", "parent", "button", "renderClipboardButton", "mountInlineTooltip2", "container", "list", "annotations$", "mountAnnotationList", "takeUntil", "height", "distinctUntilChanged", "switchMap", "active", "EMPTY", "getElements", "tap", "state", "finalize", "__spreadValues", "mergeWith", "watchElementVisibility", "filter", "visible", "take", "watchDetails", "el", "target$", "print$", "open", "merge", "map", "target", "filter", "details", "active", "tap", "mountDetails", "options", "defer", "push$", "Subject", "action", "reveal", "state", "finalize", "__spreadValues", "mermaid_default", "mermaid$", "sequence", "fetchScripts", "watchScript", "of", "mountMermaid", "el", "tap", "mermaid_default", "map", "shareReplay", "__async", "id", "host", "h", "text", "svg", "fn", "shadow", "sentinel", "h", "mountDataTable", "el", "renderTable", "of", "watchContentTabs", "inputs", "initial", "input", "merge", "fromEvent", "map", "getElement", "startWith", "active", "mountContentTabs", "el", "viewport$", "target$", "container", "getElements", "prev", "renderTabbedControl", "next", "defer", "push$", "Subject", "done$", "ignoreElements", "endWith", "combineLatest", "watchElementSize", "watchElementVisibility", "takeUntil", "auditTime", "animationFrameScheduler", "size", "offset", "getElementOffset", "width", "getElementSize", "content", "getElementContentOffset", 
"watchElementContentOffset", "getElementContentSize", "direction", "filter", "label", "h", "ev", "tap", "feature", "skip", "withLatestFrom", "tab", "y", "set", "tabs", "media", "state", "finalize", "__spreadValues", "subscribeOn", "asyncScheduler", "mountContent", "el", "viewport$", "target$", "print$", "merge", "getElements", "child", "mountAnnotationBlock", "mountCodeBlock", "mountMermaid", "mountDataTable", "mountDetails", "mountContentTabs", "feature", "mountInlineTooltip2", "watchDialog", "_el", "alert$", "switchMap", "message", "merge", "of", "delay", "map", "active", "mountDialog", "el", "options", "inner", "getElement", "defer", "push$", "Subject", "tap", "state", "finalize", "__spreadValues", "sequence", "watchTooltip", "el", "host", "width", "getElementSize", "container", "getElementContainer", "scroll$", "watchElementContentOffset", "of", "active$", "merge", "watchElementFocus", "watchElementHover", "distinctUntilChanged", "combineLatest", "map", "active", "scroll", "x", "y", "getElementOffset", "size", "table", "mountTooltip", "title", "EMPTY", "id", "tooltip", "renderTooltip", "typeset", "getElement", "defer", "push$", "Subject", "offset", "filter", "debounceTime", "auditTime", "animationFrameScheduler", "throttleTime", "origin", "tap", "state", "finalize", "__spreadValues", "subscribeOn", "asyncScheduler", "isHidden", "viewport$", "feature", "of", "direction$", "map", "y", "bufferCount", "a", "b", "distinctUntilKeyChanged", "hidden$", "combineLatest", "filter", "offset", "direction", "distinctUntilChanged", "search$", "watchToggle", "search", "switchMap", "active", "startWith", "watchHeader", "el", "options", "defer", "watchElementSize", "height", "hidden", "shareReplay", "mountHeader", "header$", "main$", "push$", "Subject", "done$", "ignoreElements", "endWith", "combineLatestWith", "tooltips", "from", "getElements", "mergeMap", "child", "mountTooltip", "takeUntil", "state", "__spreadValues", "mergeWith", "watchHeaderTitle", "el", "viewport$", "header$", "watchViewportAt", "map", "y", "height", "getElementSize", "distinctUntilKeyChanged", "mountHeaderTitle", "options", "defer", "push$", "Subject", "active", "heading", "getOptionalElement", "EMPTY", "tap", "state", "finalize", "__spreadValues", "watchMain", "el", "viewport$", "header$", "adjust$", "map", "height", "distinctUntilChanged", "border$", "switchMap", "watchElementSize", "distinctUntilKeyChanged", "combineLatest", "header", "top", "bottom", "y", "a", "b", "watchPalette", "inputs", "current", "input", "index", "of", "mergeMap", "fromEvent", "map", "startWith", "shareReplay", "mountPalette", "el", "getElements", "meta", "h", "scheme", "media$", "watchMedia", "defer", "push$", "Subject", "palette", "media", "key", "value", "label", "filter", "ev", "withLatestFrom", "_", "header", "getComponentElement", "style", "color", "observeOn", "asyncScheduler", "takeUntil", "skip", "repeat", "tap", "state", "finalize", "__spreadValues", "mountProgress", "el", "progress$", "defer", "push$", "Subject", "value", "tap", "finalize", "map", "import_clipboard", "extract", "el", "copy", "text", "setupClipboardJS", "alert$", "ClipboardJS", "Observable", "subscriber", "getElement", "ev", "tap", "map", "translation", "resolve", "url", "base", "extract", "document", "sitemap", "el", "getElements", "getElement", "links", "link", "href", "fetchSitemap", "requestXML", "map", "catchError", "of", "handle", "ev", "sitemap", "EMPTY", "el", "url", "of", "head", "document", "tags", "getElements", "resolve", "key", "value", "inject", "next", 
"selector", "feature", "source", "getOptionalElement", "target", "html", "name", "container", "getComponentElement", "concat", "switchMap", "script", "Observable", "observer", "ignoreElements", "endWith", "setupInstantNavigation", "location$", "viewport$", "progress$", "config", "configuration", "sitemap$", "fetchSitemap", "instant$", "fromEvent", "combineLatestWith", "share", "history$", "map", "getLocation", "withLatestFrom", "offset", "merge", "document$", "distinctUntilKeyChanged", "requestHTML", "catchError", "setLocation", "_", "distinctUntilChanged", "a", "b", "tap", "_a", "_b", "setLocationHash", "debounceTime", "import_escape_html", "setupSearchHighlighter", "config", "regex", "term", "separator", "highlight", "_", "data", "query", "match", "value", "escapeHTML", "isSearchReadyMessage", "message", "isSearchResultMessage", "setupSearchWorker", "url", "index$", "worker$", "watchWorker", "merge", "of", "watchToggle", "first", "active", "switchMap", "config", "docs", "feature", "setupVersionSelector", "document$", "config", "configuration", "versions$", "requestJSON", "catchError", "EMPTY", "current$", "map", "versions", "current", "version", "aliases", "switchMap", "urls", "fromEvent", "filter", "ev", "withLatestFrom", "el", "url", "of", "fetchSitemap", "sitemap", "path", "getLocation", "setLocation", "combineLatest", "getElement", "renderVersionSelector", "_a", "outdated", "ignored", "main", "ignore", "warning", "getComponentElements", "watchSearchQuery", "el", "worker$", "searchParams", "getLocation", "setToggle", "watchToggle", "first", "active", "url", "focus$", "watchElementFocus", "value$", "merge", "isSearchReadyMessage", "fromEvent", "map", "distinctUntilChanged", "combineLatest", "value", "focus", "shareReplay", "mountSearchQuery", "push$", "Subject", "done$", "ignoreElements", "endWith", "_", "query", "distinctUntilKeyChanged", "takeUntil", "label", "getElement", "tap", "state", "finalize", "__spreadValues", "mountSearchResult", "el", "worker$", "query$", "push$", "Subject", "boundary$", "watchElementBoundary", "filter", "container", "meta", "getElement", "list", "watchToggle", "active", "withLatestFrom", "skipUntil", "first", "isSearchReadyMessage", "items", "value", "translation", "count", "round", "render$", "tap", "switchMap", "merge", "of", "bufferCount", "zipWith", "chunk", "map", "renderSearchResultItem", "share", "item", "mergeMap", "details", "getOptionalElement", "EMPTY", "fromEvent", "takeUntil", "isSearchResultMessage", "data", "state", "finalize", "__spreadValues", "watchSearchShare", "_el", "query$", "map", "value", "url", "getLocation", "mountSearchShare", "el", "options", "push$", "Subject", "done$", "ignoreElements", "endWith", "fromEvent", "takeUntil", "ev", "tap", "state", "finalize", "__spreadValues", "mountSearchSuggest", "el", "worker$", "keyboard$", "push$", "Subject", "query", "getComponentElement", "query$", "merge", "fromEvent", "observeOn", "asyncScheduler", "map", "distinctUntilChanged", "combineLatestWith", "suggest", "value", "words", "last", "filter", "mode", "key", "isSearchResultMessage", "data", "tap", "state", "finalize", "mountSearch", "el", "index$", "keyboard$", "config", "configuration", "worker$", "setupSearchWorker", "query", "getComponentElement", "result", "fromEvent", "filter", "target", "setToggle", "mode", "key", "active", "getActiveElement", "anchors", "anchor", "getElements", "article", "best", "a", "b", "els", "i", "query$", "mountSearchQuery", "merge", "mountSearchResult", "mergeWith", "getComponentElements", "child", 
"mountSearchShare", "mountSearchSuggest", "err", "NEVER", "mountSearchHiglight", "el", "index$", "location$", "combineLatest", "startWith", "getLocation", "filter", "url", "map", "index", "setupSearchHighlighter", "fn", "_a", "nodes", "it", "node", "original", "replaced", "text", "childNodes", "h", "watchSidebar", "el", "viewport$", "main$", "parent", "adjust", "combineLatest", "map", "offset", "height", "y", "distinctUntilChanged", "a", "b", "mountSidebar", "_a", "_b", "header$", "options", "__objRest", "inner", "getElement", "getElementOffset", "defer", "push$", "Subject", "done$", "ignoreElements", "endWith", "next$", "auditTime", "animationFrameScheduler", "withLatestFrom", "first", "item", "getElements", "container", "getElementSize", "from", "mergeMap", "label", "fromEvent", "observeOn", "asyncScheduler", "takeUntil", "input", "tap", "state", "finalize", "__spreadValues", "fetchSourceFactsFromGitHub", "user", "repo", "url", "zip", "requestJSON", "catchError", "EMPTY", "map", "release", "defaultIfEmpty", "info", "__spreadValues", "fetchSourceFactsFromGitLab", "base", "project", "url", "zip", "requestJSON", "catchError", "EMPTY", "map", "tag_name", "defaultIfEmpty", "star_count", "forks_count", "release", "info", "__spreadValues", "fetchSourceFacts", "url", "match", "user", "repo", "fetchSourceFactsFromGitHub", "base", "slug", "fetchSourceFactsFromGitLab", "EMPTY", "fetch$", "watchSource", "el", "defer", "cached", "of", "getComponentElements", "consent", "EMPTY", "fetchSourceFacts", "tap", "facts", "catchError", "filter", "map", "shareReplay", "mountSource", "inner", "getElement", "push$", "Subject", "renderSourceFacts", "state", "finalize", "__spreadValues", "watchTabs", "el", "viewport$", "header$", "watchElementSize", "switchMap", "watchViewportAt", "map", "y", "distinctUntilKeyChanged", "mountTabs", "options", "defer", "push$", "Subject", "hidden", "feature", "of", "tap", "state", "finalize", "__spreadValues", "watchTableOfContents", "el", "viewport$", "header$", "table", "anchors", "getElements", "anchor", "id", "target", "getOptionalElement", "adjust$", "distinctUntilKeyChanged", "map", "height", "main", "getComponentElement", "grid", "getElement", "share", "watchElementSize", "switchMap", "body", "defer", "path", "of", "index", "offset", "parent", "a", "b", "combineLatestWith", "adjust", "scan", "prev", "next", "y", "size", "last", "distinctUntilChanged", "startWith", "bufferCount", "mountTableOfContents", "main$", "target$", "push$", "Subject", "done$", "ignoreElements", "endWith", "feature", "smooth$", "merge", "debounceTime", "filter", "observeOn", "asyncScheduler", "withLatestFrom", "behavior", "container", "getElementContainer", "getElementSize", "takeUntil", "skip", "repeat", "url", "getLocation", "active", "hash", "tap", "state", "finalize", "__spreadValues", "watchBackToTop", "_el", "viewport$", "main$", "target$", "direction$", "map", "y", "bufferCount", "b", "distinctUntilChanged", "active$", "active", "combineLatest", "direction", "takeUntil", "skip", "endWith", "repeat", "hidden", "mountBackToTop", "el", "header$", "push$", "Subject", "done$", "ignoreElements", "distinctUntilKeyChanged", "height", "fromEvent", "ev", "tap", "state", "finalize", "__spreadValues", "patchEllipsis", "document$", "viewport$", "switchMap", "getElements", "mergeMap", "el", "watchElementVisibility", "takeUntil", "skip", "filter", "visible", "map", "take", "text", "host", "feature", "mountInlineTooltip2", "finalize", "EMPTY", "patchIndeterminate", "document$", "tablet$", "switchMap", 
"getElements", "tap", "el", "mergeMap", "fromEvent", "takeWhile", "map", "withLatestFrom", "tablet", "isAppleDevice", "patchScrollfix", "document$", "switchMap", "getElements", "tap", "el", "filter", "mergeMap", "fromEvent", "map", "top", "patchScrolllock", "viewport$", "tablet$", "combineLatest", "watchToggle", "map", "active", "tablet", "switchMap", "of", "delay", "withLatestFrom", "y", "value", "obj", "data", "key", "x", "y", "nodes", "parent", "i", "node", "fetchSearchIndex", "watchScript", "config", "map", "shareReplay", "requestJSON", "document$", "watchDocument", "location$", "watchLocation", "target$", "watchLocationTarget", "keyboard$", "watchKeyboard", "viewport$", "watchViewport", "tablet$", "watchMedia", "screen$", "print$", "watchPrint", "configuration", "index$", "NEVER", "alert$", "Subject", "setupClipboardJS", "progress$", "feature", "setupInstantNavigation", "_a", "setupVersionSelector", "merge", "delay", "setToggle", "filter", "mode", "key", "prev", "getOptionalElement", "setLocation", "next", "active", "getActiveElement", "patchEllipsis", "patchIndeterminate", "patchScrollfix", "patchScrolllock", "header$", "watchHeader", "getComponentElement", "main$", "switchMap", "el", "watchMain", "control$", "getComponentElements", "mountConsent", "mountDialog", "mountHeader", "mountPalette", "mountProgress", "mountSearch", "mountSource", "content$", "defer", "mountAnnounce", "mountContent", "mountSearchHiglight", "EMPTY", "mountHeaderTitle", "at", "mountSidebar", "mountTabs", "mountTableOfContents", "mountBackToTop", "component$", "mergeWith"]
 }
diff --git a/v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js b/v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js
deleted file mode 100644
index cf778d4288e..00000000000
--- a/v10.0.X/assets/javascripts/bundle.fe8b6f2b.min.js
+++ /dev/null
@@ -1,29 +0,0 @@
-"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function 
J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*!
- * clipboard.js v2.0.11
- * https://clipboardjs.com/
- *
- * Licensed MIT © Zeno Rocha
- */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(M){return typeof M}:Ie=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,M){return _&&ro(V.prototype,_),M&&ro(V,M),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Wt(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var Ri=function(V){Ci(M,V);var _=Hi(M);function M(j,D){var Y;return _i(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof 
D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),M}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p<c;p++)s[p].fn.apply(s[p].ctx,a);return this},off:function(i,a){var s=this.e||(this.e={}),p=s[i],c=[];if(p&&a)for(var l=0,f=p.length;l<f;l++)p[l].fn!==a&&p[l].fn._!==a&&c.push(p[l]);return c.length?s[i]=c:delete s[i],this}},o.exports=n,o.exports.TinyEmitter=n}},t={};function r(o){if(t[o])return t[o].exports;var n=t[o]={exports:{}};return e[o](n,n.exports,r),n.exports}return function(){r.n=function(o){var 
n=o&&o.__esModule?function(){return o.default}:function(){return o};return r.d(n,{a:n}),n}}(),function(){r.d=function(o,n){for(var i in n)r.o(n,i)&&!r.o(o,i)&&Object.defineProperty(o,i,{enumerable:!0,get:n[i]})}}(),function(){r.o=function(o,n){return Object.prototype.hasOwnProperty.call(o,n)}}(),r(686)}().default})});var ti=yr((gT,ei)=>{"use strict";/*!
- * escape-html
- * Copyright(c) 2012-2013 TJ Holowaychuk
- * Copyright(c) 2015 Andreas Lubbe
- * Copyright(c) 2015 Tiancheng "Timothy" Gu
- * MIT Licensed
- */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i<t.length;i++){switch(t.charCodeAt(i)){case 34:o="&quot;";break;case 38:o="&amp;";break;case 39:o="&#39;";break;case 60:o="&lt;";break;case 62:o="&gt;";break;default:continue}a!==i&&(n+=t.substring(a,i)),a=i+1,n+=o}return a!==i?n+t.substring(a,i):n}});var t0=Vt(co());/*! *****************************************************************************
-Copyright (c) Microsoft Corporation.
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THIS SOFTWARE.
-***************************************************************************** */var wr=function(e,t){return wr=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(r,o){r.__proto__=o}||function(r,o){for(var n in o)Object.prototype.hasOwnProperty.call(o,n)&&(r[n]=o[n])},wr(e,t)};function re(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");wr(e,t);function r(){this.constructor=e}e.prototype=t===null?Object.create(t):(r.prototype=t.prototype,new r)}function po(e,t,r,o){function n(i){return i instanceof r?i:new r(function(a){a(i)})}return new(r||(r=Promise))(function(i,a){function s(l){try{c(o.next(l))}catch(f){a(f)}}function p(l){try{c(o.throw(l))}catch(f){a(f)}}function c(l){l.done?i(l.value):n(l.value).then(s,p)}c((o=o.apply(e,t||[])).next())})}function Nt(e,t){var r={label:0,sent:function(){if(i[0]&1)throw i[1];return i[1]},trys:[],ops:[]},o,n,i,a;return a={next:s(0),throw:s(1),return:s(2)},typeof Symbol=="function"&&(a[Symbol.iterator]=function(){return this}),a;function s(c){return function(l){return p([c,l])}}function p(c){if(o)throw new TypeError("Generator is already executing.");for(;r;)try{if(o=1,n&&(i=c[0]&2?n.return:c[0]?n.throw||((i=n.return)&&i.call(n),0):n.next)&&!(i=i.call(n,c[1])).done)return i;switch(n=0,i&&(c=[c[0]&2,i.value]),c[0]){case 0:case 1:i=c;break;case 4:return r.label++,{value:c[1],done:!1};case 5:r.label++,n=c[1],c=[0];continue;case 7:c=r.ops.pop(),r.trys.pop();continue;default:if(i=r.trys,!(i=i.length>0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]<i[3])){r.label=c[1];break}if(c[0]===6&&r.label<i[1]){r.label=i[1],i=c;break}if(i&&r.label<i[2]){r.label=i[2],r.ops.push(c);break}i[2]&&r.ops.pop(),r.trys.pop();continue}c=t.call(e,r)}catch(l){c=[6,l],n=0}finally{o=i=0}if(c[0]&5)throw c[1];return{value:c[0]?c[1]:void 0,done:!0}}}function de(e){var t=typeof Symbol=="function"&&Symbol.iterator,r=t&&e[t],o=0;if(r)return r.call(e);if(e&&typeof e.length=="number")return{next:function(){return e&&o>=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o<n;o++)(i||!(o in t))&&(i||(i=Array.prototype.slice.call(t,0,o)),i[o]=t[o]);return e.concat(i||Array.prototype.slice.call(t))}function nt(e){return this instanceof nt?(this.v=e,this):new nt(e)}function lo(e,t,r){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var o=r.apply(e,t||[]),n,i=[];return n={},a("next"),a("throw"),a("return"),n[Symbol.asyncIterator]=function(){return this},n;function a(u){o[u]&&(n[u]=function(h){return new Promise(function(w,A){i.push([u,h,w,A])>1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof 
de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription:
-`+r.map(function(o,n){return n+1+") "+o.toString()}).join(`
-  `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o<arguments.length;o++)r[o-2]=arguments[o];var n=ut.delegate;return n!=null&&n.setTimeout?n.setTimeout.apply(n,q([e,t],N(r))):setTimeout.apply(void 0,q([e,t],N(r)))},clearTimeout:function(e){var t=ut.delegate;return((t==null?void 0:t.clearTimeout)||clearTimeout)(e)},delegate:void 0};function Qt(e){ut.setTimeout(function(){var t=$e.onUnhandledError;if(t)t(e);else throw e})}function he(){}var uo=function(){return Sr("C",void 0,void 0)}();function ho(e){return Sr("E",void 0,e)}function bo(e){return Sr("N",e,void 0)}function Sr(e,t,r){return{kind:e,value:t,error:r}}var it=null;function dt(e){if($e.useDeprecatedSynchronousErrorHandling){var t=!it;if(t&&(it={errorThrown:!1,error:null}),e(),t){var r=it,o=r.errorThrown,n=r.error;if(it=null,o)throw n}}else e()}function vo(e){$e.useDeprecatedSynchronousErrorHandling&&it&&(it.errorThrown=!0,it.error=e)}var Mt=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o.isStopped=!1,r?(o.destination=r,qt(r)&&r.add(o)):o.destination=qi,o}return t.create=function(r,o,n){return new 
at(r,o,n)},t.prototype.next=function(r){this.isStopped?Mr(bo(r),this):this._next(r)},t.prototype.error=function(r){this.isStopped?Mr(ho(r),this):(this.isStopped=!0,this._error(r))},t.prototype.complete=function(){this.isStopped?Mr(uo,this):(this.isStopped=!0,this._complete())},t.prototype.unsubscribe=function(){this.closed||(this.isStopped=!0,e.prototype.unsubscribe.call(this),this.destination=null)},t.prototype._next=function(r){this.destination.next(r)},t.prototype._error=function(r){try{this.destination.error(r)}finally{this.unsubscribe()}},t.prototype._complete=function(){try{this.destination.complete()}finally{this.unsubscribe()}},t}(Fe);var Vi=Function.prototype.bind;function Or(e,t){return Vi.call(e,t)}var Ni=function(){function e(t){this.partialObserver=t}return e.prototype.next=function(t){var r=this.partialObserver;if(r.next)try{r.next(t)}catch(o){Kt(o)}},e.prototype.error=function(t){var r=this.partialObserver;if(r.error)try{r.error(t)}catch(o){Kt(o)}else Kt(t)},e.prototype.complete=function(){var t=this.partialObserver;if(t.complete)try{t.complete()}catch(r){Kt(r)}},e}(),at=function(e){re(t,e);function t(r,o,n){var i=e.call(this)||this,a;if(k(r)||!r)a={next:r!=null?r:void 0,error:o!=null?o:void 0,complete:n!=null?n:void 0};else{var s;i&&$e.useDeprecatedNextContext?(s=Object.create(r),s.unsubscribe=function(){return i.unsubscribe()},a={next:r.next&&Or(r.next,s),error:r.error&&Or(r.error,s),complete:r.complete&&Or(r.complete,s)}):a=r}return i.destination=new Ni(a),i}return t}(Mt);function Kt(e){$e.useDeprecatedSynchronousErrorHandling?vo(e):Qt(e)}function zi(e){throw e}function Mr(e,t){var r=$e.onStoppedNotification;r&&ut.setTimeout(function(){return r(e,t)})}var qi={closed:!0,next:he,error:zi,complete:he};var ht=function(){return typeof Symbol=="function"&&Symbol.observable||"@@observable"}();function le(e){return e}function go(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Lr(e)}function Lr(e){return e.length===0?le:e.length===1?e[0]:function(r){return e.reduce(function(o,n){return n(o)},r)}}var F=function(){function e(t){t&&(this._subscribe=t)}return e.prototype.lift=function(t){var r=new e;return r.source=this,r.operator=t,r},e.prototype.subscribe=function(t,r,o){var n=this,i=Ki(t)?t:new at(t,r,o);return dt(function(){var a=n,s=a.operator,p=a.source;i.add(s?s.call(i,p):p?n._subscribe(i):n._trySubscribe(i))}),i},e.prototype._trySubscribe=function(t){try{return this._subscribe(t)}catch(r){t.error(r)}},e.prototype.forEach=function(t,r){var o=this;return r=xo(r),new r(function(n,i){var a=new at({next:function(s){try{t(s)}catch(p){i(p),a.unsubscribe()}},error:i,complete:n});o.subscribe(a)})},e.prototype._subscribe=function(t){var r;return(r=this.source)===null||r===void 0?void 0:r.subscribe(t)},e.prototype[ht]=function(){return this},e.prototype.pipe=function(){for(var t=[],r=0;r<arguments.length;r++)t[r]=arguments[r];return Lr(t)(this)},e.prototype.toPromise=function(t){var r=this;return t=xo(t),new t(function(o,n){var i;r.subscribe(function(a){return i=a},function(a){return n(a)},function(){return o(i)})})},e.create=function(t){return new e(t)},e}();function xo(e){var t;return(t=e!=null?e:$e.Promise)!==null&&t!==void 0?t:Promise}function Qi(e){return e&&k(e.next)&&k(e.error)&&k(e.complete)}function Ki(e){return e&&e instanceof Mt||Qi(e)&&qt(e)}function Yi(e){return k(e==null?void 0:e.lift)}function y(e){return function(t){if(Yi(t))return t.lift(function(r){try{return e(r,this)}catch(o){this.error(o)}});throw new TypeError("Unable to lift unknown Observable 
type")}}function T(e,t,r,o,n){return new Bi(e,t,r,o,n)}var Bi=function(e){re(t,e);function t(r,o,n,i,a,s){var p=e.call(this,r)||this;return p.onFinalize=a,p.shouldUnsubscribe=s,p._next=o?function(c){try{o(c)}catch(l){r.error(l)}}:e.prototype._next,p._error=i?function(c){try{i(c)}catch(l){r.error(l)}finally{this.unsubscribe()}}:e.prototype._error,p._complete=n?function(){try{n()}catch(c){r.error(c)}finally{this.unsubscribe()}}:e.prototype._complete,p}return t.prototype.unsubscribe=function(){var r;if(!this.shouldUnsubscribe||this.shouldUnsubscribe()){var o=this.closed;e.prototype.unsubscribe.call(this),!o&&((r=this.onFinalize)===null||r===void 0||r.call(this))}},t}(Mt);var bt={schedule:function(e){var t=requestAnimationFrame,r=cancelAnimationFrame,o=bt.delegate;o&&(t=o.requestAnimationFrame,r=o.cancelAnimationFrame);var n=t(function(i){r=void 0,e(i)});return new Fe(function(){return r==null?void 0:r(n)})},requestAnimationFrame:function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=bt.delegate;return((r==null?void 0:r.requestAnimationFrame)||requestAnimationFrame).apply(void 0,q([],N(e)))},cancelAnimationFrame:function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=bt.delegate;return((r==null?void 0:r.cancelAnimationFrame)||cancelAnimationFrame).apply(void 0,q([],N(e)))},delegate:void 0};var yo=ft(function(e){return function(){e(this),this.name="ObjectUnsubscribedError",this.message="object unsubscribed"}});var g=function(e){re(t,e);function t(){var r=e.call(this)||this;return r.closed=!1,r.currentObservers=null,r.observers=[],r.isStopped=!1,r.hasError=!1,r.thrownError=null,r}return t.prototype.lift=function(r){var o=new Eo(this,this);return o.operator=r,o},t.prototype._throwIfClosed=function(){if(this.closed)throw new yo},t.prototype.next=function(r){var o=this;dt(function(){var n,i;if(o._throwIfClosed(),!o.isStopped){o.currentObservers||(o.currentObservers=Array.from(o.observers));try{for(var a=de(o.currentObservers),s=a.next();!s.done;s=a.next()){var p=s.value;p.next(r)}}catch(c){n={error:c}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(n)throw n.error}}}})},t.prototype.error=function(r){var o=this;dt(function(){if(o._throwIfClosed(),!o.isStopped){o.hasError=o.isStopped=!0,o.thrownError=r;for(var n=o.observers;n.length;)n.shift().error(r)}})},t.prototype.complete=function(){var r=this;dt(function(){if(r._throwIfClosed(),!r.isStopped){r.isStopped=!0;for(var o=r.observers;o.length;)o.shift().complete()}})},t.prototype.unsubscribe=function(){this.isStopped=this.closed=!0,this.observers=this.currentObservers=null},Object.defineProperty(t.prototype,"observed",{get:function(){var r;return((r=this.observers)===null||r===void 0?void 0:r.length)>0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return 
n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p<s.length&&!r.closed;p+=i?1:2)r.next(s[p]);return this._checkFinalizedStatuses(r),o},t.prototype._trimBuffer=function(){var r=this,o=r._bufferSize,n=r._timestampProvider,i=r._buffer,a=r._infiniteTimeWindow,s=(a?1:2)*o;if(o<1/0&&s<i.length&&i.splice(0,i.length-s),!a){for(var p=n.now(),c=0,l=1;l<i.length&&i[l]<=p;l+=2)c=l;c&&i.splice(0,c+1)}},t}(g);var wo=function(e){re(t,e);function t(r,o){return e.call(this)||this}return t.prototype.schedule=function(r,o){return o===void 0&&(o=0),this},t}(Fe);var At={setInterval:function(e,t){for(var r=[],o=2;o<arguments.length;o++)r[o-2]=arguments[o];var n=At.delegate;return n!=null&&n.setInterval?n.setInterval.apply(n,q([e,t],N(r))):setInterval.apply(void 0,q([e,t],N(r)))},clearInterval:function(e){var t=At.delegate;return((t==null?void 0:t.clearInterval)||clearInterval)(e)},delegate:void 0};var vt=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n.pending=!1,n}return t.prototype.schedule=function(r,o){var n;if(o===void 0&&(o=0),this.closed)return this;this.state=r;var i=this.id,a=this.scheduler;return i!=null&&(this.id=this.recycleAsyncId(a,i,o)),this.pending=!0,this.delay=o,this.id=(n=this.id)!==null&&n!==void 0?n:this.requestAsyncId(a,this.id,o),this},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),At.setInterval(r.flush.bind(r,this),n)},t.prototype.recycleAsyncId=function(r,o,n){if(n===void 0&&(n=0),n!=null&&this.delay===n&&this.pending===!1)return o;o!=null&&At.clearInterval(o)},t.prototype.execute=function(r,o){if(this.closed)return new Error("executing a cancelled action");this.pending=!1;var n=this._execute(r,o);if(n)return 
n;this.pending===!1&&this.id!=null&&(this.id=this.recycleAsyncId(this.scheduler,this.id,null))},t.prototype._execute=function(r,o){var n=!1,i;try{this.work(r)}catch(a){n=!0,i=a||new Error("Scheduled action threw falsy error")}if(n)return this.unsubscribe(),i},t.prototype.unsubscribe=function(){if(!this.closed){var r=this,o=r.id,n=r.scheduler,i=n.actions;this.work=this.state=this.scheduler=null,this.pending=!1,qe(i,this),o!=null&&(this.id=this.recycleAsyncId(n,o,null)),this.delay=null,e.prototype.unsubscribe.call(this)}},t}(wo);var Ar=function(){function e(t,r){r===void 0&&(r=e.now),this.schedulerActionCtor=t,this.now=r}return e.prototype.schedule=function(t,r,o){return r===void 0&&(r=0),new this.schedulerActionCtor(this,t).schedule(o,r)},e.now=Lt.now,e}();var gt=function(e){re(t,e);function t(r,o){o===void 0&&(o=Ar.now);var n=e.call(this,r,o)||this;return n.actions=[],n._active=!1,n}return t.prototype.flush=function(r){var o=this.actions;if(this._active){o.push(r);return}var n;this._active=!0;do if(n=r.execute(r.state,r.delay))break;while(r=o.shift());if(this._active=!1,n){for(;r=o.shift();)r.unsubscribe();throw n}},t}(Ar);var se=new gt(vt),Cr=se;var To=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.schedule=function(r,o){return o===void 0&&(o=0),o>0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var O=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r<e.length&&!t.closed;r++)t.next(e[r]);t.complete()})}function Zi(e){return new F(function(t){e.then(function(r){t.closed||(t.next(r),t.complete())},function(r){return t.error(r)}).then(null,Qt)})}function ea(e){return new F(function(t){var r,o;try{for(var n=de(e),i=n.next();!i.done;i=n.next()){var a=i.value;if(t.next(a),t.closed)return}}catch(s){r={error:s}}finally{try{i&&!i.done&&(o=n.return)&&o.call(n)}finally{if(r)throw r.error}}t.complete()})}function Lo(e){return new F(function(t){ra(e,t).catch(function(r){return t.error(r)})})}function ta(e){return Lo(rr(e))}function ra(e,t){var r,o,n,i;return po(this,void 0,void 0,function(){var a,s;return Nt(this,function(p){switch(p.label){case 0:p.trys.push([0,5,6,11]),r=mo(e),p.label=1;case 1:return[4,r.next()];case 2:if(o=p.sent(),!!o.done)return[3,4];if(a=o.value,t.next(a),t.closed)return[2];p.label=3;case 3:return[3,1];case 4:return[3,11];case 5:return s=p.sent(),n={error:s},[3,11];case 6:return p.trys.push([6,,9,10]),o&&!o.done&&(i=r.return)?[4,i.call(r)]:[3,8];case 7:p.sent(),p.label=8;case 8:return[3,10];case 9:if(n)throw n.error;return[7];case 10:return[7];case 11:return t.complete(),[2]}})})}function we(e,t,r,o,n){o===void 0&&(o=0),n===void 0&&(n=!1);var i=t.schedule(function(){r(),n?e.add(this.schedule(null,o)):this.unsubscribe()},o);if(e.add(i),!n)return i}function be(e,t){return t===void 0&&(t=0),y(function(r,o){r.subscribe(T(o,function(n){return we(o,e,function(){return o.next(n)},t)},function(){return we(o,e,function(){return o.complete()},t)},function(n){return we(o,e,function(){return o.error(n)},t)}))})}function Qe(e,t){return t===void 0&&(t=0),y(function(r,o){o.add(e.schedule(function(){return r.subscribe(o)},t))})}function _o(e,t){return W(e).pipe(Qe(t),be(t))}function Ao(e,t){return W(e).pipe(Qe(t),be(t))}function Co(e,t){return new F(function(r){var o=0;return t.schedule(function(){o===e.length?r.complete():(r.next(e[o++]),r.closed||this.schedule())})})}function Ho(e,t){return new F(function(r){var o;return we(r,t,function(){o=e[er](),we(r,t,function(){var n,i,a;try{n=o.next(),i=n.value,a=n.done}catch(s){r.error(s);return}a?r.complete():r.next(i)},0,!0)}),function(){return k(o==null?void 0:o.return)&&o.return()}})}function nr(e,t){if(!e)throw new Error("Iterable cannot be null");return new F(function(r){we(r,t,function(){var 
o=e[Symbol.asyncIterator]();we(r,t,function(){o.next().then(function(n){n.done?r.complete():r.next(n.value)})},0,!0)})})}function ko(e,t){return nr(rr(e),t)}function $o(e,t){if(e!=null){if(Jt(e))return _o(e,t);if(xt(e))return Co(e,t);if(Gt(e))return Ao(e,t);if(Xt(e))return nr(e,t);if(tr(e))return Ho(e,t);if(or(e))return ko(e,t)}throw Zt(e)}function ue(e,t){return t?$o(e,t):W(e)}function I(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e);return ue(e,r)}function $r(e,t){var r=k(e)?e:function(){return e},o=function(n){return n.error(r())};return new F(t?function(n){return t.schedule(o,0,n)}:o)}var ir=ft(function(e){return function(){e(this),this.name="EmptyError",this.message="no elements in sequence"}});function Po(e){return e instanceof Date&&!isNaN(e)}function m(e,t){return y(function(r,o){var n=0;r.subscribe(T(o,function(i){o.next(e.call(t,i,n++))}))})}var oa=Array.isArray;function na(e,t){return oa(t)?e.apply(void 0,q([],N(t))):e(t)}function Ze(e){return m(function(t){return na(e,t)})}var ia=Array.isArray,aa=Object.getPrototypeOf,sa=Object.prototype,ca=Object.keys;function Ro(e){if(e.length===1){var t=e[0];if(ia(t))return{args:t,keys:null};if(pa(t)){var r=ca(t);return{args:r.map(function(o){return t[o]}),keys:r}}}return{args:e,keys:null}}function pa(e){return e&&typeof e=="object"&&aa(e)===sa}function Io(e,t){return e.reduce(function(r,o,n){return r[o]=t[n],r},{})}function z(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e),o=Xe(e),n=Ro(e),i=n.args,a=n.keys;if(i.length===0)return ue([],r);var s=new F(Pr(i,r,a?function(p){return Io(a,p)}:le));return o?s.pipe(Ze(o)):s}function Pr(e,t,r){return r===void 0&&(r=le),function(o){Fo(t,function(){for(var n=e.length,i=new Array(n),a=n,s=n,p=function(l){Fo(t,function(){var f=ue(e[l],t),u=!1;f.subscribe(T(o,function(h){i[l]=h,u||(u=!0,s--),s||o.next(r(i.slice()))},function(){--a||o.complete()}))},o)},c=0;c<n;c++)p(c)},o)}}function Fo(e,t,r){e?we(r,e,t):t()}function jo(e,t,r,o,n,i,a,s){var p=[],c=0,l=0,f=!1,u=function(){f&&!p.length&&!c&&t.complete()},h=function(A){return c<o?w(A):p.push(A)},w=function(A){i&&t.next(A),c++;var te=!1;W(r(A,l++)).subscribe(T(t,function(ie){n==null||n(ie),i?h(ie):t.next(ie)},function(){te=!0},void 0,function(){if(te)try{c--;for(var ie=function(){var J=p.shift();a?we(t,a,function(){return w(J)}):w(J)};p.length&&c<o;)ie();u()}catch(J){t.error(J)}}))};return e.subscribe(T(t,h,function(){f=!0,u()})),function(){s==null||s()}}function oe(e,t,r){return r===void 0&&(r=1/0),k(t)?oe(function(o,n){return m(function(i,a){return t(o,i,n,a)})(W(e(o,n)))},r):(typeof t=="number"&&(r=t),y(function(o,n){return jo(o,n,e,r)}))}function yt(e){return e===void 0&&(e=1/0),oe(le,e)}function Wo(){return yt(1)}function je(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Wo()(ue(e,He(e)))}function C(e){return new F(function(t){W(e()).subscribe(t)})}var la=["addListener","removeListener"],ma=["addEventListener","removeEventListener"],fa=["on","off"];function d(e,t,r,o){if(k(r)&&(o=r,r=void 0),o)return d(e,t,r).pipe(Ze(o));var n=N(ha(e)?ma.map(function(s){return function(p){return e[s](t,p,r)}}):ua(e)?la.map(Uo(e,t)):da(e)?fa.map(Uo(e,t)):[],2),i=n[0],a=n[1];if(!i&&xt(e))return oe(function(s){return d(s,t,r)})(W(e));if(!i)throw new TypeError("Invalid event target");return new F(function(s){var p=function(){for(var c=[],l=0;l<arguments.length;l++)c[l]=arguments[l];return s.next(1<c.length?c:c[0])};return i(p),function(){return a(p)}})}function Uo(e,t){return function(r){return 
function(o){return e[r](t,o)}}}function ua(e){return k(e.addListener)&&k(e.removeListener)}function da(e){return k(e.on)&&k(e.off)}function ha(e){return k(e.addEventListener)&&k(e.removeEventListener)}function ar(e,t,r){return r?ar(e,t).pipe(Ze(r)):new F(function(o){var n=function(){for(var a=[],s=0;s<arguments.length;s++)a[s]=arguments[s];return o.next(a.length===1?a[0]:a)},i=e(n);return k(t)?function(){return t(n,i)}:void 0})}function Me(e,t,r){e===void 0&&(e=0),r===void 0&&(r=Cr);var o=-1;return t!=null&&(Yt(t)?r=t:o=t),new F(function(n){var i=Po(e)?+e-r.now():e;i<0&&(i=0);var a=0;return r.schedule(function(){n.closed||(n.next(a++),0<=o?this.schedule(void 0,o):n.complete())},i)})}function S(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e),o=Bt(e,1/0),n=e;return n.length?n.length===1?W(n[0]):yt(o)(ue(n,r)):O}var Ke=new F(he);var ba=Array.isArray;function Et(e){return e.length===1&&ba(e[0])?e[0]:e}function b(e,t){return y(function(r,o){var n=0;r.subscribe(T(o,function(i){return e.call(t,i,n++)&&o.next(i)}))})}function Ct(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=Xe(e),o=Et(e);return o.length?new F(function(n){var i=o.map(function(){return[]}),a=o.map(function(){return!1});n.add(function(){i=a=null});for(var s=function(c){W(o[c]).subscribe(T(n,function(l){if(i[c].push(l),i.every(function(u){return u.length})){var f=i.map(function(u){return u.shift()});n.next(r?r.apply(void 0,q([],N(f))):f),i.some(function(u,h){return!u.length&&a[h]})&&n.complete()}},function(){a[c]=!0,!i[c].length&&n.complete()}))},p=0;!n.closed&&p<o.length;p++)s(p);return function(){i=a=null}}):O}function Do(e){return y(function(t,r){var o=!1,n=null,i=null,a=!1,s=function(){if(i==null||i.unsubscribe(),i=null,o){o=!1;var c=n;n=null,r.next(c)}a&&r.complete()},p=function(){i=null,a&&r.complete()};t.subscribe(T(r,function(c){o=!0,n=c,i||W(e(c)).subscribe(i=T(r,s,p))},function(){a=!0,(!o||!i||i.closed)&&r.complete()}))})}function Le(e,t){return t===void 0&&(t=se),Do(function(){return Me(e,t)})}function Ye(e,t){return t===void 0&&(t=null),t=t!=null?t:e,y(function(r,o){var n=[],i=0;r.subscribe(T(o,function(a){var s,p,c,l,f=null;i++%t===0&&n.push([]);try{for(var u=de(n),h=u.next();!h.done;h=u.next()){var w=h.value;w.push(a),e<=w.length&&(f=f!=null?f:[],f.push(w))}}catch(ie){s={error:ie}}finally{try{h&&!h.done&&(p=u.return)&&p.call(u)}finally{if(s)throw s.error}}if(f)try{for(var A=de(f),te=A.next();!te.done;te=A.next()){var w=te.value;qe(n,w),o.next(w)}}catch(ie){c={error:ie}}finally{try{te&&!te.done&&(l=A.return)&&l.call(A)}finally{if(c)throw c.error}}},function(){var a,s;try{for(var p=de(n),c=p.next();!c.done;c=p.next()){var l=c.value;o.next(l)}}catch(f){a={error:f}}finally{try{c&&!c.done&&(s=p.return)&&s.call(p)}finally{if(a)throw a.error}}o.complete()},void 0,function(){n=null}))})}function ve(e){return y(function(t,r){var o=null,n=!1,i;o=t.subscribe(T(r,void 0,void 0,function(a){i=W(e(a,ve(e)(t))),o?(o.unsubscribe(),o=null,i.subscribe(r)):n=!0})),n&&(o.unsubscribe(),o=null,i.subscribe(r))})}function Vo(e,t,r,o,n){return function(i,a){var s=r,p=t,c=0;i.subscribe(T(a,function(l){var f=c++;p=s?e(p,l,f):(s=!0,l),o&&a.next(p)},n&&function(){s&&a.next(p),a.complete()}))}}function Rr(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=Xe(e);return r?go(Rr.apply(void 0,q([],N(e))),Ze(r)):y(function(o,n){Pr(q([o],N(Et(e))))(n)})}function We(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Rr.apply(void 0,q([],N(e)))}function Ht(e){return y(function(t,r){var 
o=!1,n=null,i=null,a=function(){if(i==null||i.unsubscribe(),i=null,o){o=!1;var s=n;n=null,r.next(s)}};t.subscribe(T(r,function(s){i==null||i.unsubscribe(),o=!0,n=s,i=T(r,a,he),W(e(s)).subscribe(i)},function(){a(),r.complete()},void 0,function(){n=i=null}))})}function _e(e,t){return t===void 0&&(t=se),y(function(r,o){var n=null,i=null,a=null,s=function(){if(n){n.unsubscribe(),n=null;var c=i;i=null,o.next(c)}};function p(){var c=a+e,l=t.now();if(l<c){n=this.schedule(void 0,c-l),o.add(n);return}s()}r.subscribe(T(o,function(c){i=c,a=t.now(),n||(n=t.schedule(p,e),o.add(n))},function(){s(),o.complete()},void 0,function(){i=n=null}))})}function Be(e){return y(function(t,r){var o=!1;t.subscribe(T(r,function(n){o=!0,r.next(n)},function(){o||r.next(e),r.complete()}))})}function Te(e){return e<=0?function(){return O}:y(function(t,r){var o=0;t.subscribe(T(r,function(n){++o<=e&&(r.next(n),e<=o&&r.complete())}))})}function X(){return y(function(e,t){e.subscribe(T(t,he))})}function No(e){return m(function(){return e})}function Ir(e,t){return t?function(r){return je(t.pipe(Te(1),X()),r.pipe(Ir(e)))}:oe(function(r,o){return W(e(r,o)).pipe(Te(1),No(r))})}function Ge(e,t){t===void 0&&(t=se);var r=Me(e,t);return Ir(function(){return r})}function K(e,t){return t===void 0&&(t=le),e=e!=null?e:va,y(function(r,o){var n,i=!0;r.subscribe(T(o,function(a){var s=t(a);(i||!e(n,s))&&(i=!1,n=s,o.next(a))}))})}function va(e,t){return e===t}function Z(e,t){return K(function(r,o){return t?t(r[e],o[e]):r[e]===o[e]})}function zo(e){return e===void 0&&(e=ga),y(function(t,r){var o=!1;t.subscribe(T(r,function(n){o=!0,r.next(n)},function(){return o?r.complete():r.error(e())}))})}function ga(){return new ir}function ne(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return function(r){return je(r,I.apply(void 0,q([],N(e))))}}function L(e){return y(function(t,r){try{t.subscribe(r)}finally{r.add(e)}})}function Ae(e,t){var r=arguments.length>=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return O}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e<o.length&&o.shift()},function(){var n,i;try{for(var a=de(o),s=a.next();!s.done;s=a.next()){var p=s.value;r.next(p)}}catch(c){n={error:c}}finally{try{s&&!s.done&&(i=a.return)&&i.call(a)}finally{if(n)throw n.error}}r.complete()},void 0,function(){o=null}))})}function qo(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e),o=Bt(e,1/0);return e=Et(e),y(function(n,i){yt(o)(ue(q([n],N(e)),r)).subscribe(i)})}function Pe(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return qo.apply(void 0,q([],N(e)))}function st(e){var t,r=1/0,o;return e!=null&&(typeof e=="object"?(t=e.count,r=t===void 0?1/0:t,o=e.delay):r=e),r<=0?function(){return O}:y(function(n,i){var a=0,s,p=function(){if(s==null||s.unsubscribe(),s=null,o!=null){var l=typeof o=="number"?Me(o):W(o(a)),f=T(i,function(){f.unsubscribe(),c()});l.subscribe(f)}else c()},c=function(){var l=!1;s=n.subscribe(T(i,void 0,function(){++a<r?s?p():l=!0:i.complete()})),l&&p()};c()})}function jr(e,t){return y(Vo(e,t,arguments.length>=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var 
H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;o<arguments.length;o++)r[o-2]=arguments[o];if(t===!0){e();return}if(t!==!1){var n=new at({next:function(){n.unsubscribe(),e()}});return W(t.apply(void 0,q([],N(r)))).subscribe(n)}}function G(e,t,r){var o,n,i,a,s=!1;return e&&typeof e=="object"?(o=e.bufferSize,a=o===void 0?1/0:o,n=e.windowTime,t=n===void 0?1/0:n,i=e.refCount,s=i===void 0?!1:i,r=e.scheduler):a=e!=null?e:1/0,pe({connector:function(){return new _t(a,t,r)},resetOnError:!0,resetOnComplete:!1,resetOnRefCountZero:s})}function Ce(e){return b(function(t,r){return e<=r})}function Ur(e){return y(function(t,r){var o=!1,n=T(r,function(){n==null||n.unsubscribe(),o=!0},he);W(e).subscribe(n),t.subscribe(T(r,function(i){return o&&r.next(i)}))})}function Q(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=He(e);return y(function(o,n){(r?je(e,o,r):je(e,o)).subscribe(n)})}function v(e,t){return y(function(r,o){var n=null,i=0,a=!1,s=function(){return a&&!n&&o.complete()};r.subscribe(T(o,function(p){n==null||n.unsubscribe();var c=0,l=i++;W(e(p,l)).subscribe(n=T(o,function(f){return o.next(t?t(p,f,l,c++):f)},function(){n=null,s()}))},function(){a=!0,s()}))})}function U(e){return y(function(t,r){W(e).subscribe(T(r,function(){return r.complete()},he)),!r.closed&&t.subscribe(r)})}function Dr(e,t){return t===void 0&&(t=!1),y(function(r,o){var n=0;r.subscribe(T(o,function(i){var a=e(i,n++);(a||t)&&o.next(i),!a&&o.complete()}))})}function E(e,t,r){var o=k(e)||t||r?{next:e,error:t,complete:r}:e;return o?y(function(n,i){var a;(a=o.subscribe)===null||a===void 0||a.call(o);var s=!0;n.subscribe(T(i,function(p){var c;(c=o.next)===null||c===void 0||c.call(o,p),i.next(p)},function(){var p;s=!1,(p=o.complete)===null||p===void 0||p.call(o),i.complete()},function(p){var c;s=!1,(c=o.error)===null||c===void 0||c.call(o,p),i.error(p)},function(){var p,c;s&&((p=o.unsubscribe)===null||p===void 0||p.call(o)),(c=o.finalize)===null||c===void 0||c.call(o)}))}):le}function Qo(e,t){return y(function(r,o){var n=t!=null?t:{},i=n.leading,a=i===void 0?!0:i,s=n.trailing,p=s===void 0?!1:s,c=!1,l=null,f=null,u=!1,h=function(){f==null||f.unsubscribe(),f=null,p&&(te(),u&&o.complete())},w=function(){f=null,u&&o.complete()},A=function(ie){return f=W(e(ie)).subscribe(T(o,h,w))},te=function(){if(c){c=!1;var ie=l;l=null,o.next(ie),!u&&A(ie)}};r.subscribe(T(o,function(ie){c=!0,l=ie,!(f&&!f.closed)&&(a?te():A(ie))},function(){u=!0,!(p&&c&&f&&!f.closed)&&o.complete()}))})}function ct(e,t,r){t===void 0&&(t=se);var o=Me(e,t);return Qo(function(){return o},r)}function ee(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];var r=Xe(e);return y(function(o,n){for(var i=e.length,a=new Array(i),s=e.map(function(){return!1}),p=!1,c=function(f){W(e[f]).subscribe(T(n,function(u){a[f]=u,!p&&!s[f]&&(s[f]=!0,(p=s.every(le))&&(s=null))},he))},l=0;l<i;l++)c(l);o.subscribe(T(n,function(f){if(p){var u=q([f],N(a));n.next(r?r.apply(void 0,q([],N(u))):u)}}))})}function Ko(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return y(function(r,o){Ct.apply(void 0,q([r],N(e))).subscribe(o)})}function Vr(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return Ko.apply(void 
0,q([],N(e)))}function Yo(){let e=new _t(1);return d(document,"DOMContentLoaded",{once:!0}).subscribe(()=>e.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),G(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return 
d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?O:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),G(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():O))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function B(e){return St.features.includes(e)}function Ee(e,t){return typeof 
t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!B("announce.dismiss")||!e.childElementCount)return O;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.score<t);a===-1&&(a=r.length);let s=r.slice(0,a),p=r.slice(a),c=[qr(i,2|+(!n&&a===0)),...s.map(l=>qr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return 
x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:O),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?O:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):O})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let 
l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:O)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node 
circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return 
e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.x<h.x||f.x+u>h.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>B("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function 
Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return O;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[n<i,i]),Z(0)),r=z([e,t]).pipe(b(([{offset:n},[,i]])=>Math.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?O:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return 
I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;s<t.length;s++){let p=t[s].nextElementSibling;p instanceof HTMLElement&&(p.hidden=a.index!==s)}__md_set("__palette",a)}),d(e,"keydown").pipe(b(a=>a.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return O;let r=e.target.closest("a");if(r===null)return O;if(r.target||e.metaKey||e.ctrlKey)return O;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):O}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o 
of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),O}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return O;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),O)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}<mark data-md-highlight>${a}</mark>`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)<mark[^>]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let 
s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g,"&nbsp;")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let 
u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return 
di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,p.next.length-s.next.length)}))}function vi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new 
g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof 
t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})();
-//# sourceMappingURL=bundle.fe8b6f2b.min.js.map
-
diff --git a/v10.0.X/index.html b/v10.0.X/index.html
index 3487ad4aabf..d5ffc5d73bb 100644
--- a/v10.0.X/index.html
+++ b/v10.0.X/index.html
@@ -14,7 +14,7 @@
       
       
       <link rel="icon" href="logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1958,7 +1958,7 @@ <h2 id="citation">Citation</h2>
     <script id="__config" type="application/json">{"base": ".", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/model_building_tutorial2024/model_building_exercise/index.html b/v10.0.X/model_building_tutorial2024/model_building_exercise/index.html
index 4eba1ce34ce..66ea257f98d 100644
--- a/v10.0.X/model_building_tutorial2024/model_building_exercise/index.html
+++ b/v10.0.X/model_building_tutorial2024/model_building_exercise/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1961,7 +1961,7 @@ <h3 id="eft-model">EFT model</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part2/bin-wise-stats/index.html b/v10.0.X/part2/bin-wise-stats/index.html
index ae06c95ec33..0f9d71cb2ff 100644
--- a/v10.0.X/part2/bin-wise-stats/index.html
+++ b/v10.0.X/part2/bin-wise-stats/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1567,7 +1567,7 @@ <h2 id="technical-details">Technical details</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part2/bsm-higgs-models/index.html b/v10.0.X/part2/bsm-higgs-models/index.html
index e571fc444ed..6efadc08b4f 100644
--- a/v10.0.X/part2/bsm-higgs-models/index.html
+++ b/v10.0.X/part2/bsm-higgs-models/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1579,7 +1579,7 @@ <h2 id="fermiophobic-higgs-model">Fermiophobic Higgs Model</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part2/higgscouplings/index.html b/v10.0.X/part2/higgscouplings/index.html
index 0f560fdef2f..62a7b45da9d 100644
--- a/v10.0.X/part2/higgscouplings/index.html
+++ b/v10.0.X/part2/higgscouplings/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1609,7 +1609,7 @@ <h2 id="outdated-couplings-modifer-models">Outdated couplings modifer models</h2
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part2/physicsmodels/index.html b/v10.0.X/part2/physicsmodels/index.html
index d398fed842f..104efbd635f 100644
--- a/v10.0.X/part2/physicsmodels/index.html
+++ b/v10.0.X/part2/physicsmodels/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1796,7 +1796,7 @@ <h3 id="multi-process-interference">Multi-process interference</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part2/settinguptheanalysis/index.html b/v10.0.X/part2/settinguptheanalysis/index.html
index 3add90155ad..533bc9502b1 100644
--- a/v10.0.X/part2/settinguptheanalysis/index.html
+++ b/v10.0.X/part2/settinguptheanalysis/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2360,7 +2360,7 @@ <h1>Nuisance Report</h1>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/commonstatsmethods/index.html b/v10.0.X/part3/commonstatsmethods/index.html
index 2e4416f2521..60b49909aeb 100644
--- a/v10.0.X/part3/commonstatsmethods/index.html
+++ b/v10.0.X/part3/commonstatsmethods/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2826,7 +2826,9 @@ <h2 id="likelihood-fits-and-scans">Likelihood Fits and Scans</h2>
 </details>
 <p>This will produce a plot like the one below,</p>
 <p><img alt="" src="../images/2D_LHScan.png" /></p>
-<p>Similarly, 1D scans can be drawn directly from the tree, however for 1D likelihood scans, there is a python script from the <a href="http://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#combine-tool"><code>CombineHarvester/CombineTools</code></a> package <a href="https://github.com/cms-analysis/CombineHarvester/blob/113x/CombineTools/scripts/plot1DScan.py">plot1DScan.py</a> that can be used to make plots and extract the crossings of the <code>2*deltaNLL</code> - e.g the 1σ/2σ boundaries.</p>
+<p>However, for 1D likelihood scans a Python script, <code>plot1DScan.py</code>, is already available to make plots and extract the crossings of <code>2*deltaNLL</code>, e.g. the 1σ/2σ boundaries:</p>
+<div class="highlight"><pre><span></span><code>plot1DScan.py higgsCombineTest.MultiDimFit.mH125.root -o output
+</code></pre></div>
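+<p>For reference, the input file used above can be produced with a 1D grid scan from <code>MultiDimFit</code>. The sketch below is only an illustration: <code>workspace.root</code> is a placeholder name, and the number of points and the parameter range should be adapted to your analysis.</p>
+<div class="highlight"><pre><span></span><code># 1D grid scan of the parameter of interest (placeholder workspace name and settings)
+combine -M MultiDimFit workspace.root --algo grid --points 100 -m 125
+# plot the scan and extract the 1σ/2σ crossings
+plot1DScan.py higgsCombineTest.MultiDimFit.mH125.root -o single_scan
+</code></pre></div>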
 <h3 id="useful-options-for-likelihood-scans">Useful options for likelihood scans</h3>
 <p>A number of common, useful options (especially for computing likelihood scans with the <strong>grid</strong> algo) are,</p>
 <ul>
@@ -3104,7 +3106,7 @@ <h4 id="extracting-2d-contours-general-intervals">Extracting 2D contours / gener
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/debugging/index.html b/v10.0.X/part3/debugging/index.html
index c2144e9d6cf..c2486f3eece 100644
--- a/v10.0.X/part3/debugging/index.html
+++ b/v10.0.X/part3/debugging/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1439,7 +1439,7 @@ <h2 id="analyzing-the-nll-shape-in-each-parameter">Analyzing the NLL shape in ea
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/nonstandard/index.html b/v10.0.X/part3/nonstandard/index.html
index aa7fd0317e8..4866e657702 100644
--- a/v10.0.X/part3/nonstandard/index.html
+++ b/v10.0.X/part3/nonstandard/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2836,7 +2836,7 @@ <h2 id="look-elsewhere-effect-for-one-parameter">Look-elsewhere effect for one p
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/regularisation/index.html b/v10.0.X/part3/regularisation/index.html
index cbdeeffabf0..c955b188099 100644
--- a/v10.0.X/part3/regularisation/index.html
+++ b/v10.0.X/part3/regularisation/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1595,7 +1595,7 @@ <h3 id="tunfold-method">TUnfold method</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/runningthetool/index.html b/v10.0.X/part3/runningthetool/index.html
index 67da31dc81a..2aef3ef3bb1 100644
--- a/v10.0.X/part3/runningthetool/index.html
+++ b/v10.0.X/part3/runningthetool/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2064,7 +2064,7 @@ <h3 id="grid-submission-with-crab3">Grid submission with crab3</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/simplifiedlikelihood/index.html b/v10.0.X/part3/simplifiedlikelihood/index.html
index 85e8de2d712..c9905143af1 100644
--- a/v10.0.X/part3/simplifiedlikelihood/index.html
+++ b/v10.0.X/part3/simplifiedlikelihood/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1834,7 +1834,7 @@ <h2 id="example-using-tutorial-datacard">Example using tutorial datacard</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part3/validation/index.html b/v10.0.X/part3/validation/index.html
index 9347d52407b..670e82dd6e6 100644
--- a/v10.0.X/part3/validation/index.html
+++ b/v10.0.X/part3/validation/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1798,7 +1798,7 @@ <h2 id="what-to-do-in-case-of-a-warning">What to do in case of a warning</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part4/usefullinks/index.html b/v10.0.X/part4/usefullinks/index.html
index 6bf9cfd3295..ba5c287d613 100644
--- a/v10.0.X/part4/usefullinks/index.html
+++ b/v10.0.X/part4/usefullinks/index.html
@@ -14,7 +14,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1615,7 +1615,7 @@ <h1 id="faq">FAQ</h1>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part5/longexercise/index.html b/v10.0.X/part5/longexercise/index.html
index fb69a4c4f59..1f61402c611 100644
--- a/v10.0.X/part5/longexercise/index.html
+++ b/v10.0.X/part5/longexercise/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2378,7 +2378,7 @@ <h3 id="b-performing-and-plotting-2d-likelihood-scans">B: Performing and plottin
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part5/longexerciseanswers/index.html b/v10.0.X/part5/longexerciseanswers/index.html
index 0c97bb12038..463c0f357ac 100644
--- a/v10.0.X/part5/longexerciseanswers/index.html
+++ b/v10.0.X/part5/longexerciseanswers/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1973,7 +1973,7 @@ <h3 id="e-signal-strength-measurement-and-uncertainty-breakdown">E: Signal stren
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/part5/roofit/index.html b/v10.0.X/part5/roofit/index.html
index 40c2a005ad2..82d6f12413c 100644
--- a/v10.0.X/part5/roofit/index.html
+++ b/v10.0.X/part5/roofit/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2066,7 +2066,7 @@ <h2 id="a-likelihood-for-a-counting-experiment">A likelihood for a counting expe
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/releaseNotes/index.html b/v10.0.X/releaseNotes/index.html
index 30c611b8c53..336b2f501e5 100644
--- a/v10.0.X/releaseNotes/index.html
+++ b/v10.0.X/releaseNotes/index.html
@@ -12,7 +12,7 @@
       
       
       <link rel="icon" href="../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1353,7 +1353,7 @@ <h2 id="cmssw-8_1_x-v7013">CMSSW 8_1_X - v7.0.13</h2>
     <script id="__config" type="application/json">{"base": "..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/search/search_index.json b/v10.0.X/search/search_index.json
index 80fad81168b..3385517523a 100644
--- a/v10.0.X/search/search_index.json
+++ b/v10.0.X/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"<p> These pages document the RooStats / RooFit - based software tool used for statistical analysis within the CMS experiment - Combine. Note that while this tool was originally developed in the Higgs Physics Analysis Group (PAG), its usage is now widespread within CMS. </p> <p>Combine provides a command-line interface to many different statistical techniques, available inside RooFit/RooStats, that are used widely inside CMS.</p> <p>The package exists on GitHub under https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit</p> <p>For more information about Git, GitHub and its usage in CMS, see http://cms-sw.github.io/cmssw/faq.html</p> <p>The code can be checked out from GitHub and compiled on top of a CMSSW release that includes a recent RooFit/RooStats, or via standalone compilation without CMSSW dependencies. See the instructions for installation of Combine below.</p>"},{"location":"#installation-instructions","title":"Installation instructions","text":"<p>Installation instructions and recommended versions can be found below.  Since v9.0.0, the versioning follows the semantic versioning 2.0.0 standard. Earlier versions are not guaranteed to follow the standard.</p>"},{"location":"#within-cmssw-recommended-for-cms-users","title":"Within CMSSW (recommended for CMS users)","text":"<p>The instructions below are for installation within a CMSSW environment. For end users that do not need to commit or do any development, the following recipes should be sufficient. To choose a release version, you can find the latest releases on github under https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/releases</p>"},{"location":"#combine-v10-recommended-version","title":"Combine v10 - recommended version","text":"<p>The nominal installation method is inside CMSSW. The current release targets the CMSSW <code>14_1_X</code> series because of the recent switch to el9 at lxplus machines.</p> <p><pre><code>cmsrel CMSSW_14_1_0_pre4\ncd CMSSW_14_1_0_pre4/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag is v10.0.2: see release notes</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v10.0.2\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#combine-v9","title":"Combine v9","text":"<p>The nominal installation method is inside CMSSW. The current release targets the CMSSW <code>11_3_X</code> series because this release has both python2 and python3 ROOT bindings, allowing a more gradual migration of user code to python3. Combine is fully python3-compatible and, with some adaptations, can also work in 12_X releases. 
</p> <p>CMSSW <code>11_3_X</code> runs on slc7, which can be setup using apptainer (see detailed instructions): <pre><code>cmssw-el7\ncmsrel CMSSW_11_3_4\ncd CMSSW_11_3_4/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag is v9.2.1: see release notes</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v9.2.1\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#combine-v8-cmssw_10_2_x-release-series","title":"Combine v8: <code>CMSSW_10_2_X</code> release series","text":"<p>Setting up the environment (once):</p> <p><pre><code>cmssw-el7\ncmsrel CMSSW_10_2_13\ncd CMSSW_10_2_13/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag is v8.2.0: see release notes</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v8.2.0\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#slc6cc7-release-cmssw_8_1_x","title":"SLC6/CC7 release <code>CMSSW_8_1_X</code>","text":"<p>Setting up OS using apptainer (see detailed instructions):</p> <p><pre><code># For CC7:\ncmssw-el7\n# For SLC6:\ncmssw-el6\n\ncmsrel CMSSW_8_1_0\ncd CMSSW_8_1_0/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag for CMSSW_8_1_X is v7.0.13:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v7.0.13\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#oustide-of-cmssw-recommended-for-non-cms-users","title":"Oustide of CMSSW (recommended for non-CMS users)","text":"<p>Pre-compiled versions of the tool are available as container images from the CMS cloud. These containers can be downloaded and run using Docker. If you have docker running you can pull and run the image using, </p> <p><pre><code>docker run --name combine -it gitlab-registry.cern.ch/cms-cloud/combine-standalone:&lt;tag&gt;\n</code></pre> where you must replace <code>&lt;tag&gt;</code> with a particular version of the tool - eg - <code>v9.2.1</code>. See the top of this page for the latest recommended versions. </p> <p>You will now have the compiled Combine binary available as well as the complete package of tool.  The container can be re-started using <code>docker start -i combine</code>. </p>"},{"location":"#standalone-compilation","title":"Standalone compilation","text":"<p>The standalone version can be easily compiled using cvmfs as it relies on dependencies that are already installed at <code>/cvmfs/cms.cern.ch/</code>. Access to <code>/cvmfs/cms.cern.ch/</code> can be obtained from lxplus machines or via <code>CernVM</code>. See CernVM for further details on the latter. 
In case you do not want to use the <code>cvmfs</code> area, you will need to adapt the locations of the dependencies listed in both the <code>Makefile</code> and <code>env_standalone.sh</code> files.</p> <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit/ \n# git checkout &lt;some release&gt;\n. env_standalone.sh\nmake -j 4\n</code></pre> <p>You will need to source <code>env_standalone.sh</code> each time you want to use the package, or add it to your login environment.</p>"},{"location":"#compilation-of-slc7-compatible-versions","title":"Compilation of slc7 compatible versions","text":"<p>For Combine versions before v10 release you will need to do the compilation in an slc7 environment using apptainer. You can then source the standalone script outside of the apptainer. On lxplus this can be done as follows:</p> <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit/ \n# git checkout &lt;some release&gt;\ncmssw-el7\n. env_standalone.sh\nmake -j 4\nexit\nsource . env_standalone.sh\n</code></pre>"},{"location":"#standalone-compilation-with-lcg","title":"Standalone compilation with LCG","text":"<p>For compilation outside of CMSSW, for example to use ROOT versions not yet available in CMSSW, one can compile against LCG releases. The current default is to compile with LCG_102, which contains ROOT 6.26: <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\nsource env_lcg.sh \nmake LCG=1 -j 8\n</code></pre> To change the LCG version, edit <code>env_lcg.sh</code>. </p> <p>The resulting binaries can be moved for use in a batch job if the following files are included in the job tarball: <pre><code>tar -zcf Combine_LCG_env.tar.gz build interface src/classes.h --exclude=obj\n</code></pre></p>"},{"location":"#standalone-compilation-with-conda","title":"Standalone compilation with <code>conda</code>","text":"<p>This recipe will work both for linux and MacOS <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n\nconda install --name base mamba # faster conda\nmamba env create -f conda_env.yml\n\nconda activate combine\nsource set_conda_env_vars.sh\n# Need to reactivate\nconda deactivate\nconda activate combine\n\nmake CONDA=1 -j 8\n</code></pre></p> <p>Using Combine from then on should only require sourcing the conda environment  <pre><code>conda activate combine\n</code></pre></p> <p>Note: on OS X, Combine can only accept workspaces, so run <code>text2workspace.py</code> first. This is due to an issue with child processes and <code>LD_LIBRARY_PATH</code> (see note in Makefile)</p>"},{"location":"#standalone-compilation-with-cernvm","title":"Standalone compilation with CernVM","text":"<p>Combine, either standalone or not, can be compiled via CVMFS using access to <code>/cvmfs/cms.cern.ch/</code>  obtained using a virtual machine - <code>CernVM</code>. To use <code>CernVM</code> You should have access to CERN IT resources. If you are a CERN user you can use your account, otherwise you can request a lightweight account. 
If you have a CERN user account, we strongly suggest you simply run one of the other standalone installations, which are simpler and faster than using a VM.</p> <p>You should have a working VM on your local machine, compatible with CernVM, such as <code>VirtualBox</code>. All the required software can be downloaded here. At least 2GB of disk space should be reserved on the virtual machine for Combine to work properly and the machine must be contextualized to add the <code>CMS</code> group to CVMFS. A minimal working setup is described below.</p> <ol> <li> <p>Download the CernVM-launcher for your operating system, following the instructions available [<code>here</code>] for your operating system (https://cernvm.readthedocs.io/en/stable/cpt-launch.html#installation</p> </li> <li> <p>Prepare a CMS context. You can use the CMS open data one already available on gitHub:  <code>wget https://raw.githubusercontent.com/cernvm/public-contexts/master/cms-opendata-2011.context)</code></p> </li> <li> <p>Launch the virtual machine <code>cernvm-launch create --name combine --cpus 2 cms-opendata-2011.context</code></p> </li> <li> <p>In the VM, proceed with an installation of combine</p> </li> </ol> <p>Installation through CernVM is maintained on a best-effort basis and these instructions may not be up to date. </p>"},{"location":"#what-has-changed-between-tags","title":"What has changed between tags?","text":"<p>You can generate a diff of any two tags (eg for <code>v9.2.1</code> and <code>v9.2.0</code>) by using the following url:</p> <p>https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/compare/v9.2.0...v9.2.1</p> <p>Replace the tag names in the url to any tags you would like to compare.</p>"},{"location":"#for-developers","title":"For developers","text":"<p>We use the Fork and Pull model for development: each user creates a copy of the repository on GitHub, commits their requests there, and then sends pull requests for the administrators to merge.</p> <p>Prerequisites</p> <ol> <li> <p>Register on GitHub, as needed anyway for CMSSW development: http://cms-sw.github.io/cmssw/faq.html</p> </li> <li> <p>Register your SSH key on GitHub: https://help.github.com/articles/generating-ssh-keys </p> </li> <li> <p>Fork the repository to create your copy of it: https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/fork (more documentation at https://help.github.com/articles/fork-a-repo )</p> </li> </ol> <p>You will now be able to browse your fork of the repository from https://github.com/your-github-user-name/HiggsAnalysis-CombinedLimit</p> <p>We strongly encourage you to contribute any developments you make back to the main repository.  See contributing.md for details about contributing. </p>"},{"location":"#combineharvestercombinetools","title":"CombineHarvester/CombineTools","text":"<p>CombineHarvester/CombineTools is a package for the creation of datacards/workspaces used with Combine v10 for a number of analyses in CMS. See the <code>CombineHarvester</code> documentation pages for more details on using this tool and additional features available in the full package.</p> <p>This package also comes with useful features for Combine such as the automated datacard validation (see instructions). The repository can be checked out and compiled using, </p> <pre><code>git clone https://github.com/cms-analysis/CombineHarvester.git CombineHarvester\nscram b\n</code></pre> <p>See the <code>CombineHarvester</code> documentation for full instructions and reccomended versions. 
</p> <p>Info</p> <p>Starting with Combine v10, specific ombineTool functionalities for job submition and parallelization (<code>combineTool.py</code>) as well as many plotting functions have been integrated into the Combine package. For these tasks you no longer have to follow the instructions above.</p>"},{"location":"#citation","title":"Citation","text":"<p>If you use Combine, please cite the following CMS publication here. </p> Show BibTex Entry <pre><code>@unpublished{\n    CMS:2024onh,\n    author = \"Hayrapetyan, Aram and others\",\n    collaboration = \"CMS\",\n    title = \"The {CMS} statistical analysis and combination tool: {\\textsc{Combine}}\",\n    eprint = \"2404.06614\",\n    archivePrefix = \"arXiv\",\n    primaryClass = \"physics.data-an\",\n    reportNumber = \"CMS-CAT-23-001, CERN-EP-2024-078\",\n    year = \"2024\",\n    note = \"Submitted to \\textit{Comput. Softw. Big Sci.}\"\n}\n</code></pre>"},{"location":"CernVM/","title":"CernVM","text":""},{"location":"CernVM/#standalone-use-inside-cernvm","title":"Standalone use inside CernVM","text":"<p>Standalone by adding the <code>CMS</code> group to the CVMFS Configuration. A minimal <code>CernVM</code> working context setup can be found in the CernVM Marketplace under <code>Experimental/HiggsCombine</code> or at https://cernvm-online.cern.ch/context/view/9ee5960ce4b143f5829e72bbbb26d382. At least 2GB of disk space should be reserved on the virtual machine for Combine to work properly.</p>"},{"location":"CernVM/#available-machines-for-standalone-combine","title":"Available machines for standalone combine","text":"<p>The standalone version can be easily compiled via CVMFS as it relies on dependencies which are already installed at /cvmfs/cms.cern.ch/. Access to /cvmfs/cms.cern.ch/ can be obtained from lxplus machines or via <code>CernVM</code>. The only requirement will be to add the CMS group to the CVMFS configuration as shown in the picture</p> <p></p> <p>At least 2GB of disk space should be reserved on the virtual machine for combine to work properly. A minimal CernVM working context setup can be found in the CernVM Marketplace under <code>Experimental/HiggsCombine</code>. </p> <p>To use this predefined context, first locally launch the CernVM (eg you can use the .ova with VirtualBox, by downloading from here and launching the downloaded file. You can click on \"pair an instance of CernVM\" from the cernvm-online dashboard, which displays a PIN. In the VirtualBox terminal, pair the virtual machine with this PIN code (enter in the terminal using #PIN eg <code>#123456</code>. After this, you will be asked again for username (use <code>user</code>) and then a password (use <code>hcomb</code>).</p> <p>In case you do not want to use the cvmfs area, you will need to adapt the location of the dependencies listed in both the Makefile and env_standalone.sh files.</p>"},{"location":"releaseNotes/","title":"Release notes","text":""},{"location":"releaseNotes/#cmssw-10_2_x-v800","title":"CMSSW 10_2_X - v8.0.0","text":"<p>This release contains all of the changes listed for v7.0.13 below. In addition:</p> <ul> <li>New documentation pages, using the mkdocs framework. The documentation source is included in the repository as simple markdown files. Users are welcome to make additions and corrections as pull requests to this repo.</li> <li>It is now possible to include additional constraint terms for regularisiation when unfolding using combine. 
Detailed documentation for this is given here.</li> <li>The option <code>-S 0</code> to remove all systematic uncertainties has been removed. Instead, to freeze all constrained nuisance parameters the option <code>--freezeParameters allConstrainedNuisances</code> should be used, which replaces the previous shortcut of <code>--freezeParameters all</code>.</li> <li>The possibility to use some old method names has now been fully removed. When setting the <code>-M</code> option, <code>FitDiagnostics</code>, <code>AsymptoticLimits</code> and <code>Significance</code> must be used instead of, respectively, <code>MaxLikelihoodFit</code>, <code>Asymptotic</code> and <code>ProfileLikelihood</code>.</li> </ul>"},{"location":"releaseNotes/#cmssw-8_1_x-v7013","title":"CMSSW 8_1_X - v7.0.13","text":"<ul> <li>Nuisance <code>edit</code> selections for bins, processes or systematic names now require a complete string match. For example, <code>nuisance edit add procA binA [...]</code> will no longer match <code>procAB</code> and <code>binAB</code>. Note that regex selections can still be used to match multiple labels, but again are now required to match the full strings.</li> <li>Nuisance parameters can now be frozen using attributes that have been assigned to the corresponding RooRealVars. Syntax is <code>--freezeWithAttributes attr1,attr2,...,attrN</code>.</li> <li>For Higgs analyses: added YR4 cross sections, branching ratios and partial width uncertainties in <code>data/lhc-hxswg/sm/</code>, as used in HIG-17-031</li> <li>[EXPERIMENTAL] For binned analyses using autoMCStats a faster implementation of the vertical template morphing for shape uncertainties can be enabled at runtime with the option <code>--X-rtd FAST_VERTICAL_MORPH</code>. Any results using this flag should be validated carefully against the default.</li> </ul>"},{"location":"model_building_tutorial2024/model_building_exercise/","title":"Building statistical models with Combine","text":""},{"location":"model_building_tutorial2024/model_building_exercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of Combine, please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>After setting up Combine, you can access the working directory for this tutorial which contains all of the inputs and scripts needed in this excercise exercise:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/\ngit checkout main \nscram b -j 8\ncd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#exercise-outline","title":"Exercise outline","text":"<p>This tutorial focuses and extends on the model building topic, and it is not going to give a full picture on the statistical methods, which are extensively covered in the long exercise and statistical methods exercise. 
</p> <p>1) Building analysis with templates </p> <p>2) Using keywords </p> <p>3) Control regions </p> <p>4) Rate parameters </p> <p>5) Extra arguments  </p> <p>6) Physics Models</p>"},{"location":"model_building_tutorial2024/model_building_exercise/#introduction","title":"Introduction","text":"<p>The most general definition for the binned model likelihood can be given as </p> \\[ \\mathcal{L} =  \\mathcal{L}_\\mathrm{primary} \\cdot \\mathcal{L}_\\mathrm{auxiliary} = \\prod_{c=1}^{N_c} \\prod_{b=1}^{N_b^c} \\mathrm{Poiss}(n_{cb}; n^\\mathrm{exp}_{cb}(\\vec{\\mu},\\vec{\\nu})) \\cdot \\prod_{e=1}^{N_E}  p_e(y_e ; \\nu_e) \\] <p>Where \\(c\\) indexes the channel, \\(b\\) indexes the histogram bin, and \\(e\\) indexes the nuisance parameter.</p> <p>The generic model of the expected event count in a given bin, \\(n^\\mathrm{exp}_{cb}\\), implemented in combine for template based analyses is given by:</p> \\[n^\\mathrm{exp}_{cb} = \\mathrm{max}(0, \\sum_{p} M_{cp}(\\vec{\\mu})N_{cp}(\\nu_G, \\vec{\\nu}_L,\\vec{\\nu}_S,\\vec{\\nu}_{\\rho})\\omega_{cbp}(\\vec{\\nu}_S) + E_{cb}(\\vec{\\nu}_B) ) \\] <p>In terms of datacard structure there are several differences with respect to the counting datacard:</p> <ul> <li>A new block of lines at the top defining how channels and processes are mapped to the histograms (more than one line can be used)</li> <li>In the list of systematic uncertainties we now have entries marked as <code>shape</code></li> </ul> <p>The \"shapes\" line has to follow the following syntax: </p> <pre><code>shapes   &lt;process_name&gt;   &lt;channel_name&gt;   &lt;path/to/input_shape.root&gt;   &lt;histograms_nominal&gt;  &lt;histograms_with_variations&gt;\n</code></pre> <p>To start the hands-on for this section:  <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/simple_shape\n</code></pre></p> <p>The input shapes for the first example (<code>datacard-2-template-analysis.txt</code>) are located in the <code>template-analysis-datacard-input.root</code>, it contains the observed distribution <code>data_obs</code>, the nominal histograms for each process and systematic uncertainties templates:  </p> <pre><code>root [0] \nAttaching file template-analysis-datacard-input.root as _file0...\n(TFile *) 0x556fd5b0fea0\nroot [1] .ls\nTFile**     template-analysis-datacard-input.root   \n TFile*     template-analysis-datacard-input.root   \n  KEY: TH1F background;1    Histogram of background__x\n  KEY: TH1F background_alphaUp;1    Histogram of background__x\n  KEY: TH1F background_alphaDown;1  Histogram of background__x\n  KEY: TH1F data_obs;1  Histogram of data_obs__x\n  KEY: TH1F signal;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaUp;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaDown;1  Histogram of signal__x\n</code></pre> <p>To define the mapping to the systematic uncertainties templates the <code>$SYSTEMATIC</code> keyword should be used, which connects the systematic uncertainties marked as <code>shape</code> type with the input shapes. 
</p> <pre><code>imax 1\njmax 1\nkmax 4\n# ---------------\nshapes  signal  ch1 template-analysis-datacard-input.root signal signal_$SYSTEMATIC\nshapes  background  ch1 template-analysis-datacard-input.root background background_$SYSTEMATIC\n# ---------------\nbin         ch1\nobservation 85\n# ------------------------------\nbin             ch1        ch1\nprocess         signal     background\nprocess         0          1\nrate            24         100\n# --------------------------------\nlumi     lnN    1.1       1.0\nbgnorm   lnN    -         1.3\nalpha  shape    -          1   # uncertainty in the background template.\nsigma  shape    0.5        -   # uncertainty in the signal template.\n</code></pre> <p>To simplify the shape mapping line the keywords <code>$PROCESS</code>, <code>$CHANNEL</code> can be used. The <code>$PROCESS</code> keyword is associated with the processes listed in the datacard: <code>[signal, background]</code>, it is also possible to use the <code>*</code> wildcard to map multiple processes and/or channels with one line as shown below. </p> <pre><code>imax 1\njmax 1\nkmax 4\n# ---------------\nshapes * * template-analysis-datacard-input.root $PROCESS $PROCESS_$SYSTEMATIC\n# ---------------\nbin         ch1\nobservation 85\n# ------------------------------\nbin             ch1        ch1\nprocess         signal     background\nprocess         0          1\nrate            24         100\n# --------------------------------\nlumi     lnN    1.1       1.0\nbgnorm   lnN    -         1.3\nalpha  shape    -          1   # uncertainty in the background template.\nsigma  shape    0.5        -   # uncertainty in the signal template.\n</code></pre> <p>If there are more than one category it can be useful to store the input shapes corresponding to different regions in separate <code>TDirectory</code>s and use $CHANNEL keyword as shown below: </p> <pre><code>shapes * * &lt;input-file.root&gt; $CHANNEL/$PROCESS $CHANNEL/$PROCESS_$SYSTEMATIC\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#keywords","title":"Keywords","text":"<p>Go to the datacards location corresponding to this section of the tutorial: <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/keywords\n</code></pre></p> <p>The datacard can also contain <code>$MASS</code> keyword, it allows to setup a single datacard for various mass points. It will be replaced with the value passed to <code>-m</code> option when running the tool. In addition, user-defined keywords can be used. Any word in the datacard <code>$WORD</code> will be replaced by <code>VALUE</code> when including the option <code>--keyword-value WORD=VALUE</code>. This option can be repeated multiple times for multiple keywords.</p> <p><pre><code> KEY: TH1D  ggH110;1    \n KEY: TH1D  bbH110;1    \n KEY: TH1D  ggH110_CMS_eff_t_mssmHigh_tt_13TeVDown;1    \n.... \n KEY: TH1D  ggH120;1    \n KEY: TH1D  bbH120;1    \n KEY: TH1D  ggH120_CMS_eff_t_mssmHigh_tt_13TeVDown;1    \n.....\n</code></pre> In the <code>htt_tt_9_13TeV.txt</code> datacard you can find the following lines: </p> <p><pre><code>shapes * htt_tt_9_13TeV htt_input.root htt_tt_9_13TeV/$PROCESS htt_tt_9_13TeV/$PROCESS_$SYSTEMATIC\nshapes bbH htt_tt_9_13TeV htt_input.root htt_tt_9_13TeV/bbH$MASS htt_tt_9_13TeV/bbH$MASS_$SYSTEMATIC\nshapes ggH htt_tt_9_13TeV htt_input.root htt_tt_9_13TeV/ggH$MASS htt_tt_9_13TeV/ggH$MASS_$SYSTEMATIC\n</code></pre> defining the mapping for all mass points at the same time. 
One can use this datacard to set 95% CL limits for different mass points by assigning the desired mass value to the <code>-m</code> option in the command below.   <pre><code>combine -M AsymptoticLimits  -d htt_tt_9_13TeV.txt  -m &lt;mass_value&gt;\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#simultaneous-fit-in-multiple-categories","title":"Simultaneous fit in multiple categories","text":"<pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/control_regions\n</code></pre> <p>To combine the datacards corresponding to various (independent) regions into a single card one can use <code>combineCards.py</code>. </p> <p><pre><code>combineCards.py htt_tt_9_13TeV=htt_tt_9_13TeV.txt htt_tt_8_13TeV=htt_tt_8_13TeV.txt &gt;htt_tt_SRs.txt\n</code></pre> The combined card <code>htt_tt_SRs.txt</code> now has two categories:  <pre><code>----------------------------------------------------------------------------------------------------------------------------------\nbin          htt_tt_9_13TeV  htt_tt_8_13TeV\nobservation  3416            105545\n----------------------------------------------------------------------------------------------------------------------------------\n</code></pre></p>"},{"location":"model_building_tutorial2024/model_building_exercise/#rate-parameters","title":"Rate parameters","text":"<pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/control_regions\n</code></pre> <p>It is quite common to use data-driven background estimation methods. In Combine one can perform a simultaneous fit of the signal and control regions; this automatically handles the statistical uncertainty due to the number of data events in the control region, correctly accounts for signal contamination in the control region, and allows systematic uncertainties affecting the backgrounds in the control regions to be taken into account properly. </p> <p>In the working directory for this section you can find the <code>htt_zmm_8_13TeV.txt, htt_zmm_9_13TeV.txt and htt_ttbar_1_13TeV.txt</code> cards, corresponding to control regions enriched in the ZLL and ttbar processes, in addition to the signal regions from the previous step. Let's combine all of the regions into one datacard. </p> <p><pre><code>combineCards.py htt_zmm_9_13TeV=htt_zmm_9_13TeV.txt htt_zmm_8_13TeV=htt_zmm_8_13TeV.txt htt_ttbar_1_13TeV=htt_ttbar_1_13TeV.txt htt_tt_9_13TeV=htt_tt_9_13TeV.txt htt_tt_8_13TeV=htt_tt_8_13TeV.txt &gt;htt_tt_combined.txt\n</code></pre> Now <code>htt_tt_combined.txt</code> contains both the signal and control regions. 
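Before adding rate parameters it can be useful to check that the combined card builds into a workspace without errors (a quick sanity check, not part of the original exercise): <pre><code>text2workspace.py htt_tt_combined.txt -m &lt;mass_value&gt; -o ws_combined.root\n</code></pre> 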
To allow the rates of the background processes to be corrected from the control regions we can define common rate parameters, which linearly scale the predicted rates specified in the datacard, using the syntax </p> <pre><code>&lt;rate_param_name&gt; rateParam &lt;category&gt; &lt;process&gt; &lt;initial_value&gt; [min_value,max_value]\n</code></pre> <p>The following lines define the <code>rate_TT</code> and <code>rate_ZMM</code> rate parameters, scaling the TT/TTT and ZLL processes in all regions simultaneously:  <pre><code>rate_TT                 rateParam  *          TT         1 [0,5]\nrate_TT                 rateParam  *          TTT        1 [0,5]\nrate_ZMM                rateParam  *          ZLL        1 [0,2]\n</code></pre></p> <p>Note that by default rate parameters are freely floating (unconstrained) parameters in Combine; however, it is possible to add a constraint term to the likelihood by adding a <code>param</code> line with the same name as the rate parameter: </p> <p><pre><code>rate_TT param &lt;mean&gt; &lt;sigma&gt;  \n</code></pre> Task: add a <code>param</code> nuisance named <code>rate_TT</code> with <code>mean = 1.</code> and <code>sigma = 1</code>, check how the uncertainty on the <code>rate_TT</code> parameter changes, and what happens if you change the width of the constraint term?</p> <p>In addition, rate modifiers that are functions of other parameters can be included using the following syntax:</p> <p><pre><code>name rateParam bin process formula args\n</code></pre> This can be useful, for example, to constrain the ratio of two processes, as shown below:</p> <pre><code>rate_A rateParam *  process_A 1\nratio_BtoA param 1 1\nrate_B rateParam *  process_B  @0*@1 rate_A,ratio_BtoA\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#extra-arguments","title":"Extra arguments","text":"<pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/PhysicsModels\n</code></pre> <p>If one wants to connect different models with common parameters, or simply use external functions, it is possible to import parameters defined within external workspaces with <code>extArg</code>: </p> <p><pre><code>name extArg rootfile:workspacename\n</code></pre> The <code>extArg</code> syntax allows one to import a <code>RooAbsReal</code> object from an external workspace. This object can be another free floating parameter, or a function of other parameters. In this section we are going to import <code>RooSpline1D</code> objects which define how various Higgs production cross sections depend on the Higgs mass (the <code>MH</code> parameter). </p> <p>The datacard we are going to use in this section, <code>htt_tt_125_8TeV.txt</code>, corresponds to an 8 TeV analysis with a Higgs boson mass of 125 GeV. To exercise the <code>extArg</code> features, let's rescale the signal templates to the 13 TeV cross-section values.  
The 13 and 8 TeV cross sections predictions from YR4 are stored in the <code>$CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/</code> in <code>sm_yr4_13TeV.root</code> and <code>sm_yr4_8TeV.root</code> files respectively, let's inspect the contents of <code>sm_yr4_13TeV.root</code>: </p> <p><pre><code> TFile*     /afs/cern.ch/work/a/anigamov/rootv630/CMSSW_14_1_0_pre4/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root \n  KEY: RooWorkspace xs_13TeV;1  xs_13TeV\n  KEY: TProcessID   ProcessID0;1    2fd49e90-f1a0-11e8-9717-b052b8bcbeef\nroot [2] xs_13TeV-&gt;Print()\n\nRooWorkspace(xs_13TeV) xs_13TeV contents\n\nvariables\n---------\n(MH)\n\nfunctions\n--------\nRooSpline1D::WH_13TeV[ xvar=MH ] = 1.369\nRooSpline1D::WminusH_13TeV[ xvar=MH ] = 0.5313\nRooSpline1D::WplusH_13TeV[ xvar=MH ] = 0.838\nRooSpline1D::ZH_13TeV[ xvar=MH ] = 0.8824\nRooSpline1D::bbH_13TeV[ xvar=MH ] = 0.4863\nRooSpline1D::ggH_13TeV[ xvar=MH ] = 48.52\nRooSpline1D::ggZH_13TeV[ xvar=MH ] = 0.1227\nRooFormulaVar::qqZH_13TeV[ actualVars=(ZH_13TeV,ggZH_13TeV) formula=\"@0-@1\" ] = 0.7597\nRooSpline1D::tHW_13TeV[ xvar=MH ] = 0.01517\nRooSpline1D::tHq_13TeV[ xvar=MH ] = 0.07714\nRooSpline1D::ttH_13TeV[ xvar=MH ] = 0.5065\nRooSpline1D::vbfH_13TeV[ xvar=MH ] = 3.779\n</code></pre> The <code>RooSpline1D::WH_13TeV[ xvar=MH ] = 1.369</code> contains cross sections values interpolated between various Higgs mass points.  We can import them into our model as shown below</p> <p><pre><code>vbfH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\nggH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\nZH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\nWH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\n\nvbfH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\nggH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\nZH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\nWH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\n</code></pre> Then we can define <code>rateParam</code> as functions of imported <code>extArg</code>s to rescale signal processes WH, ZH, ggH, qqH:  <pre><code>WH_8to13TeV    rateParam  *   WH    @0/@1         WH_13TeV,WH_8TeV\nZH_8to13TeV    rateParam  *   ZH    @0/@1         ZH_13TeV,ZH_8TeV\nggH_8to13TeV    rateParam  *   ggH    @0/@1         ggH_13TeV,ggH_8TeV\nvbfH_8to13TeV    rateParam  *   qqH    @0/@1         vbfH_13TeV,vbfH_8TeV\n</code></pre></p> <p>When running Combine methods (e.g. <code>combine -M MultiDimFit --algo singles</code>) with these parameters you have to freeze <code>MH</code>, i.e. add <code>--freezeParameters MH</code> option.  </p> <p>Advanced task: rescale the signal templates to the cross-section corresponding to a different MH value (e.g. 120 GeV). 
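A possible starting point for the advanced task (a sketch, assuming the card with the <code>extArg</code> and <code>rateParam</code> lines above has been converted to a workspace called <code>ws_rescaled.root</code>; the workspace name and the mass value are illustrative): since the imported splines depend on <code>MH</code>, setting and freezing <code>MH</code> at the desired value evaluates the cross sections at that mass, <pre><code>combine -M MultiDimFit --algo singles -d ws_rescaled.root --setParameters MH=120 --freezeParameters MH\n</code></pre> 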
</p>"},{"location":"model_building_tutorial2024/model_building_exercise/#physics-models","title":"Physics Models","text":"<p>With physics model one can instruct Combine how to scale the signal (background) yields with parameters of the model: \\(M_{cp}(\\vec{\\mu})\\) from </p> \\[n^\\mathrm{exp}_{cb} = \\mathrm{max}(0, \\sum_{p} M_{cp}(\\vec{\\mu})N_{cp}(\\nu_G, \\vec{\\nu}_L,\\vec{\\nu}_S,\\vec{\\nu}_{\\rho})\\omega_{cbp}(\\vec{\\nu}_S) + E_{cb}(\\vec{\\nu}_B) ) \\] <p>In Combine we can use PhysicsModel as a base class.</p> <p>This class has several useful methods, but the most important ones are <code>doParametersOfInterest()</code> which defines parameters and functions of the model, and <code>getYieldScale(self, bin, process)</code> defines how expected events are scaled with the model parameters.</p> <pre><code>class PhysicsModelBase(six.with_metaclass(ABCMeta, object)):\n...\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n\n    def getYieldScale(self, bin, process):\n        \"Return the name of a RooAbsReal to scale this yield by or the two special values 1 and 0 (don't scale, and set to zero)\"\n        return \"r\" if self.DC.isSignal[process] else 1\n...\n</code></pre> <p>There are many models available to use for different physics cases, follow the link for more information. In the following sections we will discuss how one can construct custom models. </p> <p>To use a different physics model instead of the default one, we are going to use the option -P as in</p> <pre><code>text2workspace.py datacard.txt -P HiggsAnalysis.CombinedLimit.PythonFile:modelName\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#default-physics-model","title":"Default physics model","text":"<p>The default physics model implemented in Combine defines a single POI that linearly scales all signal processes. We use this model by default when running <code>text2workspace.py -m &lt;mass_value&gt; &lt;datacard.txt&gt;</code>. </p> \\[ M_{cp}(\\mu) = \\begin{cases}     \\mu  &amp;\\mathrm{if\\ } p \\in \\mathrm{signal} \\\\     1    &amp;\\mathrm{otherwise} \\end{cases} \\] <pre><code>class PhysicsModel(PhysicsModelBase):\n    \"\"\"Example class with signal strength as only POI\"\"\"\n\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"r[1,0,20]\")\n        self.modelBuilder.doSet(\"POI\", \"r\")\n        # --- Higgs Mass as other parameter ----\n        if self.options.mass != 0:\n            if self.modelBuilder.out.var(\"MH\"):\n                self.modelBuilder.out.var(\"MH\").removeRange()\n                self.modelBuilder.out.var(\"MH\").setVal(self.options.mass)\n            else:\n                self.modelBuilder.doVar(\"MH[%g]\" % self.options.mass)\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#multi-signal-model","title":"Multi Signal model","text":"<p>Combine already contains a model <code>HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel</code> that can be used to assign different signal strengths to multiple processes in a datacard, configurable from the command line using the mapping <code>--PO 'map=&lt;bin&gt;/&lt;process&gt;:&lt;parameter_name&gt;'</code>. The wildcard <code>*</code> are allowed for <code>&lt;bin&gt;</code> and <code>&lt;process&gt;</code> entries. 
The following command assigns the <code>r_ggH</code> and <code>r_qqH</code> signal strengths to scale the <code>ggH</code> and <code>qqH</code> processes in all regions (<code>bin</code>s). </p> <pre><code>text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose  --PO  'map=.*/ggH:r_ggH[1,-5,5]' --PO 'map=.*/qqH:r_qqH[1,-5,5]' PhysicsModels/htt_tt_125_8TeV.txt  -o ws_multiSignal.root -m 125\ncombine -M MultiDimFit --algo singles -d  ws_multiSignal.root -n .multiSignal.\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#custom-models","title":"Custom models","text":"<p>Now let's look at an example of how one can construct a model where the two leading signal processes (ggH and qqH) are scaled with a relative fraction parameter <code>f</code> and an overall rate modifier <code>r</code>. </p> \\[ N_{ggH}(r, f) = r f N_{ggH};\\,N_{qqH}(r, f) = r (1 - f) N_{qqH} \\] <p>As discussed above, first we have to define the parameters of the model <pre><code>    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"f[0,0,4]\") \n        self.modelBuilder.doVar(\"r[1,0,10]\")\n</code></pre> then we can use them in the scaling functions for the ggH and qqH processes <pre><code>        self.modelBuilder.factory_( \"expr::scale_qqH(\\\"(1-@0)*@1\\\", f,r)\")\n        self.modelBuilder.factory_( \"expr::scale_ggH(\\\"@0*@1\\\", f,r)\")\n</code></pre> and add the parameters to the set of POIs <pre><code>        self.modelBuilder.doSet(\"POI\", \",\".join([\"f\", \"r\"]))\n</code></pre> The <code>getYieldScale(self, bin, process)</code> method will scale the <code>qqH</code> process with the <code>scale_qqH</code> object and <code>ggH</code> with <code>scale_ggH</code>.  
<pre><code>    def getYieldScale(self, bin, process):\n        if process == \"qqH\": return \"scale_qqH\"\n        elif process == \"ggH\": return \"scale_ggH\"\n        else: return 1\n</code></pre></p> <p>To use this model we should add the directory where the corresponding file is located to the <code>PYTHON3PATH</code>: <pre><code>export PYTHON3PATH=${PYTHON3PATH}:${PWD}/models\n</code></pre> And now we can finally create the workspace using this model:  <pre><code>text2workspace.py PhysicsModels/htt_tt_125_8TeV.txt -P FractionModel:Fraction_2signals  -m 125  -o ws_fraction.root\n</code></pre></p> <p>One can inspect the created workspace to ensure that the model is correctly implemented:</p> <pre><code>root -l ws_fraction.root\nroot [1] w-&gt;Print()\n</code></pre> <p>The created workspace is quite large, but it should contain two new <code>RooFormulaVar</code> objects </p> <p><pre><code>RooFormulaVar::scale_ggH[ actualVars=(f,r) formula=\"x[0]*x[1]\" ] = 0\nRooFormulaVar::scale_qqH[ actualVars=(f,r) formula=\"(1-x[0])*x[1]\" ] = 1\n</code></pre> which modify the normalisation of the ggH and qqH processes</p> <pre><code>ProcessNormalization::n_exp_binhtt_tt_1_8TeV_proc_ggH[ thetaList=(CMS_eff_t_tt_8TeV,CMS_htt_scale_met_8TeV,QCDscale_ggH1in,UEPS,lumi_8TeV,pdf_gg) asymmThetaList=() otherFactorList=(scale_ggH) ] = 0\nProcessNormalization::n_exp_binhtt_tt_1_8TeV_proc_qqH[ thetaList=(CMS_eff_t_tt_8TeV,CMS_htt_scale_met_8TeV,CMS_scale_j_8TeV,QCDscale_qqH,UEPS,lumi_8TeV,pdf_qqbar) asymmThetaList=() otherFactorList=(scale_qqH) ] = 1.4954\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#eft-model","title":"EFT model","text":"<p>One can also define analytical BSM models with Combine. In this section we will extract a confidence interval for one of the SMEFT Wilson coefficients. Without going into details, it can be shown that cross sections scale quadratically with the dimension-6 SMEFT Wilson coefficients. In this example we will consider the \\(c_{Hg}\\) operator, which, among all signal processes in the datacard, affects only ggH at LO in SMEFT. 
</p> <p>\\(\\sigma(c_{Hg}) = \\sigma_{SM} (1 + A c_{Hg} + B c^{2}_{Hg})\\), where the A and B coefficients are numbers that can be estimated from simulation.</p> <pre><code>class SMEFT_chg(PhysicsModel):\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"A[39.54]\")\n        self.modelBuilder.doVar(\"B[245.32]\")\n        self.modelBuilder.out.var(\"A\").setConstant(True)\n        self.modelBuilder.out.var(\"B\").setConstant(True)\n        self.modelBuilder.doVar(\"chg[0,-1,1]\")\n        self.modelBuilder.factory_( \"expr::ggH_scaling_chg(\\\"1+@1*@0+@2*@0*@0\\\", chg, A, B)\")\n        self.modelBuilder.doSet(\"POI\", \",\".join([\"chg\"]))\n\n    def getYieldScale(self, bin, process):\n        if process == \"ggH\": return \"ggH_scaling_chg\"\n        else: return 1\n\nsmeft_chg_tutorial = SMEFT_chg()\n</code></pre> <p>To create the workspace using this model one can run </p> <pre><code>export PYTHON3PATH=${PYTHON3PATH}:${PWD}/models\ntext2workspace.py PhysicsModels/htt_tt_125_8TeV.txt -P EFT_simple:smeft_chg_tutorial  -m 125  -o ws_chg.root\n</code></pre> <p>Run the likelihood scan for the \\(c_{Hg}\\) parameter and make the plot: <pre><code>combine -M MultiDimFit --algo grid -d  ws_chg.root --setParameterRanges chg=-0.2,0.2\nplot1DScan.py higgsCombineTest.MultiDimFit.mH120.root --POI chg\n</code></pre></p>"},{"location":"part2/bin-wise-stats/","title":"Automatic statistical uncertainties","text":""},{"location":"part2/bin-wise-stats/#introduction","title":"Introduction","text":"<p>The <code>text2workspace.py</code> script is able to produce a type of workspace, using a set of new histogram classes, in which bin-wise statistical uncertainties are added automatically. This can be built for shape-based datacards where the inputs are in TH1 format. Datacards that use RooDataHists are not supported. The bin errors (i.e. values returned by <code>TH1::GetBinError</code>) are used to model the uncertainties.</p> <p>By default the script will attempt to assign a single nuisance parameter to scale the sum of the process yields in each bin, constrained by the total uncertainty, instead of requiring separate parameters, one per process. This is sometimes referred to as the Barlow-Beeston-lite approach, and is useful as it minimises the number of parameters required in the maximum likelihood fit. A useful description of this approach may be found in section 5 of this report.</p>"},{"location":"part2/bin-wise-stats/#usage-instructions","title":"Usage instructions","text":"<p>The following line should be added at the bottom of the datacard, underneath the systematics, to produce a new-style workspace and optionally enable the automatic bin-wise uncertainties:</p> <pre><code>[channel] autoMCStats [threshold] [include-signal = 0] [hist-mode = 1]\n</code></pre> <p>The first string <code>channel</code> should give the name of the channels (bins) in the datacard for which the new histogram classes should be used. The wildcard <code>*</code> is supported for selecting multiple channels in one go. The value of <code>threshold</code> should be set to a value greater than or equal to zero to enable the creation of automatic bin-wise uncertainties, or <code>-1</code> to use the new histogram classes without these uncertainties. A positive value sets the threshold on the effective number of unweighted events above which the uncertainty will be modeled with the Barlow-Beeston-lite approach described above. 
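For example (the threshold value here is purely illustrative), a line enabling this for all channels with a threshold of 10 effective unweighted events would read <pre><code>* autoMCStats 10\n</code></pre> 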
Below the threshold an individual uncertainty per-process will be created. The algorithm is described in more detail below.</p> <p>The last two settings are optional. The first of these, <code>include-signal</code> has a default value of <code>0</code> but can be set to <code>1</code> as an alternative. By default, the total nominal yield and uncertainty used to test the threshold excludes signal processes. The reason for this is that typically the initial signal normalization is arbitrary, and could unduly lead to a bin being considered well-populated despite poorly populated background templates. Setting this flag will include the signal processes in the uncertainty analysis. Note that this option only affects the logic for creating a single Barlow-Beeston-lite parameter vs. separate per-process parameters - the uncertainties on all signal processes are always included in the actual model! The second flag changes the way the normalization effect of shape-altering uncertainties is handled. In the default mode (<code>1</code>) the normalization is handled separately from the shape morphing via a an asymmetric log-normal term. This is identical to how Combine has always handled shape morphing. When set to <code>2</code>, the normalization will be adjusted in the shape morphing directly. Unless there is a strong motivation we encourage users to leave this on the default setting.</p>"},{"location":"part2/bin-wise-stats/#description-of-the-algorithm","title":"Description of the algorithm","text":"<p>When <code>threshold</code> is set to a number of effective unweighted events greater than or equal to zero, denoted \\(n^{\\text{threshold}}\\), the following algorithm is applied to each bin:</p> <ol> <li>Sum the yields \\(n_{i}\\) and uncertainties \\(e_{i}\\) of each background process \\(i\\) in the bin. Note that the \\(n_i\\) and \\(e_i\\) include the nominal effect of any scaling parameters that have been set in the datacard, for example <code>rateParams</code>. \\(n_{\\text{tot}} = \\sum_{i\\,\\in\\,\\text{bkg}}n_i\\), \\(e_{\\text{tot}} = \\sqrt{\\sum_{i\\,\\in\\,\\text{bkg}}e_i^{2}}\\)</li> <li>If \\(e_{\\text{tot}} = 0\\), the bin is skipped and no parameters are created. If this is the case, it is a good idea to check why there is no uncertainty in the background prediction in this bin!</li> <li>The effective number of unweighted events is defined as \\(n_{\\text{tot}}^{\\text{eff}} = n_{\\text{tot}}^{2} / e_{\\text{tot}}^{2}\\), rounded to the nearest integer.</li> <li>If \\(n_{\\text{tot}}^{\\text{eff}} \\leq n^{\\text{threshold}}\\): separate uncertainties will be created for each process. Processes where \\(e_{i} = 0\\) are skipped. If the number of effective events for a given process is lower than \\(n^{\\text{threshold}}\\) a Poisson-constrained parameter will be created. Otherwise a Gaussian-constrained parameter is used.</li> <li>If \\(n_{\\text{tot}}^{\\text{eff}} \\gt n^{\\text{threshold}}\\): A single Gaussian-constrained Barlow-Beeston-lite parameter is created that will scale the total yield in the bin.</li> <li>Note that the values of \\(e_{i}\\), and therefore \\(e_{tot}\\), will be updated automatically in the model whenever the process normalizations change.</li> <li>A Gaussian-constrained parameter \\(\\nu\\) has a nominal value of zero and scales the yield as \\(n_{\\text{tot}} + \\nu \\cdot e_{\\text{tot}}\\). 
The Poisson-constrained parameters are expressed as a yield multiplier with nominal value one: \\(n_{\\text{tot}} \\cdot \\nu\\).</li> </ol> <p>The output from <code>text2workspace.py</code> will give details on how each bin has been treated by this algorithm, for example:</p> Show example output <pre><code>============================================================\nAnalysing bin errors for: prop_binhtt_et_6_7TeV\nPoisson cut-off: 10\nProcesses excluded for sums: ZH qqH WH ggH\n============================================================\nBin        Contents        Error           Notes\n0          0.000000        0.000000        total sum\n0          0.000000        0.000000        excluding marked processes\n  =&gt; Error is zero, ignore\n------------------------------------------------------------\n1          0.120983        0.035333        total sum\n1          0.120983        0.035333        excluding marked processes\n1          12.000000       3.464102        Unweighted events, alpha=0.010082\n  =&gt; Total parameter prop_binhtt_et_6_7TeV_bin1[0.00,-7.00,7.00] to be gaussian constrained\n------------------------------------------------------------\n2          0.472198        0.232096        total sum\n2          0.472198        0.232096        excluding marked processes\n2          4.000000        2.000000        Unweighted events, alpha=0.118049\n  =&gt; Number of weighted events is below poisson threshold\n    ZH                   0.000000        0.000000\n      =&gt; Error is zero, ignore\n  ----------------------------------------------------------\n    W                    0.050606        0.029220\n                         3.000000        1.732051        Unweighted events, alpha=0.016869\n      =&gt; Product of prop_binhtt_et_6_7TeV_bin2_W[1.00,0.00,12.15] and const [3] to be poisson constrained\n  ----------------------------------------------------------\n    ZJ                   0.142444        0.140865\n                         1.000000        1.000000        Unweighted events, alpha=0.142444\n      =&gt; Product of prop_binhtt_et_6_7TeV_bin2_ZJ[1.00,0.00,30.85] and const [1] to be poisson constrained\n  ----------------------------------------------------------\n</code>"},{"location":"part2/bin-wise-stats/#analytic-minimisation","title":"Analytic minimisation","text":"<p>One significant advantage of the Barlow-Beeston-lite approach is that the maximum likelihood estimate of each nuisance parameter has a simple analytic form that depends only on \\(n_{\\text{tot}}\\), \\(e_{\\text{tot}}\\) and the observed number of data events in the relevant bin. Therefore when minimising the negative log-likelihood of the whole model it is possible to remove these parameters from the fit and set them to their best-fit values automatically. For models with large numbers of bins this can reduce the fit time and increase the fit stability. The analytic minimisation is enabled by default starting in combine v8.2.0, you can disable it by adding the option <code>--X-rtd MINIMIZER_no_analytic</code> when running Combine.</p>\n<p>The figure below shows a performance comparison of the analytical minimisation versus the number of bins in the likelihood function. 
The real time (in seconds) for a typical minimisation of a binned likelihood is shown as a function of the number of bins when invoking the analytic minimisation of the nuisance parameters versus the default numerical approach.</p>\n\nShow Comparison\n<p></p>"},{"location":"part2/bin-wise-stats/#technical-details","title":"Technical details","text":"<p>Up until recently <code>text2workspace.py</code> would only construct the PDF for each channel using a <code>RooAddPdf</code>, i.e. each component process is represented by a separate PDF and normalization coefficient. However, in order to model bin-wise statistical uncertainties, the alternative <code>RooRealSumPdf</code> can be more useful, as each process is represented by a RooFit function object instead of a PDF, and we can vary the bin yields directly. As such, a new RooFit histogram class <code>CMSHistFunc</code> is introduced, which offers the same vertical template morphing algorithms offered by the current default histogram PDF, <code>FastVerticalInterpHistPdf2</code>. Accompanying this is the <code>CMSHistErrorPropagator</code> class. This evaluates a sum of <code>CMSHistFunc</code> objects, each multiplied by a coefficient. It is also able to scale the summed yield of each bin to account for bin-wise statistical uncertainty nuisance parameters.</p>\n\n<p>Warning</p>\n<p>One disadvantage of this new approach comes when evaluating the expectation for individual processes, for example when using the <code>--saveShapes</code> option in the <code>FitDiagnostics</code> mode of Combine. The Barlow-Beeston-lite parameters scale the sum of the process yields directly, so extra work is needed to distribute this total scaling back to each individual process. To achieve this, an additional class <code>CMSHistFuncWrapper</code> has been created; given a particular <code>CMSHistFunc</code>, the <code>CMSHistErrorPropagator</code> will distribute an appropriate fraction of the total yield shift to each bin. As a consequence of the extra computation needed to distribute the yield shifts in this way, the evaluation of individual process shapes in <code>--saveShapes</code> can take longer than previously.</p>"},{"location":"part2/bsm-higgs-models/","title":"Physics Models for Extended Higgs Sector Searches","text":"<p>This page lists the physics models that can be used to perform searches for additional Higgs bosons at the LHC, when the SM-like Higgs boson should be accounted for in the data.</p>"},{"location":"part2/bsm-higgs-models/#two-higgs-models","title":"Two Higgs Models","text":"<p>These models are for the case where there are just two Higgs bosons, one of which is the SM-like Higgs boson that was discovered at the LHC. The two Higgs models are implemented in the python file <code>TwoHiggsModels.py</code>. 
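As an illustration (the datacard name, mass value, and output file name here are placeholders, not inputs provided with the documentation), the unconstrained two-Higgs-boson model listed in the table below could be built with <pre><code>text2workspace.py datacard.txt -m 300 -P HiggsAnalysis.CombinedLimit.TwoHiggsModels:twoHiggsUnconstrained -o ws_twoHiggs.root\n</code></pre> 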
For each of these models, we assume that the SM-like Higgs boson has mass specified by <code>MH_SM</code>, while the additional boson has mass <code>MH</code>.</p> <p>You can produce the model by including the  following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.TwoHiggsModels:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Two Higgs bosons <code>twoHiggsUnconstrained</code> N/A <code>r</code>, <code>r_SM</code> The SM-like Higgs boson signal strength will be <code>r_SM</code> and its mass will be assumed to be <code>MH_SM</code> (default value 125.8), while the additional Higgs boson signal strength will be scaled by <code>r</code> and assumed to have mass <code>MH</code>. Singlet Mixing Model <code>singletMixing</code> <code>--PO BSMDecays</code>,<code>--PO UseVisibleMu</code> <code>r</code>,<code>BR_BSM</code> Without any options, the SM like Higgs boson will have signal strength <code>r</code>, while the additional Higgs boson is scaled by <code>1-r</code> and <code>BR_BSM</code> will not be a POI. If the option <code>BSMDecays</code> is included, the additional boson's signal strength will be <code>r(1-BR_BSM)</code> and the SM like Higgs boson will have signal strength of <code>1-r</code>. If the option <code>UseVisibleMu</code> is included too, then instead, the additional Higgs boson will get a signal strength <code>r</code> while the SM one will have <code>1-r(1-BR_BSM)</code>. Singlet Mixing Model for Exclusions <code>singletMixingInvisible</code> <code>--PO BSMDecays</code> <code>r</code>,<code>x</code>,<code>BR_BSM</code> The SM like Higgs boson will be scaled by <code>r*x</code> while the additional boson is scaled by <code>r*(1-x)</code>. If the option <code>BSMDecays</code> is included, then <code>BR_BSM</code> is also a POI and the additional Higgs boson signal strength is scaled accounting for this BSM branching fraction <code>r*(1-x)*(1-BR_BSM)</code>. Two Higgs bosons with \\(c_{V}\\), \\(c_{F}\\) couplings <code>twoHiggsCvCf</code> N/A <code>CV</code>, <code>CF</code> Both Higgs bosons signal strengths scale accoring to the coupling to vector bosons <code>CV</code> and fermions <code>CF</code> where the scaling is determined for each contributing production/decay vertex. In this case, the additioal boson is assumed to follow the same coupling structure (i.e the SM like Higgs boson couplings)."},{"location":"part2/bsm-higgs-models/#two-higgs-doublet-models","title":"Two Higgs Doublet Models","text":"<p>In these models, the couplings of the SM-like Higgs boson are modified according to the type of 2HDM with parameters \\(\\cos(\\beta-\\alpha)\\) and \\(\\tan\\beta\\). The two Higgs doublet models are implemented in the python file <code>AdditionalModels.py</code>.  In this model, the Higgs boson mass is <code>MH</code>, and it can be promoted to a POI by including the option <code>--PO higgsMassRange=[low,high]</code>.</p> <p>You can produce the model by including the  following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.AdditionalModels:model_name\n</code></pre> <code>model</code> <code>--PO</code> POIs Description 2HDM Type-1 <code>twohdm</code> <code>--PO thdmtype=1</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to fermions and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). 
The couplings dependencies are \\(\\kappa_V = \\sqrt(1-\\cos^{2}(\\beta-\\alpha))\\), \\(\\kappa_{u}=\\kappa_{d}=\\cos(\\alpha)/\\sin(\\beta)\\). 2HDM Type-2 <code>twohdm</code> <code>--PO thdmtype=2</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to fermions and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The couplings dependencies are \\(\\kappa_V = \\sqrt(1-\\cos^{2}(\\beta-\\alpha))\\), \\(\\kappa_{u}=\\cos(\\alpha)/\\sin(\\beta)\\), \\(\\kappa_{d}=-\\sin(\\alpha)/\\cos(\\beta)\\). 2HDM Type-3 <code>twohdm</code> <code>--PO thdmtype=3</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to quarks, leptons and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The couplings dependencies are \\(\\kappa_V = \\sqrt(1-\\cos^{2}(\\beta-\\alpha))\\), \\(\\kappa_{u}=\\kappa_{d}=\\cos(\\alpha)/\\sin(\\beta)\\), \\(\\kappa_{l}=-\\sin(\\alpha)/\\cos(\\beta)\\). 2HDM Type-4 <code>twohdm</code> <code>--PO thdmtype=4</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to quarks, leptons and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The couplings dependencies are \\(\\kappa_V = \\sqrt(1-\\cos^{2}(\\beta-\\alpha))\\), \\(\\kappa_{u}=\\kappa_{l}=\\cos(\\alpha)/\\sin(\\beta)\\), \\(\\kappa_{d}=-\\sin(\\alpha)/\\cos(\\beta)\\)."},{"location":"part2/bsm-higgs-models/#fermiophobic-higgs-model","title":"Fermiophobic Higgs Model","text":"<p>This model is for the case where the additional Higgs boson does not couple to fermions. The fermiophobic Higgs model is implemented in the python file <code>HiggsFermiophobic.py</code>. In this model, the Higgs boson mass is <code>MH</code>, and it can be promoted to a POI by including the option <code>--PO higgsMassRange=[low,high]</code>.</p> <p>You can produce the model by including the  following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.HiggsFermiophobic:model_name\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Fermiophobic Higgs <code>fp</code> N/A <code>r</code> The Higgs boson signal strength will be <code>r</code> for any production/decay that involves vector boson couplints only. The branching ratios are recalculated assuming no Higgs boson couplings to fermions."},{"location":"part2/higgscouplings/","title":"Physics Models for SM Higgs Boson Couplings","text":"<p>This page lists the physics models that can be used to perform measurements of the Higgs Boson couplings in the Standard Model (SM). These models follow the recommendations of the LHC Higgs Cross Section Working Group LHCHXSWG YR3 and have been used in the combination of the ATLAS and CMS measurements of the Higgs boson properties.</p>"},{"location":"part2/higgscouplings/#lhc-hcg-models","title":"LHC HCG Models","text":"<p>The following models are used in the LHC Higgs Combination Group (LHC HCG) to perform measurements of the Higgs boson couplings. They are well defined only for a SM Higgs boson mass of a few GeV around the measured value \\(m_H \\approx 125\\) GeV. 
The models are implemented in the python file  <code>LHCHCGModels.py</code>.</p> <p>You can produce the model by including the  following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.LHCHCGModels:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Couplings with resolved loops <code>K1</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_W</code>,<code>kappa_Z</code>,<code>kappa_b</code>, <code>kappa_t</code>, <code>kappa_tau</code>,<code>kappa_mu</code> Higgs boson couplings to fermions and bosons in loops such as the \\(gg\\to H\\) and \\(H\\to\\gamma\\gamma\\) loops are scaled using the appropriate SM Higgs couplings. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), \\(H\\to Z\\gamma\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings with effective loops <code>K2</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kapppa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>kappa_b</code>, <code>kappa_t</code>, <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\). By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2Width</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kapppa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>,<code>c7_Gscal_tot</code>, <code>kappa_t</code>, <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\). In this model, the total Higgs width is allowed to float by effectively replacing the coupling \\(\\kappa_{b}\\) as a parameter of interest. 
By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2Inv</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kapppa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>,<code>kappa_b</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this). The total width is modified accordingly. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2InvC</code> <code>--PO dohmm</code>, <code>--PO dohzg</code>, <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kapppa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>kappa_b</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code>,<code>kappa_c</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this), and the coupling to charm quarks is included as a parameter of interest.  The total width is modified accordingly. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. 
<code>K2Undet</code> <code>--PO dohmm</code>, <code>--PO dohzg</code>, <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kapppa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>kappa_b</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code>,<code>BRundet</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this), and the undetected decay modes are scaled by <code>BRundet</code>.  The total width is modified accordingly. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2UndetWidth</code> <code>--PO dohmm</code>, <code>--PO dohzg</code>, <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kapppa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>c7_Gscal_tot</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code>,<code>BRundet</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this), and the undetected decay modes are scaled by <code>BRundet</code>.  The total width Higgs width is allowed to float by effectively replacing the coupling \\(\\kappa_{b}\\) as a parameter of interest. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings to vector bosons and fermions <code>K3</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_V</code>,<code>kappa_F</code> Higgs boson couplings to bosons and fermions. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. 
By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), \\(H\\to Z\\gamma\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K3Inv</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_V</code>,<code>kappa_F</code>, <code>BRinv</code> Higgs boson couplings to bosons and fermions and free floating branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this). By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), \\(H\\to Z\\gamma\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Ratios of coupling modifiers <code>L1</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_WZ</code>,<code>lambda_Zg</code>, <code>lambda_bZ</code>,<code>lambda_gamZ</code>,<code>lambda_tauZ</code>,<code>lambda_muZ</code>,<code>lambda_tg</code>,<code>kappa_gZ</code> Ratios of coupling modifiers \\(\\lambda_{WZ}=\\kappa_W/\\kappa_Z\\), \\(\\lambda_{Zg}=\\kappa_Z/\\kappa_{g}\\), \\(\\lambda_{bZ}=\\kappa_b/\\kappa_{Z}\\), \\(\\lambda_{\\gamma Z}=\\kappa_{\\gamma}/\\kappa_{Z}\\), \\(\\lambda_{\\tau Z}=\\kappa_{\\tau}/\\kappa_{Z}\\), \\(\\lambda_{\\mu Z}=\\kappa_{\\mu}/\\kappa_{Z}\\), \\(\\lambda_{tg}=\\kappa_{t}/\\kappa_{g}\\), \\(\\kappa_{gZ}=\\kappa_{g} \\kappa_{Z}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\)  are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <p>The next models are constrained models to study a limited set of ratios of couplings. These are particularly useful to study models such as 2HDM that modify the SM Higgs couplings in a specific way. 
The models are implemented in the python file  <code>HiggsCouplings.py</code>.</p> <p>You can produce the model by including the  following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.HiggsCouplings:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Ratios of up/down fermion couplings <code>lambdadu</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_du</code>,<code>lambda_Vu</code>,<code>kappa_uu</code> Ratios of coupling modifiers \\(\\lambda_{du}=\\kappa_d/\\kappa_u\\), \\(\\lambda_{Vu}=\\kappa_{V}/\\kappa_{u}\\), \\(\\kappa_{uu}=\\kappa_{u}^{2}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\)  are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Ratios of lepton to quark couplings <code>lambdalq</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_lq</code>,<code>lambda_Vq</code>,<code>kappa_qq</code> Ratios of coupling modifiers \\(\\lambda_{lq}=\\kappa_l/\\kappa_q\\), \\(\\lambda_{Vq}=\\kappa_{V}/\\kappa_{q}\\), \\(\\kappa_{qq}=\\kappa_{q}^{2}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\)  are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Ratios of fermion to vector boson couplings <code>lambdafv</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_FV</code>,<code>kappa_VV</code> Ratios of coupling modifiers \\(\\lambda_{fV}=\\kappa_f/\\kappa_V\\) \\(\\kappa_{VV}=\\kappa_{V}^{2}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\)  are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range."},{"location":"part2/higgscouplings/#outdated-couplings-modifer-models","title":"Outdated couplings modifer models","text":"<p>These models were used early in the Higgs discovery, but are now considered outdated. They are still available for backward compatibility and for simple studies, but are generally not recommended. 
The models are implemented in the python file  <code>HiggsCouplings.py</code>.</p> <p>You can produce the model by including the  following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.HiggsCouplings:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Custodial Symmetry Model <code>cWZ</code> <code>--PO higgsMassRange=x,y</code> <code>Cz</code>,<code>Cwz</code> Ratio of couplings to W bosons and Z bosons. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings with universal up or down-type fermion couplings <code>c5udHiggs</code> <code>--PO universalCF</code>, <code>--PO higgsMassRange=x,y</code> <code>Cg</code>,<code>Cv</code>,<code>Cglu</code>,<code>Cu</code>,<code>Cd</code> Treat photon, vector-boson and gluon coupling as independent couplings. Up-type and down-type fermions have independent couplings. include <code>universalCF</code> to replace <code>Cu</code> and <code>Cd</code> with universal fermion coupling <code>Cf</code>. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings with universal lepton or quark couplings <code>c5qlHiggs</code> <code>--PO universalCF</code>, <code>--PO higgsMassRange=x,y</code> <code>Cg</code>,<code>Cv</code>,<code>Cglu</code>,<code>Cq</code>,<code>Cl</code> Treat photon, vector-boson and gluon coupling as independent couplings. quark and lepton fermions have independent couplings. include <code>universalCF</code> to replace <code>Cq</code> and <code>Cl</code> with universal fermion coupling <code>Cf</code>. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range."},{"location":"part2/physicsmodels/","title":"Introduction to Physics Models","text":"<p>Combine can be run directly on the text-based datacard. However, for more advanced physics models, the internal step to convert the datacard to a binary workspace should be performed by the user. To create a binary workspace starting from a <code>datacard.txt</code>, you can run</p> <pre><code>text2workspace.py datacard.txt -o workspace.root\n</code></pre> <p>By default (without the <code>-o</code> option), the binary workspace will be named <code>datacard.root</code> - i.e the .txt suffix will be replaced by .root.</p> <p>A full set of options for <code>text2workspace</code> can be found by running <code>text2workspace.py --help</code>.</p> <p>The default model that will be produced when running <code>text2workspace</code> is one in which all processes identified as signal are multiplied by a common multiplier r. This is all that is needed for simply setting limits or calculating significances.</p> <p><code>text2workspace</code> will convert the datacard into a PDF that summarizes the analysis. 
For example, let's take a look at the data/tutorials/counting/simple-counting-experiment.txt datacard.</p> <pre><code># Simple counting experiment, with one signal and one background process\n# Extremely simplified version of the 35/pb H-&gt;WW analysis for mH = 200 GeV,\n# for 4th generation exclusion (EWK-10-009, arxiv:1102.5429v1)\nimax 1  number of channels\njmax 1  number of backgrounds\nkmax 2  number of nuisance parameters (sources of systematical uncertainties)\n------------\n# we have just one channel, in which we observe 0 events\nbin         1\nobservation 0\n------------\n# now we list the expected events for signal and all backgrounds in that bin\n# the second 'process' line must have a positive number for backgrounds, and 0 for signal\n# then we list the independent sources of uncertainties, and give their effect (syst. error)\n# on each process and bin\nbin             1      1\nprocess       ggh4G  Bckg\nprocess         0      1\nrate           4.76  1.47\n------------\ndeltaS  lnN    1.20    -    20% uncertainty on signal\ndeltaB  lnN      -   1.50   50% uncertainty on background\n</code></pre> <p>If we run <code>text2workspace.py</code> on this datacard and take a look at the workspace (<code>w</code>) inside the <code>.root</code> file produced, we will find a number of different objects representing the signal, background, and observed event rates, as well as the nuisance parameters and signal strength \\(r\\). Note that often in the statistics literature, this parameter is referred to as \\(\\mu\\).</p> <p>From these objects, the necessary PDF has been constructed (named <code>model_s</code>). For this counting experiment we will expect a simple PDF of the form</p> \\[ p(n_{\\mathrm{obs}}| r,\\nu_{S},\\nu_{B})\\propto \\dfrac{[r\\cdot n_{S}(\\nu_{S})+n_{B}(\\nu_{B})]^{n_{\\mathrm{obs}}} } {n_{\\mathrm{obs}}!}e^{-[r\\cdot n_{S}(\\nu_{S})+n_{B}(\\nu_{B})]} \\cdot e^{-\\frac{1}{2}(\\nu_{S}- y_{S})^{2}} \\cdot e^{-\\frac{1}{2}(\\nu_{B}- y_{B})^{2}} \\] <p>where the expected signal and background rates are expressed as functions of the nuisance parameters, \\(n_{S}(\\nu_{S}) = 4.76(1+0.2)^{\\nu_{S}}~\\) and \\(~n_{B}(\\nu_{B}) = 1.47(1+0.5)^{\\nu_{B}}\\). The \\(y_{S},~y_{B}\\) are the auxiliary observables. In the code, these will have the same name as the corresponding nuisance parameter, with the extension <code>_In</code>.</p> <p>The first term represents the usual Poisson expression for observing \\(n_{\\mathrm{obs}}\\) events, while the second two are the Gaussian constraint terms for the nuisance parameters. In this case \\({y_S}={y_B}=0\\), and the widths of both Gaussians are 1.</p> <p>A combination of counting experiments (or a binned shape datacard) will look like a product of PDFs of this kind. For parametric/unbinned analyses, the PDF for each process in each channel is provided instead of the using the Poisson terms and a product runs over the bin counts/events.</p>"},{"location":"part2/physicsmodels/#model-building","title":"Model building","text":"<p>For more complex models, <code>PhysicsModels</code> can be produced. 
To use a different physics model instead of the default one, use the option <code>-P</code> as in</p> <pre><code>text2workspace.py datacard -P HiggsAnalysis.CombinedLimit.PythonFile:modelName\n</code></pre> <p>Generic models can be implemented by writing a python class that:</p> <ul> <li>defines the model parameters (by default it is just the signal strength modifier <code>r</code>)</li> <li>defines how signal and background yields depend on the parameters (by default, the signal scales linearly with <code>r</code>, backgrounds are constant)</li> <li>potentially also modifies the systematic uncertainties (e.g. switch off theory uncertainties on cross section when measuring the cross section itself)</li> </ul> <p>In the case of SM-like Higgs boson measurements, the class should inherit from <code>SMLikeHiggsModel</code> (redefining <code>getHiggsSignalYieldScale</code>), while beyond that one can inherit from <code>PhysicsModel</code>. You can find some examples in PhysicsModel.py.</p> <p>In the 4-process model (<code>PhysicsModel:floatingXSHiggs</code>), you will see that each of the 4 dominant Higgs boson production modes gets a separate scaling parameter, <code>r_ggH</code>, <code>r_qqH</code>, <code>r_ttH</code> and <code>r_VH</code> (or <code>r_ZH</code> and <code>r_WH</code>) as defined in,</p> <pre><code>def doParametersOfInterest(self):\n  \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n  # --- Signal Strength as only POI ---\n  if \"ggH\" in self.modes: self.modelBuilder.doVar(\"r_ggH[1,%s,%s]\" % (self.ggHRange[0], self.ggHRange[1]))\n  if \"qqH\" in self.modes: self.modelBuilder.doVar(\"r_qqH[1,%s,%s]\" % (self.qqHRange[0], self.qqHRange[1]))\n  if \"VH\"  in self.modes: self.modelBuilder.doVar(\"r_VH[1,%s,%s]\"  % (self.VHRange [0], self.VHRange [1]))\n  if \"WH\"  in self.modes: self.modelBuilder.doVar(\"r_WH[1,%s,%s]\"  % (self.WHRange [0], self.WHRange [1]))\n  if \"ZH\"  in self.modes: self.modelBuilder.doVar(\"r_ZH[1,%s,%s]\"  % (self.ZHRange [0], self.ZHRange [1]))\n  if \"ttH\" in self.modes: self.modelBuilder.doVar(\"r_ttH[1,%s,%s]\" % (self.ttHRange[0], self.ttHRange[1]))\n  poi = \",\".join([\"r_\"+m for m in self.modes])\n  if self.pois: poi = self.pois\n  ...\n</code></pre> <p>The mapping of which POI scales which process is handled via the following function,</p> <pre><code>def getHiggsSignalYieldScale(self,production,decay, energy):\n  if production == \"ggH\": return (\"r_ggH\" if \"ggH\" in self.modes else 1)\n  if production == \"qqH\": return (\"r_qqH\" if \"qqH\" in self.modes else 1)\n  if production == \"ttH\": return (\"r_ttH\" if \"ttH\" in self.modes else (\"r_ggH\" if self.ttHasggH else 1))\n  if production in [ \"WH\", \"ZH\", \"VH\" ]: return (\"r_VH\" if \"VH\" in self.modes else 1)\n  raise RuntimeError, \"Unknown production mode '%s'\" % production\n</code></pre> <p>You should note that <code>text2workspace</code> will look for the python module in <code>PYTHONPATH</code>. 
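As a rough sketch of the ingredients listed above (the file, class, and parameter names here are hypothetical and not part of Combine), a minimal custom model kept in a local file <code>MyModel.py</code> could look like:</p> <pre><code>from HiggsAnalysis.CombinedLimit.PhysicsModel import PhysicsModel\n\nclass MyScalingModel(PhysicsModel):\n    \"\"\"Hypothetical model: scale all signals by r and all backgrounds by r_bkg.\"\"\"\n\n    def doParametersOfInterest(self):\n        # define the model parameters and declare which of them are POIs\n        self.modelBuilder.doVar(\"r[1,0,10]\")\n        self.modelBuilder.doVar(\"r_bkg[1,0.5,1.5]\")\n        self.modelBuilder.doSet(\"POI\", \"r\")\n\n    def getYieldScale(self, bin, process):\n        # every signal yield is multiplied by r, every background by r_bkg\n        return \"r\" if self.DC.isSignal[process] else \"r_bkg\"\n\nmyScalingModel = MyScalingModel()\n</code></pre> <p>Such a model would then be used with <code>-P MyModel:myScalingModel</code>, provided <code>MyModel.py</code> can be found by python. 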
If you want to keep your model local, you'll need to add the location of the python file to <code>PYTHONPATH</code>.</p> <p>A number of models used in the LHC Higgs combination paper can be found in LHCHCGModels.py.</p> <p>The models can be applied to the datacard by using the <code>-P</code> option, for example <code>-P HiggsAnalysis.CombinedLimit.HiggsCouplings:c7</code>, and others that are defined in HiggsCouplings.py.</p> <p>Below are some (more generic) example models that also exist in GitHub.</p>"},{"location":"part2/physicsmodels/#multisignalmodel-ready-made-model-for-multiple-signal-processes","title":"MultiSignalModel ready made model for multiple signal processes","text":"<p>Combine already contains a model <code>HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel</code> that can be used to assign different signal strengths to multiple processes in a datacard, configurable from the command line.</p> <p>The model is configured by passing one or more mappings in the form <code>--PO 'map=bin/process:parameter'</code> to text2workspace:</p> <ul> <li><code>bin</code> and <code>process</code> can be arbitrary regular expressions matching the bin names and process names in the datacard.     Note that mappings are applied both to signals and to background processes; if a line matches multiple mappings, precedence is given to the last one in the order they are in the command line.     It is suggested to put quotes around the argument of <code>--PO</code> so that the shell does not try to expand any <code>*</code> signs in the patterns.</li> <li><code>parameter</code> is the POI to use to scale that process (<code>name[starting_value,min,max]</code> the first time a parameter is defined, then just <code>name</code> if used more than once).     Special values are <code>1</code> and <code>0</code>; <code>0</code> means "drop the process completely from the model", while <code>1</code> means to "keep the yield as is in the card with no scaling" (as normally done for backgrounds); <code>1</code> is the default that is applied to processes that have no mappings. Therefore it is normally not needed, but it may be used to override a previous more generic match in the same command line (e.g. 
<code>--PO 'map=.*/ggH:r[1,0,5]' --PO 'map=bin37/ggH:1'</code> would treat ggH as signal in general, but count it as background in the channel <code>bin37</code>).</li> </ul> <p>Passing the additional option <code>--PO verbose</code> will set the code to verbose mode, printing out the scaling factors for each process; we encourage the use this option to make sure that the processes are being scaled correctly.</p> <p>The MultiSignalModel will define all parameters as parameters of interest, but that can be then changed from the command line, as described in the following subsection.</p> <p>Some examples, taking as reference the toy datacard test/multiDim/toy-hgg-125.txt:</p> <ul> <li>Scale both <code>ggH</code> and <code>qqH</code> with the same signal strength <code>r</code> (that is what the default physics model of Combine does for all signals; if they all have the same systematic uncertainties, it is also equivalent to adding up their yields and writing them as a single column in the card)</li> </ul> <pre><code>  $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:r[1,0,10]' --PO 'map=.*/qqH:r' toy-hgg-125.txt -o toy-1d.root\n  [...]\n  Will create a POI  r  with factory  r[1,0,10]\n  Mapping  r  to  ['.*/ggH']  patterns\n  Mapping  r  to  ['.*/qqH']  patterns\n  [...]\n  Will scale  incl/bkg  by  1\n  Will scale  incl/ggH  by  r\n  Will scale  incl/qqH  by  r\n  Will scale  dijet/bkg  by  1\n  Will scale  dijet/ggH  by  r\n  Will scale  dijet/qqH  by  r\n</code></pre> <ul> <li>Define two independent parameters of interest <code>r_ggH</code> and <code>r_qqH</code></li> </ul> <pre><code>  $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:r_ggH[1,0,10]' --PO 'map=.*/qqH:r_qqH[1,0,20]' toy-hgg-125.txt -o toy-2d.root\n  [...]\n  Will create a POI  r_ggH  with factory  r_ggH[1,0,10]\n  Mapping  r_ggH  to  ['.*/ggH']  patterns\n  Will create a POI  r_qqH  with factory  r_qqH[1,0,20]\n  Mapping  r_qqH  to  ['.*/qqH']  patterns\n  [...]\n  Will scale  incl/bkg  by  1\n  Will scale  incl/ggH  by  r_ggH\n  Will scale  incl/qqH  by  r_qqH\n  Will scale  dijet/bkg  by  1\n  Will scale  dijet/ggH  by  r_ggH\n  Will scale  dijet/qqH  by  r_qqH\n</code></pre> <ul> <li>Fix <code>ggH</code> to SM, define only <code>qqH</code> as parameter</li> </ul> <pre><code>  $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:1' --PO 'map=.*/qqH:r_qqH[1,0,20]' toy-hgg-125.txt -o toy-1d-qqH.root\n  [...]\n  Mapping  1  to  ['.*/ggH']  patterns\n  Will create a POI  r_qqH  with factory  r_qqH[1,0,20]\n  Mapping  r_qqH  to  ['.*/qqH']  patterns\n  [...]\n  Will scale  incl/bkg  by  1\n  Will scale  incl/ggH  by  1\n  Will scale  incl/qqH  by  r_qqH\n  Will scale  dijet/bkg  by  1\n  Will scale  dijet/ggH  by  1\n  Will scale  dijet/qqH  by  r_qqH\n</code></pre> <ul> <li>Drop <code>ggH</code> , and define only <code>qqH</code> as parameter</li> </ul> <pre><code> $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:0' --PO 'map=.*/qqH:r_qqH[1,0,20]' toy-hgg-125.txt -o toy-1d-qqH0-only.root\n [...]\n Mapping  0  to  ['.*/ggH']  patterns\n Will create a POI  r_qqH  with factory  r_qqH[1,0,20]\n Mapping  r_qqH  to  ['.*/qqH']  patterns\n [...]\n Will scale  incl/bkg  by  1\n Will scale  incl/ggH  by  0\n Will scale  incl/qqH  by  r_qqH\n Will scale  dijet/bkg  by  1\n Will scale  
dijet/ggH  by  0\n Will scale  dijet/qqH  by  r_qqH\n</code></pre>"},{"location":"part2/physicsmodels/#two-hypothesis-testing","title":"Two Hypothesis testing","text":"<p>The <code>PhysicsModel</code> that encodes the signal model above is the twoHypothesisHiggs, which assumes signal processes with suffix _ALT will exist in the datacard. An example of such a datacard can be found under data/benchmarks/simple-counting/twoSignals-3bin-bigBSyst.txt</p> <pre><code> $ text2workspace.py twoSignals-3bin-bigBSyst.txt -P HiggsAnalysis.CombinedLimit.HiggsJPC:twoHypothesisHiggs -m 125.7 --PO verbose -o jcp_hww.root\n\n MH (not there before) will be assumed to be 125.7\n Process  S  will get norm  not_x\n Process  S_ALT  will get norm  x\n Process  S  will get norm  not_x\n Process  S_ALT  will get norm  x\n Process  S  will get norm  not_x\n Process  S_ALT  will get norm  x\n</code></pre> <p>The two processes (S and S_ALT) will get different scaling parameters. The LEP-style likelihood for hypothesis testing can now be used by setting x or not_x to 1 and 0 and comparing the two likelihood evaluations.</p>"},{"location":"part2/physicsmodels/#signal-background-interference","title":"Signal-background interference","text":"<p>Since negative probability distribution functions do not exist, the recommended way to implement this is to start from the expression for the individual amplitudes \\(A\\) and the parameter of interest \\(k\\),</p> \\[ \\mathrm{Yield} = |k * A_{s} + A_{b}|^2 = k^2 * |A_{s}|^2 + k * 2 \\Re(A_{s}^* A_{b}) + |A_{b}|^2 = \\mu * S + \\sqrt{\\mu} * I + B \\] <p>where</p> <p>\\(\\mu = k^2, ~S = |A_{s}|^2,~B = |A_b|^2\\) and \\(S+B+I = |A_s + A_b|^2\\).</p> <p>With some algebra you can work out that,</p> <p>\\(\\mathrm{Yield} = \\sqrt{\\mu} * \\left[S+B+I\\right] + (\\mu-\\sqrt{\\mu}) * \\left[S\\right] + (1-\\sqrt{\\mu}) * \\left[B\\right]\\)</p> <p>where square brackets represent the input (histograms as <code>TH1</code> or <code>RooDataHists</code>) that one needs to provide.</p> <p>An example of this scheme is implemented in a HiggsWidth and is completely general, since all of the three components above are strictly positive. In this example, the POI is <code>CMS_zz4l_mu</code> and the equations for the three components are scaled (separately for the qqH and ggH processes) as,</p> <pre><code> self.modelBuilder.factory_( \"expr::ggH_s_func(\\\"@0-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::ggH_b_func(\\\"1-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::ggH_sbi_func(\\\"sqrt(@0)\\\", CMS_zz4l_mu)\")\n\n self.modelBuilder.factory_( \"expr::qqH_s_func(\\\"@0-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::qqH_b_func(\\\"1-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::qqH_sbi_func(\\\"sqrt(@0)\\\", CMS_zz4l_mu)\")\n</code></pre>"},{"location":"part2/physicsmodels/#multi-process-interference","title":"Multi-process interference","text":"<p>The above formulation can be extended to multiple parameters of interest (POIs). See AnalyticAnomalousCoupling for an example. However, the computational performance scales quadratically with the number of POIs, and can get extremely expensive for 10 or more, as may be encountered often with EFT analyses. To alleviate this issue, an accelerated interference modeling technique is implemented for template-based analyses via the <code>interferenceModel</code> physics model. 
In this model, each bin yield \\(y\\) is parameterized</p> \\[ y(\\vec{\\mu}) = y_0 (\\vec{\\mu}^\\top M \\vec{\\mu}) \\] <p>as a function of the POI vector \\(\\vec{\\mu}\\), a nominal template \\(y_0\\), and a scaling matrix \\(M\\). To see how this parameterization relates to that of the previous section, we can define:</p> \\[ y_0 = A_b^2, \\qquad M = \\frac{1}{A_b^2} \\begin{bmatrix}  |A_s|^2 &amp; \\Re(A_s^* A_b) \\\\  \\Re(A_s A_b^*) &amp; |A_b|^2  \\end{bmatrix}, \\qquad \\vec{\\mu} = \\begin{bmatrix}  \\sqrt{\\mu} \\\\  1  \\end{bmatrix} \\] <p>which leads to the same parameterization. At present, this technique only works with <code>CMSHistFunc</code>-based workspaces, as these are the most common workspace types encountered and the default when using autoMCStats. To use this model, for each bin find \\(y_0\\) and put it into the datacard as a signal process, then find \\(M\\) and save the lower triangular component as an array in a <code>scaling.json</code> file with a syntax as follows:</p> <pre><code>[\n  {\n    \"channel\": \"my_channel\",\n    \"process\": \"my_nominal_process\",\n    \"parameters\": [\"sqrt_mu[1,0,2]\", \"Bscaling[1]\"],\n    \"scaling\": [\n      [0.5, 0.1, 1.0],\n      [0.6, 0.2, 1.0],\n      [0.7, 0.3, 1.0]\n    ]\n  }\n]\n</code></pre> <p>where the parameters are declared using RooFit's factory syntax and each row of the <code>scaling</code> field represents the scaling information of a bin, e.g. if \\(y_0 = |A_b|^2\\) then each row would contain three entries:</p> \\[ |A_s|^2 / |A_b|^2,\\quad \\Re(A_s^* A_b)/|A_b|^2,\\quad 1 \\] <p>For several coefficients, one would enumerate as follows: <pre><code>scaling = []\nfor ibin in range(nbins):\n    binscaling = []\n    for icoef in range(ncoef):\n        for jcoef in range(icoef + 1):\n            binscaling.append(amplitude_squared_for(ibin, icoef, jcoef))\n    scaling.append(binscaling)\n</code></pre></p> <p>Then, to construct the workspace, run</p> <p><pre><code>text2workspace.py card.txt -P HiggsAnalysis.CombinedLimit.InterferenceModels:interferenceModel \\\n    --PO verbose --PO scalingData=scaling.json\n</code></pre> For large amounts of scaling data, you can optionally use gzipped json (<code>.json.gz</code>) or pickle (<code>.pkl.gz</code>) files with 2D numpy arrays for the scaling coefficients instead of lists. The function <code>numpy.tril_indices(ncoef)</code> is helpful for extracting the lower triangle of a square matrix.</p> <p>You could pick any nominal template, and adjust the scaling as appropriate. Generally it is advisable to use a nominal template corresponding to near where you expect the best-fit values of the POIs to be so that the shape systematic effects are well-modeled in that region.</p> <p>It may be the case that the relative contributions of the terms are themselves a function of the POIs. For example, in VBF di-Higgs production, BSM modifications to the production rate can be parameterized in the \"kappa\" framework via three diagrams, with scaling coefficients \\(\\kappa_V \\kappa_\\lambda\\), \\(\\kappa_V^2\\), and \\(\\kappa_{2V}\\), respectively, that interfere.  
In that case, you can declare formulas with the factory syntax to represent each amplitude as follows:</p> <pre><code>[\n  {\n    \"channel\": \"a_vbf_channel\",\n    \"process\": \"VBFHH\",\n    \"parameters\": [\"expr::a0('@0*@1', kv[1,0,2], kl[1,0,2])\", \"expr::a1('@0*@0', kv[1,0,2])\", \"k2v[1,0,2]\"],\n    \"scaling\": [\n      [3.30353674666415, -8.54170982038222, 22.96464188467882, 4.2353483207128, -11.07996258835088, 5.504469544697623],\n      [2.20644332142891, -7.076836641962523, 23.50989689214267, 4.053185685866683, -13.08569222837996, 7.502346155380032]\n    ]\n  }\n]\n</code></pre> <p>However, you will need to manually specify what the POIs should be when creating the workspace using the <code>POIs=</code> physics option, e.g.</p> <pre><code>text2workspace.py card.txt -P HiggsAnalysis.CombinedLimit.InterferenceModels:interferenceModel \\\n  --PO scalingData=scaling.json --PO 'POIs=kl[1,0,2]:kv[1,0,2]:k2v[1,0,2]'\n</code></pre>"},{"location":"part2/settinguptheanalysis/","title":"Preparing the datacard","text":"<p>The input to Combine, which defines the details of the analysis, is a plain ASCII file we will refer to as datacard. This is true whether the analysis is a simple counting experiment or a shape analysis.</p>"},{"location":"part2/settinguptheanalysis/#a-simple-counting-experiment","title":"A simple counting experiment","text":"<p>The file data/tutorials/counting/realistic-counting-experiment.txt shows an example of a counting experiment.</p> <p>The first lines can be used to add some descriptive information. Those lines must start with a \"#\", and they are not parsed by Combine:</p> <pre><code># Simple counting experiment, with one signal and a few background processes\n# Simplified version of the 35/pb H-&gt;WW analysis for mH = 160 GeV\n</code></pre> <p>Following this, one declares the number of observables, <code>imax</code>, that are present in the model used to set limits / extract confidence intervals. The number of observables will typically be the number of channels in a counting experiment. The value <code>*</code> can be specified for <code>imax</code>, which tells Combine to determine the number of observables from the rest of the datacard. In order to better catch mistakes, it is recommended to explicitly specify the value. </p> <pre><code>imax 1  number of channels\n</code></pre> <p>This declaration is followed by a specification of the number of background sources to be considered, <code>jmax</code>, and the number of independent sources of systematic uncertainty, <code>kmax</code>:</p> <pre><code>jmax 3  number of backgrounds\nkmax 5  number of nuisance parameters (sources of systematic uncertainty)\n</code></pre> <p>In the example there is 1 channel, there are 3 background sources, and there are 5 independent sources of systematic uncertainty.</p> <p>After providing this information, the following lines describe what is observed in data: the number of events observed in each channel. The first line, starting with <code>bin</code>, defines the label used for each channel. 
In the example we have 1 channel, labelled <code>bin1</code>, and in the following line, <code>observation</code>, the number of observed events is given: <code>0</code> in this example.</p> <p><pre><code># we have just one channel, in which we observe 0 events\nbin bin1\nobservation 0\n</code></pre> This is followed by information related to the expected number of events, for each bin and process, arranged in (#channels)*(#processes) columns.</p> <pre><code>bin          bin1     bin1     bin1     bin1\nprocess         ggH  qqWW  ggWW  others\nprocess          0     1     2     3\nrate           1.47  0.63  0.06  0.22\n</code></pre> <ul> <li>The <code>bin</code> line identifies the channel that the column refers to. It ranges from <code>1</code> to the value of <code>imax</code> declared above.</li> <li>The first <code>process</code> line contains the names of the various process sources</li> <li>The second <code>process</code> line is a numerical process identifier. Backgrounds are given a positive number, while <code>0</code> and negative numbers are used for signal processes. Different process identifiers must be used for different processes.</li> <li>The last line, <code>rate</code>, gives the expected number of events for the given process in the specified bin</li> </ul> <p>If a process does not contribute in a given bin, it can be removed from the datacard, or the rate can be set to 0.</p> <p>The final section of the datacard describes the systematic uncertainties:</p> <pre><code>lumi    lnN    1.11    -   1.11    -    lumi affects both signal and gg-&gt;WW (mc-driven). lnN = lognormal\nxs_ggH  lnN    1.16    -     -     -    gg-&gt;H cross section + signal efficiency + other minor ones.\nWW_norm gmN 4    -   0.16    -     -    WW estimate of 0.64 comes from sidebands: 4 events in sideband times 0.16 (=&gt; ~50% statistical uncertainty)\nxs_ggWW lnN      -     -   1.50    -    50% uncertainty on gg-&gt;WW cross section\nbg_others lnN    -     -     -   1.30   30% uncertainty on the rest of the backgrounds\n</code></pre> <ul> <li>The first column is the name of the nuisance parameter, a label that is used to identify the uncertainty</li> <li>The second column identifies the type of distribution used to describe the nuisance parameter<ul> <li><code>lnN</code> stands for Log-normal, which is the recommended choice for multiplicative corrections (efficiencies, cross sections, ...).     If \u0394x/x is the relative uncertainty in the multiplicative correction, one should put 1+\u0394x/x in the column corresponding to the process and channel. Asymmetric log-normals are instead supported by providing \u03ba<sub>down</sub>/\u03ba<sub>up</sub> where \u03ba<sub>down</sub> is the ratio of the yield to the nominal value for a -1\u03c3 deviation of the nuisance parameter and \u03ba<sub>up</sub> is the ratio of the yield to the nominal value for a \\(+1\\sigma\\) deviation. Note that for a single-value log-normal with value \\(\\kappa=1+\\Delta x/x\\), the yield of the process it is associated with is multiplied by \\(\\kappa^{\\theta}\\). At \\(\\theta=0\\) the nominal yield is retained, at \\(\\theta=1\\sigma\\) the yield is multiplied by \\(\\kappa\\) and at \\(\\theta=-1\\sigma\\) the yield is multiplied by \\(1/\\kappa\\). This means that an uncertainty represented as <code>1.2</code> does not multiply the nominal yield by 0.8 at \\(\\theta=-1\\sigma\\), but by \\(1/1.2 \\approx 0.833\\). 
It may therefore be desirable to encode large uncertainties that have a symmetric effect on the yield as asymmetric log-normals instead. </li> <li><code>gmN</code> stands for Gamma, and is the recommended choice for the statistical uncertainty in a background determined from the number of events in a control region (or in an MC sample with limited sample size).     If the control region or simulated sample contains N events, and the extrapolation factor from the control region to the signal region is \u03b1, one should put N just after the <code>gmN</code> keyword, and then the value of \u03b1 in the relevant (bin,process) column. The yield specified in the <code>rate</code> line for this (bin,process) combination should equal N\u03b1.</li> <li><code>lnU</code> stands for log-uniform distribution. A value of 1+\u03b5 in the column will imply that the yield of this background is allowed to float freely between x(1+\u03b5) and x/(1+\u03b5). In particular, if \u03b5 is small, this is approximately (x-\u0394x,x+\u0394x) with \u03b5=\u0394x/x.     This distribution is typically useful when you want to set a large a-priori uncertainty on a given background process, and then rely on the correlation between channels to constrain it. Note that for this use case, we usually recommend using a <code>rateParam</code> instead. If you do use <code>lnU</code>, please be aware that while Gaussian-like uncertainties behave in a similar way under profiling and marginalization, uniform uncertainties do not. This means the impact of the uncertainty on the result will depend on how the nuisance parameters are treated. </li> </ul> </li> <li>The next (#channels)*(#processes) columns indicate the relative effect of the systematic uncertainty on the rate of each process in each channel. The columns are aligned with those in the previous lines declaring bins, processes, and rates.</li> </ul> <p>In the example, there are 5 uncertainties:</p> <ul> <li>The first uncertainty has an 11% effect on the signal and on the <code>ggWW</code> process.</li> <li>The second uncertainty affects the signal by 16%, but leaves the background processes unaffected</li> <li>The third line specifies that the <code>qqWW</code> background comes from a sideband with 4 observed events and an extrapolation factor of 0.16; the resulting uncertainty in the expected yield is \\(1/\\sqrt{4+1}\\) = 45%</li> <li>The fourth uncertainty does not affect the signal, has a 50% effect on the <code>ggWW</code> background, and leaves the other backgrounds unaffected</li> <li>The fifth uncertainty does not affect the signal, has a 30% effect on the <code>others</code> background process, and does not affect the remaining backgrounds.</li> </ul>"},{"location":"part2/settinguptheanalysis/#shape-analyses","title":"Shape analyses","text":"<p>The datacard has to be supplemented with two extensions:</p> <ul> <li>A new block of lines defining how channels and processes are mapped into shapes.</li> <li>The block for systematics can now also contain rows with shape uncertainties.</li> </ul> <p>The expected shape can be parametric, or not. In the first case the parametric PDFs have to be given as input to the tool. In the latter case, for each channel, histograms have to be provided for the expected shape of each process. 
The data have to be provided as input as a histogram to perform a binned shape analysis, and as a RooDataSet to perform an unbinned shape analysis.</p> <p>Warning</p> <p>If using RooFit-based inputs (RooDataHists/RooDataSets/RooAbsPdfs) then you need to ensure you are using different RooRealVars as the observable in each category entering the statistical analysis. It is possible to use the same RooRealVar if the observable has the same range (and binning if using binned data) in each category, although in most cases it is simpler to avoid doing this.</p>"},{"location":"part2/settinguptheanalysis/#rates-for-shape-analyses","title":"Rates for shape analyses","text":"<p>As with the counting experiment, the total nominal rate of a given process must be identified in the rate line of the datacard. However, there are special options for shape-based analyses, as follows:</p> <ul> <li>A value of -1 in the rate line means Combine will calculate the rate from the input TH1 (via TH1::Integral) or RooDataSet/RooDataHist (via RooAbsData::sumEntries).</li> <li>For parametric shapes (RooAbsPdf), if a parameter with the name pdfname_norm is found in the input workspace, the rate will be multiplied by the value of that parameter. Note that since this parameter can be freely floating, the normalization of a process can be left to float freely this way. This can also be achieved through the use of <code>rateParams</code>.</li> </ul>"},{"location":"part2/settinguptheanalysis/#binned-shape-analyses","title":"Binned shape analyses","text":"<p>For each channel, histograms have to be provided for the observed shape and for the expected shape of each process.</p> <ul> <li>Within each channel, all histograms must have the same binning.</li> <li>The normalization of the data histogram must correspond to the number of observed events.</li> <li>The normalization of the expected histograms must match the expected event yields.</li> </ul> <p>The Combine tool can take as input histograms saved as TH1, as RooAbsHist in a RooFit workspace (an example of how to create a RooFit workspace and save histograms is available in github), or from a pandas dataframe (example).</p> <p>The block of lines defining the mapping (first block in the datacard) contains one or more rows of the form</p> <pre><code>shapes process channel file histogram [histogram_with_systematics]\n</code></pre> <p>In this line,</p> <ul> <li><code>process</code> is any one of the process names, or <code>*</code> for all processes, or <code>data_obs</code> for the observed data;</li> <li><code>channel</code> is any one of the channel names, or <code>*</code> for all channels;</li> <li><code>file</code>, <code>histogram</code> and <code>histogram_with_systematics</code> identify the names of the files and of the histograms within the file, after making some replacements (if any are found):<ul> <li><code>$PROCESS</code> is replaced with the process name (or "<code>data_obs</code>" for the observed data);</li> <li><code>$CHANNEL</code> is replaced with the channel name;</li> <li><code>$SYSTEMATIC</code> is replaced with the name of the systematic + (<code>Up</code>, <code>Down</code>);</li> <li><code>$MASS</code> is replaced with the chosen (Higgs boson) mass value that is passed as a command-line option when running the tool</li> </ul> </li> </ul> <p>In addition, user-defined keywords can be used. Any word in the datacard <code>$WORD</code> will be replaced by <code>VALUE</code> when including the option <code>--keyword-value WORD=VALUE</code>. 
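For instance, a hypothetical <code>$ERA</code> keyword could be used in the shape mapping and then set when building the workspace:</p> <pre><code># in the datacard:\nshapes * * shapes_$ERA.root $CHANNEL/$PROCESS $CHANNEL/$PROCESS_$SYSTEMATIC\n\n# when converting the datacard:\ntext2workspace.py datacard.txt --keyword-value ERA=2018 -o workspace.root\n</code></pre> <p>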
This option can be repeated multiple times for multiple keywords.</p>"},{"location":"part2/settinguptheanalysis/#template-shape-uncertainties","title":"Template shape uncertainties","text":"<p>Shape uncertainties can be taken into account by vertical interpolation of the histograms. The shapes (fraction of events \\(f\\) in each bin) are interpolated using a spline for shifts below +/- 1\u03c3 and linearly outside of that. Specifically, for nuisance parameter values \\(|\\nu|\\leq 1\\) </p> \\[ f(\\nu) = \\frac{1}{2} \\left( (\\delta^{+}-\\delta^{-})\\nu + \\frac{1}{8}(\\delta^{+}+\\delta^{-})(3\\nu^6 - 10\\nu^4 + 15\\nu^2) \\right) \\] <p>and for \\(\\nu&gt; 1\\) (\\(\\nu&lt;-1\\)), \\(f(\\nu)\\) is a straight line with gradient \\(\\delta^{+}\\) (\\(\\delta^{-}\\)), where \\(\\delta^{+}=f(\\nu=1)-f(\\nu=0)\\), and \\(\\delta^{-}=f(\\nu=-1)-f(\\nu=0)\\), derived using the nominal and up/down histograms. This interpolation is designed so that the values of \\(f(\\nu)\\) and its derivatives are continuous for all values of \\(\\nu\\). </p> <p>The normalizations are interpolated linearly in log scale, just like we do for log-normal uncertainties. If the value in a given bin is negative for some value of \\(\\nu\\), the value will be truncated at 0.</p> <p>For each shape uncertainty and process/channel affected by it, two additional input shapes have to be provided. These are obtained by shifting the parameter up and down by one standard deviation. When building the likelihood, each shape uncertainty is associated to a nuisance parameter taken from a unit Gaussian distribution, which is used to interpolate or extrapolate using the specified histograms.</p> <p>For each given shape uncertainty, the part of the datacard describing shape uncertainties must contain a row</p> <pre><code>name shape effect_for_each_process_and_channel\n</code></pre> <p>The effect can be "-" or 0 for no effect, 1 for the normal effect, and something different from 1 to test larger or smaller effects (in that case, the unit Gaussian is scaled by that factor before using it as parameter for the interpolation).</p> <p>The datacard in data/tutorials/shapes/simple-shapes-TH1.txt provides an example of how to include shapes in the datacard. In the first block the following line specifies the shape mapping:</p> <pre><code>shapes * * simple-shapes-TH1.root $PROCESS $PROCESS_$SYSTEMATIC\n</code></pre> <p>The last block concerns the treatment of the systematic uncertainties that affect shapes. In this case there are two uncertainties with a shape-altering effect.</p> <pre><code>alpha  shape    -           1   uncertainty on background shape and normalization\nsigma  shape    0.5         -   uncertainty on signal resolution. Assume the histogram is a 2 sigma shift,\n#                                so divide the unit gaussian by 2 before doing the interpolation\n</code></pre> <p>There are two options for the interpolation algorithm in the "shape" uncertainty. Putting <code>shape</code> will result in an interpolation of the fraction of events in each bin. That is, the histograms are first normalized before interpolation. Putting <code>shapeN</code> will instead base the interpolation on the logs of the fraction in each bin. For both <code>shape</code>  and <code>shapeN</code>, the total normalization is interpolated using an asymmetric log-normal, so that the effect of the systematic on both the shape and normalization is accounted for. 
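To make the vertical interpolation formula quoted earlier concrete, the following is a small numerical sketch (not Combine's internal implementation) that treats \\(f\\) as the shift of the bin fraction relative to its nominal value:</p> <pre><code>def interpolate_shift(delta_plus, delta_minus, nu):\n    # delta_plus = f(nu=+1) - f(nu=0), delta_minus = f(nu=-1) - f(nu=0)\n    if abs(nu) &lt;= 1.0:\n        smooth = 3 * nu**6 - 10 * nu**4 + 15 * nu**2\n        return 0.5 * ((delta_plus - delta_minus) * nu + 0.125 * (delta_plus + delta_minus) * smooth)\n    # linear continuation beyond +/-1 sigma, matching the value and slope at nu = +/-1\n    return delta_plus * nu if nu &gt; 1.0 else -delta_minus * nu\n</code></pre> <p>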
The following image shows a comparison of the two algorithms for the example datacard.</p> <p></p> <p>In this case there are two processes, signal and background, and two uncertainties affecting the background (alpha) and signal shapes (sigma). In the ROOT file, two histograms per systematic have to be provided, they are the shapes obtained, for the specific process, by shifting the parameter associated with the uncertainty up and down by a standard deviation: <code>background_alphaUp</code> and <code>background_alphaDown</code>, <code>signal_sigmaUp</code> and <code>signal_sigmaDown</code>.</p> <p>The content of the ROOT file simple-shapes-TH1.root  associated with the datacard data/tutorials/shapes/simple-shapes-TH1.txt is:</p> <pre><code>root [0]\nAttaching file simple-shapes-TH1.root as _file0...\nroot [1] _file0-&gt;ls()\nTFile**     simple-shapes-TH1.root\n TFile*     simple-shapes-TH1.root\n  KEY: TH1F signal;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaUp;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaDown;1  Histogram of signal__x\n  KEY: TH1F background;1    Histogram of background__x\n  KEY: TH1F background_alphaUp;1    Histogram of background__x\n  KEY: TH1F background_alphaDown;1  Histogram of background__x\n  KEY: TH1F data_obs;1  Histogram of data_obs__x\n  KEY: TH1F data_sig;1  Histogram of data_sig__x\n</code></pre> <p>For example, without shape uncertainties there would only be one row with <code>shapes * * shapes.root $CHANNEL/$PROCESS</code>. Then, to give a simple example for two channels ("e", "mu") with three processes ("higgs", "zz", "top"), the ROOT file contents should look like:</p> histogram meaning <code>e/data_obs</code> observed data in electron channel <code>e/higgs</code> expected shape for higgs in electron channel <code>e/zz</code> expected shape for ZZ in electron channel <code>e/top</code> expected shape for top in electron channel <code>mu/data_obs</code> observed data in muon channel <code>mu/higgs</code> expected shape for higgs in muon channel <code>mu/zz</code> expected shape for ZZ in muon channel <code>mu/top</code> expected shape for top in muon channel <p>If there is also an uncertainty that affects the shape, e.g. the jet energy scale, shape histograms for the jet energy scale shifted up and down by one sigma need to be included. This could be done by creating a folder for each process and writing a line like</p> <p><code>shapes * * shapes.root $CHANNEL/$PROCESS/nominal  $CHANNEL/$PROCESS/$SYSTEMATIC</code></p> <p>or a postfix can be added to the histogram name:</p> <p><code>shapes * * shapes.root $CHANNEL/$PROCESS  $CHANNEL/$PROCESS_$SYSTEMATIC</code></p> <p>Warning</p> <p>If you have a nuisance parameter that has shape effects on some processes (using <code>shape</code>) and rate effects on other processes (using <code>lnN</code>) you should use a single line for the systematic uncertainty with <code>shape?</code>. This will tell Combine to first look for Up/Down systematic templates for that process and if it doesn't find them, it will interpret the number that you put for the process as a <code>lnN</code> instead. </p> <p>For a detailed example of a template-based binned analysis, see the H\u2192\u03c4\u03c4 2014 DAS tutorial, or in our Tutorial pages. 
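As an illustration of the per-channel layout with a systematic postfix (the file, channel, and systematic names here are hypothetical), such an input file could be produced with a short PyROOT script:</p> <pre><code>import ROOT\n\n# Write per-channel folders so that a mapping like\n#   shapes * * shapes.root $CHANNEL/$PROCESS $CHANNEL/$PROCESS_$SYSTEMATIC\n# can pick the histograms up. The binning and the \"jes\" systematic are placeholders.\nfout = ROOT.TFile(\"shapes.root\", \"RECREATE\")\nfor channel in [\"e\", \"mu\"]:\n    subdir = fout.mkdir(channel)\n    subdir.cd()\n    names = [\"data_obs\", \"higgs\", \"zz\", \"top\"]\n    names += [p + \"_jes\" + d for p in [\"higgs\", \"zz\", \"top\"] for d in [\"Up\", \"Down\"]]\n    for name in names:\n        ROOT.TH1F(name, name, 10, 0.0, 100.0).Write()\nfout.Close()\n</code></pre> <p>The exact layout is a convention choice; what matters is that the file, folder, and histogram names match the <code>shapes</code> mapping line in the datacard. 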
</p>"},{"location":"part2/settinguptheanalysis/#unbinned-or-parametric-shape-analyses","title":"Unbinned or parametric shape analyses","text":"<p>In some cases, it can be convenient to describe the expected signal and background shapes in terms of analytical functions, rather than templates. Typical examples are searches/measurements where the signal is apparent as a narrow peak over a smooth continuum background. In this context, uncertainties affecting the shapes of the signal and backgrounds can be implemented naturally as uncertainties in the parameters of those analytical functions. It is also possible to adopt an agnostic approach in which the parameters of the background model are left freely floating in the fit to the data, i.e. only requiring the background to be well described by a smooth function.</p> <p>Technically, this is implemented by means of the RooFit package, which allows writing generic probability density functions, and saving them into ROOT files. The PDFs can be either taken from RooFit's standard library of functions (e.g. Gaussians, polynomials, ...) or hand-coded in C++, and combined together to form even more complex shapes.</p> <p>In the datacard using templates, the column after the file name would have been the name of the histogram. For parametric analysis we need two names to identify the mapping, separated by a colon (<code>:</code>).</p> <p>shapes process channel shapes.root workspace_name:pdf_name</p> <p>The first part identifies the name of the input RooWorkspace containing the PDF, and the second part the name of the RooAbsPdf inside it (or, for the observed data, the RooAbsData). It is possible to have multiple input workspaces, just as there can be multiple input ROOT files. You can use any of the usual RooFit pre-defined PDFs for your signal and background models.</p> <p>Warning</p> <p>If in your model you are using RooAddPdfs, in which the coefficients are not defined recursively, Combine will not interpret them correctly. You can add the option <code>--X-rtd ADDNLL_RECURSIVE=0</code> to any Combine command in order to recover the correct interpretation, however we recommend that you instead re-define your PDF so that the coefficients are recursive (as described in the RooAddPdf documentation) and keep the total normalization (i.e the extended term) as a separate object, as in the case of the tutorial datacard.</p> <p>For example, take a look at the data/tutorials/shapes/simple-shapes-parametric.txt. We see the following line:</p> <pre><code>shapes * * simple-shapes-parametric_input.root w:$PROCESS\n[...]\nbin          1          1\nprocess      sig    bkg\n</code></pre> <p>which indicates that the input file <code>simple-shapes-parametric_input.root</code> should contain an input workspace (<code>w</code>) with PDFs named <code>sig</code> and <code>bkg</code>, since these are the names of the two processes in the datacard. Additionally, we expect there to be a data set named <code>data_obs</code>. 
If we look at the contents of the workspace in <code>data/tutorials/shapes/simple-shapes-parametric_input.root</code>, this is indeed what we see:</p> <pre><code>root [1] w-&gt;Print()\n\nRooWorkspace(w) w contents\n\nvariables\n---------\n(MH,bkg_norm,cc_a0,cc_a1,cc_a2,j,vogian_sigma,vogian_width)\n\np.d.f.s\n-------\nRooChebychev::bkg[ x=j coefList=(cc_a0,cc_a1,cc_a2) ] = 2.6243\nRooVoigtian::sig[ x=j mean=MH width=vogian_width sigma=vogian_sigma ] = 0.000639771\n\ndatasets\n--------\nRooDataSet::data_obs(j)\n</code></pre> <p>In this datacard, the signal is parameterized in terms of the hypothesized mass (<code>MH</code>). Combine will use this variable instead of creating its own, and it will be set to the value passed with the <code>-m</code> option. For this reason, we should add the option <code>-m 30</code> (or something else within the observable range) when running Combine. You will also see there is a variable named <code>bkg_norm</code>. This is used to normalize the background rate (see the section on Rate parameters below for details).</p> <p>Warning</p> <p>Combine will not accept RooExtendedPdfs as input. This is to alleviate a bug that led to improper treatment of the normalization when using multiple RooExtendedPdfs to describe a single process. You should instead use RooAbsPdfs and provide the rate as a separate object (see the Rate parameters section).</p> <p>The part of the datacard related to the systematics can include lines with the syntax</p> <ul> <li>name param  X Y</li> </ul> <p>These lines encode uncertainties in the parameters of the signal and background PDFs. The parameter is to be assigned a Gaussian uncertainty of Y around its mean value of X. One can change the mean value from 0 to 1 (or any value, if one so chooses) if the parameter in question is multiplicative instead of additive.</p> <p>In the data/tutorials/shapes/simple-shapes-parametric.txt datacard, there are lines for one such parametric uncertainty,</p> <pre><code>sigma   param 1.0      0.1\n</code></pre> <p>meaning there is a parameter in the input workspace called <code>sigma</code>, which should be constrained with a Gaussian centered at 1.0 with a width of 0.1. Note that the exact interpretation of these parameters is left to the user since the signal PDF is constructed externally by you. All Combine knows is that 1.0 should be the most likely value and 0.1 is its 1\u03c3 uncertainty. Asymmetric uncertainties are written using the syntax -1\u03c3/+1\u03c3 in the datacard, as is the case for <code>lnN</code> uncertainties. </p> <p>If one wants to specify a parameter that is freely floating across its given range, and not Gaussian constrained, the following syntax is used:</p> <pre><code>name flatParam\n</code></pre> <p>This is not strictly necessary in frequentist methods using profiled likelihoods, as Combine will still profile these nuisances when performing fits (as is the case for the <code>simple-shapes-parametric.txt</code> datacard).</p> <p>Warning</p> <p>All parameters that are floating or constant in the user's input workspaces will remain floating or constant. 
Combine will not modify those for you!</p> <p>A full example of a parametric analysis can be found in this H\u2192\u03b3\u03b3 2014 DAS tutorial or in our Tutorial pages.</p>"},{"location":"part2/settinguptheanalysis/#caveat-on-using-parametric-pdfs-with-binned-datasets","title":"Caveat on using parametric PDFs with binned datasets","text":"<p>Users should be aware of a feature that affects the use of parametric PDFs together with binned datasets.</p> <p>RooFit uses the integral of the PDF, computed analytically (or numerically, but disregarding the binning), to normalize it, but computes the expected event yield in each bin by evaluating the PDF at the bin center. This means that if the variation of the pdf is sizeable within the bin, there is a mismatch between the sum of the event yields per bin and the PDF normalization, which can cause a bias in the fits. More specifically, the bias is present if the contribution of the second derivative integrated in the bin size is not negligible. For linear functions, an evaluation at the bin center is correct. There are two recommended ways to work around this issue:</p> <p>1. Use narrow bins</p> <p>It is recommended to use bins that are significantly finer than the characteristic scale of the PDFs. Even in the absence of this feature, this would be advisable. Note that this caveat does not apply to analyses using templates (they are constant across each bin, so there is no bias), or using unbinned datasets.</p> <p>2. Use a RooParametricShapeBinPdf</p> <p>Another solution (currently only implemented for 1-dimensional histograms) is to use a custom PDF that performs the correct integrals internally, as in RooParametricShapeBinPdf.</p> <p>Note that this PDF class now allows parameters that are themselves RooAbsReal objects (i.e. functions of other variables). The integrals are handled internally by calling the underlying PDF's <code>createIntegral()</code> method with named ranges created for each of the bins. This means that if the analytical integrals for the underlying PDF are available, they will be used.</p> <p>The constructor for this class requires a RooAbsReal (eg any RooAbsPdf) along with a list of RooRealVars (the parameters, excluding the observable \\(x\\)),</p> <pre><code>RooParametricShapeBinPdf(const char *name, const char *title,  RooAbsReal&amp; _pdf, RooAbsReal&amp; _x, RooArgList&amp; _pars, const TH1 &amp;_shape )\n</code></pre> <p>Below is a comparison of a fit to a binned dataset containing 1000 events with one observable \\(0 \\leq x \\leq 100\\). The fit function is a RooExponential of the form \\(e^{xp}\\).</p> <p> </p> <p>In the upper plot, the data are binned in 100 evenly-spaced bins, while in the lower plot, there are three irregular bins. The blue lines show the result of the fit when using the RooExponential directly, while the red lines show the result when wrapping the PDF inside a RooParametricShapeBinPdf. In the narrow binned case, the two agree well, while for wide bins, accounting for the integral over the bin yields a better fit.</p> <p>You should note that using this class will result in slower fits, so you should first decide whether the added accuracy is enough to justify the reduced efficiency.</p>"},{"location":"part2/settinguptheanalysis/#beyond-simple-datacards","title":"Beyond simple datacards","text":"<p>Datacards can be extended in order to provide additional functionality and flexibility during runtime. 
These can also allow for the production of more complicated models and for producing more advanced results.</p>"},{"location":"part2/settinguptheanalysis/#rate-parameters","title":"Rate parameters","text":"<p>The overall expected rate of a particular process in a particular bin does not necessarily need to be a fixed quantity. Scale factors can be introduced to modify the rate directly in the datacards for ANY type of analysis. This can be achieved using the directive <code>rateParam</code> in the datacard with the following syntax,</p> <pre><code>name rateParam bin process initial_value [min,max]\n</code></pre> <p>The <code>[min,max]</code> argument is optional. If it is not included, Combine will remove the range of this parameter. This will produce a new parameter in the model (unless it already exists), which multiplies the rate of that particular process in the given bin by its value.</p> <p>You can attach the same <code>rateParam</code> to multiple processes/bins by either using a wild card (e.g. <code>*</code> will match everything, <code>QCD_*</code> will match everything starting with <code>QCD_</code>, etc.) in the name of the bin and/or process, or by repeating the <code>rateParam</code> line in the datacard for different bins/processes with the same name.</p> <p>Warning</p> <p><code>rateParam</code> is not a shortcut to evaluate the post-fit yield of a process since other nuisance parameters can also change the normalization. E.g., finding that the <code>rateParam</code> best-fit value is 0.9 does not necessarily imply that the process yield is 0.9 times the initial yield. The best approach is to evaluate the yield, taking into account the values of all nuisance parameters, using <code>--saveNormalizations</code>.</p> <p>This parameter is, by default, freely floating. It is possible to include a Gaussian constraint on any <code>rateParam</code> that is floating (i.e. not a <code>formula</code> or spline) by adding a <code>param</code> nuisance line in the datacard with the same name.</p> <p>In addition to rate modifiers that are freely floating, modifiers that are functions of other parameters can be included using the following syntax,</p> <pre><code>name rateParam bin process formula args\n</code></pre> <p>where <code>args</code> is a comma-separated list of the arguments for the string <code>formula</code>. You can include other nuisance parameters in the <code>formula</code>, including ones that are Gaussian constrained (i.e. via the <code>param</code> directive).</p> <p>Below is an example datacard that uses the <code>rateParam</code> directive to implement an ABCD-like method in Combine. 
For a more realistic description of its use for ABCD, see the single-lepton SUSY search implementation described here.</p> <pre><code>imax 4  number of channels\njmax 0  number of processes -1\nkmax *  number of nuisance parameters (sources of systematical uncertainties)\n-------\nbin                   B      C       D        A\nobservation           50    100      500      10\n-------\nbin                   B      C       D        A\nprocess               bkg    bkg     bkg      bkg\nprocess               1      1       1         1\nrate                  1      1       1         1\n-------\n\nalpha rateParam A bkg (@0*@1/@2) beta,gamma,delta\nbeta  rateParam B bkg 50\ngamma rateParam C bkg 100\ndelta rateParam D bkg 500\n</code></pre> <p>For more examples of using <code>rateParam</code> (e.g. for fitting process normalizations in control regions and signal regions simultaneously) see this 2016 CMS tutorial</p> <p>Finally, any pre-existing RooAbsReal inside some ROOT file with a workspace can be imported using the following:</p> <pre><code>name rateParam bin process rootfile:workspacename\n</code></pre> <p>The name should correspond to the name of the object that is being picked up inside the RooWorkspace. A simple example using the SM XS and BR splines available in HiggsAnalysis/CombinedLimit can be found under data/tutorials/rate_params/simple_sm_datacard.txt</p>"},{"location":"part2/settinguptheanalysis/#extra-arguments","title":"Extra arguments","text":"<p>If a parameter is intended to be used, and it is not a user-defined <code>param</code> or <code>rateParam</code>, it can be picked up by first issuing an <code>extArg</code> directive before the line in which it is used in the datacard. The syntax for <code>extArg</code> is:</p> <pre><code>name extArg rootfile:workspacename\n</code></pre> <p>The string ":RecycleConflictNodes" can be added at the end of the final argument (i.e. rootfile:workspacename:RecycleConflictNodes) to apply the corresponding RooFit option when the object is imported into the workspace. It is also possible to simply add a RooRealVar using <code>extArg</code> for use in <code>rateParam</code> formulas with the following</p> <pre><code>name extArg init [min,max]\n</code></pre> <p>Note that the <code>[min,max]</code> argument is optional and if not included, the code will remove the range of this parameter.</p>"},{"location":"part2/settinguptheanalysis/#manipulation-of-nuisance-parameters","title":"Manipulation of Nuisance parameters","text":"<p>It can often be useful to modify datacards, or the runtime behavior, without having to modify individual systematic lines. This can be achieved through nuisance parameter modifiers.</p>"},{"location":"part2/settinguptheanalysis/#nuisance-modifiers","title":"Nuisance modifiers","text":"<p>If a nuisance parameter needs to be renamed for certain processes/channels, it can be done using a single <code>nuisance edit</code> directive at the end of a datacard</p> <p><pre><code>nuisance edit rename process channel oldname newname [options]\n</code></pre> Note that the wildcard (*) can be used for either a process, a channel, or both.  This will have the effect that nuisance parameters affecting a given process/channel will be renamed, thereby de-correlating it between processes/channels.  Use the option <code>ifexists</code> to skip/avoid an error if the nuisance parameter is not found.  This kind of command will only affect nuisances of the type <code>shape[N]</code>, <code>lnN</code>. 
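For example (with hypothetical process, channel, and nuisance names), the following line decorrelates the <code>lumi</code> uncertainty for the <code>ttbar</code> process in channel <code>ch1</code> by renaming it only there:</p> <pre><code>nuisance edit rename ttbar ch1 lumi lumi_ttbar_ch1 ifexists\n</code></pre> <p>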
Instead, if you also want to change the names of <code>param</code> type nuisances, you can use a global version </p> <p><pre><code>nuisance edit rename oldname newname\n</code></pre> which will rename all <code>shape[N]</code>, <code>lnN</code> and <code>param</code> nuisances found in one go. You should make sure these commands come after any process/channel specific ones in the datacard. This version does not accept options.  </p> <p>Other edits are also supported, as follows:</p> <ul> <li><code>nuisance edit add process channel name pdf value [options]</code>  -&gt; add a new nuisance parameter to a process</li> <li><code>nuisance edit drop process channel name [options]</code>  -&gt; remove this nuisance from the process/channel. Use the option <code>ifexists</code> to skip/avoid errors if the nuisance parameter is not found.</li> <li><code>nuisance edit changepdf name newpdf</code> -&gt; change the PDF type of a given nuisance parameter to <code>newpdf</code>.</li> <li><code>nuisance edit split process channel oldname newname1 newname2 value1 value2</code> -&gt; split a nuisance parameter line into two separate nuisance parameters called <code>newname1</code> and <code>newname2</code> with values <code>value1</code> and <code>value2</code>. This will produce two separate lines so that the original nuisance parameter <code>oldname</code> is split into two uncorrelated nuisances.</li> <li><code>nuisance edit freeze name [options]</code>  -&gt; set nuisance parameter frozen by default. Can be overridden on the command line using the <code>--floatNuisances</code> option. Use the option <code>ifexists</code> to skip/avoid errors if the nuisance parameter is not found.</li> <li><code>nuisance edit merge process channel name1 name2</code> -&gt; merge systematic <code>name2</code> into <code>name1</code> by adding their values in quadrature and removing <code>name2</code>. This only works if, for each process and channel included, the uncertainties both increase or both reduce the process yield. For example, you can add 1.1 to 1.2, but not to 0.9.</li> </ul> <p>The above edits (excluding the renaming) support nuisance parameters of the types <code>shape[N]</code>, <code>lnN</code>, <code>lnU</code>, <code>gmN</code>, <code>param</code>, <code>flatParam</code>, <code>rateParam</code>, or <code>discrete</code>.</p>"},{"location":"part2/settinguptheanalysis/#groups-of-nuisances","title":"Groups of nuisances","text":"<p>Often it is desirable to freeze one or more nuisance parameters to check the impact they have on limits, likelihood scans, significances etc.</p> <p>However, for large groups of nuisance parameters (e.g. everything associated with theory) it is easier to define nuisance groups in the datacard. The following line in a datacard will, for example, produce a group of nuisance parameters with the group name <code>theory</code> that contains two parameters, <code>QCDscale</code> and <code>pdf</code>.</p> <pre><code>theory group = QCDscale pdf\n</code></pre> <p>Multiple groups can be defined in this way. It is also possible to extend nuisance parameter groups in datacards using += in place of =.</p> <p>These groups can be manipulated at runtime (e.g. for freezing all nuisance parameters associated with a group at runtime, see Running the tool). 
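For instance, the <code>theory</code> group above could be extended with an additional (hypothetical) <code>alphaS</code> parameter and then frozen as a whole when running Combine:</p> <pre><code># in the datacard\ntheory group = QCDscale pdf\ntheory group += alphaS\n\n# at runtime, freeze every parameter in the group\ncombine -M MultiDimFit workspace.root --freezeNuisanceGroups theory\n</code></pre> <p>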
You can find more info on groups of nuisances here</p> <p>Note that when using the automatic addition of statistical uncertainties (autoMCStats), the corresponding nuisance parameters are created by <code>text2workspace.py</code> and so do not exist in the datacards. It is therefore not possible to add autoMCStats parameters to groups of nuisances in the way described above. However, <code>text2workspace.py</code> will automatically create a group labelled <code>autoMCStats</code>, which contains all autoMCStats parameters.</p> <p>This group is useful for freezing all parameters created by autoMCStats. For freezing subsets of the parameters, for example if the datacard contains two categories, cat_label_1 and cat_label_2, to only freeze the autoMCStat parameters created for category cat_label_1, the regular expression features can be used. In this example this can be achieved by using <code>--freezeParameters 'rgx{prop_bincat_label_1_bin.*}'</code>.</p>"},{"location":"part2/settinguptheanalysis/#combination-of-multiple-datacards","title":"Combination of multiple datacards","text":"<p>If you have separate channels, each with their own datacard, it is possible to produce a combined datacard using the script <code>combineCards.py</code></p> <p>The syntax is simple: <code>combineCards.py Name1=card1.txt Name2=card2.txt .... &gt; card.txt</code> If the input datacards had just one bin each, the output channels will be called <code>Name1</code>, <code>Name2</code>, and so on. Otherwise, a prefix <code>Name1_</code> ... <code>Name2_</code> will be added to the bin labels in each datacard. The supplied bin names <code>Name1</code>, <code>Name2</code>, etc. must themselves conform to valid C++/python identifier syntax.</p> <p>Warning</p> <p>When combining datacards, you should keep in mind that systematic uncertainties that have different names will be assumed to be uncorrelated, and those with the same name will be assumed 100% correlated. An uncertainty correlated across channels must have the same PDF in all cards (i.e. always <code>lnN</code>, or all <code>gmN</code> with the same <code>N</code>. Note that <code>shape</code> and <code>lnN</code> can be interchanged via the <code>shape?</code> directive). Furthermore, when using parametric models, "parameter" objects such as <code>RooRealVar</code>, <code>RooAbsReal</code>, and <code>RooAbsCategory</code> (parameters, PDF indices etc) with the same name will be assumed to be the same object. If this is not intended, you may encounter unexpected behaviour, such as the order of combining cards having an impact on the results. Make sure that such objects are named differently in your inputs if they represent different things! In contrast, Combine will try to rename other "shape" objects (such as PDFs) automatically. </p> <p>The <code>combineCards.py</code> script will fail if you are trying to combine a shape datacard with a counting datacard. You can however convert a counting datacard into an equivalent shape-based one by adding a line <code>shapes * * FAKE</code> in the datacard after the <code>imax</code>, <code>jmax</code>, and <code>kmax</code> section. Alternatively, you can add the option <code>-S</code> to <code>combineCards.py</code>, which will do this for you while creating the combined datacard.</p>"},{"location":"part2/settinguptheanalysis/#automatic-production-of-datacards-and-workspaces","title":"Automatic production of datacards and workspaces","text":"<p>For complicated analyses or cases in which multiple datacards are needed (e.g. 
optimization studies), you can avoid writing these by hand. The object Datacard defines the analysis and can be created as a python object. The template python script below will produce the same workspace as running <code>textToWorkspace.py</code> (see the section on Physics Models) on the realistic-counting-experiment.txt datacard.</p> <pre><code>from HiggsAnalysis.CombinedLimit.DatacardParser import *\nfrom HiggsAnalysis.CombinedLimit.ModelTools import *\nfrom HiggsAnalysis.CombinedLimit.ShapeTools import *\nfrom HiggsAnalysis.CombinedLimit.PhysicsModel import *\n\nfrom sys import exit\nfrom optparse import OptionParser\nparser = OptionParser()\naddDatacardParserOptions(parser)\noptions,args = parser.parse_args()\noptions.bin = True # make a binary workspace\n\nDC = Datacard()\nMB = None\n\n############## Setup the datacard (must be filled in) ###########################\n\nDC.bins =   ['bin1'] # &lt;type 'list'&gt;\nDC.obs =    {'bin1': 0.0} # &lt;type 'dict'&gt;\nDC.processes =  ['ggH', 'qqWW', 'ggWW', 'others'] # &lt;type 'list'&gt;\nDC.signals =    ['ggH'] # &lt;type 'list'&gt;\nDC.isSignal =   {'qqWW': False, 'ggWW': False, 'ggH': True, 'others': False} # &lt;type 'dict'&gt;\nDC.keyline =    [('bin1', 'ggH', True), ('bin1', 'qqWW', False), ('bin1', 'ggWW', False), ('bin1', 'others', False)] # &lt;type 'list'&gt;\nDC.exp =    {'bin1': {'qqWW': 0.63, 'ggWW': 0.06, 'ggH': 1.47, 'others': 0.22}} # &lt;type 'dict'&gt;\nDC.systs =  [('lumi', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 1.11, 'ggH': 1.11, 'others': 0.0}}), ('xs_ggH', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 0.0, 'ggH': 1.16, 'others': 0.0}}), ('WW_norm', False, 'gmN', [4], {'bin1': {'qqWW': 0.16, 'ggWW': 0.0, 'ggH': 0.0, 'others': 0.0}}), ('xs_ggWW', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 1.5, 'ggH': 0.0, 'others': 0.0}}), ('bg_others', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 0.0, 'ggH': 0.0, 'others': 1.3}})] # &lt;type 'list'&gt;\nDC.shapeMap =   {} # &lt;type 'dict'&gt;\nDC.hasShapes =  False # &lt;type 'bool'&gt;\nDC.flatParamNuisances =  {} # &lt;type 'dict'&gt;\nDC.rateParams =  {} # &lt;type 'dict'&gt;\nDC.extArgs =    {} # &lt;type 'dict'&gt;\nDC.rateParamsOrder  =  set([]) # &lt;type 'set'&gt;\nDC.frozenNuisances  =  set([]) # &lt;type 'set'&gt;\nDC.systematicsShapeMap =  {} # &lt;type 'dict'&gt;\nDC.nuisanceEditLines    =  [] # &lt;type 'list'&gt;\nDC.groups   =  {} # &lt;type 'dict'&gt;\nDC.discretes    =  [] # &lt;type 'list'&gt;\n\n\n###### User defined options #############################################\n\noptions.out      = \"combine_workspace.root\"     # Output workspace name\noptions.fileName = \"./\"             # Path to input ROOT files\noptions.verbose  = \"1\"              # Verbosity\n\n##########################################################################\n\nif DC.hasShapes:\n    MB = ShapeBuilder(DC, options)\nelse:\n    MB = CountingModelBuilder(DC, options)\n\n# Set physics models\nMB.setPhysics(defaultModel)\nMB.doModel()\n</code></pre> <p>Any existing datacard can be converted into such a template python script by using the <code>--dump-datacard</code> option in <code>text2workspace.py</code>, in case a more complicated template is needed.</p> <p>Warning</p> <p>The above is not advised for final results, as this script is not easily combined with other analyses so should only be used for internal studies.</p> <p>For the automatic generation of datacards that are combinable, you should instead use the CombineHarvester package, which includes many features 
for producing complex datacards in a reliable, automated way.</p>"},{"location":"part2/settinguptheanalysis/#sanity-checking-the-datacard","title":"Sanity checking the datacard","text":"<p>For large combinations with multiple channels/processes etc., the <code>.txt</code> file can get unwieldy to read through. There are some simple tools to help check and disseminate the contents of the cards. </p> <p>In order to get a quick view of the systematic uncertainties included in the datacard, you can use the <code>test/systematicsAnalyzer.py</code> tool. This will produce a list of the systematic uncertainties (normalization and shape), indicating what type they are, which channels/processes they affect and the size of the effect on the normalization (for shape uncertainties, this will just be the overall uncertainty on the normalization).</p> <p>The default output is a <code>.html</code> file that can be expanded to give more details about the effect of the systematic uncertainty for each channel/process. Add the option <code>--format brief</code> to obtain a simpler summary report directly in the terminal. An example output for the tutorial card <code>data/tutorials/shapes/simple-shapes-TH1.txt</code> is shown below.</p> <pre><code>python test/systematicsAnalyzer.py data/tutorials/shapes/simple-shapes-TH1.txt --all -f html &gt; out.html\n</code></pre> <p>This will produce the following output in HTML format: </p> <table> <caption>Nuisance Report</caption> <thead> <tr> <th>Nuisance (types)</th> <th>Range</th> <th>Processes</th> <th>Channels</th> </tr> </thead> <tbody> <tr> <td>lumi (lnN)</td> <td>1.000 - 1.100</td> <td>background, signal</td> <td>bin1(1): signal(1.1), background(1.0)</td> </tr> <tr> <td>alpha (shape)</td> <td>1.111 - 1.150</td> <td>background</td> <td>bin1(1): background(0.900/1.150 (shape))</td> </tr> <tr> <td>bgnorm (lnN)</td> <td>1.000 - 1.300</td> <td>background, signal</td> <td>bin1(1): signal(1.0), background(1.3)</td> </tr> <tr> <td>sigma (shape)</td> <td>1.000 - 1.000</td> <td>signal</td> <td>bin1(1): signal(1.000/1.000 (shape))</td> </tr> </tbody> </table> <p>In case you only have a counting experiment datacard, include the option <code>--noshape</code>.</p> <p>If you have a datacard that uses several <code>rateParams</code> or a Physics model that includes a complicated product of normalization terms in each process, you can check the values of the normalization (and which objects in the workspace comprise them) using the <code>test/printWorkspaceNormalisations.py</code> tool. 
As an example, the first few blocks of output for the tutorial card <code>data/tutorials/counting/realistic-multi-channel.txt</code> are given below:</p> <pre><code>text2workspace.py data/tutorials/counting/realistic-multi-channel.txt \npython test/printWorkspaceNormalisations.py data/tutorials/counting/realistic-multi-channel.root     \n</code></pre> Show example output <pre><code>---------------------------------------------------------------------------\n---------------------------------------------------------------------------\nChannel - mu_tau\n---------------------------------------------------------------------------\n  Top-level normalisation for process ZTT -&gt; n_exp_binmu_tau_proc_ZTT\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_binmu_tau_proc_ZTT @ 0x6bbb610\n    nominal value: 329\n    log-normals (3):\n         kappa = 1.23, logKappa = 0.207014, theta = tauid = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = ZtoLL = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = effic = 0\n    asymm log-normals (0):\n    other terms (0):\n\n  -------------------------------------------------------------------------\n  default value =  329.0\n---------------------------------------------------------------------------\n  Top-level normalisation for process QCD -&gt; n_exp_binmu_tau_proc_QCD\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_binmu_tau_proc_QCD @ 0x6bbcaa0\n    nominal value: 259\n    log-normals (1):\n         kappa = 1.1, logKappa = 0.0953102, theta = QCDmu = 0\n    asymm log-normals (0):\n    other terms (0):\n\n  -------------------------------------------------------------------------\n  default value =  259.0\n---------------------------------------------------------------------------\n  Top-level normalisation for process higgs -&gt; n_exp_binmu_tau_proc_higgs\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_binmu_tau_proc_higgs @ 0x6bc6390\n    nominal value: 0.57\n    log-normals (3):\n         kappa = 1.11, logKappa = 0.10436, theta = lumi = 0\n         kappa = 1.23, logKappa = 0.207014, theta = tauid = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = effic = 0\n    asymm log-normals (0):\n    other terms (1):\n         term r (class RooRealVar), value = 1\n\n  -------------------------------------------------------------------------\n  default value =  0.57\n---------------------------------------------------------------------------\n---------------------------------------------------------------------------\nChannel - e_mu\n---------------------------------------------------------------------------\n  Top-level normalisation for process ZTT -&gt; n_exp_bine_mu_proc_ZTT\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_bine_mu_proc_ZTT @ 0x6bc8910\n    nominal value: 88\n    log-normals (2):\n         kappa = 1.04, logKappa = 0.0392207, theta = ZtoLL = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = effic = 0\n    asymm log-normals (0):\n    other terms (0):\n\n  -------------------------------------------------------------------------\n  default value =  88.0\n---------------------------------------------------------------------------\n</code></pre> <p>As you can see, for each channel, a report is given for the top-level rate object in the workspace, for each process contributing to 
that channel. You can also see the various terms that make up that rate. The default value is for the default parameters in the workspace (i.e when running <code>text2workspace</code>, these are the values created as default).</p> <p>Another example is shown below for the workspace produced from the data/tutorials/shapes/simple-shapes-parametric.txt datacard.</p> <pre><code>text2workspace.py data/tutorials/shapes/simple-shapes-parametric.txt -m 30\npython test/printWorkspaceNormalisations.py data/tutorials/shapes/simple-shapes-parametric.root -m 30\n</code></pre> Show example output <pre><code>  ---------------------------------------------------------------------------\n  ---------------------------------------------------------------------------\n  Channel - bin1\n  ---------------------------------------------------------------------------\n    Top-level normalisation for process bkg -&gt; n_exp_final_binbin1_proc_bkg\n    -------------------------------------------------------------------------\n  RooProduct::n_exp_final_binbin1_proc_bkg[ n_exp_binbin1_proc_bkg * shapeBkg_bkg_bin1__norm ] = 521.163\n   ... is a product, which contains  n_exp_binbin1_proc_bkg\n  RooRealVar::n_exp_binbin1_proc_bkg = 1 C  L(-INF - +INF)\n    -------------------------------------------------------------------------\n    default value =  521.163204829\n  ---------------------------------------------------------------------------\n    Top-level normalisation for process sig -&gt; n_exp_binbin1_proc_sig\n    -------------------------------------------------------------------------\n  Dumping ProcessNormalization n_exp_binbin1_proc_sig @ 0x464f700\n      nominal value: 1\n      log-normals (1):\n           kappa = 1.1, logKappa = 0.0953102, theta = lumi = 0\n      asymm log-normals (0):\n      other terms (1):\n           term r (class RooRealVar), value = 1\n\n    -------------------------------------------------------------------------\n    default value =  1.0\n</code></pre> <p>This tells us that the normalization for the background process, named <code>n_exp_final_binbin1_proc_bkg</code> is a product of two objects <code>n_exp_binbin1_proc_bkg * shapeBkg_bkg_bin1__norm</code>. The first object is just from the rate line in the datacard (equal to 1) and the second is a floating parameter. For the signal, the normalisation is called <code>n_exp_binbin1_proc_sig</code> and is a <code>ProcessNormalization</code> object that contains the rate modifications due to the systematic uncertainties. You can see that it also has a \"nominal value\", which again is just from the value given in the rate line of the datacard (again=1).</p>"},{"location":"part3/commonstatsmethods/","title":"Common Statistical Methods","text":"<p>In this section, the most commonly used statistical methods from Combine will be covered, including specific instructions on how to obtain limits, significances, and likelihood scans. For all of these methods, the assumed parameter of interest (POI) is the overall signal strength \\(r\\) (i.e the default PhysicsModel). In general however, the first POI in the list of POIs (as defined by the PhysicsModel) will be taken instead of r. 
This may or may not make sense for any particular method, so care must be taken.</p> <p>This section will assume that you are using the default physics model, unless otherwise specified.</p>"},{"location":"part3/commonstatsmethods/#asymptotic-frequentist-limits","title":"Asymptotic Frequentist Limits","text":"<p>The <code>AsymptoticLimits</code> method can be used to quickly compute an estimate of the observed and expected limits, which is accurate when the event yields are not too small and the systematic uncertainties do not play a major role in the result. The limit calculation relies on an asymptotic approximation of the distributions of the LHC test statistic, which is based on a profile likelihood ratio, under the signal and background hypotheses to compute two p-values \\(p_{\\mu}, p_{b}\\) and therefore \\(CL_s=p_{\\mu}/(1-p_{b})\\) (see the FAQ section for a description). This means it is the asymptotic approximation for evaluating limits with frequentist toys using the LHC test statistic for limits. In the definition below, the parameter \\(\\mu=r\\).</p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mu} = -2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\) , in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu\\). The value of \\(q_{\\mu}\\) is set to 0 when \\(\\hat{\\mu}&gt;\\mu\\), giving a one-sided limit. Furthermore, the constraint \\(\\mu&gt;0\\) is enforced in the fit. This means that if the unconstrained value of \\(\\hat{\\mu}\\) would be negative, the test statistic \\(q_{\\mu}\\) is evaluated as \\(-2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(0,\\hat{\\hat{\\nu}}(0))]\\)</li> </ul> <p>This method is the default Combine method: if you call Combine without specifying <code>-M</code>, the <code>AsymptoticLimits</code> method will be run.</p> <p>A realistic example of a datacard for a counting experiment can be found in the HiggsCombination package: data/tutorials/counting/realistic-counting-experiment.txt</p> <p>The <code>AsymptoticLimits</code> method can be run using</p> <pre><code>combine -M AsymptoticLimits realistic-counting-experiment.txt\n</code></pre> <p>The program will print the limit on the signal strength r (number of signal events / number of expected signal events) e .g. <code>Observed Limit: r &lt; 1.6297 @ 95% CL</code> , the median expected limit <code>Expected 50.0%: r &lt; 2.3111</code>, and edges of the 68% and 95% ranges for the expected limits.</p> <pre><code> &lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; method used to compute upper limit is AsymptoticLimits\n[...]\n -- AsymptoticLimits ( CLs ) --\nObserved Limit: r &lt; 1.6281\nExpected  2.5%: r &lt; 0.9640\nExpected 16.0%: r &lt; 1.4329\nExpected 50.0%: r &lt; 2.3281\nExpected 84.0%: r &lt; 3.9800\nExpected 97.5%: r &lt; 6.6194\n\nDone in 0.01 min (cpu), 0.01 min (real)\n</code></pre> <p>By default, the limits are calculated using the CL<sub>s</sub> prescription, as noted in the output, which takes the ratio of p-values under the signal plus background and background only hypothesis. This can be altered to using the strict p-value by using the option <code>--rule CLsplusb</code> (note that <code>CLsplusb</code> is the jargon for calculating the p-value \\(p_{\\mu}\\)). You can also change the confidence level (default is 95%) to 90% using the option <code>--cl 0.9</code> or any other confidence level. 
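As a sketch combining the options just mentioned, a 90% CL limit computed with the \(p_{\mu}\) rule rather than CL<sub>s</sub> would be obtained with:</p> <pre><code>combine -M AsymptoticLimits realistic-counting-experiment.txt --rule CLsplusb --cl 0.9\n</code></pre> <p>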
You can find the full list of options for <code>AsymptoticLimits</code> using <code>--help -M AsymptoticLimits</code>.</p> <p>Warning</p> <p>You may find that Combine issues a warning that the best fit for the background-only Asimov dataset returns a nonzero value for the signal strength;</p> <p><code>WARNING: Best fit of asimov dataset is at r = 0.220944 (0.011047 times</code> <code>rMax), while it should be at zero</code></p> <p>If this happens, you should check to make sure that there are no issues with the datacard or the Asimov generation used for your setup. For details on debugging, it is recommended that you follow the simple checks used by the HIG PAG here.</p> <p>The program will also create a ROOT file <code>higgsCombineTest.AsymptoticLimits.mH120.root</code> containing a ROOT tree <code>limit</code> that contains the limit values and other bookkeeping information. The important columns are <code>limit</code> (the limit value) and <code>quantileExpected</code> (-1 for observed limit, 0.5 for median expected limit, 0.16/0.84 for the edges of the 65% interval band of expected limits, 0.025/0.975 for 95%).</p> <pre><code>$ root -l higgsCombineTest.AsymptoticLimits.mH120.root\nroot [0] limit-&gt;Scan(\"*\")\n************************************************************************************************************************************\n*    Row   *     limit *  limitErr *        mh *      syst *      iToy *     iSeed *  iChannel *     t_cpu *    t_real * quantileE *\n************************************************************************************************************************************\n*        0 * 0.9639892 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.0250000 *\n*        1 * 1.4329109 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.1599999 *\n*        2 *  2.328125 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 *       0.5 *\n*        3 * 3.9799661 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.8399999 *\n*        4 * 6.6194028 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.9750000 *\n*        5 * 1.6281188 * 0.0050568 *       120 *         1 *         0 *    123456 *         0 * 0.0035000 * 0.0055123 *        -1 *\n************************************************************************************************************************************\n</code></pre>"},{"location":"part3/commonstatsmethods/#blind-limits","title":"Blind limits","text":"<p>The <code>AsymptoticLimits</code> calculation follows the frequentist paradigm for calculating expected limits. This means that the routine will first fit the observed data, conditionally for a fixed value of r, and set the nuisance parameters to the values obtained in the fit for generating the Asimov data set. This means it calculates the post-fit or a-posteriori expected limit. 
In order to use the pre-fit nuisance parameters (to calculate an a-priori limit), you must add the option <code>--noFitAsimov</code> or <code>--bypassFrequentistFit</code>.</p> <p>For blinding the results completely (i.e not using the data) you can include the option <code>--run blind</code>.</p> <p>Warning</p> <p>While you can use <code>-t -1</code> to get blind limits, if the correct options are passed, we strongly recommend to use <code>--run blind</code>.</p>"},{"location":"part3/commonstatsmethods/#splitting-points","title":"Splitting points","text":"<p>In case your model is particularly complex, you can perform the asymptotic calculation by determining the value of CL<sub>s</sub> for a set grid of points (in <code>r</code>) and merging the results. This is done by using the option <code>--singlePoint X</code> for multiple values of X, hadd'ing the output files and reading them back in,</p> <pre><code>combine -M AsymptoticLimits realistic-counting-experiment.txt --singlePoint 0.1 -n 0.1\ncombine -M AsymptoticLimits realistic-counting-experiment.txt --singlePoint 0.2 -n 0.2\ncombine -M AsymptoticLimits realistic-counting-experiment.txt --singlePoint 0.3 -n 0.3\n...\n\nhadd limits.root higgsCombine*.AsymptoticLimits.*\n\ncombine -M AsymptoticLimits realistic-counting-experiment.txt --getLimitFromGrid limits.root\n</code></pre>"},{"location":"part3/commonstatsmethods/#asymptotic-significances","title":"Asymptotic Significances","text":"<p>The significance of a result is calculated using a ratio of profiled likelihoods, one in which the signal strength is set to 0 and the other in which it is free to float. The evaluated quantity is \\(-2\\ln[\\mathcal{L}(\\mu=0,\\hat{\\hat{\\nu}}(0))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu=0\\).</p> <p>The distribution of this test statistic can be determined using Wilks' theorem provided the number of events is large enough (i.e in the Asymptotic limit). The significance (or p-value) can therefore be calculated very quickly. The <code>Significance</code> method can be used for this.</p> <p>It is also possible to calculate the ratio of likelihoods between the freely floating signal strength to that of a fixed signal strength other than 0, by specifying it with the option <code>--signalForSignificance=X</code>.</p> <p>Info</p> <p>This calculation assumes that the signal strength can only be positive (i.e we are not interested in negative signal strengths). This behaviour can be altered by including the option <code>--uncapped</code>.</p>"},{"location":"part3/commonstatsmethods/#compute-the-observed-significance","title":"Compute the observed significance","text":"<p>The observed significance is calculated using the <code>Significance</code> method, as</p> <p><code>combine -M Significance datacard.txt</code></p> <p>The printed output will report the significance and the p-value, for example, when using the realistic-counting-experiment.txt datacard, you will see</p> <pre><code> &lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; method used is Significance\n[...]\n -- Significance --\nSignificance: 0\n       (p-value = 0.5)\nDone in 0.00 min (cpu), 0.01 min (real)\n</code></pre> <p>which is not surprising since 0 events were observed in that datacard.</p> <p>The output ROOT file will contain the significance value in the branch limit. To store the p-value instead, include the option <code>--pval</code>. 
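As an illustrative sketch putting together the options described above, the observed significance relative to a fixed signal strength of 1, stored as a p-value, could be obtained with:</p> <pre><code>combine -M Significance datacard.txt --signalForSignificance=1 --pval\n</code></pre> <p>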
The significance and p-value can be converted between one another using the RooFit functions <code>RooFit::PValueToSignificance</code> and <code>RooFit::SignificanceToPValue</code>.</p> <p>When calculating the significance, you may find it useful to resort to a brute-force fitting algorithm that scans the NLL (repeating fits until a certain tolerance is reached), bypassing MINOS, which can be activated with the option <code>bruteForce</code>. This can be tuned using the options <code>setBruteForceAlgo</code>, <code>setBruteForceTypeAndAlgo</code> and <code>setBruteForceTolerance</code>.</p>"},{"location":"part3/commonstatsmethods/#computing-the-expected-significance","title":"Computing the expected significance","text":"<p>The expected significance can be computed from an Asimov data set of signal+background. There are two options for this:</p> <ul> <li>a-posteriori expected: will depend on the observed dataset.</li> <li>a-priori expected (the default behavior): does not depend on the observed dataset, and so is a good metric for optimizing an analysis when still blinded.</li> </ul> <p>The a-priori expected significance from the Asimov dataset is calculated as</p> <pre><code>combine -M Significance datacard.txt -t -1 --expectSignal=1\n</code></pre> <p>In order to produce the a-posteriori expected significance, just generate a post-fit Asimov data set by adding the option <code>--toysFreq</code> in the command above.</p> <p>The output format is the same as for observed significances: the variable limit in the tree will be filled with the significance (or with the p-value if you also include the option <code>--pvalue</code>).</p>"},{"location":"part3/commonstatsmethods/#bayesian-limits-and-credible-regions","title":"Bayesian Limits and Credible regions","text":"<p>Bayesian calculation of limits requires the user to assume a particular prior distribution for the parameter of interest (default r). You can specify the prior using the <code>--prior</code> option; the default is a flat prior in r.</p>"},{"location":"part3/commonstatsmethods/#computing-the-observed-bayesian-limit-for-simple-models","title":"Computing the observed bayesian limit (for simple models)","text":"<p>The <code>BayesianSimple</code> method computes a Bayesian limit by performing classical numerical integration. This is very fast and accurate, but only works for simple models (a few channels and nuisance parameters).</p> <pre><code>combine -M BayesianSimple simple-counting-experiment.txt\n[...]\n\n -- BayesianSimple --\nLimit: r &lt; 0.672292 @ 95% CL\nDone in 0.04 min (cpu), 0.05 min (real)\n</code></pre> <p>The output tree will contain a single entry corresponding to the observed 95% confidence level upper limit. The confidence level can be modified to 100*X% using <code>--cl X</code>.</p>"},{"location":"part3/commonstatsmethods/#computing-the-observed-bayesian-limit-for-arbitrary-models","title":"Computing the observed bayesian limit (for arbitrary models)","text":"<p>The <code>MarkovChainMC</code> method computes a Bayesian limit by performing a Monte Carlo integration. From the statistical point of view it is identical to the <code>BayesianSimple</code> method, only the technical implementation is different. The method is slower, but can also handle complex models. For this method you can increase the accuracy of the result by increasing the number of Markov Chains, at the expense of a longer running time (option <code>--tries</code>, default is 10). 
Let's use the realistic counting experiment datacard to test the method.</p> <p>To use the MarkovChainMC method, users need to specify this method in the command line, together with the options they want to use. For instance, to set the number of times the algorithm will run with different random seeds, use option <code>--tries</code>:</p> <pre><code>combine -M MarkovChainMC realistic-counting-experiment.txt --tries 100\n[...]\n\n -- MarkovChainMC --\nLimit: r &lt; 2.20438 +/- 0.0144695 @ 95% CL (100 tries)\nAverage chain acceptance: 0.078118\nDone in 0.14 min (cpu), 0.15 min (real)\n</code></pre> <p>Again, the resulting limit tree will contain the result. You can also save the chains using the option <code>--saveChain</code>, which will then also be included in the output file.</p> <p>Exclusion regions can be made from the posterior once an ordering principle is defined to decide how to grow the contour (there is an infinite number of possible regions that contain 68% of the posterior pdf). Below is a simple example script that can be used to plot the posterior distribution from these chains and calculate the smallest such region. Note that in this example we are ignoring the burn-in. This can be added by e.g. changing <code>for i in range(mychain.numEntries()):</code> to <code>for i in range(200,mychain.numEntries()):</code> for a burn-in of 200.</p> Show example script <pre><code>\nimport ROOT\n\nrmin = 0\nrmax = 30\nnbins = 100\nCL = 0.95\nchains = \"higgsCombineTest.MarkovChainMC.blahblahblah.root\"\n\ndef findSmallestInterval(hist,CL):\n bins = hist.GetNbinsX()\n best_i = 1\n best_j = 1\n bd = bins+1\n val = 0;\n for i in range(1,bins+1):\n   integral = hist.GetBinContent(i)\n   for j in range(i+1,bins+2):\n    integral += hist.GetBinContent(j)\n    if integral &gt; CL :\n      val = integral\n      break\n   if integral &gt; CL and  j-i &lt; bd :\n     bd = j-i\n     best_j = j+1\n     best_i = i\n     val = integral\n return hist.GetBinLowEdge(best_i), hist.GetBinLowEdge(best_j), val\n\nfi_MCMC = ROOT.TFile.Open(chains)\n# Sum up all of the chains (or we could take the average limit)\nmychain=0\nfor k in fi_MCMC.Get(\"toys\").GetListOfKeys():\n    obj = k.ReadObj\n    if mychain ==0:\n        mychain = k.ReadObj().GetAsDataSet()\n    else :\n        mychain.append(k.ReadObj().GetAsDataSet())\nhist = ROOT.TH1F(\"h_post\",\";r;posterior probability\",nbins,rmin,rmax)\nfor i in range(mychain.numEntries()):\n#for i in range(200,mychain.numEntries()): burn-in of 200\n  mychain.get(i)\n  hist.Fill(mychain.get(i).getRealValue(\"r\"), mychain.weight())\nhist.Scale(1./hist.Integral())\nhist.SetLineColor(1)\nvl,vu,trueCL = findSmallestInterval(hist,CL)\nhistCL = hist.Clone()\nfor b in range(nbins):\n  if histCL.GetBinLowEdge(b+1) &lt; vl or histCL.GetBinLowEdge(b+2)&gt;vu: histCL.SetBinContent(b+1,0)\nc6a = ROOT.TCanvas()\nhistCL.SetFillColor(ROOT.kAzure-3)\nhistCL.SetFillStyle(1001)\nhist.Draw()\nhistCL.Draw(\"histFsame\")\nhist.Draw(\"histsame\")\nll = ROOT.TLine(vl,0,vl,2*hist.GetBinContent(hist.FindBin(vl))); ll.SetLineColor(2); ll.SetLineWidth(2)\nlu = ROOT.TLine(vu,0,vu,2*hist.GetBinContent(hist.FindBin(vu))); lu.SetLineColor(2); lu.SetLineWidth(2)\nll.Draw()\nlu.Draw()\n\nprint \" %g %% (%g %%) interval (target)  = %g &lt; r &lt; %g \"%(trueCL,CL,vl,vu)\n</code></pre> <p>Running the script on the output file produced for the same datacard (including the <code>--saveChain</code> option) will produce the following output</p> <pre><code>0.950975 % (0.95 %) interval (target)  = 0 &lt; r 
&lt; 2.2\n</code></pre> <p>along with a plot of the posterior distribution shown below. This is the same as the output from Combine, but the script can also be used to find lower limits (for example) or credible intervals.</p> <p></p> <p>An example to make contours when ordering by probability density can be found in bayesContours.cxx. Note that the implementation is simplistic, with no clever handling of bin sizes nor smoothing of statistical fluctuations.</p> <p>The <code>MarkovChainMC</code> algorithm has many configurable parameters, and you are encouraged to experiment with those. The default configuration might not be the best for your analysis.</p>"},{"location":"part3/commonstatsmethods/#iterations-burn-in-tries","title":"Iterations, burn-in, tries","text":"<p>Three parameters control how the MCMC integration is performed:</p> <ul> <li>the number of tries (option <code>--tries</code>): the algorithm will run multiple times with different random seeds. The truncated mean and RMS of the different results are reported. The default value is 10, which should be sufficient for a quick computation. For a more accurate result you might want to increase this number up to even ~200.</li> <li>the number of iterations (option <code>-i</code>) determines how many points are proposed to fill a single Markov Chain. The default value is 10k, and a plausible range is between 5k (for quick checks) and 20-30k for lengthy calculations. Beyond 30k, the time vs accuracy can be balanced better by increasing the number of chains (option <code>--tries</code>).</li> <li>the number of burn-in steps (option <code>-b</code>) is the number of points that are removed from the beginning of the chain before using it to compute the limit. The default is 200. If the chain is very long, we recommend to increase this value a bit (e.g. to several hundreds). Using a number of burn-in steps below 50 is likely to result in a bias towards earlier stages of the chain before a reasonable convergence.</li> </ul>"},{"location":"part3/commonstatsmethods/#proposals","title":"Proposals","text":"<p>The option <code>--proposal</code> controls the way new points are proposed to fill in the MC chain.</p> <ul> <li>uniform: pick points at random. This works well if you have very few nuisance parameters (or none at all), but normally fails if you have many.</li> <li>gaus: Use a product of independent gaussians, one for each nuisance parameter. The sigma of the gaussian for each variable is 1/5 of the range of the variable. This behaviour can be controlled using the parameter <code>--propHelperWidthRangeDivisor</code>. This proposal appears to work well for up to around 15 nuisance parameters, provided that the range of the nuisance parameters is in the range \u00b15\u03c3. This method does not work when there are no nuisance parameters.</li> <li>ortho (default): This proposal is similar to the multi-gaussian proposal. However, at every step only a single coordinate of the point is varied, so that the acceptance of the chain is high even for a large number of nuisance parameters (i.e. more than 20).</li> <li>fit: Run a fit and use the uncertainty matrix from HESSE to construct a proposal (or the one from MINOS if the option <code>--runMinos</code> is specified). This can give biased results, so this method is not recommended in general.</li> </ul> <p>If you believe there is something going wrong, e.g. 
if your chain remains stuck after accepting only a few events, the option <code>--debugProposal</code> can be used to obtain a printout of the first N proposed points. This can help you understand what is happening; for example if you have a region of the phase space with probability zero, the gaus and fit proposal can get stuck there forever.</p>"},{"location":"part3/commonstatsmethods/#computing-the-expected-bayesian-limit","title":"Computing the expected bayesian limit","text":"<p>The expected limit is computed by generating many toy MC data sets and computing the limit for each of them. This can be done passing the option <code>-t</code> . E.g. to run 100 toys with the <code>BayesianSimple</code> method, you can run</p> <pre><code>combine -M BayesianSimple datacard.txt -t 100\n</code></pre> <p>The program will print out the mean and median limit, as well as the 68% and 95% quantiles of the distributions of the limits. This time, the output ROOT tree will contain one entry per toy.</p> <p>For more heavy methods (eg the <code>MarkovChainMC</code>) you will probably want to split this calculation into multiple jobs. To do this, just run Combine multiple times specifying a smaller number of toys (as low as <code>1</code>), using a different seed to initialize the random number generator each time. The option <code>-s</code> can be used for this; if you set it to -1, the starting seed will be initialized randomly at the beginning of the job. Finally, you can merge the resulting trees with <code>hadd</code> and look at the distribution in the merged file.</p>"},{"location":"part3/commonstatsmethods/#multidimensional-bayesian-credible-regions","title":"Multidimensional bayesian credible regions","text":"<p>The <code>MarkovChainMC</code> method allows the user to produce the posterior PDF as a function of (in principle) any number of POIs. In order to do so, you first need to create a workspace with more than one parameter, as explained in the physics models section.</p> <p>For example, let us use the toy datacard data/tutorials/multiDim/toy-hgg-125.txt (counting experiment that vaguely resembles an early H\u2192\u03b3\u03b3 analysis at 125 GeV) and convert the datacard into a workspace with 2 parameters, the ggH and qqH cross sections, using <code>text2workspace</code>.</p> <pre><code>text2workspace.py data/tutorials/multiDim/toy-hgg-125.txt -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH -o workspace.root\n</code></pre> <p>Now we just run one (or more) MCMC chain(s) and save them in the output tree. By default, the nuisance parameters will be marginalized (integrated) over their PDFs. You can ignore the complaints about not being able to compute an upper limit (since for more than 1D, this is not well-defined),</p> <pre><code>combine -M MarkovChainMC workspace.root --tries 1 --saveChain -i 1000000 -m 125 -s 12345\n</code></pre> <p>The output of the Markov Chain is again a RooDataSet of weighted events distributed according to the posterior PDF (after you cut out the burn in part), so it can be used to make histograms or other distributions of the posterior PDF. 
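If more posterior samples are needed, one possible sketch is to run several chains with different seeds and merge the outputs with <code>hadd</code> (the file names below simply follow the usual <code>higgsCombine</code> naming pattern and are shown for illustration):</p> <pre><code>combine -M MarkovChainMC workspace.root --tries 1 --saveChain -i 1000000 -m 125 -s 12346\ncombine -M MarkovChainMC workspace.root --tries 1 --saveChain -i 1000000 -m 125 -s 12347\nhadd -f chains.root higgsCombineTest.MarkovChainMC.mH125.*.root\n</code></pre> <p>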
See as an example bayesPosterior2D.cxx.</p> <p>Below is an example of the output of the macro,</p> <pre><code>$ root -l higgsCombineTest.MarkovChainMC....\n.L bayesPosterior2D.cxx\nbayesPosterior2D(\"bayes2D\",\"Posterior PDF\")\n</code></pre> <p></p>"},{"location":"part3/commonstatsmethods/#computing-limits-with-toys","title":"Computing Limits with toys","text":"<p>The <code>HybridNew</code> method is used to compute either the hybrid bayesian-frequentist limits, popularly known as \"CL<sub>s</sub> of LEP or Tevatron type\", or the fully frequentist limits, which are the method currently recommended by the LHC Higgs Combination Group. Note that these methods can be resource intensive for complex models.</p> <p>It is possible to define the criterion used for setting limits using <code>--rule CLs</code> (to use the CL<sub>s</sub> criterion) or <code>--rule CLsplusb</code> (to calculate the limit using \\(p_{\\mu}\\)) and as always the confidence level desired using <code>--cl=X</code>.</p> <p>The choice of test statistic can be made via the option <code>--testStat</code>. Different methodologies for the treatment of the nuisance parameters are available. While it is possible to mix different test statistics with different nuisance parameter treatments, we strongly recommend against this. Instead, one should follow one of the following three procedures. Note that the signal strength \\(r\\) here is given the more common notation \\(\\mu\\).</p> <ul> <li> <p>LEP-style: <code>--testStat LEP --generateNuisances=1 --fitNuisances=0</code></p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mathrm{LEP}}=-2\\ln[\\mathcal{L}(\\mu=0)/\\mathcal{L}(\\mu)]\\).</li> <li>The nuisance parameters are fixed to their nominal values for the purpose of evaluating the likelihood, while for generating toys, the nuisance parameters are first randomized within their PDFs before generation of the toy.</li> </ul> </li> <li> <p>TEV-style: <code>--testStat TEV --generateNuisances=0 --generateExternalMeasurements=1 --fitNuisances=1</code></p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mathrm{TEV}}=-2\\ln[\\mathcal{L}(\\mu=0,\\hat{\\hat{\\nu}}(0))/\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))]\\), in which the nuisance parameters are profiled separately for \\(\\mu=0\\) and \\(\\mu\\).</li> <li>For the purposes of toy generation, the nuisance parameters are fixed to their post-fit values from the data (conditional on \\(\\mu\\)), while the constraint terms are randomized for the evaluation of the likelihood.</li> </ul> </li> <li> <p>LHC-style: <code>--LHCmode LHC-limits</code>, which is the shortcut for <code>--testStat LHC --generateNuisances=0 --generateExternalMeasurements=1 --fitNuisances=1</code></p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mu} = -2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu\\).</li> <li>The value of \\(q_{\\mu}\\) is set to 0 when \\(\\hat{\\mu}&gt;\\mu\\), giving a one-sided limit. Furthermore, the constraint \\(\\mu&gt;0\\) is enforced in the fit. 
This means that if the unconstrained value of \\(\\hat{\\mu}\\) would be negative, the test statistic \\(q_{\\mu}\\) is evaluated as \\(-2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(0,\\hat{\\hat{\\nu}}(0))]\\).</li> <li>For the purposes of toy generation, the nuisance parameters are fixed to their post-fit values from the data (conditionally on the value of \\(\\mu\\)), while the constraint terms are randomized in the evaluation of the likelihood.</li> </ul> </li> </ul> <p>Warning</p> <p>The recommended style is the LHC-style. Please note that this method is sensitive to the observation in data since the post-fit (after a fit to the data) values of the nuisance parameters (assuming different values of r) are used when generating the toys. For completely blind limits you can first generate a pre-fit asimov toy data set (described in the toy data generation section) and use that in place of the data.  You can use this toy by passing the argument <code>-D toysFileName.root:toys/toy_asimov</code></p> <p>While the above shortcuts are the commonly used versions, variations can be tested. The treatment of the nuisances can be changed to the so-called \"Hybrid-Bayesian\" method, which effectively integrates over the nuisance parameters. This is especially relevant when you have very few expected events in your data, and you are using those events to constrain background processes. This can be achieved by setting <code>--generateNuisances=1 --generateExternalMeasurements=0</code>. In case you want to avoid first fitting to the data to choose the nominal values you can additionally pass <code>--fitNuisances=0</code>.</p> <p>Warning</p> <p>If you have unconstrained parameters in your model (<code>rateParam</code>, or if you are using a <code>_norm</code> variable for a PDF) and you want to use the \"Hybrid-Bayesian\" method, you must declare these as <code>flatParam</code> in your datacard. When running text2workspace you must add the option <code>--X-assign-flatParam-prior</code> in the command line. This will create uniform priors for these parameters. These are needed for this method and they would otherwise not get created.</p> <p>Info</p> <p>Note that (observed and expected) values of the test statistic stored in the instances of <code>RooStats::HypoTestResult</code> when the option <code>--saveHybridResult</code> is passed are defined without the factor 2. They are therefore twice as small as the values given by the formulas above. This factor is however included automatically by all plotting scripts supplied within the Combine package. If you use your own plotting scripts, you need to make sure to incorporate the factor 2.</p>"},{"location":"part3/commonstatsmethods/#simple-models","title":"Simple models","text":"<p>For relatively simple models, the observed and expected limits can be calculated interactively. Since the LHC-style is the recommended set of options for calculating limits using toys, we will use that in this section. 
However, the same procedure can be followed with the other sets of options.</p> <pre><code>combine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits\n</code></pre> Show output <pre><code>\n&lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; using the Profile Likelihood test statistics modified for upper limits (Q_LHC)\n&gt;&gt;&gt; method used is HybridNew\n&gt;&gt;&gt; random number generator seed is 123456\nComputing results starting from observation (a-posteriori)\nSearch for upper limit to the limit\n  r = 20 +/- 0\n    CLs = 0 +/- 0\n    CLs      = 0 +/- 0\n    CLb      = 0.264 +/- 0.0394263\n    CLsplusb = 0 +/- 0\n\nSearch for lower limit to the limit\nNow doing proper bracketing &amp; bisection\n  r = 10 +/- 10\n    CLs = 0 +/- 0\n    CLs      = 0 +/- 0\n    CLb      = 0.288 +/- 0.0405024\n    CLsplusb = 0 +/- 0\n\n  r = 5 +/- 5\n    CLs = 0 +/- 0\n    CLs      = 0 +/- 0\n    CLb      = 0.152 +/- 0.0321118\n    CLsplusb = 0 +/- 0\n\n  r = 2.5 +/- 2.5\n    CLs = 0.0192308 +/- 0.0139799\n    CLs = 0.02008 +/- 0.0103371\n    CLs = 0.0271712 +/- 0.00999051\n    CLs = 0.0239524 +/- 0.00783634\n    CLs      = 0.0239524 +/- 0.00783634\n    CLb      = 0.208748 +/- 0.0181211\n    CLsplusb = 0.005 +/- 0.00157718\n\n  r = 2.00696 +/- 1.25\n    CLs = 0.0740741 +/- 0.0288829\n    CLs = 0.0730182 +/- 0.0200897\n    CLs = 0.0694474 +/- 0.0166468\n    CLs = 0.0640182 +/- 0.0131693\n    CLs = 0.0595 +/- 0.010864\n    CLs = 0.0650862 +/- 0.0105575\n    CLs = 0.0629286 +/- 0.00966301\n    CLs = 0.0634945 +/- 0.00914091\n    CLs = 0.060914 +/- 0.00852667\n    CLs = 0.06295 +/- 0.00830083\n    CLs = 0.0612758 +/- 0.00778181\n    CLs = 0.0608142 +/- 0.00747001\n    CLs = 0.0587169 +/- 0.00697039\n    CLs = 0.0591432 +/- 0.00678587\n    CLs = 0.0599683 +/- 0.00666966\n    CLs = 0.0574868 +/- 0.00630809\n    CLs = 0.0571451 +/- 0.00608177\n    CLs = 0.0553836 +/- 0.00585531\n    CLs = 0.0531612 +/- 0.0055234\n    CLs = 0.0516837 +/- 0.0052607\n    CLs = 0.0496776 +/- 0.00499783\n    CLs      = 0.0496776 +/- 0.00499783\n    CLb      = 0.216635 +/- 0.00801002\n    CLsplusb = 0.0107619 +/- 0.00100693\n\nTrying to move the interval edges closer\n  r = 1.00348 +/- 0\n    CLs = 0.191176 +/- 0.0459911\n    CLs      = 0.191176 +/- 0.0459911\n    CLb      = 0.272 +/- 0.0398011\n    CLsplusb = 0.052 +/- 0.00992935\n\n  r = 1.50522 +/- 0\n    CLs = 0.125 +/- 0.0444346\n    CLs = 0.09538 +/- 0.0248075\n    CLs = 0.107714 +/- 0.0226712\n    CLs = 0.103711 +/- 0.018789\n    CLs = 0.0845069 +/- 0.0142341\n    CLs = 0.0828468 +/- 0.0126789\n    CLs = 0.0879647 +/- 0.0122332\n    CLs      = 0.0879647 +/- 0.0122332\n    CLb      = 0.211124 +/- 0.0137494\n    CLsplusb = 0.0185714 +/- 0.00228201\n\n  r = 1.75609 +/- 0\n    CLs = 0.0703125 +/- 0.0255807\n    CLs = 0.0595593 +/- 0.0171995\n    CLs = 0.0555271 +/- 0.0137075\n    CLs = 0.0548727 +/- 0.0120557\n    CLs = 0.0527832 +/- 0.0103348\n    CLs = 0.0555828 +/- 0.00998248\n    CLs = 0.0567971 +/- 0.00923449\n    CLs = 0.0581822 +/- 0.00871417\n    CLs = 0.0588835 +/- 0.00836245\n    CLs = 0.0594035 +/- 0.00784761\n    CLs = 0.0590583 +/- 0.00752672\n    CLs = 0.0552067 +/- 0.00695542\n    CLs = 0.0560446 +/- 0.00679746\n    CLs = 0.0548083 +/- 0.0064351\n    CLs = 0.0566998 +/- 0.00627124\n    CLs = 0.0561576 +/- 0.00601888\n    CLs = 0.0551643 +/- 0.00576338\n    CLs = 0.0583584 +/- 0.00582854\n    CLs = 0.0585691 +/- 0.0057078\n    CLs = 0.0599114 +/- 0.00564585\n    CLs = 0.061987 +/- 0.00566905\n    CLs = 0.061836 +/- 
0.00549856\n    CLs = 0.0616849 +/- 0.0053773\n    CLs = 0.0605352 +/- 0.00516844\n    CLs = 0.0602028 +/- 0.00502875\n    CLs = 0.058667 +/- 0.00486263\n    CLs      = 0.058667 +/- 0.00486263\n    CLb      = 0.222901 +/- 0.00727258\n    CLsplusb = 0.0130769 +/- 0.000996375\n\n  r = 2.25348 +/- 0\n    CLs = 0.0192308 +/- 0.0139799\n    CLs = 0.0173103 +/- 0.00886481\n    CLs      = 0.0173103 +/- 0.00886481\n    CLb      = 0.231076 +/- 0.0266062\n    CLsplusb = 0.004 +/- 0.001996\n\n  r = 2.13022 +/- 0\n    CLs = 0.0441176 +/- 0.0190309\n    CLs = 0.0557778 +/- 0.01736\n    CLs = 0.0496461 +/- 0.0132776\n    CLs = 0.0479048 +/- 0.0114407\n    CLs = 0.0419333 +/- 0.00925719\n    CLs = 0.0367934 +/- 0.0077345\n    CLs = 0.0339814 +/- 0.00684844\n    CLs = 0.03438 +/- 0.0064704\n    CLs = 0.0337633 +/- 0.00597315\n    CLs = 0.0321262 +/- 0.00551608\n    CLs      = 0.0321262 +/- 0.00551608\n    CLb      = 0.230342 +/- 0.0118665\n    CLsplusb = 0.0074 +/- 0.00121204\n\n  r = 2.06859 +/- 0\n    CLs = 0.0357143 +/- 0.0217521\n    CLs = 0.0381957 +/- 0.0152597\n    CLs = 0.0368622 +/- 0.0117105\n    CLs = 0.0415097 +/- 0.0106676\n    CLs = 0.0442816 +/- 0.0100457\n    CLs = 0.0376644 +/- 0.00847235\n    CLs = 0.0395133 +/- 0.0080427\n    CLs = 0.0377625 +/- 0.00727262\n    CLs = 0.0364415 +/- 0.00667827\n    CLs = 0.0368015 +/- 0.00628517\n    CLs = 0.0357251 +/- 0.00586442\n    CLs = 0.0341604 +/- 0.00546373\n    CLs = 0.0361935 +/- 0.00549648\n    CLs = 0.0403254 +/- 0.00565172\n    CLs = 0.0408613 +/- 0.00554124\n    CLs = 0.0416682 +/- 0.00539651\n    CLs = 0.0432645 +/- 0.00538062\n    CLs = 0.0435229 +/- 0.00516945\n    CLs = 0.0427647 +/- 0.00501322\n    CLs = 0.0414894 +/- 0.00479711\n    CLs      = 0.0414894 +/- 0.00479711\n    CLb      = 0.202461 +/- 0.00800632\n    CLsplusb = 0.0084 +/- 0.000912658\n\n\n -- HybridNew, before fit --\nLimit: r &lt; 2.00696 +/- 1.25 [1.50522, 2.13022]\nWarning in : Could not create the Migrad minimizer. Try using the minimizer Minuit\nFit to 5 points: 1.91034 +/- 0.0388334\n\n -- Hybrid New --\nLimit: r &lt; 1.91034 +/- 0.0388334 @ 95% CL\nDone in 0.01 min (cpu), 4.09 min (real)\nFailed to delete temporary file roostats-Sprxsw.root: No such file or directory\n\n<p></p>\n\n<p>The result stored in the limit branch of the output tree will be the upper limit (and its error, stored in limitErr). The default behaviour will be, as above, to search for the upper limit on r. However, the values of \\(p_{\\mu}, p_{b}\\) and CL<sub>s</sub> can be calculated for a particular value r=X by specifying the option <code>--singlePoint=X</code>. In this case, the value stored in the branch limit will be the value of CL<sub>s</sub> (or \\(p_{\\mu}\\)) (see the FAQ section).</p>"},{"location":"part3/commonstatsmethods/#expected-limits","title":"Expected Limits","text":"<p>For simple models, we can run interactively 5 times to compute the median expected and the 68% and 95% central interval boundaries. For this, we can use the <code>HybridNew</code> method with the same options as for the observed limit, but adding a <code>--expectedFromGrid=&lt;quantile&gt;</code>. Here, the quantile should be set to 0.5 for the median, 0.84 for the +ve side of the 68% band, 0.16 for the -ve side of the 68% band, 0.975 for the +ve side of the 95% band, and 0.025 for the -ve side of the 95% band.</p>\n<p>The output file will contain the value of the quantile in the branch quantileExpected. 
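For example, the five invocations for the median and the 68% and 95% band edges might look like the following (a sketch using the options described above):</p>\n<pre><code>combine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits --expectedFromGrid=0.025\ncombine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits --expectedFromGrid=0.16\ncombine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits --expectedFromGrid=0.5\ncombine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits --expectedFromGrid=0.84\ncombine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits --expectedFromGrid=0.975\n</code></pre>\n<p>Each of these runs stores the requested quantile in the quantileExpected branch. 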
This branch can therefore be used to separate the points.</p>"},{"location":"part3/commonstatsmethods/#accuracy","title":"Accuracy","text":"<p>The search for the limit is performed using an adaptive algorithm, terminating when the estimate of the limit value is below some limit or when the precision cannot be improved further with the specified options. The options controlling this behaviour are:</p>\n<ul>\n<li><code>rAbsAcc</code>, <code>rRelAcc</code>: define the accuracy on the limit at which the search stops. The default values are 0.1 and 0.05 respectively, meaning that the search is stopped when \u0394r &lt; 0.1 or \u0394r/r &lt; 0.05.</li>\n<li><code>clsAcc</code>: this determines the absolute accuracy up to which the CLs values are computed when searching for the limit. The default is 0.5%. Raising the accuracy above this value will significantly increase the time needed to run the algorithm, as you need N<sup>2</sup> more toys to improve the accuracy by a factor N. You can consider increasing this value if you are computing limits with a larger CL (e.g. 90% or 68%). Note that if you are using the <code>CLsplusb</code> rule, this parameter will control the uncertainty on \\(p_{\\mu}\\) rather than CL<sub>s</sub>.</li>\n<li><code>T</code> or <code>toysH</code>: controls the minimum number of toys that are generated for each point. The default value of 500 should be sufficient when computing the limit at 90-95% CL. You can decrease this number if you are computing limits at 68% CL, or increase it if you are using 99% CL.</li>\n</ul>\n<p>Note, to further improve the accuracy when searching for the upper limit, Combine will also fit an exponential function to several of the points and interpolate to find the crossing.</p>"},{"location":"part3/commonstatsmethods/#complex-models","title":"Complex models","text":"<p>For complicated models, it is best to produce a grid of test statistic distributions at various values of the signal strength, and use it to compute the observed and expected limit and central intervals. This approach is convenient for complex models, since the grid of points can be distributed across any number of jobs. In this approach we will store the distributions of the test statistic at different values of the signal strength using the option <code>--saveHybridResult</code>. The distribution at a single value of r=X can be determined by</p>\n<pre><code>combine datacard.txt -M HybridNew --LHCmode LHC-limits --singlePoint X --saveToys --saveHybridResult -T 500 --clsAcc 0\n</code></pre>\n\n<p>Warning</p>\n<p>We have specified the accuracy here by including <code>--clsAcc=0</code>, which turns off adaptive sampling, and specifying the number of toys to be 500 with the <code>-T N</code> option. For complex models, it may be necessary to internally split the toys over a number of instances of <code>HybridNew</code> using the option <code>--iterations I</code>. The total number of toys will be the product I*N.</p>\n\n<p>The above can be repeated several times, in parallel, to build the distribution of the test statistic (passing the random seed option <code>-s -1</code>). 
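A minimal sketch of such a grid, here over nine values of r with an arbitrary label passed to <code>-n</code> to keep the output files apart, could be:</p>\n<pre><code>for r in 1.4 1.5 1.6 1.7 1.8 1.9 2.0 2.1 2.2; do\n    combine datacard.txt -M HybridNew --LHCmode LHC-limits --singlePoint $r --saveToys --saveHybridResult -T 500 --clsAcc 0 -s -1 -n .grid.$r\ndone\n</code></pre>\n<p>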
Once all of the distributions have been calculated, the resulting output files can be merged into one using hadd, and read back to calculate the limit, specifying the merged file with <code>--grid=merged.root</code>.</p>\n<p>The observed limit can be obtained with</p>\n<pre><code>combine datacard.txt -M HybridNew --LHCmode LHC-limits --readHybridResults --grid=merged.root\n</code></pre>\n<p>and similarly, the median expected and quantiles can be determined using</p>\n<pre><code>combine datacard.txt -M HybridNew --LHCmode LHC-limits --readHybridResults --grid=merged.root --expectedFromGrid &lt;quantile&gt;\n</code></pre>\n<p>substituting <code>&lt;quantile&gt;</code> with 0.5 for the median, 0.84 for the +ve side of the 68% band, 0.16 for the -ve side of the 68% band, 0.975 for the +ve side of the 95% band, and 0.025 for the -ve side of the 95% band. </p>\n\n<p>Warning</p>\n<p>Make sure that if you specified a particular mass value (<code>-m</code> or <code>--mass</code>) in the commands for calculating the toys, you also specify the same mass when reading in the grid of distributions.</p>\n\n<p>You should note that Combine will update the grid to improve the accuracy on the extracted limit by default. If you want to avoid this, you can use the option <code>--noUpdateGrid</code>. This will mean only the toys/points you produced in the grid will be used to compute the limit.</p>\n\n<p>Warning</p>\n<p>This option should not be used with <code>--expectedFromGrid</code> if you did not create the grid with the same option. The reason is that the value of the test statistic that is used to calculate the limit will not be properly calculated if <code>--noUpdateGrid</code> is included. In future versions of the tool, this option will be ignored if using <code>--expectedFromGrid</code>. </p>\n\n<p>The splitting of the jobs can be left to the user's preference. However, users may wish to use <code>combineTool.py</code> for automating this, as described in the section on combineTool for job submission.</p>"},{"location":"part3/commonstatsmethods/#plotting","title":"Plotting","text":"<p>A plot of the CL<sub>s</sub> (or \\(p_{\\mu}\\)) as a function of r, which is used to find the crossing, can be produced using the option <code>--plot=limit_scan.png</code>. This can be useful for judging if the chosen grid was sufficient for determining the upper limit.</p>\n<p>If we use our realistic-counting-experiment.txt datacard and generate a grid of points \\(r \\in [1.4,2.2]\\) in steps of 0.1, with 5000 toys for each point, the plot of the observed CL<sub>s</sub> vs r should look like the following,</p>\n<p></p>\n<p>You should judge in each case whether the limit is accurate given the spacing of the points and the precision of CL<sub>s</sub> at each point. If it is not sufficient, simply generate more points closer to the limit and/or more toys at each point.</p>\n<p>The distributions of the test statistic can also be plotted, at each value in the grid, using</p>\n<pre><code>python test/plotTestStatCLs.py --input mygrid.root --poi r --val all --mass MASS\n</code></pre>\n<p>The resulting output file will contain a canvas showing the distribution of the test statistic for the background-only and signal+background hypotheses at each value of r. 
Use <code>--help</code> to see more options for this script.</p>\n\n<p>Info</p>\n<p>If you used the TEV or LEP style test statistic (using the commands as described above), then you should include the option <code>--doublesided</code>, which will also take care of defining the correct integrals for \\(p_{\\mu}\\) and \\(p_{b}\\). Click on the examples below to see what a typical output of this plotting tool will look like when using the LHC test statistic, or the TEV test statistic.</p>\n\n\nqLHC test stat example\n<p></p>\n\n\nqTEV test stat example\n<p></p>"},{"location":"part3/commonstatsmethods/#computing-significances-with-toys","title":"Computing Significances with toys","text":"<p>Computation of the expected significance with toys is a two-step procedure: first you need to run one or more jobs to construct the expected distribution of the test statistic. As for setting limits, there are a number of different possible configurations for generating toys. However, we will use the most commonly used option,</p>\n<ul>\n<li>LHC-style: <code>--LHCmode LHC-significance</code>, which is the shortcut for <code>--testStat LHC --generateNuisances=0 --generateExternalMeasurements=1 --fitNuisances=1 --significance</code><ul>\n<li>The test statistic is defined using the ratio of likelihoods \\(q_{0} = -2\\ln[\\mathcal{L}(\\mu=0,\\hat{\\hat{\\nu}}(0))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu=0\\).</li>\n<li>The value of the test statistic is set to 0 when \\(\\hat{\\mu}&lt;0\\).</li>\n<li>For the purposes of toy generation, the nuisance parameters are fixed to their post-fit values from the data assuming no signal, while the constraint terms are randomized for the evaluation of the likelihood.</li>\n</ul>\n</li>\n</ul>"},{"location":"part3/commonstatsmethods/#observed-significance","title":"Observed significance","text":"<p>To construct the distribution of the test statistic, the following command should be run as many times as necessary</p>\n<pre><code>combine -M HybridNew datacard.txt --LHCmode LHC-significance  --saveToys --fullBToys --saveHybridResult -T toys -i iterations -s seed\n</code></pre>\n<p>with different seeds, or using <code>-s -1</code> for random seeds, then merge all those results into a single ROOT file with <code>hadd</code>. The toys can then be read back into Combine using the option <code>--toysFile=input.root --readHybridResult</code>.</p>\n<p>The observed significance can be calculated as</p>\n<pre><code>combine -M HybridNew datacard.txt --LHCmode LHC-significance --readHybridResult --toysFile=input.root [--pvalue ]\n</code></pre>\n<p>where the option <code>--pvalue</code> will replace the result stored in the limit branch of the output tree with the p-value instead of the significance.</p>"},{"location":"part3/commonstatsmethods/#expected-significance-assuming-some-signal","title":"Expected significance, assuming some signal","text":"<p>The expected significance, assuming a signal with r=X, can be calculated by including the option <code>--expectSignal X</code> when generating the distribution of the test statistic and using the option <code>--expectedFromGrid=0.5</code> when calculating the significance for the median. 
To get the \u00b11\u03c3 bands, use 0.16 and 0.84 instead of 0.5, and so on.</p>\n<p>The total number of background toys needs to be large enough to compute the value of the significance, but you need fewer signal toys (especially when you are only computing the median expected significance). For large significances, you can run most of the toys without the <code>--fullBToys</code> option, which will be about a factor of 2 faster. Only a small part of the toys needs to be run with that option turned on.</p>\n<p>As with calculating limits with toys, these jobs can be submitted to the grid or batch systems with the help of the <code>combineTool.py</code> script, as described in the section on combineTool for job submission.</p>"},{"location":"part3/commonstatsmethods/#goodness-of-fit-tests","title":"Goodness of fit tests","text":"<p>The <code>GoodnessOfFit</code> method can be used to evaluate how compatible the observed data are with the model PDF.</p>\n<p>This method implements several algorithms, and will compute a goodness-of-fit indicator for the chosen algorithm and the data. The procedure is therefore to first run on the real data</p>\n<pre><code>combine -M GoodnessOfFit datacard.txt --algo=&lt;some-algo&gt;\n</code></pre>\n<p>and then to run on many toy MC data sets to determine the distribution of the goodness-of-fit indicator</p>\n<pre><code>combine -M GoodnessOfFit datacard.txt --algo=&lt;some-algo&gt; -t &lt;number-of-toys&gt; -s &lt;seed&gt;\n</code></pre>\n<p>When computing the goodness-of-fit, by default the signal strength is left floating in the fit, so that the measure is independent of the presence or absence of a signal. It is possible to fix the signal strength to some value by passing the option <code>--fixedSignalStrength=&lt;value&gt;</code>.</p>\n<p>The following algorithms are implemented:</p>\n<ul>\n<li>\n<p><code>saturated</code>: Compute a goodness-of-fit measure for binned fits based on the saturated model, as prescribed by the Statistics Committee (note). This quantity is similar to a chi-square, but can be computed for an arbitrary combination of binned channels with arbitrary constraints.</p>\n</li>\n<li>\n<p><code>KS</code>: Compute a goodness-of-fit measure for binned fits using the Kolmogorov-Smirnov test. It is based on the largest difference between the cumulative distribution function and the empirical distribution function of any bin.</p>\n</li>\n<li>\n<p><code>AD</code>: Compute a goodness-of-fit measure for binned fits using the Anderson-Darling test. It is based on the integral of the difference between the cumulative distribution function and the empirical distribution function over all bins. It also gives the tail ends of the distribution a higher weighting.</p>\n</li>\n</ul>\n<p>The output tree will contain a branch called <code>limit</code>, which contains the value of the test statistic in each toy. You can make a histogram of this test statistic \\(t\\). From the distribution that is obtained in this way (\\(f(t)\\)) and the single value obtained by running on the observed data (\\(t_{0}\\)) you can calculate the p-value \\(p = \\int_{t_{0}}^{\\infty} f(t)\\,dt\\). Note: in rare cases the test statistic value for the toys can be undefined (for AD and KS). In this case we set the test statistic value to -1. When plotting the test statistic distribution, those toys should be excluded. 
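<p>If you want to compute this p-value directly, the sketch below shows one way to do it with PyROOT. It is a minimal illustration rather than part of Combine: the file names <code>data_run.root</code> (the run on the observed data) and <code>toys_run.root</code> (the <code>hadd</code>-ed toy runs) are placeholders, matching the names used for the collection script described below.</p>\n<pre><code>import ROOT\n\n# Observed goodness-of-fit value: single entry in the limit tree of the run on data\nf_data = ROOT.TFile.Open(\"data_run.root\")\nt_obs = None\nfor entry in f_data.Get(\"limit\"):\n    t_obs = entry.limit\n\n# Toy distribution f(t): one entry per toy in the limit tree of the merged toy runs\nf_toys = ROOT.TFile.Open(\"toys_run.root\")\nn_tot, n_above = 0, 0\nfor entry in f_toys.Get(\"limit\"):\n    if entry.limit == -1:\n        continue  # skip toys with an undefined test statistic, as discussed above\n    n_tot += 1\n    if entry.limit &gt;= t_obs:\n        n_above += 1\n\nprint(\"p-value = %.4f (%d of %d toys at or above the observed value)\" % (float(n_above) / n_tot, n_above, n_tot))\n</code></pre>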
This is automatically taken care of if you use the GoF collection script which is described below.</p>\n<p>When generating toys, the default behavior will be used. See the section on toy generation for options that control how nuisance parameters are generated and fitted in these tests. It is recommended to use frequentist toys (<code>--toysFreq</code>) when running the <code>saturated</code> model, and the default toys for the other two tests.</p>\n<p>Further goodness-of-fit methods could be added on request, especially if volunteers are available to code them.\nThe output limit tree will contain the value of the test statistic in each toy (or the data).</p>\n\n<p>Warning</p>\n<p>The above algorithms are all concerned with one-sample tests. For two-sample tests, you can follow an example CMS HIN analysis described in this Twiki.</p>"},{"location":"part3/commonstatsmethods/#masking-analysis-regions-in-the-saturated-model","title":"Masking analysis regions in the saturated model","text":"<p>For analyses that employ a simultaneous fit across signal and control regions, it may be useful to mask one or more analysis regions, either when the likelihood is maximized (fit) or when the test statistic is computed. This can be done by using the options <code>--setParametersForFit</code> and <code>--setParametersForEval</code>, respectively. The former will set parameters before each fit, while the latter is used to set parameters after each fit, but before the NLL is evaluated. Note, of course, that if the parameter in the list is floating, it will still be floating in each fit. Therefore, it will not affect the results when using <code>--setParametersForFit</code>.</p>\n<p>A realistic example for a binned shape analysis performed in one signal region and two control samples can be found in this directory of the Combine package Datacards-shape-analysis-multiple-regions.</p>\n<p>First of all, one needs to combine the individual datacards to build a single model, and to introduce the channel masking variables as follows:</p>\n<pre><code>combineCards.py signal_region.txt dimuon_control_region.txt singlemuon_control_region.txt &gt; combined_card.txt\ntext2workspace.py combined_card.txt --channel-masks\n</code></pre>\n<p>More information about the channel masking can be found in this\nsection Channel Masking. The saturated test statistic value for a simultaneous fit across all the analysis regions can be calculated as:</p>\n<pre><code>combine -M GoodnessOfFit -d combined_card.root --algo=saturated -n _result_sb\n</code></pre>\n<p>In this case, signal and control regions are included in both the fit and in the evaluation of the test statistic, and the signal strength is freely floating. This measures the compatibility between the signal+background fit and the observed data. Moreover, it can be interesting to assess the level of compatibility between the observed data in all the regions and the background prediction obtained by only fitting the control regions (CR-only fit). This can be evaluated as follows:</p>\n<pre><code>combine -M GoodnessOfFit -d combined_card.root --algo=saturated -n _result_bonly_CRonly --setParametersForFit mask_ch1=1 --setParametersForEval mask_ch1=0 --freezeParameters r --setParameters r=0\n</code></pre>\n<p>where the signal strength is frozen and the signal region is not considered in the fit (<code>--setParametersForFit mask_ch1=1</code>), but it is included in the test statistic computation (<code>--setParametersForEval mask_ch1=0</code>). 
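<p>If you are unsure which masking variables were created by <code>text2workspace.py --channel-masks</code> (and hence what to pass to <code>--setParametersForFit</code> and <code>--setParametersForEval</code>), a minimal PyROOT sketch such as the one below can list them. It assumes the default workspace name <code>w</code> and the <code>mask_&lt;channel&gt;</code> naming seen in the commands above.</p>\n<pre><code>import ROOT\n\nf = ROOT.TFile.Open(\"combined_card.root\")\nw = f.Get(\"w\")  # default workspace name produced by text2workspace.py\n\n# Print every variable whose name starts with \"mask_\", together with its current value\nfor var in w.allVars():\n    if var.GetName().startswith(\"mask_\"):\n        print(\"%s = %g\" % (var.GetName(), var.getVal()))\n</code></pre>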
To show the differences between the two models being tested, one can perform a fit to the data using the FitDiagnostics method as:</p>\n<pre><code>combine -M FitDiagnostics -d combined_card.root -n _fit_result --saveShapes --saveWithUncertainties\ncombine -M FitDiagnostics -d combined_card.root -n _fit_CRonly_result --saveShapes --saveWithUncertainties --setParameters mask_ch1=1\n</code></pre>\n<p>By taking the total background, the total signal, and the data shapes from the FitDiagnostics output, we can compare the post-fit predictions from the S+B fit (first case) and the CR-only fit (second case) with the observation as reported below:</p>\n\nFitDiagnostics S+B fit\n<p></p>\n\n\nFitDiagnostics CR-only fit\n<p></p>\n\n<p>To compute a p-value for the two results, one needs to compare the observed goodness-of-fit value previously computed with the expected distribution of the test statistic obtained in toys:</p>\n<pre><code>    combine -M GoodnessOfFit combined_card.root --algo=saturated -n result_toy_sb --toysFrequentist -t 500\n    combine -M GoodnessOfFit -d combined_card.root --algo=saturated -n _result_bonly_CRonly_toy --setParametersForFit mask_ch1=1 --setParametersForEval mask_ch1=0 --freezeParameters r --setParameters r=0,mask_ch1=1 -t 500 --toysFrequentist\n</code></pre>\n<p>where the former gives the result for the S+B model, while the latter gives the test statistic for the CR-only fit. The option <code>--setParameters r=0,mask_ch1=1</code> is needed to ensure that toys are thrown using the nuisance parameters estimated from the CR-only fit to the data. The comparison between the observation and the expected distribution should look like the following two plots:</p>\n\nGoodness-of-fit for S+B model\n<p></p>\n\n\nGoodness-of-fit for CR-only model\n<p></p>"},{"location":"part3/commonstatsmethods/#making-a-plot-of-the-gof-test-statistic-distribution","title":"Making a plot of the GoF test statistic distribution","text":"<p>You can use the <code>combineTool.py</code> script to run batch jobs or jobs on the grid (see here) and produce a plot of the results. Once the jobs have completed, you can hadd them together and run (e.g. for the saturated model),</p>\n<pre><code>combineTool.py -M CollectGoodnessOfFit --input data_run.root toys_run.root -m 125.0 -o gof.json\nplotGof.py gof.json --statistic saturated --mass 125.0 -o gof_plot --title-right=\"my label\"\n</code></pre>"},{"location":"part3/commonstatsmethods/#channel-compatibility","title":"Channel Compatibility","text":"<p>The <code>ChannelCompatibilityCheck</code> method can be used to evaluate how compatible the measurements of the signal strength from the separate channels of a combination are with each other.</p>\n<p>The method performs two fits of the data, first with the nominal model in which all channels are assumed to have the same signal strength modifier \\(r\\), and then another allowing separate signal strengths \\(r_{i}\\) in each channel. A chisquare-like quantity is computed as \\(-2 \\ln \\mathcal{L}(\\mathrm{data}|r)/\\mathcal{L}(\\mathrm{data}|\\{r_{i}\\}_{i=1}^{N_{\\mathrm{chan}}})\\). Just like for the goodness-of-fit indicators, the expected distribution of this quantity under the nominal model can be computed from toy MC data sets.</p>\n<p>By default, the signal strength is kept floating in the fit with the nominal model. 
It can however be fixed to a given value by passing the option <code>--fixedSignalStrength=&lt;value&gt;</code>.</p>\n<p>In the default model built from the datacards, the signal strengths in all channels are constrained to be non-negative. One can allow negative signal strengths in the fits by changing the bound on the variable (option <code>--rMin=&lt;value&gt;</code>), which should make the quantity more chisquare-like under the hypothesis of zero signal; this however can create issues in channels with small backgrounds, since total expected yields and PDFs in each channel must be positive.</p>\n<p>Optionally, channels can be grouped together by using the option <code>-g &lt;name_fragment&gt;</code>, where <code>&lt;name_fragment&gt;</code> is a string which is common to all channels to be grouped together. The <code>-g</code> option can also be used to set the range for each POI separately via <code>-g &lt;name&gt;=&lt;min&gt;,&lt;max&gt;</code>.</p>\n<p>When run with a verbosity of 1, as is the default, the program also prints out the best fit signal strengths in all channels. As the fit to all channels is done simultaneously, the correlations between the systematic uncertainties are taken into account. Therefore, these results can differ from the ones obtained when fitting each channel separately.</p>\n<p>Below is an example output from Combine,</p>\n<pre><code>$ combine -M ChannelCompatibilityCheck comb_hww.txt -m 160 -n HWW\n &lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; method used to compute upper limit is ChannelCompatibilityCheck\n&gt;&gt;&gt; random number generator seed is 123456\n\nSanity checks on the model: OK\nComputing limit starting from observation\n\n--- ChannelCompatibilityCheck ---\nNominal fit : r = 0.3431 -0.1408/+0.1636\nAlternate fit: r = 0.4010 -0.2173/+0.2724 in channel hww_0jsf_shape\nAlternate fit: r = 0.2359 -0.1854/+0.2297 in channel hww_0jof_shape\nAlternate fit: r = 0.7669 -0.4105/+0.5380 in channel hww_1jsf_shape\nAlternate fit: r = 0.3170 -0.3121/+0.3837 in channel hww_1jof_shape\nAlternate fit: r = 0.0000 -0.0000/+0.5129 in channel hww_2j_cut\nChi2-like compatibility variable: 2.16098\nDone in 0.08 min (cpu), 0.08 min (real)\n</code></pre>\n<p>The output tree will contain the value of the compatibility (chi-square variable) in the limit branch. 
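<p>For a quick, approximate number you can also treat this compatibility variable as a \\(\\chi^{2}\\) with \\(N_{\\mathrm{chan}}-1\\) degrees of freedom. Keep in mind that this is only an approximation (the quantity is merely \"chisquare-like\", and the boundaries on the signal strengths can distort it), so the toy-based procedure mentioned above remains the reliable way to calibrate it. A minimal sketch, using the value and the five channels from the example output above:</p>\n<pre><code>import ROOT\n\nchi2_like = 2.16098   # Chi2-like compatibility variable printed above\nn_channels = 5        # number of channels in the combination\n\n# Approximate p-value assuming a chi-square distribution with N_chan - 1 degrees of freedom\np_approx = ROOT.TMath.Prob(chi2_like, n_channels - 1)\nprint(\"approximate compatibility p-value = %.2f\" % p_approx)\n</code></pre>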
If the option <code>--saveFitResult</code> is specified, the output ROOT file also contains two RooFitResult objects fit_nominal and fit_alternate with the results of the two fits.</p>\n<p>This can be read and used to extract the best fit value for each channel, and the overall best fit value, using</p>\n<pre><code>$ root -l\nTFile* _file0 = TFile::Open(\"higgsCombineTest.ChannelCompatibilityCheck.mH120.root\");\nfit_alternate-&gt;floatParsFinal().selectByName(\"*ChannelCompatibilityCheck*\")-&gt;Print(\"v\");\nfit_nominal-&gt;floatParsFinal().selectByName(\"r\")-&gt;Print(\"v\");\n</code></pre>\n<p>The macro cccPlot.cxx can be used to produce a comparison plot of the best fit signal strengths from all channels.</p>"},{"location":"part3/commonstatsmethods/#likelihood-fits-and-scans","title":"Likelihood Fits and Scans","text":"<p>The <code>MultiDimFit</code> method can be used to perform multi-dimensional fits and likelihood-based scans/contours using models with several parameters of interest.</p>\n<p>Taking a toy datacard data/tutorials/multiDim/toy-hgg-125.txt (counting experiment which vaguely resembles an early H\u2192\u03b3\u03b3 analysis at 125 GeV), we need to convert the datacard into a workspace with 2 parameters, the ggH and qqH cross sections:</p>\n<pre><code>text2workspace.py toy-hgg-125.txt -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH\n</code></pre>\n<p>A number of different algorithms can be used with the option <code>--algo &lt;algo&gt;</code>,</p>\n<ul>\n<li>\n<p><code>none</code> (default):  Perform a maximum likelihood fit <code>combine -M MultiDimFit toy-hgg-125.root</code>; The output ROOT tree will contain two columns, one for each parameter, with the fitted values.</p>\n</li>\n<li>\n<p><code>singles</code>: Perform a fit of each parameter separately, treating the other parameters of interest as unconstrained nuisance parameters: <code>combine -M MultiDimFit toy-hgg-125.root --algo singles --cl=0.68</code> . The output ROOT tree will contain two columns, one for each parameter, with the fitted values; there will be one row with the best fit point (and <code>quantileExpected</code> set to -1) and two rows for each fitted parameter, where the corresponding column will contain the maximum and minimum of that parameter in the 68% CL interval, according to a one-dimensional chi-square (i.e. uncertainties on each fitted parameter do not increase when adding other parameters if they are uncorrelated). Note that if you run, for example, with <code>--cminDefaultMinimizerStrategy=0</code>, these uncertainties will be derived from the Hessian, while <code>--cminDefaultMinimizerStrategy=1</code> will invoke Minos to derive them.</p>\n</li>\n<li>\n<p><code>cross</code>:  Perform a joint fit of all parameters: <code>combine -M MultiDimFit toy-hgg-125.root --algo=cross --cl=0.68</code>. The output ROOT tree will have one row with the best fit point, and two rows for each parameter, corresponding to the minimum and maximum of that parameter on the likelihood contour corresponding to the specified CL, according to an N-dimensional chi-square (i.e. the uncertainties on each fitted parameter do increase when adding other parameters, even if they are uncorrelated). Note that this method does not produce 1D uncertainties on each parameter, and should not be taken as such.</p>\n</li>\n<li>\n<p><code>contour2d</code>: Make a 68% CL contour \u00e0 la minos <code>combine -M MultiDimFit toy-hgg-125.root --algo contour2d --points=20 --cl=0.68</code>. 
The output will contain values corresponding to the best fit point (with <code>quantileExpected</code> set to -1) and for a set of points on the contour (with <code>quantileExpected</code> set to 1-CL, or something larger than that if the contour hits the boundary of the parameters). Probabilities are computed from the n-dimensional \\(\\chi^{2}\\) distribution. For slow models, this method can be split by running several times with a different number of points, and merging the outputs. The contourPlot.cxx macro can be used to make plots out of this algorithm.</p>\n</li>\n<li>\n<p><code>random</code>: Scan N random points and compute the probability from the profile likelihood ratio <code>combine -M MultiDimFit toy-hgg-125.root --algo random --points=20 --cl=0.68</code>. Again, the best fit will have <code>quantileExpected</code> set to -1, while each random point will have <code>quantileExpected</code> set to the probability given by the profile likelihood ratio at that point.</p>\n</li>\n<li>\n<p><code>fixed</code>: Compare the log-likelihood at a fixed point to that at the best fit. <code>combine -M MultiDimFit toy-hgg-125.root --algo fixed --fixedPointPOIs r=r_fixed,MH=MH_fixed</code>. The output tree will contain the difference in the negative log-likelihood between the points (\\(\\hat{r},\\hat{m}_{H}\\)) and (\\(r_{fixed},m_{H,fixed}\\)) in the branch <code>deltaNLL</code>.</p>\n<p>You can use the <code>combineTool.py</code> script to run multiple fixed points from a <code>.csv</code> file. For example, data/tutorials/multiDim/fixed.csv contains the points</p>\n<pre><code>r_ggH,r_qqH\n1.0,1.0\n1.0,2.0\n2.0,1.0\n2.0,2.0\n</code></pre>\n<p>and <code>combineTool.py -M MultiDimFit toy-hgg-125.root --fromfile fixed.csv</code> will run <code>--algo fixed</code> at each of these points.</p>\n</li>\n<li>\n<p><code>grid</code>: Scan a fixed grid of points with approximately N points in total. <code>combine -M MultiDimFit toy-hgg-125.root --algo grid --points=10000</code>.</p>\n<ul>\n<li>You can partition the job in multiple tasks by using the options <code>--firstPoint</code> and <code>--lastPoint</code>. For complicated scans, the points can be split as described in the combineTool for job submission section. The output file will contain a column <code>deltaNLL</code> with the difference in negative log-likelihood with respect to the best fit point. Ranges/contours can be evaluated by filling TGraphs or TH2 histograms with these points.</li>\n<li>By default the \"min\" and \"max\" of the POI ranges are not included and the points that are in the scan are centred, e.g. <code>combine -M MultiDimFit --algo grid --rMin 0 --rMax 5 --points 5</code> will scan at the points \\(r=0.5, 1.5, 2.5, 3.5, 4.5\\). You can include the option <code>--alignEdges 1</code>, which causes the points to be aligned with the end-points of the parameter ranges - e.g. <code>combine -M MultiDimFit --algo grid --rMin 0 --rMax 5 --points 6 --alignEdges 1</code> will scan at the points \\(r=0, 1, 2, 3, 4, 5\\). Note - the number of points must be increased by 1 to ensure both end points are included.</li>\n</ul>\n</li>\n</ul>\n<p>With the algorithms <code>none</code> and <code>singles</code> you can save the RooFitResult from the initial fit using the option <code>--saveFitResult</code>. The fit result is saved into a new file called <code>multidimfit.root</code>.</p>\n<p>As usual, any floating nuisance parameters will be profiled. 
This behaviour can be modified by using the <code>--freezeParameters</code> option.</p>\n<p>For most of the methods, for lower-precision results you can turn off the profiling of the nuisance parameters by using the option <code>--fastScan</code>, which for complex models speeds up the process by several orders of magnitude. All nuisance parameters will be kept fixed at the value corresponding to the best fit point.</p>\n<p>As an example, let's produce the \\(-2\\Delta\\ln{\\mathcal{L}}\\) scan as a function of <code>r_ggH</code> and <code>r_qqH</code> from the toy \\(H\\rightarrow\\gamma\\gamma\\) datacard. The command below should be pretty fast, as the statistical model is quite simple,</p>\n<pre><code>combine toy-hgg-125.root -M MultiDimFit --algo grid --points 2500 --setParameterRanges r_qqH=0,12:r_ggH=-1,4 -m 125\n</code></pre>\n<p>The scan, along with the best fit point and \\(1\\sigma\\) CL contour can be drawn using ROOT using something like the script below,</p>\n\nShow script\n<pre><code>\nvoid plot2D_LHScan(){\n\n  TFile *_file0 = TFile::Open(\"higgsCombineTest.MultiDimFit.mH125.root\");\n  TTree *limit = (TTree*) _file0-&gt;Get(\"limit\");\n\n  // create histogram representing -2Delta Log(L)\n  TCanvas *can = new TCanvas(\"c\",\"c\",600,540);\n  limit-&gt;Draw(\"2*deltaNLL:r_qqH:r_ggH&gt;&gt;h(50,-1,4,50,0,12)\",\"2*deltaNLL&lt;50\",\"prof colz\");\n  TH2F *g2NLL = (TH2F*)gROOT-&gt;FindObject(\"h\");\n\n  g2NLL-&gt;SetName(\"g2NLL\");\n  g2NLL-&gt;SetTitle(\"\");\n  g2NLL-&gt;GetXaxis()-&gt;SetTitle(\"r_{ggH}\");\n  g2NLL-&gt;GetYaxis()-&gt;SetTitle(\"r_{qqH}\");\n\n  // Get best fit point\n  limit-&gt;Draw(\"r_qqH:r_ggH\",\"quantileExpected == -1\",\"P same\");\n  TGraph *best_fit = (TGraph*)gROOT-&gt;FindObject(\"Graph\");\n\n  best_fit-&gt;SetMarkerSize(3);\n  best_fit-&gt;SetMarkerStyle(34);\n  best_fit-&gt;Draw(\"p same\");\n\n  // get 1-sigma contour\n  TH2F *h68 = (TH2F*)g2NLL-&gt;Clone();\n  h68-&gt;SetContour(2);\n  h68-&gt;SetContourLevel(1,2.3);\n  h68-&gt;SetLineWidth(3);\n  h68-&gt;SetLineColor(1);\n  h68-&gt;Draw(\"CONT3same\");\n\n  gStyle-&gt;SetOptStat(0);\n  can-&gt;SaveAs(\"2D_LHScan.png\");\n\n }\n\n<p></p>\n\n<p>This will produce a plot like the one below,</p>\n<p></p>\n<p>Similarly, 1D scans can be drawn directly from the tree, however for 1D likelihood scans, there is a python script from the <code>CombineHarvester/CombineTools</code> package plot1DScan.py that can be used to make plots and extract the crossings of the <code>2*deltaNLL</code> - e.g the 1\u03c3/2\u03c3 boundaries.</p>"},{"location":"part3/commonstatsmethods/#useful-options-for-likelihood-scans","title":"Useful options for likelihood scans","text":"<p>A number of common, useful options (especially for computing likelihood scans with the grid algo) are,</p>\n<ul>\n<li><code>--autoBoundsPOIs arg</code>: Adjust bounds for the POIs if they end up close to the boundary. This can be a comma-separated list of POIs, or \"*\" to get all of them.</li>\n<li><code>--autoMaxPOIs arg</code>: Adjust maxima for the POIs if they end up close to the boundary. Can be a list of POIs, or \"*\" to get all.</li>\n<li><code>--autoRange X</code>: Set to any X &gt;= 0 to do the scan in the \\(\\hat{p}\\) \\(\\pm\\) X\u03c3 range, where \\(\\hat{p}\\) and \u03c3 are the best fit parameter value and uncertainty from the initial fit (so it may be fairly approximate). 
In case you do not trust the estimate of the error from the initial fit, you can instead centre the range on the best fit value by using the option <code>--centeredRange X</code>, which does the scan in the \\(\\hat{p}\\) \\(\\pm\\) X range.</li>\n<li><code>--squareDistPoiStep</code>: POI step size based on distance from the midpoint (either (max-min)/2 or the best fit if used with <code>--autoRange</code> or <code>--centeredRange</code>) rather than linear separation.</li>\n<li><code>--skipInitialFit</code>: Skip the initial fit (saves time if, for example, a snapshot is loaded from a previous fit).</li>\n</ul>\n<p>Below is a comparison of a likelihood scan, with 20 points, as a function of <code>r_qqH</code> with our <code>toy-hgg-125.root</code> workspace, with and without some of these options. The options added tell Combine to scan more points closer to the minimum (best-fit) than with the default.</p>\n<p></p>\n<p>You may find it useful to use the <code>--robustFit=1</code> option to turn on robust (brute-force) fits for likelihood scans (and other algorithms). You can set the strategy and tolerance when using the <code>--robustFit</code> option using the options <code>--setRobustFitAlgo</code> (default is <code>Minuit2,migrad</code>), <code>--setRobustFitStrategy</code> (default is 0) and <code>--setRobustFitTolerance</code> (default is 0.1). If these options are not set, the defaults (set using <code>cminDefaultMinimizerX</code> options) will be used.</p>\n<p>If running <code>--robustFit=1</code> with the algo singles, you can tune the accuracy of the routine used to find the crossing points of the likelihood using the option <code>--setCrossingTolerance</code> (the default is set to 0.0001).</p>\n<p>If you suspect your fits/uncertainties are not stable, you may also try to run a custom HESSE-style calculation of the covariance matrix. This is enabled by running <code>MultiDimFit</code> with the <code>--robustHesse=1</code> option. A simple example of the default behaviour for a simple datacard is given here.</p>\n<p>For a full list of options use <code>combine -M MultiDimFit --help</code>.</p>"},{"location":"part3/commonstatsmethods/#fitting-only-some-parameters","title":"Fitting only some parameters","text":"<p>If your model contains more than one parameter of interest, you can still decide to fit a smaller number of them, using the option <code>--parameters</code> (or <code>-P</code>), with a syntax like this:</p>\n<pre><code>combine -M MultiDimFit [...] -P poi1 -P poi2 ... --floatOtherPOIs=(0|1)\n</code></pre>\n<p>If <code>--floatOtherPOIs</code> is set to 0, the other parameters of interest (POIs), which are not included as a <code>-P</code> option, are kept fixed to their nominal values. 
If it's set to 1, they are kept floating, which has different consequences depending on <code>algo</code>:</p>\n<ul>\n<li>When running with <code>--algo=singles</code>, the other floating POIs are treated as unconstrained nuisance parameters.</li>\n<li>When running with <code>--algo=cross</code> or <code>--algo=contour2d</code>, the other floating POIs are treated as other POIs, and so they increase the number of dimensions of the chi-square.</li>\n</ul>\n<p>As a result, when running with <code>--floatOtherPOIs</code> set to 1, the uncertainties on each fitted parameter do not depend on the selection of POIs passed to MultiDimFit, but only on the number of parameters of the model.</p>\n\n<p>Info</p>\n<p>Note that the <code>poi</code> given to the option <code>-P</code> can also be any nuisance parameter. However, by default, the other nuisance parameters are left floating, so in general this does not need to be specified.</p>\n\n<p>You can save the values of the other parameters of interest in the output tree by passing the option <code>--saveInactivePOI=1</code>. You can additionally save the post-fit values of any nuisance parameter, function, or discrete index (RooCategory) defined in the workspace using the following options:</p>\n<ul>\n<li><code>--saveSpecifiedNuis=arg1,arg2,...</code> will store the fitted value of any specified constrained nuisance parameter. Use <code>all</code> to save every constrained nuisance parameter. Note that if you want to store the values of <code>flatParams</code> (or floating parameters that are not defined in the datacard) or <code>rateParams</code>, which are unconstrained, you should instead use the generic option <code>--trackParameters</code> as described here.</li>\n<li><code>--saveSpecifiedFunc=arg1,arg2,...</code> will store the value of any function (e.g. <code>RooFormulaVar</code>) in the model.</li>\n<li><code>--saveSpecifiedIndex=arg1,arg2,...</code> will store the index of any <code>RooCategory</code> object - e.g. a <code>discrete</code> nuisance.</li>\n</ul>"},{"location":"part3/commonstatsmethods/#using-best-fit-snapshots","title":"Using best fit snapshots","text":"<p>This can be used to save time when performing scans so that the best fit does not need to be repeated. It can also be used to perform scans with some nuisance parameters frozen to their best-fit values. 
This can be done as follows,</p>\n<ul>\n<li>Create a workspace for a floating \\(r,m_{H}\\) fit</li>\n</ul>\n<pre><code>text2workspace.py hgg_datacard_mva_8TeV_bernsteins.txt -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingHiggsMass --PO higgsMassRange=120,130 -o testmass.root\n</code></pre>\n<ul>\n<li>Perform the fit, saving the workspace</li>\n</ul>\n<pre><code>combine -m 123 -M MultiDimFit --saveWorkspace -n teststep1 testmass.root  --verbose 9\n</code></pre>\n<p>Now we can load the best fit \\(\\hat{r},\\hat{m}_{H}\\) and fit for \\(r\\) freezing \\(m_{H}\\) and lumi_8TeV to their best-fit values,</p>\n<pre><code>combine -m 123 -M MultiDimFit -d higgsCombineteststep1.MultiDimFit.mH123.root -w w --snapshotName \"MultiDimFit\" -n teststep2  --verbose 9 --freezeParameters MH,lumi_8TeV\n</code></pre>"},{"location":"part3/commonstatsmethods/#feldman-cousins","title":"Feldman-Cousins","text":"<p>The Feldman-Cousins (FC) procedure for computing confidence intervals for a generic model is,</p>\n<ul>\n<li>use the profile likelihood ratio as the test statistic, \\(q(\\vec{\\mu}) = - 2 \\ln \\mathcal{L}(\\vec{\\mu},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}))/\\mathcal{L}(\\hat{\\vec{\\mu}},\\hat{\\vec{\\nu}})\\) where \\(\\vec{\\mu}\\) is a point in the (N-dimensional) parameter space, and \\(\\hat{\\vec{\\mu}}\\) is the point corresponding to the best fit. In this test statistic, the nuisance parameters are profiled, both in the numerator and denominator.</li>\n<li>for each point \\(\\vec{\\mu}\\):<ul>\n<li>compute the observed test statistic \\(q_{\\mathrm{obs}}(\\vec{\\mu})\\)</li>\n<li>compute the expected distribution of \\(q(\\vec{\\mu})\\) under the hypothesis of \\(\\vec{\\mu}\\) as the true value.</li>\n<li>accept the point in the region if \\(p_{\\vec{\\mu}}=P\\left[q(\\vec{\\mu}) &gt; q_{\\mathrm{obs}}(\\vec{\\mu})| \\vec{\\mu}\\right] &gt; \\alpha\\)</li>\n</ul>\n</li>\n</ul>\n<p>Here \\(\\alpha\\) is the chosen critical value; the corresponding confidence level is \\(1-\\alpha\\).</p>\n<p>In Combine, you can perform this test on each individual point (param1, param2,...) = (value1,value2,...) by doing,</p>\n<pre><code>combine workspace.root -M HybridNew --LHCmode LHC-feldman-cousins --clsAcc 0 --singlePoint  param1=value1,param2=value2,param3=value3,... --saveHybridResult [Other options for toys, iterations etc as with limits]\n</code></pre>\n<p>Note that you can also split this calculation into several separate runs (remembering to set a random seed <code>-s -1</code> each time the above command is run) and <code>hadd</code> the resulting <code>.root</code> output files into a single file <code>toys.root</code>. This can then be read in and used to calculate \\(p_{\\vec{\\mu}}\\) by using the same command as above but replacing the option <code>--saveHybridResult</code> with <code>--readHybridResult --toysFile toys.root</code>. </p>\n<p>The point belongs to your confidence region if \\(p_{\\vec{\\mu}}\\) is larger than \\(\\alpha\\) (e.g. 0.3173 for a 1\u03c3 region, \\(1-\\alpha=0.6827\\)).</p>\n\n<p>Warning</p>\n<p>You should not use this method without the option <code>--singlePoint</code>. Although Combine will not complain, the algorithm to find the crossing will only find a single crossing and therefore not find the correct interval. 
Instead you should calculate the Feldman-Cousins intervals as described above.</p>"},{"location":"part3/commonstatsmethods/#physical-boundaries","title":"Physical boundaries","text":"<p>Imposing physical boundaries (such as requiring \\(r&gt;0\\) for a signal strength \\(r\\)) is achieved by setting the ranges of the physics model parameters using</p>\n<pre><code>--setParameterRanges param1=param1_min,param1_max:param2=param2_min,param2_max ....\n</code></pre>\n<p>The boundary is imposed by restricting the parameter range(s) to those set by the user, in the fits. Note that this is a trick! The actual fitted value, as one of an ensemble of outcomes, can fall outside of the allowed region, while the boundary should be imposed on the physical parameter. The effect of restricting the parameter value in the fit is such that the test statistic is modified as follows:</p>\n\\[q(\\vec{\\mu}) = - 2 \\ln \\mathcal{L}(\\vec{\\mu},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}))/\\mathcal{L}(\\hat{\\vec{\\mu}},\\hat{\\vec{\\nu}}),\\]\n<p>if \\(\\hat{\\vec{\\mu}}\\) is contained in the bounded range,</p>\n<p>and,</p>\n\\[q(\\vec{\\mu}) = - 2 \\ln \\mathcal{L}(\\vec{\\mu},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}))/\\mathcal{L}(\\vec{\\mu}_{B},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}_{B})),\\]\n<p>if \\(\\hat{\\vec{\\mu}}\\) is outside of the bounded range. Here \\(\\vec{\\mu}_{B}\\) and \\(\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}_{B})\\) are the values of \\(\\vec{\\mu}\\) and \\(\\vec{\\nu}\\) which maximise the likelihood excluding values outside of the bounded region for \\(\\vec{\\mu}\\) - typically, \\(\\vec{\\mu}_{B}\\) will be found at one of the boundaries which is imposed. For example, if there is one parameter of interest \\(\\mu\\) and the boundary \\(\\mu&gt;0\\) is imposed, you will typically expect \\(\\mu_{B}=0\\) when \\(\\hat{\\mu}\\leq 0\\), and \\(\\mu_{B}=\\hat{\\mu}\\) otherwise.</p>\n<p>This can sometimes be an issue as Minuit may not know if it has successfully converged when the minimum lies outside of that range. If there is no upper/lower boundary, just set that value to something far from the region of interest.</p>\n\n<p>Info</p>\n<p>One can also imagine imposing the boundaries by first allowing Minuit to find the minimum in the unrestricted region and then setting the test statistic to that in the case that the minimum lies outside the physical boundary. This would avoid potential issues of convergence. If you are interested in implementing this version in Combine, please contact the development team.</p>"},{"location":"part3/commonstatsmethods/#extracting-contours-from-results-files","title":"Extracting contours from results files","text":"<p>As in general for <code>HybridNew</code>, you can split the work into multiple tasks (grid and/or batch) and then merge the outputs with <code>hadd</code>. You can also refer to the combineTool for job submission section for submitting the jobs to the grid/batch, or, if you have more than one parameter of interest, see the instructions for running <code>HybridNew</code> on a grid of parameter points in the CombineHarvester - HybridNewGrid documentation.</p>"},{"location":"part3/commonstatsmethods/#extracting-1d-intervals","title":"Extracting 1D intervals","text":"<p>For one-dimensional models only, and if the parameter behaves like a cross section, the code is able to interpolate and determine the values of your parameter on the contour (just like it does for the limits). 
As with limits, read in the grid of points and extract 1D intervals using,</p>\n<pre><code>combine workspace.root -M HybridNew --LHCmode LHC-feldman-cousins --readHybridResults --grid=mergedfile.root --cl &lt;1-alpha&gt;\n</code></pre>\n<p>The output tree will contain the values of the POI that cross the critical value (\\(\\alpha\\)) - i.e., the boundaries of the confidence intervals.</p>\n<p>You can produce a plot of the value of \\(p_{\\vec{\\mu}}\\) vs the parameter of interest \\(\\vec{\\mu}\\) by adding the option <code>--plot &lt;plotname&gt;</code>.</p>\n<p>As an example, we will use the <code>data/tutorials/multiDim/toy-hgg-125.txt</code> datacard and find the 1D FC 68% interval for the \\(r_{qqH}\\) parameter. First, we construct the model as, </p>\n<pre><code>text2workspace.py -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH toy-hgg-125.txt -o toy-hgg-125.root\n</code></pre>\n<p>Now we generate the grid of test statistics in a suitable range. You could use <code>combineTool.py</code> as below, but for 1D we can just generate the points in a for loop. </p>\n<pre><code>for i in 0.1 1.1 2.1 3.1 4.1 5.1 6.1 7.1 8.1 9.1 10.1 ; do combine toy-hgg-125.root --redefineSignalPOI r_qqH   -M HybridNew --LHCmode LHC-feldman-cousins --clsAcc 0 --singlePoint r_qqH=${i} --saveToys --saveHybridResult -n ${i} ; done\n\nhadd -f FeldmanCousins1D.root higgsCombine*.1.HybridNew.mH120.123456.root\n</code></pre>\n<p>Next, we get combine to calculate the interval from this grid. \n<pre><code>combine toy-hgg-125.root -M HybridNew --LHCmode LHC-feldman-cousins --readHybridResults --grid=FeldmanCousins1D.root --cl 0.68 --redefineSignalPOI r_qqH\n</code></pre>\nand we should see the following output, </p>\n<pre><code> -- HybridNew --\nfound 68 % confidence regions\n  2.19388 (+/- 0.295316) &lt; r_qqH &lt; 8.01798 (+/- 0.0778685)\nDone in 0.00 min (cpu), 0.00 min (real)\n</code></pre>\n<p>Since we included the <code>--plot</code> option, we will also get a plot like the one below, </p>\n<p></p>"},{"location":"part3/commonstatsmethods/#extracting-2d-contours-general-intervals","title":"Extracting 2D contours / general intervals","text":"<p>For two-dimensional models, or if the parameter does not behave like a cross section, you will need to extract the contours from the output of <code>HybridNew</code> and plot them yourself. We will use the <code>data/tutorials/multiDim/toy-hgg-125.txt</code> datacard in the example below to demonstrate how this can be done. Let's build the model again as we did in the MultiDimFit section.</p>\n<pre><code>text2workspace.py -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH toy-hgg-125.txt -o toy-hgg-125.root\n</code></pre>\n<p>First, we use <code>combineTool.py</code> to create jobs for each point in our parameter scan. We want to impose the boundaries that \\(r_{ggH}&gt;0\\), \\(r_{qqH}&gt;0\\).\nIn the example below, we will run in interactive mode so this can take a little while. You can instead run using a batch cluster (e.g. <code>condor</code>) or the grid (<code>grid</code>) to submit separate jobs for each point / set of points. We configure the tool by specifying the grid of points in <code>poi_grid_configuration.json</code> as below. 
Here we want 5000 toys for each point, and we choose a grid of \\(r_{ggH}\\in [0,4]\\) in steps of 0.2, and \\(r_{qqH}\\in[0,10]\\) in steps of 0.5.</p>\n<pre><code>{\n  \"verbose\" : true,\n  \"opts\" : \" --LHCmode LHC-feldman-cousins --saveHybridResult --clsAcc 0 \",\n  \"POIs\" : [\"r_ggH\", \"r_qqH\"],\n  \"grids\" : [\n    [\"0:4|.2\",\"0:10|.5\",\"\"]\n  ],\n  \"toys_per_cycle\"  : 5000,\n  \"FC\" : true,\n  \"min_toys\": 5000,\n  \"max_toys\": 50000,\n  \"output_incomplete\" : true,\n  \"make_plots\": false,\n  \"contours\":[\"obs\"],\n  \"CL\": 0.68,\n  \"output\": \"FeldmanCousins.root\",\n  \"zipfile\"         : \"collected.zip\",\n  \"statusfile\"      : \"status.json\"\n }\n</code></pre>\n<p>The command will look like,</p>\n<pre><code>combineTool.py -M HybridNewGrid  ./poi_grid_configuration.json -d toy-hgg-125.root --task-name fc2d --job-mode 'interactive' --cycles 1\n</code></pre>\n<p>As mentioned, this will take a while to run so you should consider going to make a cup of coffee at this point and reading through the HybridNewGrid documentation to learn more about this tool.\nOnce this is done, we extract the values of \\(p_{\\vec{\\mu}}\\) for each point in our parameter space using the same command, but this time setting <code>--cycles 0</code> and adding the option <code>--output</code>,</p>\n<pre><code>combineTool.py -M HybridNewGrid  ./poi_grid_configuration.json -d toy-hgg-125.root --task-name fc2d --job-mode 'interactive' --cycles 0 --output\n</code></pre>\n<p>which will produce a file <code>FeldmanCousins.root</code> (as defined in the <code>\"output\"</code> field of <code>poi_grid_configuration.json</code>) that contains a <code>TGraph2D</code> which stores the calculated value of \\(p_{\\vec{\\mu}}\\) for each point in the grid. Using something like the macro below, these values can be plotted along with a contour corresponding to 68% CL (\\(\\alpha=0.32\\)).</p>\n\nShow script\n<pre><code>\nvoid plot_2DFC(){\n\n  TFile *_file0 = TFile::Open(\"FeldmanCousins.root\");\n\n  TCanvas *can = new TCanvas(\"c\",\"c\",600,540);\n\n  // Draw p_x\n  TGraph2D *gpX = (TGraph2D*)_file0-&gt;Get(\"obs\");\n  gpX-&gt;Draw(\"colz\");\n\n  // Draw 68% contour\n  TH2F *h68 = (TH2F*)gpX-&gt;GetHistogram()-&gt;Clone(\"h68\");\n  h68-&gt;SetContour(2);\n  h68-&gt;SetContourLevel(1,0.32);\n  h68-&gt;SetLineWidth(3);\n  h68-&gt;SetLineColor(1);\n  h68-&gt;Draw(\"CONT3same\");\n\n  gpX-&gt;SetTitle(\"\");\n  gpX-&gt;GetXaxis()-&gt;SetTitle(\"r_{ggH}\");\n  gpX-&gt;GetYaxis()-&gt;SetTitle(\"r_{qqH}\");\n\n\n  gStyle-&gt;SetOptStat(0);\n  can-&gt;SaveAs(\"2D_FC.png\");\n }\n \n<p></p>\n\n<p>It will produce the plot below.</p>\n<p></p>\n<p>There are several options for reducing the running time, such as setting limits on the region of interest or the minimum number of toys required for a point to be included.</p>"},{"location":"part3/debugging/","title":"Debugging fits","text":"<p>When a fit fails there are several things you can do to investigate. CMS users can have a look at these slides from a previous Combine tutorial. This section contains a few pointers for some of the methods mentioned in the slides.</p>"},{"location":"part3/debugging/#analyzing-the-nll-shape-in-each-parameter","title":"Analyzing the NLL shape in each parameter","text":"<p>The <code>FastScan</code> mode of <code>combineTool.py</code> can be used to analyze the shape of the NLL as a function of each parameter in the fit model. 
The NLL is evaluated varying a single parameter at a time, the other parameters stay at the default values they have in the workspace. This produces a file with the NLL, plus its first and second derivatives, as a function of each parameter. Discontinuities in the derivatives, particularly if they are close to the minimum of the parameter, can be the source of issues with the fit. </p> <p>The usage is as follows:</p> <p><code>combineTool.py -M FastScan -w workspace.root:w</code></p> <p>Note that this will make use of the data in the workspace for evaluating the NLL. To run this on an asimov data set, with r=1 injected, you can do the following:</p> <pre><code>combine -M GenerateOnly workspace.root -t -1 --saveToys --setParameters r=1\n\ncombineTool.py -M FastScan -w workspace.root:w -d higgsCombineTest.GenerateOnly.mH120.123456.root:toys/toy_asimov\n</code></pre> <p><code>higgsCombineTest.GenerateOnly.mH120.123456.root</code> is generated by the first command; if you pass a value for <code>-m</code> or change the default output file name with <code>-n</code> the file name will be different and you should change the <code>combineTool</code> call accordingly.</p>"},{"location":"part3/nonstandard/","title":"Advanced Use Cases","text":"<p>This section will cover some of the more specific use cases for Combine that are not necessarily related to the main results of the analysis.</p>"},{"location":"part3/nonstandard/#fit-diagnostics","title":"Fit Diagnostics","text":"<p>If you want to diagnose your limits/fit results, you may first want to look at the HIG PAG standard checks, which are applied to all datacards and can be found here.</p> <p>If you have already found the Higgs boson but it's an exotic one, instead of computing a limit or significance you might want to extract its cross section by performing a maximum-likelihood fit. Alternatively, you might want to know how compatible your data and your model are, e.g. how strongly your nuisance parameters are constrained, to what extent they are correlated, etc. These general diagnostic tools are contained in the method <code>FitDiagnostics</code>.</p> <pre><code>    combine -M FitDiagnostics datacard.txt\n</code></pre> <p>The program will print out the result of two fits. The first one is performed with the signal strength r (or the first POI in the list, in models with multiple POIs) set to zero and a second with floating r. The output ROOT tree will contain the best fit value for r and its uncertainty. 
You will also get a <code>fitDiagnostics.root</code> file containing the following objects:</p> Object Description <code>nuisances_prefit</code> <code>RooArgSet</code> containing the pre-fit values of the nuisance parameters, and their uncertainties from the external constraint terms only <code>fit_b</code> <code>RooFitResult</code> object containing the outcome of the fit of the data with signal strength set to zero <code>fit_s</code> <code>RooFitResult</code> object containing the outcome of the fit of the data with floating signal strength <code>tree_prefit</code> <code>TTree</code> of pre-fit nuisance parameter values and constraint terms (_In) <code>tree_fit_sb</code> <code>TTree</code> of fitted nuisance parameter values and constraint terms (_In) with floating signal strength <code>tree_fit_b</code> <code>TTree</code> of fitted nuisance parameter values and constraint terms (_In) with signal strength set to 0 <p>by including the option <code>--plots</code>, you will additionally find the following contained in the ROOT file:</p> Object Description <code>covariance_fit_s</code> <code>TH2D</code> Covariance matrix of the parameters in the fit with floating signal strength <code>covariance_fit_b</code> <code>TH2D</code> Covariance matrix of the parameters in the fit with signal strength set to zero <code>category_variable_prefit</code> <code>RooPlot</code> plot of the pre-fit PDFs/templates with the data (or toy if running with <code>-t</code>) overlaid <code>category_variable_fit_b</code> <code>RooPlot</code> plot of the PDFs/templates from the background only fit with the data (or toy if running with <code>-t</code>) overlaid <code>category_variable_fit_s</code> <code>RooPlot</code> plot of the PDFs/templates from the signal+background fit with the data (or toy if running with <code>-t</code>) overlaid <p>There will be one <code>RooPlot</code> object per category in the likelihood, and one per variable if using a multi-dimensional dataset. For each of these additional objects a png file will also be produced.</p> <p>Info</p> <p>If you use the option <code>--name</code>, this additional name will be inserted into the file name for this output file.</p> <p>As well as the values of the constrained nuisance parameters (and their constraints), you will also find branches for the number of \"bad\" nll calls (which you should check is not too large) and the status of the fit <code>fit_status</code>. The fit status is computed as follows</p> <pre><code>fit_status = 100 * hesse_status + 10 * minos_status +  minuit_summary_status\n</code></pre> <p>The <code>minuit_summary_status</code> is the usual status from Minuit, details of which can be found here. For the other status values, check these documentation links for the <code>hesse_status</code> and the <code>minos_status</code>.</p> <p>A fit status of -1 indicates that the fit failed (Minuit summary was not 0 or 1) and hence the fit result is not valid.</p>"},{"location":"part3/nonstandard/#fit-options","title":"Fit options","text":"<ul> <li>If you only want to run the signal+background fit, and do not need the output file, you can run with <code>--justFit</code>. 
In case you would like to run only the signal+background fit but would like to produce the output file, you should use the option <code>--skipBOnlyFit</code> instead.</li> <li>You can use <code>--rMin</code> and <code>--rMax</code> to set the range of the first POI; a range that is not too large compared with the uncertainties you expect from the fit usually gives more stable and accurate results.</li> <li>By default, the uncertainties are computed using MINOS for the first POI and HESSE for all other parameters. For the nuisance parameters the uncertainties will therefore be symmetric. You can run MINOS for all parameters using the option <code>--minos all</code>, or for none of the parameters using <code>--minos none</code>. Note that running MINOS is slower so you should only consider using it if you think the HESSE uncertainties are not accurate.</li> <li>If MINOS or HESSE fails to converge, you can try running with <code>--robustFit=1</code>. This will do a slower, but more robust, likelihood scan, which can be further controlled with the parameter <code>--stepSize</code> (the default value is 0.1, and is relative to the range of the parameter).</li> <li>The strategy and tolerance when using the <code>--robustFit</code> option can be set using the options <code>--setRobustFitAlgo</code> (default is <code>Minuit2,migrad</code>), <code>--setRobustFitStrategy</code> (default is 0) and <code>--setRobustFitTolerance</code> (default is 0.1). If these options are not set, the defaults (set using <code>cminDefaultMinimizerX</code> options) will be used. You can also tune the accuracy of the routine used to find the crossing points of the likelihood using the option <code>--setCrossingTolerance</code> (the default is set to 0.0001).</li> <li>If you find the covariance matrix provided by HESSE is not accurate (i.e. <code>fit_s-&gt;Print()</code> reports this was forced positive-definite) then a custom HESSE-style calculation of the covariance matrix can be used instead. This is enabled by running <code>FitDiagnostics</code> with the <code>--robustHesse 1</code> option. Please note that the status reported by <code>RooFitResult::Print()</code> will contain <code>covariance matrix quality: Unknown, matrix was externally provided</code> when robustHesse is used; this is normal and does not indicate a problem. NB: one feature of the robustHesse algorithm is that if it still cannot calculate a positive-definite covariance matrix it will try to do so by dropping parameters from the Hessian matrix before inverting. If this happens it will be reported in the output to the screen.</li> <li>For other fitting options see the generic minimizer options section.</li> </ul>"},{"location":"part3/nonstandard/#fit-parameter-uncertainties","title":"Fit parameter uncertainties","text":"<p>If you get a warning message when running <code>FitDiagnostics</code> that says <code>Unable to determine uncertainties on all fit parameters</code>, this means that the covariance matrix calculated in <code>FitDiagnostics</code> was not correct.</p> <p>The most common problem is that the covariance matrix is forced positive-definite. In this case the constraints on fit parameters as taken from the covariance matrix are incorrect and should not be used. 
In particular, if you want to make post-fit plots of the distribution used in the signal extraction fit and are extracting the uncertainties on the signal and background expectations from the covariance matrix, the resulting values will not reflect the truth if the covariance matrix was incorrect. By default, if this happens and you passed the <code>--saveWithUncertainties</code> flag when calling <code>FitDiagnostics</code>, this option will be ignored as calculating the uncertainties would lead to incorrect results. This behaviour can be overridden by passing <code>--ignoreCovWarning</code>.</p> <p>Such problems with the covariance matrix can be caused by a number of things, for example:</p> <ul> <li> <p>Parameters being close to their boundaries after the fit.</p> </li> <li> <p>Strong (anti-) correlations between some parameters.</p> </li> <li> <p>A discontinuity in the NLL function or its derivatives at or near the minimum.</p> </li> </ul> <p>If you are aware that your analysis has any of these features you could try to resolve them. Setting <code>--cminDefaultMinimizerStrategy 0</code> can also help with this problem.</p>"},{"location":"part3/nonstandard/#pre-and-post-fit-nuisance-parameters","title":"Pre- and post-fit nuisance parameters","text":"<p>It is possible to compare pre-fit and post-fit nuisance parameter values with the script diffNuisances.py. Taking as input a <code>fitDiagnosticsTest.root</code> file, the script will by default print out the parameters that have changed significantly with respect to their initial estimate. </p> <p>For each of those parameters, it will print out </p> <ul> <li>The shift in value and the post-fit uncertainty, both normalized to the initial (pre-fit) values, for the s+b fit and the b-only fit. </li> <li>The linear correlation between the parameter and the signal strength <code>r</code> - \\(\\rho(r,\\nu)\\).</li> <li>The approximate impact of the nuisance parameter determined as \\(I(r,\\nu) = \\sigma_{r}\\sigma_{\\nu}\\rho(r,\\nu)\\), where \\(\\sigma_{r}\\) and \\(\\sigma_{\\nu}\\) are the symmetrized total uncertainties on the signal strength and nuisance parameter, respectively (see the section on Nuisance parameter impacts for our recommended calculation of impacts).</li> </ul> <p>The script has several options to adjust the thresholds used to decide whether a parameter has changed significantly, to get the printout of the absolute value of the nuisance parameters, and to get the output in another format for use on a webpage or in a note (the supported formats are <code>html</code>, <code>latex</code>, <code>twiki</code>). To print all of the parameters, use the option <code>--all</code>. </p> <p>An example of using this script is shown below, </p> <pre><code>combine data/tutorials/counting/realistic-counting-experiment.txt -M FitDiagnostics --forceRecreateNLL --rMin -1 --rMax 1\npython diffNuisances.py fitDiagnosticsTest.root\n</code></pre> Show output <code>--format text</code> (default)<code>--format html</code> <pre><code>diffNuisances run on fitDiagnosticsTest.root, at 2024-07-01 18:05:37.585109 with the following options ... {'vtol': 0.3, 'stol': 0.1, 'vtol2': 2.0, 'stol2': 0.5, 'show_all_parameters': False, 'absolute_values': False, 'poi': 'r', 'format': 'text', 'plotfile': None, 'pullDef': '', 'skipFitS': False, 'skipFitB': False, 'sortBy': 'correlation', 'regex': '.*'}\n\nname                                              b-only fit            s+b fit         rho  approx impact\nCMS_scale_t_tautau_8TeV                          -0.60, 0.40     ! 
-0.73, 0.39!     !-0.19!      -0.051\nCMS_eff_t_tt_8TeV                                +0.57, 0.32     ! +0.50, 0.32!     !-0.17!      -0.037\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_12         +0.21, 0.89        -0.20, 0.96       -0.15      -0.103\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_11         +0.71, 0.87        +0.42, 0.90       -0.15      -0.095\nCMS_htt_QCDSyst_tauTau_vbf_8TeV                  +0.16, 0.82        -0.15, 0.84       -0.12      -0.071\nCMS_htt_tt_tauTau_vbf_8TeV_ZTT_bin_6             +0.32, 0.97        +0.09, 0.99       -0.08      -0.054\nCMS_htt_QCDSyst_tauTau_1jet_high_mediumhiggs_8TeV         +0.52, 0.20     ! +0.48, 0.20!     !-0.08!      -0.011\nCMS_htt_QCDSyst_tauTau_1jet_high_highhiggs_8TeV         -0.15, 0.84        -0.33, 0.84       -0.08      -0.044\nCMS_htt_extrap_ztt_tauTau_1jet_high_mediumhiggs_8TeV         +0.34, 0.95        +0.45, 0.95       +0.07      +0.049\nCMS_htt_extrap_ztt_tauTau_vbf_8TeV               +0.31, 0.96        +0.14, 0.96       -0.06      -0.040\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_14         +0.29, 0.88        +0.24, 0.89       -0.03      -0.018\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_6         -0.67, 0.94        -0.63, 0.93       +0.02      +0.014\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_23         +0.44, 0.92        +0.46, 0.92       +0.01      +0.004\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_18         +0.44, 0.92        +0.43, 0.93       -0.01      -0.005\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_17         -0.62, 1.00        -0.61, 1.00       +0.01      +0.004\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_15         -0.55, 0.98        -0.54, 0.98       +0.01      +0.005\nCMS_htt_extrap_ztt_tauTau_1jet_high_highhiggs_8TeV         -0.34, 0.95        -0.41, 0.95       -0.01      -0.006\nCMS_htt_tt_tauTau_1jet_high_mediumhiggs_8TeV_ZTT_bin_20         -0.34, 0.99        -0.33, 0.99       +0.00      +0.003\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_5         -0.49, 1.00        -0.48, 1.00       +0.00      +0.003\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_26         -0.47, 0.96        -0.46, 0.97       +0.00      +0.003\n</code></pre> <p></p> <p>By default, the changes in the nuisance parameter values and uncertainties are given relative to their initial (pre-fit) values (usually relative to initial values of 0 and 1 for most nuisance types).</p> <p>The values in the output will be \\((\\nu-\\nu_{I})/\\sigma_{I}\\) if the nuisance has a pre-fit uncertainty, otherwise they will be \\(\\nu-\\nu_{I}\\) (for example, a <code>flatParam</code> has no pre-fit uncertainty).</p> <p>The reported uncertainty will be the ratio \\(\\sigma/\\sigma_{I}\\) - i.e the ratio of the post-fit to the pre-fit uncertainty. If there is no pre-fit uncertainty (as for <code>flatParam</code> nuisances), the post-fit uncertainty is shown.</p> <p>To print the pre-fit and post-fit values and (asymmetric) uncertainties, rather than the ratios, the option <code>--abs</code> can be used.</p> <p>Info</p> <p>We recommend that you include the options <code>--abs</code> and <code>--all</code> to get the full information on all of the parameters (including unconstrained nuisance parameters) at least once when checking your datacards. 
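<p>If you prefer to inspect the raw post-fit parameter values and uncertainties yourself, rather than the normalized shifts printed by diffNuisances.py, a minimal PyROOT sketch reading the <code>fit_s</code> object described above (here from the <code>fitDiagnosticsTest.root</code> file produced in the example) could look like this:</p> <pre><code>import ROOT\n\nf = ROOT.TFile.Open(\"fitDiagnosticsTest.root\")\nfit_s = f.Get(\"fit_s\")  # RooFitResult of the fit with floating signal strength\n\n# Print the post-fit value and symmetric uncertainty of every floating parameter\npars = fit_s.floatParsFinal()\nfor i in range(pars.getSize()):\n    p = pars.at(i)\n    print(\"%-60s %+8.3f +/- %.3f\" % (p.GetName(), p.getVal(), p.getError()))\n</code></pre>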
</p>"},{"location":"part3/nonstandard/#pulls","title":"Pulls","text":"<p>If instead of the nuisance parameter values, you wish to report the pulls, you can do so using the option <code>--pullDef X</code>, with <code>X</code> being one of the options listed below. You should note that since the pulls below are only defined when the pre-fit uncertainty exists, nothing will be reported for parameters that have no prior constraint (except in the case of the <code>unconstPullAsym</code> choice as described below). You may want to run without this option and <code>--all</code> to get information about those parameters.</p> <ul> <li> <p><code>relDiffAsymErrs</code>: This is the same as the default output of the tool, except that only constrained parameters (i.e. where the pre-fit uncertainty is defined) are reported. The uncertainty is also reported and calculated as \\(\\sigma/\\sigma_{I}\\).</p> </li> <li> <p><code>unconstPullAsym</code>: Report the pull as \\(\\frac{\\nu-\\nu_{I}}{\\sigma}\\), where \\(\\nu_{I}\\) and \\(\\sigma\\) are the initial value and post-fit uncertainty of that nuisance parameter. The pull defined in this way will have no error bar, but all nuisance parameters will have a result in this case.</p> </li> <li> <p><code>compatAsym</code>: The pull is defined as \\(\\frac{\\nu-\\nu_{D}}{\\sqrt{\\sigma^{2}+\\sigma_{D}^{2}}}\\), where \\(\\nu_{D}\\) and \\(\\sigma_{D}\\) are calculated as \\(\\sigma_{D} = (\\frac{1}{\\sigma^{2}} - \\frac{1}{\\sigma_{I}^{2}})^{-1}\\) and \\(\\nu_{D} = \\sigma_{D}(\\nu - \\frac{\\nu_{I}}{\\sigma_{I}^{2}})\\). In this expression \\(\\nu_{I}\\) and \\(\\sigma_{I}\\) are the initial value and uncertainty of that nuisance parameter. This can be thought of as a compatibility between the initial measurement (prior) and an imagined measurement where only the data (with no constraint on the nuisance parameter) is used to measure the nuisance parameter. There is no error bar associated with this value.</p> </li> <li> <p><code>diffPullAsym</code>: The pull is defined as \\(\\frac{\\nu-\\nu_{I}}{\\sqrt{\\sigma_{I}^{2}-\\sigma^{2}}}\\), where \\(\\nu_{I}\\) and \\(\\sigma_{I}\\) are the pre-fit value and uncertainty (from L. Demortier and L. Lyons). If the denominator is close to 0 or the post-fit uncertainty is larger than the pre-fit (usually due to some failure in the calculation), the pull is not defined and the result will be reported as <code>0 +/- 999</code>.</p> </li> </ul> <p>If using <code>--pullDef</code>, the results for all parameters for which the pull can be calculated will be shown (i.e <code>--all</code> will be set to <code>true</code>), not just those that have moved by some metric.</p> <p>This script has the option (<code>-g outputfile.root</code>) to produce plots of the fitted values of the nuisance parameters and their post-fit, asymmetric uncertainties. Instead, the pulls defined using one of the options above, can be plotted using the option <code>--pullDef X</code>. 
In addition this will produce a plot showing a comparison between the post-fit and pre-fit (symmetrized) uncertainties on the nuisance parameters.</p> <p>Info</p> <p>In the above options, if an asymmetric uncertainty is associated with the nuisance parameter, then the choice of which uncertainty is used in the definition of the pull will depend on the sign of \\(\\nu-\\nu_{I}\\).</p>"},{"location":"part3/nonstandard/#normalizations","title":"Normalizations","text":"<p>For a certain class of models, like those made from datacards for shape-based analysis, the tool can also compute and save the best fit yields of all processes to the output ROOT file. If this feature is turned on with the option <code>--saveNormalizations</code>, the file will also contain three <code>RooArgSet</code> objects <code>norm_prefit</code>, <code>norm_fit_s</code>, and <code>norm_fit_b</code>. These each contain one <code>RooConstVar</code> for each channel <code>xxx</code> and process <code>yyy</code> with name <code>xxx/yyy</code> and value equal to the best fit yield. You can use <code>RooRealVar::getVal</code> and <code>RooRealVar::getError</code> to estimate both the post-fit (or pre-fit) values and uncertainties of these normalizations.</p> <p>The sample <code>pyROOT</code> macro mlfitNormsToText.py can be used to convert the ROOT file into a text table with four columns: channel, process, yield from the signal+background fit, and yield from the background-only fit. To include the uncertainties in the table, add the option <code>--uncertainties</code>.</p> <p>Warning</p> <p>Note that when running with multiple toys, the <code>norm_fit_s</code>, <code>norm_fit_b</code>, and <code>norm_prefit</code> objects will be stored for the last toy dataset generated and so may not be useful to you.</p> <p>Note that this procedure works only for \"extended likelihoods\" like the ones used in shape-based analysis, not for counting experiment datacards. You can however convert a counting experiment datacard to an equivalent shape-based one by adding a line <code>shapes * * FAKE</code> in the datacard after the <code>imax</code>, <code>jmax</code>, <code>kmax</code> lines. Alternatively, you can use <code>combineCards.py countingcard.txt -S &gt; shapecard.txt</code> to do this conversion.</p>"},{"location":"part3/nonstandard/#per-bin-norms-for-shape-analyses","title":"Per-bin norms for shape analyses","text":"<p>If you have a shape-based analysis, you can include the option <code>--savePredictionsPerToy</code>. With this option, additional branches will be filled in the three output trees contained in <code>fitDiagnostics.root</code>.</p> <p>The normalization values for each toy will be stored in the branches inside the <code>TTrees</code> named n_exp[_final]_binxxx_proc_yyy. The _final will only be there if there are systematic uncertainties affecting this process.</p> <p>Additionally, there will be branches that provide the value of the expected bin content for each process, in each channel. These are named n_exp[_final]_binxxx_proc_yyy_i (where _final will only be in the name if there are systematic uncertainties affecting this process) for channel <code>xxx</code>, process <code>yyy</code>, bin number <code>i</code>. 
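</p> <p>As a sketch of how the normalization objects described above can be inspected interactively, the PyROOT loop below prints the best fit yields from <code>norm_fit_s</code>; it assumes <code>FitDiagnostics</code> was run with <code>--saveNormalizations</code> (and <code>--saveWithUncertainties</code> if uncertainties are wanted), and the file name follows the earlier examples.</p> <pre><code>import ROOT\n\nf = ROOT.TFile.Open(\"fitDiagnosticsTest.root\")\nnorms = f.Get(\"norm_fit_s\")            # RooArgSet with one entry per channel/process (\"xxx/yyy\")\nit = norms.createIterator()\nvar = it.Next()\nwhile var:\n    err = var.getError() if hasattr(var, \"getError\") else 0.0\n    print(var.GetName(), var.getVal(), err)\n    var = it.Next()\n</code></pre> <p>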
In the case of the post-fit trees (<code>tree_fit_s/b</code>), these will be the expectations from the fitted models, while for the pre-fit tree, they will be the expectation from the generated model (i.e if running toys with <code>-t N</code> and using <code>--genNuisances</code>, they will be randomized for each toy). These can be useful, for example, for calculating correlations/covariances between different bins, in different channels or processes, within the model from toys.</p> <p>Info</p> <p>Be aware that for unbinned models, a binning scheme is adopted based on the <code>RooRealVar::getBinning</code> for the observable defining the shape, if it exists, or Combine will adopt some appropriate binning for each observable.</p>"},{"location":"part3/nonstandard/#plotting","title":"Plotting","text":"<p><code>FitDiagnostics</code> can also produce pre- and post-fit plots of the model along with the data. They will be stored in the same directory as <code>fitDiagnostics.root</code>. To obtain these, you have to specify the option <code>--plots</code>, and then optionally specify the names of the signal and background PDFs/templates, e.g. <code>--signalPdfNames='ggH*,vbfH*'</code> and <code>--backgroundPdfNames='*DY*,*WW*,*Top*'</code> (by default, the definitions of signal and background are taken from the datacard). For models with more than 1 observable, a separate projection onto each observable will be produced.</p> <p>An alternative is to use the option <code>--saveShapes</code>. This will add additional folders in <code>fitDiagnostics.root</code> for each category, with pre- and post-fit distributions of the signals and backgrounds as TH1s, and the data as <code>TGraphAsymmErrors</code> (with Poisson intervals as error bars).</p> <p>Info</p> <p>If you want to save post-fit shapes at a specific r value, add the options <code>--customStartingPoint</code> and <code>--skipSBFit</code>, and set the r value. The result will appear in shapes_fit_b, as described below.</p> <p>Three additional folders (shapes_prefit, shapes_fit_sb and shapes_fit_b ) will contain the following distributions:</p> Object Description <code>data</code> <code>TGraphAsymmErrors</code> containing the observed data (or toy data if using <code>-t</code>). The vertical error bars correspond to the 68% interval for a Poisson distribution centered on the observed count (Garwood intervals), following the recipe provided by the CMS Statistics Committee. 
<code>$PROCESS</code> (id &lt;= 0) <code>TH1F</code> for each signal process in each channel, named as in the datacard <code>$PROCESS</code> (id &gt; 0) <code>TH1F</code> for each background process in each channel, named as in the datacard <code>total_signal</code> <code>TH1F</code> Sum over the signal components <code>total_background</code> <code>TH1F</code> Sum over the background components <code>total</code> <code>TH1F</code> Sum over all of the signal and background components <p>The above distributions are provided for each channel included in the datacard, in separate subfolders, named as in the datacard: There will be one subfolder per channel.</p> <p>Warning</p> <p>The pre-fit signal is evaluated for <code>r=1</code> by default, but this can be modified using the option <code>--preFitValue</code>.</p> <p>The distributions and normalizations are guaranteed to give the correct interpretation:</p> <ul> <li> <p>For shape datacards whose inputs are <code>TH1</code>, the histograms/data points will have the bin number as the x-axis and the content of each bin will be a number of events.</p> </li> <li> <p>For datacards whose inputs are <code>RooAbsPdf</code>/<code>RooDataHist</code>s, the x-axis will correspond to the observable and the bin content will be the PDF density / events divided by the bin width. This means the absolute number of events in a given bin, i, can be obtained from <code>h.GetBinContent(i)*h.GetBinWidth(i)</code> or similar for the data graphs. Note that for unbinned analyses Combine will make a reasonable guess as to an appropriate binning.</p> </li> </ul> <p>Uncertainties on the shapes will be added with the option <code>--saveWithUncertainties</code>. These uncertainties are generated by re-sampling of the fit covariance matrix, thereby accounting for the full correlation between the parameters of the fit.</p> <p>Warning</p> <p>It may be tempting to sum up the uncertainties in each bin (in quadrature) to get the total uncertainty on a process. However, this is (usually) incorrect, as doing so would not account for correlations between the bins. Instead you can refer to the uncertainties which will be added to the post-fit normalizations described above.</p> <p>Additionally, the covariance matrix between bin yields (or yields/bin-widths) in each channel will also be saved as a <code>TH2F</code> named total_covar. If the covariance between all bins across all channels is desired, this can be added using the option <code>--saveOverallShapes</code>. Each folder will now contain additional distributions (and covariance matrices) corresponding to the concatenation of the bins in each channel (and therefore the covaraince between every bin in the analysis). The bin labels should make it clear as to which bin corresponds to which channel.</p>"},{"location":"part3/nonstandard/#toy-by-toy-diagnostics","title":"Toy-by-toy diagnostics","text":"<p><code>FitDiagnostics</code> can also be used to diagnose the fitting procedure in toy experiments to identify potentially problematic nuisance parameters when running the full limits/p-values. This can be done by adding the option <code>-t &lt;num toys&gt;</code>. The output file, <code>fitDiagnostics.root</code> the three <code>TTrees</code> will contain the value of the constraint fitted result in each toy, as a separate entry. 
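</p> <p>The per-toy entries can also be inspected directly with PyROOT before turning to the plotting macro described below; a minimal sketch is shown here (the <code>r</code> and <code>fit_status</code> branches are standard, and every floating parameter of the model has a branch with its own name).</p> <pre><code>import ROOT\n\nf = ROOT.TFile.Open(\"fitDiagnostics.root\")\ntree = f.Get(\"tree_fit_sb\")\nvalues = []\nfor entry in tree:\n    if entry.fit_status &lt; 0:    # optionally skip entries with a bad fit status\n        continue\n    values.append(entry.r)       # or getattr(entry, \"name_of_some_nuisance\")\nprint(\"toys used:\", len(values), \"mean fitted r:\", sum(values) / len(values))\n</code></pre> <p>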
It is recommended to use the following options when investigating toys to reduce the running time: <code>--toysFrequentist</code> <code>--noErrors</code> <code>--minos none</code></p> <p>The results can be plotted using the macro test/plotParametersFromToys.C</p> <pre><code>$ root -l\n.L plotParametersFromToys.C+\nplotParametersFromToys(\"fitDiagnosticsToys.root\",\"fitDiagnosticsData.root\",\"workspace.root\",\"r&lt;0\")\n</code></pre> <p>The first argument is the name of the output file from running with toys, and the second and third (optional) arguments are the name of the file containing the result from a fit to the data and the workspace (created from <code>text2workspace.py</code>). The fourth argument can be used to specify a cut string applied to one of the branches in the tree, which can be used to correlate strange behaviour with specific conditions. The output will be 2 pdf files (<code>tree_fit_(s)b.pdf</code>) and 2 ROOT files (<code>tree_fit_(s)b.root</code>) containing canvases of the fit results of the tool. For details on the output plots, consult AN-2012/317.</p>"},{"location":"part3/nonstandard/#scaling-constraints","title":"Scaling constraints","text":"<p>It possible to scale the constraints on the nuisance parameters when converting the datacard to a workspace (see the section on physics models) with <code>text2workspace.py</code>. This can be useful for projection studies of the analysis to higher luminosities or with different assumptions about the sizes of certain systematics without changing the datacard by hand.</p> <p>We consider two kinds of scaling;</p> <ul> <li>A constant scaling factor to scale the constraints</li> <li>A functional scale factor that depends on some other parameters in the workspace, eg a luminosity scaling parameter (as a <code>rateParam</code> affecting all processes).</li> </ul> <p>In both cases these scalings can be introduced by adding some extra options at the <code>text2workspace.py</code> step.</p> <p>To add a constant scaling factor we use the option <code>--X-rescale-nuisance</code>, eg</p> <pre><code>text2workspace.py datacard.txt --X-rescale-nuisance '[some regular expression]' 0.5\n</code></pre> <p>will create the workspace in which every nuisance parameter whose name matches the specified regular expression will have the width of the gaussian constraint scaled by a factor 0.5.</p> <p>Multiple <code>--X-rescale-nuisance</code> options can be specified to set different scalings for different nuisances (note that you actually have to write <code>--X-rescale-nuisance</code> each time as in <code>--X-rescale-nuisance 'theory.*' 0.5  --X-rescale-nuisance 'exp.*' 0.1</code>).</p> <p>To add a functional scaling factor we use the option <code>--X-nuisance-function</code>, which works in a similar way. Instead of a constant value you should specify a <code>RooFit</code> factory expression.</p> <p>A typical case would be scaling by \\(1/\\sqrt{L}\\), where \\(L\\) is a luminosity scale factor. For example, assuming there is some parameter in the datacard/workspace called <code>lumiscale</code>,</p> <pre><code>text2workspace.py datacard.txt --X-nuisance-function '[some regular expression]' 'expr::lumisyst(\"1/sqrt(@0)\",lumiscale[1])'\n</code></pre> <p>This factory syntax is flexible, but for our use case the typical format will be: <code>expr::[function name](\"[formula]\", [arg0], [arg1], ...)</code>. The <code>arg0</code>, <code>arg1</code> ... are represented in the formula by <code>@0</code>, <code>@1</code>,... 
placeholders.</p> <p>Warning</p> <p>We are playing a slight trick here with the <code>lumiscale</code> parameter. At the point at which <code>text2workspace.py</code> is building these scaling terms the <code>lumiscale</code> for the <code>rateParam</code> has not yet been created. By writing <code>lumiscale[1]</code> we are telling RooFit to create this variable with an initial value of 1, and then later this will be re-used by the <code>rateParam</code> creation.</p> <p>A similar option, <code>--X-nuisance-group-function</code>, can be used to scale whole groups of nuisances (see groups of nuisances). Instead of a regular expression just give the group name instead,</p> <pre><code>text2workspace.py datacard.txt --X-nuisance-group-function [group name] 'expr::lumisyst(\"1/sqrt(@0)\",lumiscale[1])'\n</code></pre>"},{"location":"part3/nonstandard/#nuisance-parameter-impacts","title":"Nuisance parameter impacts","text":"<p>The impact of a nuisance parameter (NP) \u03b8 on a parameter of interest (POI) \u03bc is defined as the shift \u0394\u03bc that is induced as \u03b8 is fixed and brought to its +1\u03c3 or \u22121\u03c3 post-fit values, with all other parameters profiled as normal (see JHEP 01 (2015) 069 for a description of this method).</p> <p>This is effectively a measure of the correlation between the NP and the POI, and is useful for determining which NPs have the largest effect on the POI uncertainty.</p> <p>It is possible to use the <code>MultiDimFit</code> method of Combine with the option <code>--algo impact -P parameter</code> to calculate the impact of a particular nuisance parameter on the parameter(s) of interest. We will use the <code>combineTool.py</code> script to automate the fits.</p> <p>We will use an example workspace from the \\(H\\rightarrow\\tau\\tau\\) datacard,</p> <pre><code>$ cp HiggsAnalysis/CombinedLimit/data/tutorials/htt/125/htt_tt.txt .\n$ text2workspace.py htt_tt.txt -m 125\n</code></pre> <p>Calculating the impacts is done in a few stages. First we just fit for each POI, using the <code>--doInitialFit</code> option with <code>combineTool.py</code>, and adding the <code>--robustFit 1</code> option that will be passed through to Combine,</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 --doInitialFit --robustFit 1\n</code></pre> <p>Have a look at the options as for likelihood scans when using <code>robustFit 1</code>.</p> <p>Next we perform a similar scan for each nuisance parameter with the <code>--doFits</code> options,</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 --robustFit 1 --doFits\n</code></pre> <p>Note that this will run approximately 60 scans, and to speed things up the option <code>--parallel X</code> can be given to run X Combine jobs simultaneously. The batch and grid submission methods described in the combineTool for job submission section can also be used.</p> <p>Once all jobs are completed, the output can be collected and written into a json file:</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 -o impacts.json\n</code></pre> <p>A plot summarizing the nuisance parameter values and impacts can be made with <code>plotImpacts.py</code>,</p> <pre><code>plotImpacts.py -i impacts.json -o impacts\n</code></pre> <p>The first page of the output is shown below. Note that in these figures, the nuisance parameters are labelled as \\(\\theta\\) instead of \\(\\nu\\).</p> <p></p> <p>The direction of the +1\u03c3 and -1\u03c3 impacts (i.e. 
when the NP is moved to its +1\u03c3 or -1\u03c3 values) on the POI indicates whether the parameter is correlated or anti-correlated with it.</p> <p>For models with multiple POIs, the Combine option <code>--redefineSignalPOIs X,Y,Z...</code> should be specified in all three of the <code>combineTool.py -M Impacts [...]</code> steps above. The final step will produce the <code>impacts.json</code> file which will contain the impacts for all the specified POIs. In the <code>plotImpacts.py</code> script, a particular POI can be specified with <code>--POI X</code>.</p> <p>Warning</p> <p>The plot also shows the best fit value of the POI at the top and its uncertainty. You may wish to allow the range to go negative (i.e using <code>--setParameterRanges</code> or <code>--rMin</code>) to avoid getting one-sided impacts!</p> <p>This script also accepts an optional json-file argument with <code>-t</code>, which can be used to provide a dictionary for renaming parameters. A simple example would be to create a file <code>rename.json</code>,</p> <pre><code>{\n  \"r\" : \"#mu\"\n}\n</code></pre> <p>that will rename the POI label on the plot.</p> <p>Info</p> <p>Since <code>combineTool</code> accepts the usual options for combine you can also generate the impacts on an Asimov or toy dataset.</p> <p>The left panel in the summary plot shows the value of \\((\\nu-\\nu_{0})/\\Delta_{\\nu}\\) where \\(\\nu\\) and \\(\\nu_{0}\\) are the post and pre-fit values of the nuisance parameter and \\(\\Delta_{\\nu}\\) is the pre-fit uncertainty. The asymmetric error bars show the post-fit uncertainty divided by the pre-fit uncertainty meaning that parameters with error bars smaller than \\(\\pm 1\\) are constrained in the fit. The pull will additionally be shown. As with the <code>diffNuisances.py</code> script, the option <code>--pullDef</code> can be used (to modify the definition of the pull that is shown).</p>"},{"location":"part3/nonstandard/#breakdown-of-uncertainties","title":"Breakdown of uncertainties","text":"<p>Often you will want to report the breakdown of your total (systematic) uncertainty on a measured parameter due to one or more groups of nuisance parameters. For example, these groups could be theory uncertainties, trigger uncertainties, ... The prodecude to do this in Combine is to sequentially freeze groups of nuisance parameters and subtract (in quadrature) from the total uncertainty. Below are the steps to do so. We will use the <code>data/tutorials/htt/125/htt_tt.txt</code> datacard for this.</p> <ol> <li>Add groups to the datacard to group nuisance parameters. Nuisance parameters not in groups will be considered as \"rest\" in the later steps. 
The lines should look like the following and you should add them to the end of the datacard</li> </ol> <pre><code>theory      group = QCDscale_VH QCDscale_ggH1in QCDscale_ggH2in QCDscale_qqH UEPS pdf_gg pdf_qqbar\ncalibration group = CMS_scale_j_8TeV CMS_scale_t_tautau_8TeV CMS_htt_scale_met_8TeV\nefficiency  group = CMS_eff_b_8TeV   CMS_eff_t_tt_8TeV CMS_fake_b_8TeV\n</code></pre> <ol> <li> <p>Create the workspace with <code>text2workspace.py data/tutorials/htt/125/htt_tt.txt -m 125</code>.</p> </li> <li> <p>Run a fit with all nuisance parameters floating and store the workspace in an output file - <code>combine data/tutorials/htt/125/htt_tt.root -M MultiDimFit --saveWorkspace -n htt.postfit</code></p> </li> <li> <p>Run a scan from the postfit workspace</p> </li> </ol> <pre><code>combine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit -n htt.total --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4\n</code></pre> <ol> <li>Run additional scans using the post-fit workspace, sequentially adding another group to the list of groups to freeze</li> </ol> <pre><code>combine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeNuisanceGroups theory -n htt.freeze_theory\n\ncombine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeNuisanceGroups theory,calibration -n htt.freeze_theory_calibration\n\ncombine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeNuisanceGroups theory,calibration,efficiency -n htt.freeze_theory_calibration_efficiency\n</code></pre> <ol> <li>Run one last scan freezing all of the constrained nuisance parameters (this represents the statistical uncertainty only).</li> </ol> <pre><code>combine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeParameters allConstrainedNuisances -n htt.freeze_all\n</code></pre> <ol> <li>Use the <code>combineTool</code> script <code>plot1DScan.py</code> to report the breakdown of uncertainties.</li> </ol> <pre><code>plot1DScan.py higgsCombinehtt.total.MultiDimFit.mH120.root --main-label \"Total Uncert.\"  --others higgsCombinehtt.freeze_theory.MultiDimFit.mH120.root:\"freeze theory\":4 higgsCombinehtt.freeze_theory_calibration.MultiDimFit.mH120.root:\"freeze theory+calibration\":7 higgsCombinehtt.freeze_theory_calibration_efficiency.MultiDimFit.mH120.root:\"freeze theory+calibration+efficiency\":2 higgsCombinehtt.freeze_all.MultiDimFit.mH120.root:\"stat only\":6  --output breakdown --y-max 10 --y-cut 40 --breakdown \"theory,calibration,efficiency,rest,stat\"\n</code></pre> <p>The final step calculates the contribution of each group of nuisance parameters as the subtraction in quadrature of each scan from the previous one. This procedure guarantees that the sum in quadrature of the individual components is the same as the total uncertainty.</p> <p>The plot below is produced,</p> <p></p> <p>Warning</p> <p>While the above procedure is guaranteed the have the effect that the sum in quadrature of the breakdown will equal the total uncertainty, the order in which you freeze the groups can make a difference due to correlations induced by the fit. 
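</p> <p>The quadrature subtraction performed in the final step can also be reproduced by hand; the short Python sketch below shows the arithmetic, with invented placeholder numbers standing in for the uncertainties read off each of the scans above.</p> <pre><code>import math\n\nscans = {                                  # uncertainty on r from each sequential scan (placeholders)\n    \"total\": 0.50,\n    \"freeze theory\": 0.45,\n    \"freeze theory+calibration\": 0.42,\n    \"freeze theory+calibration+efficiency\": 0.40,\n    \"stat only (freeze all)\": 0.35,\n}\nlabels = list(scans)\nfor prev, cur in zip(labels, labels[1:]):\n    component = math.sqrt(scans[prev]**2 - scans[cur]**2)\n    print(f\"component removed at '{cur}': {component:.3f}\")\n# the components plus the remaining \"stat only\" uncertainty add back in quadrature to the total\n</code></pre> <p>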
You should check if the answers change significantly if changing the order and we recommend you start with the largest group (in terms of overall contribution to the uncertainty) first, working down the list in order of the size of the contribution.</p>"},{"location":"part3/nonstandard/#channel-masking","title":"Channel Masking","text":"<p>The Combine tool has a number of features for diagnostics and plotting results of fits. It can often be useful to turn off particular channels in a combined analysis to see how constraints/shifts in parameter values can vary. It can also be helpful to plot the post-fit shapes and uncertainties of a particular channel (for example a signal region) without including the constraints from the data in that region.</p> <p>This can in some cases be achieved by removing a specific datacard when running <code>combineCards.py</code>. However, when doing so, the information of particular nuisance parameters and PDFs in that region will be lost. Instead, it is possible to mask that channel from the likelihood. This is achieved at the <code>text2Workspace</code> step using the option <code>--channel-masks</code>.</p>"},{"location":"part3/nonstandard/#example-removing-constraints-from-the-signal-region","title":"Example: removing constraints from the signal region","text":"<p>We will take the control region example from the rate parameters tutorial from data/tutorials/rate_params/.</p> <p>The first step is to combine the cards combineCards.py signal=signal_region.txt dimuon=dimuon_control_region.txt singlemuon=singlemuon_control_region.txt &gt; datacard.txt</p> <p>Note that we use the directive <code>CHANNELNAME=CHANNEL_DATACARD.txt</code> so that the names of the channels are under our control and easier to interpret. Next, we make a workspace and tell Combine to create the parameters used to mask channels</p> <pre><code>text2workspace.py datacard.txt --channel-masks\n</code></pre> <p>Now we will try to do a fit ignoring the signal region. We can turn off the signal region by setting the corresponding channel mask parameter to 1: <code>--setParameters mask_signal=1</code>. Note that <code>text2workspace</code> has created a masking parameter for every channel with the naming scheme mask_CHANNELNAME. By default, every parameter is set to 0 so that the channel is unmasked by default.</p> <pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUncertainties --setParameters mask_signal=1\n</code></pre> <p>Warning</p> <p>There will be a lot of warnings from Combine. These are safe to ignore as they are due to the s+b fit not converging. This is expected as the free signal parameter cannot be constrained because the data in the signal region is being ignored.</p> <p>We can compare the post-fit background and uncertainties with and without the signal region included by re-running with <code>--setParameters mask_signal=0</code> (or just removing that option completely). Below is a comparison of the background in the signal region with and without masking the data in the signal region. We take these from the shapes folder shapes_fit_b/signal/total_background in the <code>fitDiagnostics.root</code> output.</p> <p></p> <p>Clearly the background shape is different and much less constrained without including the signal region, as expected. 
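</p> <p>If you want to check which masking parameters exist in a given workspace, a short PyROOT sketch like the one below can list them; it assumes the default workspace name <code>w</code> produced by <code>text2workspace.py</code> and the <code>datacard.root</code> file created above.</p> <pre><code>import ROOT\n\nf = ROOT.TFile.Open(\"datacard.root\")\nw = f.Get(\"w\")\nallvars = w.allVars()\nit = allvars.createIterator()\nvar = it.Next()\nwhile var:\n    if var.GetName().startswith(\"mask_\"):\n        print(var.GetName(), \"=\", var.getVal())\n    var = it.Next()\n</code></pre> <p>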
Channel masking can be used with any method in Combine.</p>"},{"location":"part3/nonstandard/#roomultipdf-conventional-bias-studies","title":"RooMultiPdf conventional bias studies","text":"<p>Several analyses in CMS use a functional form to describe the background. This functional form is fit to the data. Often however, there is some uncertainty associated with the choice of which background function to use, and this choice will impact the fit results. It is therefore often the case that in these analyses, a bias study is performed. This study will give an indication of the size of the potential bias in the result, given a certain choice of functional form. These studies can be conducted using Combine.</p> <p>Below is an example script that will produce a workspace based on a simplified Higgs to diphoton (Hgg) analysis with a single category. It will produce the data and PDFs necessary for this example, and you can use it as a basis to construct your own studies.</p> <pre><code>void makeRooMultiPdfWorkspace(){\n\n   // Load the combine Library\n   gSystem-&gt;Load(\"libHiggsAnalysisCombinedLimit.so\");\n\n   // mass variable\n   RooRealVar mass(\"CMS_hgg_mass\",\"m_{#gamma#gamma}\",120,100,180);\n\n\n   // create 3 background pdfs\n   // 1. exponential\n   RooRealVar expo_1(\"expo_1\",\"slope of exponential\",-0.02,-0.1,-0.0001);\n   RooExponential exponential(\"exponential\",\"exponential pdf\",mass,expo_1);\n\n   // 2. polynomial with 2 parameters\n   RooRealVar poly_1(\"poly_1\",\"T1 of chebychev polynomial\",0,-3,3);\n   RooRealVar poly_2(\"poly_2\",\"T2 of chebychev polynomial\",0,-3,3);\n   RooChebychev polynomial(\"polynomial\",\"polynomial pdf\",mass,RooArgList(poly_1,poly_2));\n\n   // 3. A power law function\n   RooRealVar pow_1(\"pow_1\",\"exponent of power law\",-3,-6,-0.0001);\n   RooGenericPdf powerlaw(\"powerlaw\",\"TMath::Power(@0,@1)\",RooArgList(mass,pow_1));\n\n   // Generate some data (lets use the power lay function for it)\n   // Here we are using unbinned data, but binning the data is also fine\n   RooDataSet *data = powerlaw.generate(mass,RooFit::NumEvents(1000));\n\n   // First we fit the pdfs to the data (gives us a sensible starting value of parameters for, e.g - blind limits)\n   exponential.fitTo(*data);   // index 0\n   polynomial.fitTo(*data);   // index 1\n   powerlaw.fitTo(*data);     // index 2\n\n   // Make a plot (data is a toy dataset)\n   RooPlot *plot = mass.frame();   data-&gt;plotOn(plot);\n   exponential.plotOn(plot,RooFit::LineColor(kGreen));\n   polynomial.plotOn(plot,RooFit::LineColor(kBlue));\n   powerlaw.plotOn(plot,RooFit::LineColor(kRed));\n   plot-&gt;SetTitle(\"PDF fits to toy data\");\n   plot-&gt;Draw();\n\n   // Make a RooCategory object. This will control which of the pdfs is \"active\"\n   RooCategory cat(\"pdf_index\",\"Index of Pdf which is active\");\n\n   // Make a RooMultiPdf object. 
The order of the pdfs will be the order of their index, ie for below\n   // 0 == exponential\n   // 1 == polynomial\n   // 2 == powerlaw\n   RooArgList mypdfs;\n   mypdfs.add(exponential);\n   mypdfs.add(polynomial);\n   mypdfs.add(powerlaw);\n\n   RooMultiPdf multipdf(\"roomultipdf\",\"All Pdfs\",cat,mypdfs);\n   // By default the multipdf will tell combine to add 0.5 to the nll for each parameter (this is the penalty for the discrete profiling method)\n   // It can be changed with\n   //   multipdf.setCorrectionFactor(penalty)\n   // For bias-studies, this isn;t relevant however, so lets just leave the default\n\n   // As usual make an extended term for the background with _norm for freely floating yield\n   RooRealVar norm(\"roomultipdf_norm\",\"Number of background events\",1000,0,10000);\n\n   // We will also produce a signal model for the bias studies\n   RooRealVar sigma(\"sigma\",\"sigma\",1.2); sigma.setConstant(true);\n   RooRealVar MH(\"MH\",\"MH\",125); MH.setConstant(true);\n   RooGaussian signal(\"signal\",\"signal\",mass,MH,sigma);\n\n\n   // Save to a new workspace\n   TFile *fout = new TFile(\"workspace.root\",\"RECREATE\");\n   RooWorkspace wout(\"workspace\",\"workspace\");\n\n   data-&gt;SetName(\"data\");\n   wout.import(*data);\n   wout.import(cat);\n   wout.import(norm);\n   wout.import(multipdf);\n   wout.import(signal);\n   wout.Print();\n   wout.Write();\n}\n</code></pre> <p>The signal is modelled as a simple Gaussian with a width approximately that of the diphoton resolution. For the background there is a choice of 3 functions: an exponential, a power-law, and a 2nd order polynomial. This choice is accessible within Combine through the use of the RooMultiPdf object, which can switch between the functions by setting their associated indices (herein called pdf_index). This (as with all parameters in Combine) can be set via the <code>--setParameters</code> option.</p> <p>To assess the bias, one can throw toys using one function and fit with another. To do this, only a single datacard is needed: hgg_toy_datacard.txt.</p> <p>The bias studies are performed in two stages. The first is to generate toys using one of the functions, under some value of the signal strength r (or \\(\\mu\\)). This can be repeated for several values of r and also at different masses, but in this example the Higgs boson mass is fixed to 125 GeV.</p> <pre><code>    combine hgg_toy_datacard.txt -M GenerateOnly --setParameters pdf_index=0 --toysFrequentist -t 100 --expectSignal 1 --saveToys -m 125 --freezeParameters pdf_index\n</code></pre> <p>Warning</p> <p>It is important to freeze <code>pdf_index</code>, otherwise Combine will try to iterate over the index in the frequentist fit.</p> <p>Now we have 100 toys which, by setting <code>pdf_index=0</code>, sets the background PDF to the exponential function. This means we assume that the exponential is the true function. Note that the option <code>--toysFrequentist</code> is added; this first performs a fit of the PDF, assuming a signal strength of 1, to the data before generating the toys. This is the most obvious choice as to where to throw the toys from.</p> <p>The next step is to fit the toys under a different background PDF hypothesis. 
This time we set the <code>pdf_index</code> to 1, which selects the polynomial, and run fits with the <code>FitDiagnostics</code> method, again freezing <code>pdf_index</code>.</p> <pre><code>    combine hgg_toy_datacard.txt -M FitDiagnostics  --setParameters pdf_index=1 --toysFile higgsCombineTest.GenerateOnly.mH125.123456.root  -t 100 --rMin -10 --rMax 10 --freezeParameters pdf_index --cminDefaultMinimizerStrategy=0\n</code></pre> <p>Note how we add the option <code>--cminDefaultMinimizerStrategy=0</code>. This is because we do not need the Hessian, as <code>FitDiagnostics</code> will run MINOS to get the uncertainty on <code>r</code>. If we do not do this, Minuit will think the fit failed as we have parameters (those not attached to the current PDF) for which the likelihood is flat.</p> <p>Warning</p> <p>You may get warnings about non-accurate errors such as <code>[WARNING]: Unable to determine uncertainties on all fit parameters in b-only fit</code> - These can be ignored since they are related to the free parameters of the background PDFs which are not active.</p> <p>In the output file <code>fitDiagnostics.root</code> there is a tree that contains the best fit results under the signal+background hypothesis. One measure of the bias is the pull defined as the difference between the measured value of \\(\\mu\\) and the generated value (here we used 1) relative to the uncertainty on \\(\\mu\\). The pull distribution can be drawn and the mean provides an estimate of the pull. In this example, we are averaging the positive and negative uncertainties, but we could do something smarter if the uncertainties are very asymmetric.</p> <pre><code>root -l fitDiagnostics.root\ntree_fit_sb-&gt;Draw(\"(r-1)/(0.5*(rHiErr+rLoErr))&gt;&gt;h(20,-5,5)\")\nh-&gt;Fit(\"gaus\")\n</code></pre> <p></p> <p>From the fitted Gaussian, we see the mean is at -1.29, which would indicate a bias of 129% of the uncertainty on mu from choosing the polynomial when the true function is an exponential.</p>"},{"location":"part3/nonstandard/#discrete-profiling","title":"Discrete profiling","text":"<p>If the <code>discrete</code> nuisance is left floating, it will be profiled by looping through the possible index values and finding the PDF that gives the best fit. This allows for the discrete profiling method to be applied for any method which involves a profiled likelihood (frequentist methods).</p> <p>Warning</p> <p>You should be careful since MINOS knows nothing about the discrete nuisances and hence estimations of uncertainties will be incorrect via MINOS. Instead, uncertainties from scans and limits will correctly account for these nuisance parameters. Currently the Bayesian methods will not properly treat the nuisance parameters, so some care should be taken when interpreting Bayesian results.</p> <p>As an example, we can perform a likelihood scan as a function of the Higgs boson signal strength in the toy Hgg datacard. By leaving the object <code>pdf_index</code> non-constant, at each point in the likelihood scan, the PDFs will be iterated over and the one that gives the lowest -2 times log-likelihood, including the correction factor \\(c\\) (as defined in the paper linked above) will be stored in the output tree. We can also check the scan when we fix at each PDF individually to check that the envelope is achieved. For this, you will need to include the option <code>--X-rtd REMOVE_CONSTANT_ZERO_POINT=1</code>. 
In this way, we can take a look at the absolute value to compare the curves, if we also include <code>--saveNLL</code>.</p> <p>For example for a full scan, you can run</p> <pre><code>    combine -M MultiDimFit -d hgg_toy_datacard.txt --algo grid --setParameterRanges r=-1,3 --cminDefaultMinimizerStrategy 0 --saveNLL -n Envelope -m 125 --setParameters myIndex=-1 --X-rtd REMOVE_CONSTANT_ZERO_POINT=1\n</code></pre> <p>and for the individual <code>pdf_index</code> set to <code>X</code>,</p> <pre><code>    combine -M MultiDimFit -d hgg_toy_datacard.txt --algo grid --setParameterRanges r=-1,3 --cminDefaultMinimizerStrategy 0 --saveNLL --freezeParameters pdf_index --setParameters pdf_index=X -n fixed_pdf_X -m 125 --X-rtd REMOVE_CONSTANT_ZERO_POINT=1\n</code></pre> <p>for <code>X=0,1,2</code></p> <p>You can then plot the value of <code>2*(deltaNLL+nll+nll0)</code> to plot the absolute value of (twice) the negative log-likelihood, including the correction term for extra parameters in the different PDFs.</p> <p>The above output will produce the following scans. </p> <p>As expected, the curve obtained by allowing the <code>pdf_index</code> to float (labelled \"Envelope\") picks out the best function (maximum corrected likelihood) for each value of the signal strength.</p> <p>In general, the performance of Combine can be improved when using the discrete profiling method by including the option <code>--X-rtd MINIMIZER_freezeDisassociatedParams</code>. This will stop parameters not associated to the current PDF from floating in the fits. Additionally, you can include the following options:</p> <ul> <li><code>--X-rtd MINIMIZER_multiMin_hideConstants</code>: hide the constant terms in the likelihood when recreating the minimizer</li> <li><code>--X-rtd MINIMIZER_multiMin_maskConstraints</code>: hide the constraint terms during the discrete minimization process</li> <li><code>--X-rtd MINIMIZER_multiMin_maskChannels=&lt;choice&gt;</code> mask the channels that are not needed from the NLL:</li> <li><code>&lt;choice&gt; 1</code>: keeps unmasked all channels that are participating in the discrete minimization.</li> <li><code>&lt;choice&gt; 2</code>: keeps unmasked only the channel whose index is being scanned at the moment.</li> </ul> <p>You may want to check with the Combine development team if you are using these options, as they are somewhat for expert use.</p>"},{"location":"part3/nonstandard/#roosplinend-multidimensional-splines","title":"RooSplineND multidimensional splines","text":"<p>RooSplineND can be used to interpolate from a tree of points to produce a continuous function in N-dimensions. This function can then be used as input to workspaces allowing for parametric rates/cross-sections/efficiencies. It can also be used to up-scale the resolution of likelihood scans (i.e like those produced from Combine) to produce smooth contours.</p> <p>The spline makes use of a radial basis decomposition to produce a continous \\(N \\to 1\\) map (function) from \\(M\\) provided sample points. The function of the \\(N\\) variables \\(\\vec{x}\\) is assumed to be of the form,</p> \\[ f(\\vec{x}) = \\sum_{i=1}^{M}w_{i}\\phi(||\\vec{x}-\\vec{x}_{i}||), \\] <p>where \\(\\phi(||\\vec{z}||) = e^{-\\frac{||\\vec{z}||}{\\epsilon^{2}}}\\). 
The distance \\(||.||\\) between two points is given by,</p> \\[ ||\\vec{x}-\\vec{y}||  = \\sum_{j=1}^{N}(x_{j}-y_{j})^{2}, \\] <p>if the option <code>rescale=false</code> and,</p> \\[ ||\\vec{x}-\\vec{y}||  = \\sum_{j=1}^{N} M^{1/N} \\cdot \\left( \\frac{ x_{j}-y_{j} }{ \\mathrm{max_{i=1,M}}(x_{i,j})-\\mathrm{min_{i=1,M}}(x_{i,j}) }\\right)^{2}, \\] <p>if the option <code>rescale=true</code>. Given the sample points, it is possible to determine the weights \\(w_{i}\\) as the solution of the set of equations,</p> \\[ \\sum_{i=1}^{M}w_{i}\\phi(||\\vec{x}_{j}-\\vec{x}_{i}||) = f(\\vec{x}_{j}). \\] <p>The solution is obtained using the <code>eigen</code> c++ package.</p> <p>The typical constructor of the object is as follows;</p> <pre><code>RooSplineND(const char *name, const char *title, RooArgList &amp;vars, TTree *tree, const char* fName=\"f\", double eps=3., bool rescale=false, std::string cutstring=\"\" ) ;\n</code></pre> <p>where the arguments are:</p> <ul> <li><code>vars</code>: A <code>RooArgList</code> of <code>RooRealVars</code> representing the \\(N\\) dimensions of the spline. The length of this list determines the dimension \\(N\\) of the spline.</li> <li><code>tree</code>: a TTree pointer where each entry represents a sample point used to construct the spline. The branch names must correspond to the names of the variables in <code>vars</code>.</li> <li><code>fName</code>: is a string representing the name of the branch to interpret as the target function \\(f\\).</li> <li><code>eps</code> : is the value of \\(\\epsilon\\) and represents the width of the basis functions \\(\\phi\\).</li> <li><code>rescale</code> : is an option to rescale the input sample points so that each variable has roughly the same range (see above in the definition of \\(||.||\\)).</li> <li><code>cutstring</code> : a string to remove sample points from the tree. Can be any typical cut string (eg \"var1&gt;10 &amp;&amp; var2&lt;3\").</li> </ul> <p>The object can be treated as a <code>RooAbsArg</code>; its value for the current values of the parameters is obtained as usual by using the <code>getVal()</code> method.</p> <p>Warning</p> <p>You should not include more variable branches than contained in <code>vars</code> in the tree, as the spline will interpret them as additional sample points. You will get a warning if there are two nearby points in the input samples and this will cause a failure in determining the weights. 
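</p> <p>The linear system above can be illustrated with a short, standalone NumPy sketch; this is only an illustration of the equations (using the unscaled distance and an arbitrary target function), not the <code>RooSplineND</code> implementation itself.</p> <pre><code>import numpy as np\n\nrng = np.random.default_rng(1)\nX = rng.uniform(-3, 3, size=(50, 2))            # M=50 sample points in N=2 dimensions\nf = 0.1 * np.exp(-(X**2).sum(axis=1) / 9.0)     # target function values at the sample points\neps = 3.0\n\nd2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)   # ||x_j - x_i|| as defined above\nPhi = np.exp(-d2 / eps**2)                                  # basis function phi evaluated pairwise\nw = np.linalg.solve(Phi, f)                                 # weights w_i from the linear system\n\ndef spline(x):\n    return np.dot(w, np.exp(-((x - X) ** 2).sum(axis=-1) / eps**2))\n\nprint(spline(np.array([0.5, -1.0])))\n</code></pre> <p>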
If you cannot create a reduced tree, you can remove entries by using the <code>cutstring</code>.</p> <p>The following script is an example that produces a 2D spline (<code>N=2</code>) from a set of 400 points (<code>M=400</code>) generated from a function.</p> Show script <pre><code>void splinend(){\n   // library containing the RooSplineND\n   gSystem-&gt;Load(\"libHiggsAnalysisCombinedLimit.so\");\n\n   TTree *tree = new TTree(\"tree_vals\",\"tree_vals\");\n   float xb,yb,fb;\n\n   tree-&gt;Branch(\"f\",&amp;fb,\"f/F\");\n   tree-&gt;Branch(\"x\",&amp;xb,\"x/F\");\n   tree-&gt;Branch(\"y\",&amp;yb,\"y/F\");\n\n   TRandom3 *r = new TRandom3();\n   int nentries = 20; // just use a regular grid of 20x20=400 points\n\n   double xmin = -3.2;\n   double xmax = 3.2;\n   double ymin = -3.2;\n   double ymax = 3.2;\n\n   for (int n=0;n&lt;nentries;n++){\n    for (int k=0;k&lt;nentries;k++){\n\n      xb=xmin+n*((xmax-xmin)/nentries);\n      yb=ymin+k*((ymax-ymin)/nentries);\n      // Gaussian * cosine function radial in \"F(x^2+y^2)\"\n      double R = (xb*xb)+(yb*yb);\n      fb = 0.1*TMath::Exp(-1*(R)/9)*TMath::Cos(2.5*TMath::Sqrt(R));\n      tree-&gt;Fill();\n     }\n   }\n\n   // 2D graph of points in tree\n   TGraph2D *p0 = new TGraph2D();\n   p0-&gt;SetMarkerSize(0.8);\n   p0-&gt;SetMarkerStyle(20);\n\n   int c0=0;\n   for (int p=0;p&lt;tree-&gt;GetEntries();p++){\n        tree-&gt;GetEntry(p);\n        p0-&gt;SetPoint(c0,xb,yb,fb);\n        c0++;\n        }\n\n\n   // ------------------------------ THIS IS WHERE WE BUILD THE SPLINE ------------------------ //\n   // Create 2 Real-vars, one for each of the parameters of the spline\n   // The variables MUST be named the same as the corresponding branches in the tree\n   RooRealVar x(\"x\",\"x\",0.1,xmin,xmax);\n   RooRealVar y(\"y\",\"y\",0.1,ymin,ymax);\n\n\n   // And the spline - arguments are\n   // Required -&gt;   name, title, arglist of dependants, input tree,\n   // Optional -&gt;  function branch name, interpolation width (tunable parameter), rescale Axis bool, cutstring\n   // The tunable parameter gives the radial basis a \"width\", over which the interpolation will be effectively taken\n\n   // the reascale Axis bool (if true) will first try to rescale the points so that they are of order 1 in range\n   // This can be helpful if for example one dimension is in much larger units than another.\n\n   // The cutstring is just a ROOT string which can be used to apply cuts to the tree in case only a sub-set of the points should be used\n\n   RooArgList args(x,y);\n   RooSplineND *spline = new RooSplineND(\"spline\",\"spline\",args,tree,\"f\",1,true);\n      // ----------------------------------------------------------------------------------------- //\n\n\n   //TGraph *gr = spline-&gt;getGraph(\"x\",0.1); // Return 1D graph. 
Will be a slice of the spline for fixed y generated at steps of 0.1\n\n   // Plot the 2D spline\n   TGraph2D *gr = new TGraph2D();\n   int pt = 0;\n   for (double xx=xmin;xx&lt;xmax;xx+=0.1){\n     for (double yy=xmin;yy&lt;ymax;yy+=0.1){\n        x.setVal(xx);\n        y.setVal(yy);\n        gr-&gt;SetPoint(pt,xx,yy,spline-&gt;getVal());\n        pt++;\n     }\n   }\n\n   gr-&gt;SetTitle(\"\");\n\n   gr-&gt;SetLineColor(1);\n   //p0-&gt;SetTitle(\"0.1 exp(-(x{^2}+y{^2})/9) #times Cos(2.5#sqrt{x^{2}+y^{2}})\");\n   gr-&gt;Draw(\"surf\");\n   gr-&gt;GetXaxis()-&gt;SetTitle(\"x\");\n   gr-&gt;GetYaxis()-&gt;SetTitle(\"y\");\n   p0-&gt;Draw(\"Pcolsame\");\n\n   //p0-&gt;Draw(\"surfsame\");\n   TLegend *leg = new TLegend(0.2,0.82,0.82,0.98);\n   leg-&gt;SetFillColor(0);\n   leg-&gt;AddEntry(p0,\"0.1 exp(-(x{^2}+y{^2})/9) #times Cos(2.5#sqrt{x^{2}+y^{2}})\",\"p\");\n   leg-&gt;AddEntry(gr,\"RooSplineND (N=2) interpolation\",\"L\");\n   leg-&gt;Draw();\n}\n</code></pre> <p>Running the script will produce the following plot. The plot shows the sampled points and the spline produced from them.</p> <p></p>"},{"location":"part3/nonstandard/#rooparametrichist-gamman-for-shapes","title":"RooParametricHist gammaN for shapes","text":"<p>Currently, there is no straightforward implementation of using per-bin gmN-like uncertainties with shape (histogram) analyses. Instead, it is possible to tie control regions (written as datacards) with the signal region using three methods.</p> <p>For analyses that take the normalization of some process from a control region, it is possible to use either lnU or rateParam directives to float the normalization in a correlated way of some process between two regions. Instead if each bin is intended to be determined via a control region, one can use a number of <code>RooFit</code> histogram PDFs/functions to accomplish this. The example below shows a simple implementation of a RooParametricHist to achieve this.</p> <p>Copy the script below into a file called <code>examplews.C</code> and create the input workspace using <code>root -l examplews.C</code>...</p> Show script <pre><code>void examplews(){\n    // As usual, load the combine library to get access to the RooParametricHist\n    gSystem-&gt;Load(\"libHiggsAnalysisCombinedLimit.so\");\n\n    // Output file and workspace\n    TFile *fOut = new TFile(\"param_ws.root\",\"RECREATE\");\n    RooWorkspace wspace(\"wspace\",\"wspace\");\n\n    // better to create the bins rather than use the \"nbins,min,max\" to avoid spurious warning about adding bins with different\n    // ranges in combine - see https://root-forum.cern.ch/t/attempt-to-divide-histograms-with-different-bin-limits/17624/3 for why!\n    const int nbins = 4;\n    double xmin=200.;\n    double xmax=1000.;\n    double xbins[5] = {200.,400.,600.,800.,1000.};\n\n    // A search in a MET tail, define MET as our variable\n\n    RooRealVar met(\"met\",\"E_{T}^{miss}\",200,xmin,xmax);\n    RooArgList vars(met);\n\n\n    // ---------------------------- SIGNAL REGION -------------------------------------------------------------------//\n    // Make a dataset, this will be just four bins in MET.\n    // its easiest to make this from a histogram. 
Set the contents to \"somehting\"\n    TH1F data_th1(\"data_obs_SR\",\"Data observed in signal region\",nbins,xbins);\n\n    data_th1.SetBinContent(1,100);\n    data_th1.SetBinContent(2,50);\n    data_th1.SetBinContent(3,25);\n    data_th1.SetBinContent(4,10);\n    RooDataHist data_hist(\"data_obs_SR\",\"Data observed\",vars,&amp;data_th1);\n    wspace.import(data_hist);\n\n    // In the signal region, our background process will be freely floating,\n    // Create one parameter per bin representing the yield. (note of course we can have multiple processes like this)\n    RooRealVar bin1(\"bkg_SR_bin1\",\"Background yield in signal region, bin 1\",100,0,500);\n    RooRealVar bin2(\"bkg_SR_bin2\",\"Background yield in signal region, bin 2\",50,0,500);\n    RooRealVar bin3(\"bkg_SR_bin3\",\"Background yield in signal region, bin 3\",25,0,500);\n    RooRealVar bin4(\"bkg_SR_bin4\",\"Background yield in signal region, bin 4\",10,0,500);\n    RooArgList bkg_SR_bins;\n    bkg_SR_bins.add(bin1);\n    bkg_SR_bins.add(bin2);\n    bkg_SR_bins.add(bin3);\n    bkg_SR_bins.add(bin4);\n\n    // Create a RooParametericHist which contains those yields, last argument is just for the binning,\n    // can use the data TH1 for that\n    RooParametricHist p_bkg(\"bkg_SR\", \"Background PDF in signal region\",met,bkg_SR_bins,data_th1);\n    // Always include a _norm term which should be the sum of the yields (thats how combine likes to play with pdfs)\n    RooAddition p_bkg_norm(\"bkg_SR_norm\",\"Total Number of events from background in signal region\",bkg_SR_bins);\n\n    // Every signal region needs a signal\n    TH1F signal_th1(\"signal_SR\",\"Signal expected in signal region\",nbins,xbins);\n\n    signal_th1.SetBinContent(1,1);\n    signal_th1.SetBinContent(2,2);\n    signal_th1.SetBinContent(3,3);\n    signal_th1.SetBinContent(4,8);\n    RooDataHist signal_hist(\"signal\",\"Data observed\",vars,&amp;signal_th1);\n    wspace.import(signal_hist);\n\n    // -------------------------------------------------------------------------------------------------------------//\n    // ---------------------------- CONTROL REGION -----------------------------------------------------------------//\n    TH1F data_CRth1(\"data_obs_CR\",\"Data observed in control region\",nbins,xbins);\n\n    data_CRth1.SetBinContent(1,200);\n    data_CRth1.SetBinContent(2,100);\n    data_CRth1.SetBinContent(3,50);\n    data_CRth1.SetBinContent(4,20);\n\n    RooDataHist data_CRhist(\"data_obs_CR\",\"Data observed\",vars,&amp;data_CRth1);\n    wspace.import(data_CRhist);\n\n    // This time, the background process will be dependent on the yields of the background in the signal region.\n    // The transfer factor TF must account for acceptance/efficiency etc differences in the signal to control\n    // In this example lets assume the control region is populated by the same process decaying to clean daughters with 2xBR\n    // compared to the signal region\n\n    // NB You could have a different transfer factor for each bin represented by a completely different RooRealVar\n\n    // We can imagine that the transfer factor could be associated with some uncertainty - lets say a 1% uncertainty due to efficiency and 2% due to acceptance.\n    // We need to make these nuisance parameters ourselves and give them a nominal value of 0\n\n\n    RooRealVar efficiency(\"efficiency\", \"efficiency nuisance parameter\",0);\n    RooRealVar acceptance(\"acceptance\", \"acceptance nuisance parameter\",0);\n\n    // We would need to make the transfer factor a 
function of those too. Here we've assumed Log-normal effects (i.e the same as putting lnN in the CR datacard)\n    // but note that we could use any function which could be used to parameterise the effect - eg if the systematic is due to some alternate template, we could\n    // use polynomials for example.\n\n\n    RooFormulaVar TF(\"TF\",\"Trasnfer factor\",\"2*TMath::Power(1.01,@0)*TMath::Power(1.02,@1)\",RooArgList(efficiency,acceptance) );\n\n    // Finally, we need to make each bin of the background in the control region a function of the background in the signal and the transfer factor\n    // N_CR = N_SR x TF\n\n    RooFormulaVar CRbin1(\"bkg_CR_bin1\",\"Background yield in control region, bin 1\",\"@0*@1\",RooArgList(TF,bin1));\n    RooFormulaVar CRbin2(\"bkg_CR_bin2\",\"Background yield in control region, bin 2\",\"@0*@1\",RooArgList(TF,bin2));\n    RooFormulaVar CRbin3(\"bkg_CR_bin3\",\"Background yield in control region, bin 3\",\"@0*@1\",RooArgList(TF,bin3));\n    RooFormulaVar CRbin4(\"bkg_CR_bin4\",\"Background yield in control region, bin 4\",\"@0*@1\",RooArgList(TF,bin4));\n\n    RooArgList bkg_CR_bins;\n    bkg_CR_bins.add(CRbin1);\n    bkg_CR_bins.add(CRbin2);\n    bkg_CR_bins.add(CRbin3);\n    bkg_CR_bins.add(CRbin4);\n    RooParametricHist p_CRbkg(\"bkg_CR\", \"Background PDF in control region\",met,bkg_CR_bins,data_th1);\n    RooAddition p_CRbkg_norm(\"bkg_CR_norm\",\"Total Number of events from background in control region\",bkg_CR_bins);\n    // -------------------------------------------------------------------------------------------------------------//\n\n\n    // we can also use the standard interpolation from combine by providing alternative shapes (as RooDataHists)\n    // here we're adding two of them (JES and ISR)\n    TH1F background_up(\"tbkg_CR_JESUp\",\"\",nbins,xbins);\n    background_up.SetBinContent(1,CRbin1.getVal()*1.01);\n    background_up.SetBinContent(2,CRbin2.getVal()*1.02);\n    background_up.SetBinContent(3,CRbin3.getVal()*1.03);\n    background_up.SetBinContent(4,CRbin4.getVal()*1.04);\n    RooDataHist bkg_CRhist_sysUp(\"bkg_CR_JESUp\",\"Bkg sys up\",vars,&amp;background_up);\n    wspace.import(bkg_CRhist_sysUp);\n\n    TH1F background_down(\"bkg_CR_JESDown\",\"\",nbins,xbins);\n    background_down.SetBinContent(1,CRbin1.getVal()*0.90);\n    background_down.SetBinContent(2,CRbin2.getVal()*0.98);\n    background_down.SetBinContent(3,CRbin3.getVal()*0.97);\n    background_down.SetBinContent(4,CRbin4.getVal()*0.96);\n    RooDataHist bkg_CRhist_sysDown(\"bkg_CR_JESDown\",\"Bkg sys down\",vars,&amp;background_down);\n    wspace.import(bkg_CRhist_sysDown);\n\n    TH1F background_2up(\"tbkg_CR_ISRUp\",\"\",nbins,xbins);\n    background_2up.SetBinContent(1,CRbin1.getVal()*0.85);\n    background_2up.SetBinContent(2,CRbin2.getVal()*0.9);\n    background_2up.SetBinContent(3,CRbin3.getVal()*0.95);\n    background_2up.SetBinContent(4,CRbin4.getVal()*0.99);\n    RooDataHist bkg_CRhist_sys2Up(\"bkg_CR_ISRUp\",\"Bkg sys 2up\",vars,&amp;background_2up);\n    wspace.import(bkg_CRhist_sys2Up);\n\n    TH1F background_2down(\"bkg_CR_ISRDown\",\"\",nbins,xbins);\n    background_2down.SetBinContent(1,CRbin1.getVal()*1.15);\n    background_2down.SetBinContent(2,CRbin2.getVal()*1.1);\n    background_2down.SetBinContent(3,CRbin3.getVal()*1.05);\n    background_2down.SetBinContent(4,CRbin4.getVal()*1.01);\n    RooDataHist bkg_CRhist_sys2Down(\"bkg_CR_ISRDown\",\"Bkg sys 2down\",vars,&amp;background_2down);\n    wspace.import(bkg_CRhist_sys2Down);\n\n    // import the 
pdfs\n    wspace.import(p_bkg);\n    wspace.import(p_bkg_norm,RooFit::RecycleConflictNodes());\n    wspace.import(p_CRbkg);\n    wspace.import(p_CRbkg_norm,RooFit::RecycleConflictNodes());\n    fOut-&gt;cd();\n    wspace.Write();\n\n    // Clean up\n    fOut-&gt;Close();\n    fOut-&gt;Delete();\n\n\n}\n</code></pre> <p>We will now discuss what the script is doing. First, the observable for the search is the missing energy, so we create a parameter to represent this observable.</p> <pre><code>   RooRealVar met(\"met\",\"E_{T}^{miss}\",xmin,xmax);\n</code></pre> <p>The following lines create a freely floating parameter for each of our bins (in this example, there are only 4 bins, defined for our observable <code>met</code>).</p> <pre><code>   RooRealVar bin1(\"bkg_SR_bin1\",\"Background yield in signal region, bin 1\",100,0,500);\n   RooRealVar bin2(\"bkg_SR_bin2\",\"Background yield in signal region, bin 2\",50,0,500);\n   RooRealVar bin3(\"bkg_SR_bin3\",\"Background yield in signal region, bin 3\",25,0,500);\n   RooRealVar bin4(\"bkg_SR_bin4\",\"Background yield in signal region, bin 4\",10,0,500);\n\n   RooArgList bkg_SR_bins;\n   bkg_SR_bins.add(bin1);\n   bkg_SR_bins.add(bin2);\n   bkg_SR_bins.add(bin3);\n   bkg_SR_bins.add(bin4);\n</code></pre> <p>They are put into a list so that we can create a <code>RooParametricHist</code> and its normalisation from that list</p> <pre><code>  RooParametricHist p_bkg(\"bkg_SR\", \"Background PDF in signal region\",met,bkg_SR_bins,data_th1);\n\n  RooAddition p_bkg_norm(\"bkg_SR_norm\",\"Total Number of events from background in signal region\",bkg_SR_bins);\n</code></pre> <p>For the control region, the background process will be dependent on the yields of the background in the signal region using a transfer factor. The transfer factor <code>TF</code> must account for acceptance/efficiency/etc differences between the signal region and the control regions.</p> <p>In this example we will assume the control region is populated by the same process decaying to a different final state with twice as large branching fraction as the one in the signal region.</p> <p>We could imagine that the transfer factor could be associated with some uncertainty - for example a 1% uncertainty due to efficiency and a 2% uncertainty due to acceptance differences. We need to make nuisance parameters ourselves to model this, and give them a nominal value of 0.</p> <pre><code>   RooRealVar efficiency(\"efficiency\", \"efficiency nuisance parameter\",0);\n   RooRealVar acceptance(\"acceptance\", \"acceptance nuisance parameter\",0);\n</code></pre> <p>We need to make the transfer factor a function of these parameters, since variations in these uncertainties will lead to variations of the transfer factor. 
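Written out, the transfer factor constructed below corresponds to \\(\\mathrm{TF}(\\theta_{\\mathrm{eff}},\\theta_{\\mathrm{acc}}) = 2 \\times 1.01^{\\theta_{\\mathrm{eff}}} \\times 1.02^{\\theta_{\\mathrm{acc}}}\\), where \\(\\theta_{\\mathrm{eff}}\\) and \\(\\theta_{\\mathrm{acc}}\\) denote the efficiency and acceptance nuisance parameters just defined, so that a shift of either parameter by \\(\\pm 1\\) scales the nominal factor of 2 by 1% or 2%, respectively. 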
Here we have assumed Log-normal effects (i.e. the same as putting lnN in the CR datacard), but we could use any function to parameterize the effect - for example, if the systematic uncertainty is due to some alternate template, we could use polynomials.</p> <pre><code>   RooFormulaVar TF(\"TF\",\"Transfer factor\",\"2*TMath::Power(1.01,@0)*TMath::Power(1.02,@1)\",RooArgList(efficiency,acceptance) );\n</code></pre> <p>Then, we need to make each bin of the background in the control region a function of the background in the signal region and the transfer factor - i.e. \\(N_{CR} = N_{SR} \\times TF\\).</p> <pre><code>   RooFormulaVar CRbin1(\"bkg_CR_bin1\",\"Background yield in control region, bin 1\",\"@0*@1\",RooArgList(TF,bin1));\n   RooFormulaVar CRbin2(\"bkg_CR_bin2\",\"Background yield in control region, bin 2\",\"@0*@1\",RooArgList(TF,bin2));\n   RooFormulaVar CRbin3(\"bkg_CR_bin3\",\"Background yield in control region, bin 3\",\"@0*@1\",RooArgList(TF,bin3));\n   RooFormulaVar CRbin4(\"bkg_CR_bin4\",\"Background yield in control region, bin 4\",\"@0*@1\",RooArgList(TF,bin4));\n</code></pre> <p>As before, we also need to create the <code>RooParametricHist</code> for this process in the control region, but this time the bin yields will be the <code>RooFormulaVars</code> we just created instead of freely floating parameters.</p> <pre><code>   RooArgList bkg_CR_bins;\n   bkg_CR_bins.add(CRbin1);\n   bkg_CR_bins.add(CRbin2);\n   bkg_CR_bins.add(CRbin3);\n   bkg_CR_bins.add(CRbin4);\n\n   RooParametricHist p_CRbkg(\"bkg_CR\", \"Background PDF in control region\",met,bkg_CR_bins,data_th1);\n   RooAddition p_CRbkg_norm(\"bkg_CR_norm\",\"Total Number of events from background in control region\",bkg_CR_bins);\n</code></pre> <p>Finally, we can also create alternative shape variations (Up/Down) that can be fed to Combine as we do with <code>TH1</code> or <code>RooDataHist</code> type workspaces. These need to be of type <code>RooDataHist</code>. The example below is for a Jet Energy Scale type shape uncertainty.</p> <pre><code>   TH1F background_up(\"tbkg_CR_JESUp\",\"\",nbins,xbins);\n   background_up.SetBinContent(1,CRbin1.getVal()*1.01);\n   background_up.SetBinContent(2,CRbin2.getVal()*1.02);\n   background_up.SetBinContent(3,CRbin3.getVal()*1.03);\n   background_up.SetBinContent(4,CRbin4.getVal()*1.04);\n   RooDataHist bkg_CRhist_sysUp(\"bkg_CR_JESUp\",\"Bkg sys up\",vars,&amp;background_up);\n   wspace.import(bkg_CRhist_sysUp);\n\n   TH1F background_down(\"bkg_CR_JESDown\",\"\",nbins,xbins);\n   background_down.SetBinContent(1,CRbin1.getVal()*0.90);\n   background_down.SetBinContent(2,CRbin2.getVal()*0.98);\n   background_down.SetBinContent(3,CRbin3.getVal()*0.97);\n   background_down.SetBinContent(4,CRbin4.getVal()*0.96);\n   RooDataHist bkg_CRhist_sysDown(\"bkg_CR_JESDown\",\"Bkg sys down\",vars,&amp;background_down);\n   wspace.import(bkg_CRhist_sysDown);\n</code></pre> <p>Below are datacards (for signal and control regions) which can be used in conjunction with the workspace built above. 
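As a rough sketch of the full workflow (the file names <code>datacard_SR.txt</code>, <code>datacard_CR.txt</code> and <code>combined_ws.root</code> are placeholders; the two cards themselves are shown below), the combination and fit could look like the following:</p> <pre><code>combineCards.py signal=datacard_SR.txt control=datacard_CR.txt &gt; datacard_combined.txt\ntext2workspace.py datacard_combined.txt -o combined_ws.root\ncombine combined_ws.root -M MultiDimFit -v 3\n</code></pre> <p>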
In order to \"use\" the control region, simply combine the two cards as usual using <code>combineCards.py</code>.</p> Show Signal Region Datacard <pre><code>Signal Region Datacard -- signal category\n\nimax _ number of bins\njmax _ number of processes minus 1\nkmax \\* number of nuisance parameters\n\n---\n\nshapes data_obs signal param_ws.root wspace:data_obs_SR\nshapes background signal param_ws.root wspace:bkg_SR # the background model pdf which is freely floating, note other backgrounds can be added as usual\nshapes signal signal param_ws.root wspace:signal\n\n---\n\nbin signal\nobservation -1\n\n---\n\n# background rate must be taken from \\_norm param x 1\n\nbin signal signal\nprocess background signal\nprocess 1 0\nrate 1 -1\n\n---\n\n# Normal uncertainties in the signal region\n\n## lumi_8TeV lnN - 1.026\n\n# free floating parameters, we do not need to declare them, but its a good idea to\n\nbkg_SR_bin1 flatParam\nbkg_SR_bin2 flatParam\nbkg_SR_bin3 flatParam\nbkg_SR_bin4 flatParam\n</code></pre> Show Control Region Datacard <pre><code>Control Region Datacard -- control category\n\nimax _ number of bins\njmax _ number of processes minus 1\nkmax \\* number of nuisance parameters\n\n---\n\nshapes data*obs control param_ws.root wspace:data_obs_CR\nshapes background control param_ws.root wspace:bkg_CR wspace:bkg_CR*$SYSTEMATIC # the background model pdf which is dependant on that in the SR, note other backgrounds can be added as usual\n\n---\n\nbin control\nobservation -1\n\n---\n\n# background rate must be taken from \\_norm param x 1\n\nbin control\nprocess background\nprocess 1\nrate 1\n\n---\n\nJES shape 1\nISR shape 1\nefficiency param 0 1\nacceptance param 0 1\n</code></pre> <p>Note that for the control region, our nuisance parameters appear as <code>param</code> types, so that Combine will correctly constrain them.</p> <p>If we combine the two cards and fit the result with <code>-M MultiDimFit -v 3</code> we can see that the parameters that give the rate of background in each bin of the signal region, along with the nuisance parameters and signal strength, are determined by the fit - i.e we have properly included the constraint from the control region, just as with the 1-bin <code>gmN</code>.</p> <pre><code>acceptance = 0.00374312 +/- 0.964632 (limited)\nbkg_SR_bin1 = 99.9922 +/- 5.92062 (limited)\nbkg_SR_bin2 = 49.9951 +/- 4.13535 (limited)\nbkg_SR_bin3 = 24.9915 +/- 2.9267 (limited)\nbkg_SR_bin4 = 9.96478 +/- 2.1348 (limited)\nefficiency = 0.00109195 +/- 0.979334 (limited)\nlumi_8TeV = -0.0025911 +/- 0.994458\nr = 0.00716347 +/- 12.513 (limited)\n</code></pre> <p>The example given here is extremely basic and it should be noted that additional complexity in the transfer factors, as well as additional uncertainties/backgrounds etc in the cards are, as always, supported.</p> <p>Danger</p> <p>If trying to implement parametric uncertainties in this setup (eg on transfer factors) that are correlated with other channels and implemented separately, you MUST normalize the uncertainty effect so that the datacard line can read <code>param name X 1</code>. That is, the uncertainty on this parameter must be 1. Without this, there will be inconsistency with other nuisances of the same name in other channels implemented as shape or lnN.</p>"},{"location":"part3/nonstandard/#look-elsewhere-effect-for-one-parameter","title":"Look-elsewhere effect for one parameter","text":"<p>In case you see an excess somewhere in your analysis, you can evaluate the look-elsewhere effect (LEE) of that excess. 
For an explanation of the LEE, take a look at the CMS Statistics Committee Twiki here.</p> <p>To calculate the look-elsewhere effect for a single parameter (in this case the mass of the resonance), you can follow the instructions below. Note that these instructions assume you have a workspace that is parametric in your resonance mass \\(m\\), otherwise you need to fit each background toy with separate workspaces. We will assume the local significance for your excess is \\(\\sigma\\).</p> <ul> <li> <p>Generate background-only toys: <code>combine ws.root -M GenerateOnly --toysFrequentist -m 16.5 -t 100 --saveToys --expectSignal=0</code>. The output will be something like <code>higgsCombineTest.GenerateOnly.mH16.5.123456.root</code>.</p> </li> <li> <p>For each toy, calculate the significance for a predefined range (e.g. \\(m\\in [10,35]\\) GeV) in steps suitable to the resolution (e.g. 1 GeV). For <code>toy_1</code> the procedure would be: <code>for i in $(seq 10 35); do combine ws.root -M Significance --redefineSignalPOIs r --freezeParameters MH --setParameters MH=$i -n $i -D higgsCombineTest.GenerateOnly.mH16.5.123456.root:toys/toy_1; done</code>. Calculate the maximum significance over all of these mass points - call this \\(\\sigma_{max}\\).</p> </li> <li> <p>Count how many toys have a maximum significance larger than the local one for your observed excess. This fraction of toys with \\(\\sigma_{max}&gt;\\sigma\\) is the global p-value.</p> </li> </ul> <p>You can find more tutorials on the LEE here.</p>"},{"location":"part3/regularisation/","title":"Unfolding &amp; regularization","text":"<p>This section details how to perform an unfolded cross-section measurement, including regularization, within Combine. </p> <p>There are many resources available that describe unfolding, including when to use it (or not), and what the common issues surrounding it are. For CMS users, a useful summary is available in the CMS Statistics Committee pages on unfolding. You can also find an overview of unfolding and its usage in Combine in these slides.</p> <p>The basic idea behind the unfolding technique is to describe the smearing introduced through the reconstruction (e.g. of the particle energy) in a given truth level bin \\(x_{i}\\) through a linear relationship with the effects in the nearby truth-bins. We can make statements about the probability \\(p_{j}\\) that an event falling in the truth bin \\(x_{i}\\) is reconstructed in the bin \\(y_{j}\\) via the linear relationship,</p> \\[ y_{obs} = \\tilde{\\boldsymbol{R}}\\cdot x_{true} + b \\] <p>or, if the truth bins are expressed relative to some particular model, we use the usual signal strength terminology, </p> \\[ y_{obs} = \\boldsymbol{R}\\cdot \\mu + b \\] <p>Unfolding aims to find the distribution at truth level \\(x\\), given the observations \\(y\\) at reco-level.</p>"},{"location":"part3/regularisation/#likelihood-based-unfolding","title":"Likelihood-based unfolding","text":"<p>Since Combine has access to the full likelihood for any analysis written in the usual datacard format, we will use likelihood-based unfolding throughout - for other approaches, there are many other tools available (e.g. <code>RooUnfold</code> or <code>TUnfold</code>), which can be used instead. 
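Written per reconstructed bin, the model being fitted is, schematically, \\(y_{i} = \\sum_{j} R_{ij}\\,\\mu_{j} + b_{i}\\), with one signal-strength parameter \\(\\mu_{j}\\) for each truth bin; these \\(\\mu_{j}\\) are the <code>r_Bin</code> parameters defined in the <code>multiSignalModel</code> maps shown below. 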
</p> <p>The benefits of the likelihood-based approach are that, </p> <ul> <li>Background subtraction is accounted for directly in the likelihood</li> <li>Systematic uncertainties are accounted for directly during the unfolding as nuisance parameters</li> <li>We can profile the nuisance parameters during the unfolding to make the most of the data available </li> </ul> <p>In practice, one must construct the response matrix and unroll it in the reconstructed bins:</p> <ul> <li>First, one derives the truth distribution, e.g. after the generator-level selection only, \\(x_{i}\\).</li> <li>Each reconstructed bin (e.g. each datacard) should describe the contribution from each truth bin - this is how Combine knows about the response matrix \\(\\boldsymbol{R}\\)  and folds in the acceptance/efficiency effects as usual.</li> <li>The out-of-acceptance contributions can also be included in the above.</li> </ul> <p>The model we use for this is then just the usual <code>PhysicsModel:multiSignalModel</code>, where each signal refers to a particular truth level bin. The results can be extracted through a  simple maximum-likelihood fit with, </p> <pre><code>    text2workspace.py -m 125 --X-allow-no-background -o datacard.root datacard.txt\n       -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO map='.*GenBin0.*:r_Bin0[1,-1,20]' --PO map='.*GenBin1.*:r_Bin1[1,-1,20]' --PO map='.*GenBin2.*:r_Bin2[1,-1,20]' --PO map='.*GenBin3.*:r_Bin3[1,-1,20]' --PO map='.*GenBin4.*:r_Bin4[1,-1,20]'\n\n    combine -M MultiDimFit --setParameters=r_Bin0=1,r_Bin1=1,r_Bin2=1,r_Bin3=1,r_Bin4=1 -t -1 -m 125 datacard.root\n    combine -M MultiDimFit --setParameters=r_Bin0=1,r_Bin1=1,r_Bin2=1,r_Bin3=1,r_Bin4=1 -t -1 -m 125 --algo=grid --points=100 -P r_Bin1 --setParameterRanges r_Bin1=0.5,1.5 --floatOtherPOIs=1 datacard.root\n</code></pre> <p>Notice that one can also perform the so called bin-by-bin unfolding (though it is strongly discouraged, except for testing) with, </p> <pre><code>    text2workspace.py -m 125 --X-allow-no-background -o datacard.root datacard.txt\n      -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO map='.*RecoBin0.*:r_Bin0[1,-1,20]' --PO map='.*RecoBin1.*:r_Bin1[1,-1,20]' --PO map='.*RecoBin2.*:r_Bin2[1,-1,20]' --PO map='.*RecoBin3.*:r_Bin3[1,-1,20]' --PO map='.*RecoBin4.*:r_Bin4[1,-1,20]'\n</code></pre> <p>Nuisance parameters can be added to the likelihood function and profiled in the usual way via the datacards. Theory uncertainties on the inclusive cross section are typically not included in unfolded measurements.</p> <p>The figure below shows a comparison of likelihood-based unfolding and a least-squares based unfolding as implemented in <code>RooUnfold</code>. </p> Show comparison <p></p>"},{"location":"part3/regularisation/#regularization","title":"Regularization","text":"<p>The main difference with respect to other models with multiple signal contributions is the introduction of Regularization, which is used to stabilize the unfolding process. </p> <p>An example of unfolding in Combine with and without regularization, can be found under  data/tutorials/regularization. 
</p> <p>Running <code>python createWs.py [-r]</code> will create a simple datacard and perform a fit both with and without including regularization.</p> <p>The simplest way to introduce regularization in the likelihood-based approach is to add a penalty term, which depends on the values of the truth bins, to the likelihood function (so-called Tikhonov regularization):</p> \\[ -2\\ln L \\rightarrow -2\\ln L + P(\\vec{x}) \\] <p>Here, \\(P\\) is a penalty term built from a linear operator acting on the truth bins. There are two different approaches that are supported to construct \\(P\\). If you run <code>python makeModel.py</code>, you will create a more complex datacard with the two regularization schemes implemented. You will need to uncomment the relevant sections of code to activate <code>SVD</code> or <code>TUnfold</code>-type regularization.</p> <p>Warning</p> <p>When using any unfolding method with regularization, you must perform studies of the potential bias/coverage properties introduced through the inclusion of regularization, and of how strong the associated regularization is. Advice on this can be found in the CMS Statistics Committee pages. </p>"},{"location":"part3/regularisation/#singular-value-decomposition-svd","title":"Singular Value Decomposition (SVD)","text":"<p>In the SVD approach - as described in the SVD paper - the penalty term is constructed directly based on the strengths (\\(\\vec{\\mu}=\\{\\mu_{i}\\}_{i=1}^{N}\\)), </p> \\[ P = \\tau\\left| A\\cdot \\vec{\\mu} \\right|^{2}, \\] <p>where \\(A\\) is typically the discrete curvature matrix, with </p> \\[ A =  \\begin{bmatrix}  1 &amp; -1 &amp; ... \\\\ 1 &amp; -2 &amp; 1 &amp;  ... \\\\ ...  \\end{bmatrix} \\] <p>Penalty terms on the derivatives can also be included. Such a penalty term is introduced by modifying the likelihood to include one constraint for each row of the product \\(A\\cdot\\vec{\\mu}\\), added as lines in the datacard of the form, </p> <p><pre><code>    name constr formula dependents delta\n</code></pre> where the regularization strength is \\(\\delta=\\frac{1}{\\sqrt{\\tau}}\\) and can either be a fixed value (e.g. by directly putting <code>0.01</code>) or a modifiable parameter with e.g. <code>delta[0.01]</code>. </p> <p>For example, for 3 bins and a regularization strength of 0.03, the first line would be </p> <pre><code>    name constr @0-2*@2+@1 r_Bin0,r_Bin1,r_Bin2 0.03\n</code></pre> <p>Alternative valid syntaxes are </p> <pre><code>    constr1 constr r_bin0-r_bin1 0.01\n    constr1 constr r_bin0-r_bin1 delta[0.01]\n    constr1 constr r_bin0+r_bin1 r_bin0,r_bin1 0.01\n    constr1 constr r_bin0+r_bin1 {r_bin0,r_bin1} delta[0.01]\n</code></pre> <p>The figure below compares an example unfolding using the \"SVD regularization\" approach, both with the least-squares method (as implemented by <code>RooUnfold</code>) and as a penalty term added to the likelihood in the maximum-likelihood approach of Combine.</p> Show comparison <p></p>"},{"location":"part3/regularisation/#tunfold-method","title":"TUnfold method","text":"<p>The Tikhonov regularization as implemented in <code>TUnfold</code> uses the MC information, or rather the density prediction, as a bias vector. In order to give this information to Combine, a single datacard for each reconstruction-level bin needs to be produced, so that we have access to the proper normalization terms during the minimization. 
In this case the bias vector is \\(\\vec{x}_{obs}-\\vec{x}_{true}\\) </p> <p>Then one can write a constraint term in the datacard via, for example,</p> <pre><code>    constr1 constr (r_Bin0-1.)*(shapeSig_GenBin0_RecoBin0__norm+shapeSig_GenBin0_RecoBin1__norm+shapeSig_GenBin0_RecoBin2__norm+shapeSig_GenBin0_RecoBin3__norm+shapeSig_GenBin0_RecoBin4__norm)+(r_Bin2-1.)*(shapeSig_GenBin2_RecoBin0__norm+shapeSig_GenBin2_RecoBin1__norm+shapeSig_GenBin2_RecoBin2__norm+shapeSig_GenBin2_RecoBin3__norm+shapeSig_GenBin2_RecoBin4__norm)-2*(r_Bin1-1.)*(shapeSig_GenBin1_RecoBin0__norm+shapeSig_GenBin1_RecoBin1__norm+shapeSig_GenBin1_RecoBin2__norm+shapeSig_GenBin1_RecoBin3__norm+shapeSig_GenBin1_RecoBin4__norm) {r_Bin0,r_Bin1,r_Bin2,shapeSig_GenBin1_RecoBin0__norm,shapeSig_GenBin0_RecoBin0__norm,shapeSig_GenBin2_RecoBin0__norm,shapeSig_GenBin1_RecoBin1__norm,shapeSig_GenBin0_RecoBin1__norm,shapeSig_GenBin2_RecoBin1__norm,shapeSig_GenBin1_RecoBin2__norm,shapeSig_GenBin0_RecoBin2__norm,shapeSig_GenBin2_RecoBin2__norm,shapeSig_GenBin1_RecoBin3__norm,shapeSig_GenBin0_RecoBin3__norm,shapeSig_GenBin2_RecoBin3__norm,shapeSig_GenBin1_RecoBin4__norm,shapeSig_GenBin0_RecoBin4__norm,shapeSig_GenBin2_RecoBin4__norm} delta[0.03]\n</code></pre>"},{"location":"part3/runningthetool/","title":"How to run the tool","text":"<p>The executable Combine provided by the package is used to invoke the tools via the command line. The statistical analysis method, as well as user settings, are also specified on the command line. To see the full list of available options, you can run:</p> <pre><code>combine --help\n</code></pre> <p>The option <code>-M</code> is used to choose the statistical evaluation method. There are several groups of statistical methods:</p> <ul> <li>Asymptotic likelihood methods:<ul> <li><code>AsymptoticLimits</code>: limits calculated according to the asymptotic formulae in arxiv:1007.1727.</li> <li><code>Significance</code>: simple profile likelihood approximation, for calculating significances.</li> </ul> </li> <li>Bayesian methods:<ul> <li><code>BayesianSimple</code>: performing a classical numerical integration (for simple models only).</li> <li><code>MarkovChainMC</code>: performing Markov Chain integration, for arbitrarily complex models.</li> </ul> </li> <li>Frequentist or hybrid bayesian-frequentist methods:<ul> <li><code>HybridNew</code>: compute modified frequentist limits, significance/p-values and confidence intervals according to several possible prescriptions with toys. </li> </ul> </li> <li>Fitting<ul> <li><code>FitDiagnostics</code>: performs maximum likelihood fits to extract the signal rate, and provides diagnostic tools such as pre- and post-fit figures and correlations</li> <li><code>MultiDimFit</code>: performs maximum likelihood fits and likelihood scans with an arbitrary number of parameters of interest.</li> </ul> </li> <li>Miscellaneous other modules that do not compute limits or confidence intervals, but use the same framework:<ul> <li><code>GoodnessOfFit</code>: perform a goodness of fit test for models including shape information. 
Several GoF tests are implemented.</li> <li><code>ChannelConsistencyCheck</code>: study the consistency between individual channels in a combination.</li> <li><code>GenerateOnly</code>: generate random or asimov toy datasets for use as input to other methods</li> </ul> </li> </ul> <p>The command help is organized into five parts:</p> <ul> <li>The Main options section indicates how to pass the datacard as input to the tool (<code>-d datacardName</code>), how to choose the statistical method (<code>-M MethodName</code>), and how to set the verbosity level <code>-v</code></li> <li>Under Common statistics options, options common to different statistical methods are given. Examples are <code>--cl</code>, to specify the confidence level (default is 0.95), or <code>-t</code>, to give the number of toy MC extractions required.</li> <li>The Common input-output options section includes, for example, the options to specify the mass hypothesis under study (<code>-m</code>) or to include a specific string in the output filename (<code>--name</code>). </li> <li>Common miscellaneous options.</li> <li>Further method-specific options are available for each method. By passing the method name via the <code>-M</code> option, along with <code>--help</code>, the options for that specific method are shown in addition to the common options. </li> </ul> <p>Not all the available options are discussed in this online documentation; use <code>--help</code> to get the documentation of all options.</p>"},{"location":"part3/runningthetool/#common-command-line-options","title":"Common command-line options","text":"<p>There are a number of useful command-line options that can be used to alter the model (or parameters of the model) at run time. The most commonly used, generic options, are:</p> <ul> <li> <p><code>-H</code>: first run a different, faster, algorithm (e.g. the <code>ProfileLikelihood</code> described below) to obtain an approximate indication of the limit, which will allow the precise chosen algorithm to converge more quickly. We strongly recommend to use this option when using the <code>MarkovChainMC</code>, <code>HybridNew</code> or <code>FeldmanCousins</code> calculators, unless you know in which range your limit lies and you set this range manually (the default is <code>[0, 20]</code>)</p> </li> <li> <p><code>--rMax</code>, <code>--rMin</code>: manually restrict the range of signal strengths to consider. For Bayesian limits with MCMC, a rule of thumb is that <code>rMax</code> should be 3-5 times the limit (a too small value of <code>rMax</code> will bias your limit towards low values, since you are restricting the integration range, while a too large value will bias you to higher limits)</p> </li> <li> <p><code>--setParameters name=value[,name2=value2,...]</code> sets the starting values of the parameters, useful e.g. when generating toy MC or when setting the parameters as fixed. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code>.</p> </li> <li> <p><code>--setParameterRanges name=min,max[:name2=min2,max2:...]</code> sets the ranges of the parameters (useful e.g. for scans in <code>MultiDimFit</code>, or for Bayesian integration). 
This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code>.</p> </li> <li> <p><code>--redefineSignalPOIs name[,name2,...]</code> redefines the set of parameters of interest.</p> <ul> <li>If the parameters were constant in the input workspace, they are set to be floating.</li> <li>Nuisance parameters promoted to parameters of interest are removed from the list of nuisances, and thus they are not randomized in methods that randomize nuisances (e.g. <code>HybridNew</code> in non-frequentist mode, or <code>BayesianToyMC</code>, or in toy generation with <code>-t</code> but without <code>--toysFreq</code>). This does not have any impact on algorithms that do not randomize nuisance parameters (e.g. fits, <code>AsymptoticLimits</code>, or <code>HybridNew</code> in frequentist mode) or on algorithms that treat all parameters in the same way (e.g. <code>MarkovChainMC</code>).</li> <li>Note that constraint terms for the nuisances are dropped after promotion to a POI using <code>--redefineSignalPOIs</code>. To produce a likelihood scan for a nuisance parameter using <code>MultiDimFit</code> with <code>--algo grid</code>, you should instead use the <code>--parameters (-P)</code> option, which will not cause the loss of the constraint term when scanning.</li> <li>Parameters of interest of the input workspace that are not selected by this command become unconstrained nuisance parameters, but they are not added to the list of nuisances so they will not be randomized (see above).</li> </ul> </li> <li> <p><code>--freezeParameters name1[,name2,...]</code> will freeze the parameters with the given names to their set values. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code> for matching to constrained nuisance parameters or <code>var{some regular expression}</code> for matching to any parameter. For example <code>--freezeParameters rgx{CMS_scale_j.*}</code> will freeze all constrained nuisance parameters with the prefix <code>CMS_scale_j</code>, while <code>--freezeParameters var{.*rate_scale}</code> will freeze any parameter (constrained nuisance parameter or otherwise) with the suffix <code>rate_scale</code>.</p> <ul> <li>Use the option <code>--freezeParameters allConstrainedNuisances</code> to freeze all nuisance parameters that have a constraint term (i.e. not <code>flatParams</code> or <code>rateParams</code> or other freely floating parameters).</li> <li>Similarly, the option <code>--floatParameters name1[,name2,...]</code> sets the parameter(s) floating and also accepts regular expressions.</li> <li>Groups of nuisance parameters (constrained or otherwise), as defined in the datacard, can be frozen using <code>--freezeNuisanceGroups</code>. You can also freeze all nuisances that are not contained in a particular group using a ^ before the group name (<code>--freezeNuisanceGroups=^group_name</code> will freeze everything except nuisance parameters in the group \"group_name\".)</li> <li>All constrained nuisance parameters (not <code>flatParam</code> or <code>rateParam</code>) can be set floating using <code>--floatAllNuisances</code>.</li> </ul> </li> </ul> <p>Warning</p> <p>Note that the floating/freezing options have a priority ordering from lowest to highest as <code>floatParameters &lt; freezeParameters &lt; freezeNuisanceGroups &lt; floatAllNuisances</code>. 
Options with higher priority will take precedence over those with lower priority.</p> <ul> <li> <p><code>--trackParameters name1[,name2,...]</code> will add a branch to the output tree for each of the named parameters. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code></p> <ul> <li>The name of the branch will be trackedParam_name.</li> <li>The exact behaviour depends on the method used. For example, when using <code>MultiDimFit</code> with <code>--algo scan</code>, the value of the parameter at each point in the scan will be saved, while for <code>FitDiagnostics</code>, only the value at the end of the fit will be saved.</li> </ul> </li> <li> <p><code>--trackErrors name1[,name2,...]</code> will add a branch to the output tree for the error of each of the named parameters. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code></p> <ul> <li>The name of the branch will be trackedError_name.</li> <li>The behaviour, in terms of which values are saved, is the same as <code>--trackParameters</code> above.</li> </ul> </li> </ul> <p>By default, the data set used by Combine will be the one listed in the datacard. You can tell Combine to use a different data set (for example a toy data set that you generated) by using the option <code>--dataset</code>. The argument should be <code>rootfile.root:workspace:location</code> or <code>rootfile.root:location</code>. In order to use this option, you must first convert your datacard to a binary workspace and use this binary workspace as the input to Combine. </p>"},{"location":"part3/runningthetool/#generic-minimizer-options","title":"Generic Minimizer Options","text":"<p>Combine uses its own minimizer class, which is used to steer Minuit (via RooMinimizer), named the <code>CascadeMinimizer</code>. This allows for sequential minimization, which can help in case a particular setting or algorithm fails. The <code>CascadeMinimizer</code> also knows about extra features of Combine such as discrete nuisance parameters.</p> <p>All of the fits that are performed in Combine's methods use this minimizer. This means that the fits can be tuned using these common options,</p> <ul> <li><code>--cminPoiOnlyFit</code>: First, perform a fit floating only the parameters of interest. This can be useful to find, roughly, where the global minimum is.</li> <li><code>--cminPreScan</code>: Do a scan before the first minimization.</li> <li><code>--cminPreFit arg</code> If set to a value N &gt; 0, the minimizer will perform a pre-fit with strategy (N-1), with the nuisance parameters frozen.<ul> <li><code>--cminApproxPreFitTolerance arg</code>: If non-zero, first do a pre-fit with this tolerance (or 10 times the final tolerance, whichever is largest)</li> <li><code>--cminApproxPreFitStrategy arg</code>:   Strategy to use in the pre-fit. The default is strategy 0.</li> </ul> </li> <li><code>--cminDefaultMinimizerType arg</code>: Set the default minimizer type. By default this is set to Minuit2.</li> <li><code>--cminDefaultMinimizerAlgo arg</code>: Set the default minimizer algorithm. The default algorithm is Migrad.</li> <li><code>--cminDefaultMinimizerTolerance arg</code>: Set the default minimizer tolerance, the default is 0.1.</li> <li><code>--cminDefaultMinimizerStrategy arg</code>: Set the default minimizer strategy between 0 (speed), 1 (balance - default), 2 (robustness). 
The Minuit documentation for this is pretty sparse but in general, 0 means evaluate the function less often, while 2 will waste function calls to get precise answers. An important note is that the <code>Hesse</code> algorithm (for error and correlation estimation) will be run only if the strategy is 1 or 2.</li> <li><code>--cminFallbackAlgo arg</code>: Provides a list of fallback algorithms, to be used in case the default minimizer fails. You can provide multiple options using the syntax <code>Type[,algo],strategy[:tolerance]</code>: eg <code>--cminFallbackAlgo Minuit2,Simplex,0:0.1</code> will fall back to the simplex algorithm of Minuit2 with strategy 0 and a tolerance 0.1, while <code>--cminFallbackAlgo Minuit2,1</code> will use the default algorithm (Migrad) of Minuit2 with strategy 1.</li> <li><code>--cminSetZeroPoint (0/1)</code>: Set the reference of the NLL to 0 when minimizing, this can help faster convergence to the minimum if the NLL itself is large. The default is true (1), set to 0 to turn off.</li> </ul> <p>The allowed combinations of minimizer types and minimizer algorithms are as follows:</p> Minimizer type Minimizer algorithm <code>Minuit</code> <code>Migrad</code>, <code>Simplex</code>, <code>Combined</code>, <code>Scan</code> <code>Minuit2</code> <code>Migrad</code>, <code>Simplex</code>, <code>Combined</code>, <code>Scan</code> <code>GSLMultiMin</code> <code>ConjugateFR</code>, <code>ConjugatePR</code>, <code>BFGS</code>, <code>BFGS2</code>, <code>SteepestDescent</code> <p>You can find details about these in the Minuit2 documentation here.</p> <p>More of these options can be found in the Cascade Minimizer options section when running <code>--help</code>.</p>"},{"location":"part3/runningthetool/#output-from-combine","title":"Output from combine","text":"<p>Most methods will print the results of the computation to the screen. However, in addition, Combine will also produce a root file containing a tree called limit with these results. The name of this file will be of the format,</p> <pre><code>higgsCombineTest.MethodName.mH$MASS.[word$WORD].root\n</code></pre> <p>where $WORD is any user defined keyword from the datacard which has been set to a particular value.</p> <p>A few command-line options can be used to control this output:</p> <ul> <li>The option <code>-n</code> allows you to specify part of the name of the root file. e.g. if you pass <code>-n HWW</code> the root file will be called <code>higgsCombineHWW....</code> instead of <code>higgsCombineTest</code></li> <li>The option <code>-m</code> allows you to specify the (Higgs boson) mass hypothesis, which gets written in the filename and in the output tree. This simplifies the bookeeping, as it becomes possible to merge multiple trees corresponding to different (Higgs boson) masses using <code>hadd</code>. Quantities can then be plotted as a function of the mass. The default value is m=120.</li> <li>The option <code>-s</code> can be used to specify the seed (eg <code>-s 12345</code>) used in toy generation. 
If this option is given, the name of the file will be extended by this seed, eg <code>higgsCombineTest.AsymptoticLimits.mH120.12345.root</code></li> <li>The option <code>--keyword-value</code> allows you to specify the value of a keyword in the datacard such that $WORD (in the datacard) will be given the value of VALUE in the command <code>--keyword-value WORD=VALUE</code>, eg  <code>higgsCombineTest.AsymptoticLimits.mH120.WORDVALUE.12345.root</code></li> </ul> <p>The output file will contain a <code>TDirectory</code> named toys, which will be empty if no toys are generated (see below for details) and a <code>TTree</code> called limit with the following branches;</p> Branch name Type Description <code>limit</code> <code>Double_t</code> Main result of combine run, with method-dependent meaning <code>limitErr</code> <code>Double_t</code> Estimated uncertainty on the result <code>mh</code> <code>Double_t</code> Value of MH, specified with <code>-m</code> option <code>iToy</code> <code>Int_t</code> Toy number identifier if running with <code>-t</code> <code>iSeed</code> <code>Int_t</code> Seed specified with <code>-s</code> <code>t_cpu</code> <code>Float_t</code> Estimated CPU time for algorithm <code>t_real</code> <code>Float_t</code> Estimated real time for algorithm <code>quantileExpected</code> <code>Float_t</code> Quantile identifier for methods that calculated expected (quantiles) and observed results (eg conversions from \\(\\Delta\\ln L\\) values), with method-dependent meaning. Negative values are reserved for entries that do not relate to quantiles of a calculation, with the default being set to -1 (usually meaning the observed result). <p>The value of any user-defined keyword $WORD that is set using <code>keyword-value</code> described above will also be included as a branch with type <code>string</code> named WORD. The option can be repeated multiple times for multiple keywords.</p> <p>In some cases, the precise meanings of the branches will depend on the method being used. In this case, it will be specified in this documentation.</p>"},{"location":"part3/runningthetool/#toy-data-generation","title":"Toy data generation","text":"<p>By default, each of the methods described so far will be run using the observed data as the input. In several cases (as detailed below), it is useful to run the tool using toy datasets, including Asimov data sets.</p> <p>The option <code>-t</code> is used to tell Combine to first generate one or more toy data sets, which will be used instead of the observed data. There are two versions,</p> <ul> <li> <p><code>-t N</code> with N &gt; 0. Combine will generate N toy datasets from the model and re-run the method once per toy. The seed for the toy generation can be modified with the option <code>-s</code> (use <code>-s -1</code> for a random seed). The output file will contain one entry in the tree for each of these toys.</p> </li> <li> <p><code>-t -1</code> will produce an Asimov data set, in which statistical fluctuations are suppressed. The procedure for generating this Asimov data set depends on the type of analysis you are using. More details are given below. </p> </li> </ul> <p>Warning</p> <p>The default values of the nuisance parameters (or any parameter) are used to generate the toy. This means that if, for example, you are using parametric shapes and the parameters inside the workspace are set to arbitrary values, those arbitrary values will be used to generate the toy. 
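As a minimal illustration (a sketch only; <code>ws.root</code> is a placeholder workspace name), generating and saving 500 toys with a random seed, using whatever parameter values are currently set in the workspace, could look like:</p> <pre><code>combine ws.root -M GenerateOnly -t 500 -s -1 --saveToys\n</code></pre> <p>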
This behaviour can be modified through the use of the option <code>--setParameters x=value_x,y=value_y...</code>, which will set the values of the parameters (<code>x</code> and <code>y</code>) before toy generation. You can also load a snapshot from a previous fit to set the nuisance parameters to their post-fit values (see below).</p> <p>The output file will contain the toys (as <code>RooDataSets</code> for the observables, including global observables) in the toys directory if the option <code>--saveToys</code> is provided. If you include this option, the <code>limit</code> TTree in the output will have an entry corresponding to the state of the POI used for the generation of the toy, with the value of <code>quantileExpected</code> set to -2. </p> <p>The branches that are created by methods like <code>MultiDimFit</code> will not show the values used to generate the toy. If you also want the TTree to show the values of the POIs used to generate the toy, you should add additional branches using the <code>--trackParameters</code> option as described in the common command-line options section above. These branches will behave as expected when adding the option <code>--saveToys</code>. </p> <p>Warning</p> <p>For statistical methods that make use of toys (including <code>HybridNew</code>, <code>MarkovChainMC</code> and running with <code>-t N</code>), the results of repeated Combine commands will not be identical when using the datacard as the input. This is due to a feature in the tool that allows one to run concurrent commands that do not interfere with one another. In order to produce reproducible results with toy-based methods, you should first convert the datacard to a binary workspace using <code>text2workspace.py</code> and then use the resulting file as input to the Combine commands</p>"},{"location":"part3/runningthetool/#asimov-datasets","title":"Asimov datasets","text":"<p>If you are using either <code>-t -1</code> or  <code>AsymptoticLimits</code>, Combine will calculate results based on an Asimov data set.</p> <ul> <li> <p>For counting experiments, the Asimov data set will just be the total number of expected events (given the values of the nuisance parameters and POIs of the model)</p> </li> <li> <p>For shape analyses with templates, the Asimov data set will be constructed as a histogram using the same binning that is defined for your analysis.</p> </li> <li> <p>If your model uses parametric shapes, there are some options as to what Asimov data set to produce. By default, Combine will produce the Asimov data set as a histogram using the binning that is associated with each observable (ie as set using <code>RooRealVar::setBins</code>). If this binning does not exist, Combine will guess a suitable binning - it is therefore best to use <code>RooRealVar::setBins</code> to associate a binning with each observable, even if your data is unbinned, if you intend to use Asimov data sets.</p> </li> </ul> <p>You can also ask Combine to use a Pseudo-Asimov dataset, which is created from many weighted unbinned events.</p> <p>Setting <code>--X-rtd TMCSO_AdaptivePseudoAsimov=</code>\\(\\beta\\) with \\(\\beta&gt;0\\) will trigger the internal logic of whether to produce a Pseudo-Asimov dataset. 
This logic is as follows;</p> <ol> <li> <p>For each observable in your dataset, the number of bins, \\(n_{b}\\) is determined either from the value of <code>RooRealVar::getBins</code>, if it exists, or assumed to be 100.</p> </li> <li> <p>If \\(N_{b}=\\prod_{b}n_{b}&gt;5000\\), the number of expected events \\(N_{ev}\\) is determined. Note if you are combining multiple channels, \\(N_{ev}\\) refers to the number of expected events in a single channel. The logic is separate for each channel. If  \\(N_{ev}/N_{b}&lt;0.01\\) then a Pseudo-Asimov data set is created with the number of events equal to \\(\\beta \\cdot \\mathrm{max}\\{100*N_{ev},1000\\}\\). If \\(N_{ev}/N_{b}\\geq 0.01\\) , then a normal Asimov data set is produced.</p> </li> <li> <p>If \\(N_{b}\\leq 5000\\) then a normal Asimov data set will be produced</p> </li> </ol> <p>The production of a Pseudo-Asimov data set can be forced by using the option <code>--X-rtd TMCSO_PseudoAsimov=X</code> where <code>X&gt;0</code> will determine the number of weighted events for the Pseudo-Asimov data set. You should try different values of <code>X</code>, since larger values lead to more events in the Pseudo-Asimov data set, resulting in higher precision. However, in general, the fit will be slower. </p> <p>You can turn off the internal logic by setting <code>--X-rtd TMCSO_AdaptivePseudoAsimov=0 --X-rtd TMCSO_PseudoAsimov=0</code>, thereby forcing histograms to be generated.</p> <p>Info</p> <p>If you set <code>--X-rtd TMCSO_PseudoAsimov=X</code> with <code>X&gt;0</code> and also turn on <code>--X-rtd TMCSO_AdaptivePseudoAsimov=</code>\\(\\beta\\), with \\(\\beta&gt;0\\), the internal logic will be used, but this time the default will be to generate Pseudo-Asimov data sets, rather than the standard Asimov ones.</p>"},{"location":"part3/runningthetool/#nuisance-parameter-generation","title":"Nuisance parameter generation","text":"<p>The default method of handling systematics is to generate random values (around their nominal values, see above) for the nuisance parameters, according to their prior PDFs centred around their default values, before generating the data. The unconstrained nuisance parameters (eg <code>flatParam</code> or <code>rateParam</code>), or those with flat priors are not randomized before the data generation. If you wish to also randomize these parameters, you must declare them as <code>flatParam</code> in your datacard and, when running text2workspace, you must add the option <code>--X-assign-flatParam-prior</code> to the command line.</p> <p>The following options define how the toys will be generated,</p> <ul> <li> <p><code>--toysNoSystematics</code> the nuisance parameters in each toy are not randomized when generating the toy data sets - i.e their nominal values are used to generate the data. Note that for methods which profile (fit) the nuisances, the parameters are still floating when evaluating the likelihood.</p> </li> <li> <p><code>--toysFrequentist</code> the nuisance parameters in each toy are set to their nominal values which are obtained after first fitting to the observed data, with the POIs fixed, before generating the toy data sets. 
For evaluating likelihoods, the constraint terms are instead randomized within their PDFs around the post-fit nuisance parameter values.</p> </li> </ul> <p>If you are using <code>toysFrequentist</code>, be aware that the values set by <code>--setParameters</code> will be ignored for the toy generation as the post-fit values will instead be used (except for any parameter that is also a parameter of interest). You can override this behaviour and choose the nominal values for toy generation for any parameter by adding the option <code>--bypassFrequentistFit</code>, which will skip the initial fit to data, or by loading a snapshot (see below).</p> <p>Warning</p> <p>For methods such as <code>AsymptoticLimits</code> and <code>HybridNew --LHCmode LHC-limits</code>, the  \"nominal\" nuisance parameter values are taken from fits to the data and are, therefore, not \"blind\" to the observed data by default (following the fully frequentist paradigm). See the detailed documentation on these methods for how to run in fully \"blinded\" mode.</p>"},{"location":"part3/runningthetool/#generate-only","title":"Generate only","text":"<p>It is also possible to generate the toys first, and then feed them to the methods in Combine. This can be done using <code>-M GenerateOnly --saveToys</code>. The toys can then be read and used with the other methods by specifying <code>--toysFile=higgsCombineTest.GenerateOnly...</code> and using the same options for the toy generation. </p> <p>You can specify to run on a single toy, in place of the observed data, by including the option <code>-D file.root:toys/toy_i</code>. For example adding <code>-D higgsCombineTest.GenerateOnly.mH120.123456.root:toys/toy_10</code> will run on the  data set <code>toy_10</code> (the 10th toy) that was generated and saved in the file <code>higgsCombineTest.GenerateOnly.mH120.123456.root</code>. </p> <p>Warning</p> <p>Some methods also use toys within the method itself (eg <code>AsymptoticLimits</code> and <code>HybridNew</code>). For these, you should not specify the toy generation with <code>-t</code> or the options above. Instead, you should follow the method-specific instructions.</p>"},{"location":"part3/runningthetool/#loading-snapshots","title":"Loading snapshots","text":"<p>Snapshots from workspaces can be loaded and used in order to generate toys using the option <code>--snapshotName &lt;name of snapshot&gt;</code>. 
This will first set the parameters to the values in the snapshot, before any other parameter options are set and toys are generated.</p> <p>See the section on saving post-fit workspaces for creating workspaces with post-fit snapshots from <code>MultiDimFit</code>.</p> <p>Here are a few examples of calculations with toys from post-fit workspaces, using a workspace with \\(r, m_{H}\\) as parameters of interest:</p> <ul> <li> <p>Throw a post-fit toy with b from the s+b (floating \\(r,m_{H}\\)) fit, s with r=1.0, m=best fit MH, using nuisance parameter values and constraints re-centered on the s+b (floating \\(r,m_{H}\\)) fit values (aka frequentist post-fit expected), and compute the post-fit expected r uncertainty profiling MH: <code>combine higgsCombinemumhfit.MultiDimFit.mH125.root --snapshotName MultiDimFit -M MultiDimFit --verbose 9 -n randomtest --toysFrequentist --bypassFrequentistFit -t -1 --expectSignal=1 -P r --floatOtherPOIs=1 --algo singles</code></p> </li> <li> <p>Throw a post-fit toy with b from the s+b (floating \\(r,m_{H}\\)) fit, s with r=1.0, m=128.0, using nuisance parameter values and constraints re-centered on the s+b (floating \\(r,m_{H}\\)) fit values (aka frequentist post-fit expected), and compute the post-fit expected significance (with MH fixed at 128 implicitly): <code>combine higgsCombinemumhfit.MultiDimFit.mH125.root -m 128 --snapshotName MultiDimFit -M ProfileLikelihood --significance --verbose 9 -n randomtest --toysFrequentist --bypassFrequentistFit --overrideSnapshotMass -t -1 --expectSignal=1 --redefineSignalPOIs r --freezeParameters MH</code></p> </li> <li> <p>Throw a post-fit toy with b from the s+b (floating \\(r,m_{H}\\)) fit, s with r=0.0, using nuisance parameter values and constraints re-centered on the s+b (floating \\(r,m_{H}\\)) fit values (aka frequentist post-fit expected), and compute the post-fit expected and observed asymptotic limit (with MH fixed at 128 implicitly): <code>combine higgsCombinemumhfit.MultiDimFit.mH125.root -m 128 --snapshotName MultiDimFit -M AsymptoticLimits --verbose 9 -n randomtest --bypassFrequentistFit --overrideSnapshotMass --redefineSignalPOIs r --freezeParameters MH</code></p> </li> </ul>"},{"location":"part3/runningthetool/#combinetool-for-job-submission","title":"combineTool for job submission","text":"<p>For longer tasks that cannot be run locally, several methods in Combine can be split to run on a batch system or on the Grid. The splitting and submission are handled using the <code>combineTool.py</code> script.</p>"},{"location":"part3/runningthetool/#submission-to-condor","title":"Submission to Condor","text":"<p>The syntax for running on condor with the tool is</p> <pre><code>combineTool.py -M ALGO [options] --job-mode condor --sub-opts='CLASSADS' --task-name NAME [--dry-run]\n</code></pre> <p>with <code>options</code> being the usual list of Combine options. The help option <code>-h</code> will give a list of both Combine and <code>combineTool</code> options. It is possible to use this tool with several different methods from Combine.</p> <p>The <code>--sub-opts</code> option takes a string with the different ClassAds that you want to set, separated by <code>\\n</code> as argument (e.g. 
<code>'+JobFlavour=\"espresso\"\\nRequestCpus=1'</code>).</p> <p>The <code>--dry-run</code> option will show what will be run without actually doing so / submitting the jobs.</p> <p>For example, to generate toys (eg for use with limit setting) users running on lxplus at CERN can use the condor mode:</p> <p><pre><code>combineTool.py -d workspace.root -M HybridNew --LHCmode LHC-limits --clsAcc 0  -T 2000 -s -1 --singlePoint 0.2:2.0:0.05 --saveHybridResult -m 125 --job-mode condor --task-name condor-test --sub-opts='+JobFlavour=\"tomorrow\"'\n</code></pre> The <code>--singlePoint</code> option is over-ridden, so that this will produce a script for each value of the POI in the range 0.2 to 2.0 in steps of 0.05. You can merge multiple points into a script using <code>--merge</code> - e.g adding <code>--merge 10</code> to the above command will mean that each job contains at most 10 of the values. The scripts are labelled by the <code>--task-name</code> option. They will be submitted directly to condor, adding any options in <code>--sub-opts</code> to the condor submit script. Make sure multiple options are separated by <code>\\n</code>. The jobs will run and produce output in the current directory.</p> <p>Below is an example for splitting points in a multi-dimensional likelihood scan.</p>"},{"location":"part3/runningthetool/#splitting-jobs-for-a-multi-dimensional-likelihood-scan","title":"Splitting jobs for a multi-dimensional likelihood scan","text":"<p>The option <code>--split-points</code> issues the command to split the jobs for <code>MultiDimFit</code> when using <code>--algo grid</code>. The following example will split the jobs such that there are 10 points in each of the jobs, which will be submitted to the workday queue.</p> <pre><code>combineTool.py datacard.txt -M MultiDimFit --algo grid --points 50 --rMin 0 --rMax 1 --job-mode condor --split-points 10 --sub-opts='+JobFlavour=\"workday\"' --task-name mytask -n mytask\n</code></pre> <p>Remember, any usual options (such as redefining POIs or freezing parameters) are passed to Combine and can be added to the command line for <code>combineTool</code>.</p> <p>Info</p> <p>The option <code>-n NAME</code> should be included to avoid overwriting output files, as the jobs will be run inside the directory from which the command is issued.</p>"},{"location":"part3/runningthetool/#grid-submission-with-combinetool","title":"Grid submission with combineTool","text":"<p>For more CPU-intensive tasks, for example determining limits for complex models using toys, it is generally not feasible to compute all the results interactively. Instead, these jobs can be submitted to the Grid.</p> <p>In this example we will use the <code>HybridNew</code> method of Combine to determine an upper limit for a sub-channel of the Run 1 SM \\(H\\rightarrow\\tau\\tau\\) analysis. For full documentation, see the section on computing limits with toys.</p> <p>With this model it would take too long to find the limit in one go, so instead we create a set of jobs in which each one throws toys and builds up the test statistic distributions for a fixed value of the signal strength. These jobs can then be submitted to a batch system or to the Grid using <code>crab3</code>. 
From the set of output distributions it is possible to extract the expected and observed limits.</p> <p>For this we will use <code>combineTool.py</code>.</p> <p>First we need to build a workspace from the \\(H\\rightarrow\\tau\\tau\\) datacard,</p> <pre><code>$ text2workspace.py data/tutorials/htt/125/htt_mt.txt -m 125\n$ mv data/tutorials/htt/125/htt_mt.root ./\n</code></pre> <p>To get an idea of the range of signal strength values we will need to build test-statistic distributions for, we will first use the <code>AsymptoticLimits</code> method of Combine,</p> <pre><code>$ combine -M AsymptoticLimits htt_mt.root -m 125\n &lt;&lt; Combine &gt;&gt;\n[...]\n -- AsymptoticLimits (CLs) --\nObserved Limit: r &lt; 1.7384\nExpected  2.5%: r &lt; 0.4394\nExpected 16.0%: r &lt; 0.5971\nExpected 50.0%: r &lt; 0.8555\nExpected 84.0%: r &lt; 1.2340\nExpected 97.5%: r &lt; 1.7200\n</code></pre> <p>Based on this, a range of 0.2 to 2.0 should be suitable.</p> <p>We can use the same command for generating the distribution of test statistics with <code>combineTool</code>. The <code>--singlePoint</code> option is now enhanced to support expressions that generate a set of calls to Combine with different values. The accepted syntax is of the form MIN:MAX:STEPSIZE, and multiple comma-separated expressions can be specified.</p> <p>The script also adds an option <code>--dry-run</code>, which will not actually call Combine but just print out the commands that would be run, e.g.,</p> <pre><code>combineTool.py -M HybridNew -d htt_mt.root --LHCmode LHC-limits --singlePoint 0.2:2.0:0.2 -T 2000 -s -1 --saveToys --saveHybridResult -m 125 --dry-run\n...\n[DRY-RUN]: combine -d htt_mt.root --LHCmode LHC-limits -T 2000 -s -1 --saveToys --saveHybridResult -M HybridNew -m 125 --singlePoint 0.2 -n .Test.POINT.0.2\n[DRY-RUN]: combine -d htt_mt.root --LHCmode LHC-limits -T 2000 -s -1 --saveToys --saveHybridResult -M HybridNew -m 125 --singlePoint 0.4 -n .Test.POINT.0.4\n[...]\n[DRY-RUN]: combine -d htt_mt.root --LHCmode LHC-limits -T 2000 -s -1 --saveToys --saveHybridResult -M HybridNew -m 125 --singlePoint 2.0 -n .Test.POINT.2.0\n</code></pre> <p>When the <code>--dry-run</code> option is removed, each command will be run in sequence.</p>"},{"location":"part3/runningthetool/#grid-submission-with-crab3","title":"Grid submission with crab3","text":"<p>Submission to the grid with <code>crab3</code> works in a similar way. Before doing so, ensure that the <code>crab3</code> environment has been sourced in addition to the CMSSW environment. We will use the example of generating a grid of test-statistic distributions for limits.</p> <pre><code>$ cmsenv; source /cvmfs/cms.cern.ch/crab3/crab.sh\n$ combineTool.py -d htt_mt.root -M HybridNew --LHCmode LHC-limits --clsAcc 0 -T 2000 -s -1 --singlePoint 0.2:2.0:0.05 --saveToys --saveHybridResult -m 125 --job-mode crab3 --task-name grid-test --custom-crab custom_crab.py\n</code></pre> <p>The option <code>--custom-crab</code> should point to a python file containing a function of the form <code>custom_crab(config)</code> that will be used to modify the default crab configuration. 
You can use this to set the output site to your local grid site, or modify other options such as the voRole, or the site blacklist/whitelist.</p> <p>For example:</p> <pre><code>def custom_crab(config):\n  print '&gt;&gt; Customising the crab config'\n  config.Site.storageSite = 'T2_CH_CERN'\n  config.Site.blacklist = ['SOME_SITE', 'SOME_OTHER_SITE']\n</code></pre> <p>Again it is possible to use the option <code>--dry-run</code> to see what the complete crab config will look like before actually submitting it.</p> <p>Once submitted, the progress can be monitored using the standard <code>crab</code> commands. When all jobs are completed, copy the output from your site's storage element to the local output folder.</p> <pre><code>$ crab getoutput -d crab_grid-test\n# Now we have to un-tar the output files\n$ cd crab_grid-test/results/\n$ for f in *.tar; do tar xf $f; done\n$ mv higgsCombine*.root ../../\n$ cd ../../\n</code></pre> <p>These output files should be combined with <code>hadd</code>, after which we invoke Combine as usual to calculate observed and expected limits from the merged grid.</p>"},{"location":"part3/simplifiedlikelihood/","title":"Procedure for creating and validating simplified likelihood inputs","text":"<p>This page gives a brief outline of the creation of (potentially aggregated) predictions and their covariance to facilitate external reinterpretation using the simplified likelihood (SL) approach. Instructions for validating the simplified likelihood method (detailed in the CMS note here and \"The Simplified Likelihood Framework\" paper) are also given.</p>"},{"location":"part3/simplifiedlikelihood/#requirements","title":"Requirements","text":"<p>You need an up-to-date version of Combine. Note: you should use the latest release of Combine for the exact commands on this page. You should be using Combine tag <code>v9.0.0</code> or higher, or the latest version of the <code>112x</code> branch, to follow these instructions.</p> <p>You will find the python scripts needed to convert Combine outputs into simplified likelihood inputs under <code>test/simplifiedLikelihood</code>.</p> <p>If you're using the <code>102x</code> branch (not recommended), then you can obtain these scripts from here by running:  <pre><code>curl -s https://raw.githubusercontent.com/nucleosynthesis/work-tools/master/sparse-checkout-SL-ssh.sh &gt; checkoutSL.sh\nbash checkoutSL.sh\nls work-tools/stats-tools\n</code></pre></p> <p>If you also want to validate your inputs and perform fits/scans using them, you can use the package SLtools from The Simplified Likelihood Framework paper for this. <pre><code>git clone https://gitlab.cern.ch/SimplifiedLikelihood/SLtools.git\n</code></pre></p>"},{"location":"part3/simplifiedlikelihood/#producing-covariance-for-recasting","title":"Producing covariance for recasting","text":"<p>Producing the necessary predictions and covariance for recasting varies depending on whether or not control regions are explicitly included in the datacard when running fits. Instructions for cases where the control regions are and are not included are detailed below.</p> <p>Warning</p> <p>The instructions below will calculate moments based on the assumption that \\(E[x]=\\hat{x}\\), i.e. it will use the maximum likelihood estimators for the yields as the expectation values. 
If instead you want to use the full definition of the moments, you can run the <code>FitDiagnostics</code> method with the <code>-t</code> option and include <code>--savePredictionsPerToy</code> and remove the other options, which will produce a tree of the toys in the output from which moments can be calculated. </p>"},{"location":"part3/simplifiedlikelihood/#type-a-control-regions-included-in-datacard","title":"Type A - Control regions included in datacard","text":"<p>For an example datacard 'datacard.txt' including two signal channels 'Signal1' and 'Signal2', make the workspace including the masking flags</p> <pre><code>text2workspace.py --channel-masks --X-allow-no-signal --X-allow-no-background datacard.txt -o datacard.root\n</code></pre> <p>Run the fit making the covariance (output saved as <code>fitDiagnosticsName.root</code>) masking the signal channels. Note that all signal channels must be masked!</p> <p><pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 2000 --setParameters mask_Signal1=1,mask_Signal2=1 --saveOverall  -n Name\n</code></pre> Where \"Name\" can be specified by you.</p> <p>Outputs, including predictions and covariance, will be saved in <code>fitDiagnosticsName.root</code>, in the folder <code>shapes_fit_b</code></p>"},{"location":"part3/simplifiedlikelihood/#type-b-control-regions-not-included-in-datacard","title":"Type B - Control regions not included in datacard","text":"<p>For an example datacard 'datacard.txt' including two signal channels 'Signal1' and 'Signal2', make the workspace</p> <pre><code>text2workspace.py --X-allow-no-signal --X-allow-no-background datacard.txt -o datacard.root\n</code></pre> <p>Run the fit making the covariance (output saved as <code>fitDiagnosticsName.root</code>) setting no pre-fit signal contribution. Note we must set <code>--preFitValue 0</code> in this case, since we will be using the pre-fit uncertainties for the covariance calculation and we do not want to include the uncertainties on the signal. </p> <p><pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 2000 --saveOverall --preFitValue 0   -n Name\n</code></pre> Where \"Name\" can be specified by you.</p> <p>Outputs, including predictions and covariance, will be saved in <code>fitDiagnosticsName.root</code>, in the folder <code>shapes_prefit</code></p> <p>In order to also extract the signal yields corresponding to <code>r=1</code> (in case you want to run the validation step later), you also need to produce a second file with the pre-fit value set to 1. For this you do not need to run many toys. To save time you can set <code>--numToysForShape</code> to a low value. </p> <pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 1 --saveOverall --preFitValue 1   -n Name2\n</code></pre> <p>You should check that the order of the bins in the covariance matrix is as expected.</p>"},{"location":"part3/simplifiedlikelihood/#produce-simplified-likelihood-inputs","title":"Produce simplified likelihood inputs","text":"<p>Head over to the <code>test/simplifiedLikelihoods</code> directory inside your Combine area. The following instructions depend on whether or not you are aggregating your signal regions. Choose the instructions for your case. </p>"},{"location":"part3/simplifiedlikelihood/#not-aggregating","title":"Not Aggregating","text":"<p>Run the <code>makeLHInputs.py</code> script to prepare the inputs for the simplified likelihood. 
The filter flag can be used to select only signal regions based on the channel names. To include all channels do not include the filter flag.</p> <p>The SL input must NOT include any control regions that were not masked in the fit.</p> <p>If your analysis is Type B (i.e everything in the datacard is a signal region), then you can just run </p> <pre><code>python makeLHInputs.py -i fitDiagnosticsName.root -o SLinput.root \n</code></pre> <p>If necessary (i.e as in Type B analyses) you may also need to run the same on the output of the run where the pre-fit value was set to 1. </p> <pre><code>python makeLHInputs.py -i fitDiagnosticsName2.root -o SLinput2.root \n</code></pre> <p>If you instead have a Type A analysis (some of the regions are control regions that were used to fit but not masked) then you should add the option <code>--filter SignalName</code> where <code>SignalName</code> is some string that defines the signal regions in your datacards (for example, \"SR\" is a common name for these).</p> <p>Note: If your signal regions cannot be easily identified by a string, follow the instructions below for aggregating, but define only one channel for each aggregate region. This will maintain the full information and will not actually aggregate any regions.</p>"},{"location":"part3/simplifiedlikelihood/#aggregating","title":"Aggregating","text":"<p>If aggregating based on covariance, edit the config file <code>aggregateCFG.py</code> to define aggregate regions based on channel names. Note that wildcards are supported. You can then make likelihood inputs using</p> <pre><code>python makeLHInputs.py -i fitDiagnosticsName.root -o SLinput.root --config aggregateCFG.py\n</code></pre> <p>At this point you have the inputs as ROOT files necessary to publish and run the simplified likelihood. </p>"},{"location":"part3/simplifiedlikelihood/#validating-the-simplified-likelihood-approach","title":"Validating the simplified likelihood approach","text":"<p>The simplified likelihood relies on several assumptions (detailed in the documentation at the top). To test the validity for your analysis, statistical results between Combine and the simplified likelihood can be compared. </p> <p>We will use the package SLtools from the Simplified Likelihood Paper for this. The first step is to convert the ROOT files into python configs to run in the tool. </p>"},{"location":"part3/simplifiedlikelihood/#convert-root-to-python","title":"Convert ROOT to Python","text":"<p>If you followed the steps above, you have all of the histograms already necessary to generate the python configs. The script <code>test/simplifiedLikelihoods/convertSLRootToPython.py</code>  can be used to do the conversion. 
Just provide the following options when running with python.</p> <ul> <li><code>-O/--outname</code> : The output python file containing the model (default is <code>test.py</code>)</li> <li><code>-s/--signal</code> : The signal histogram, should be of format <code>file.root:location/to/histogram</code></li> <li><code>-b/--background</code> : The background histogram, should be of format <code>file.root:location/to/histogram</code></li> <li><code>-d/--data</code> : The data TGraph, should be of format <code>file.root:location/to/graph</code></li> <li><code>-c/--covariance</code> : The covariance TH2 histogram, should be of format <code>file.root:location/to/histogram</code></li> </ul> <p>For example, to get the correct output from a Type B analysis with no aggregating, you can run </p> <pre><code>python test/simplifiedLikelihoods/convertSLRootToPython.py -O mymodel.py -s SLinput.root:shapes_prefit/total_signal  -b SLinput.root:shapes_prefit/total_M1 -d SLinput.root:shapes_prefit/total_data -c SLinput.root:shapes_prefit/total_M2\n</code></pre> <p>The output will be a python file with the right format for the SL tool. You can mix different ROOT files for these inputs. Note that the <code>SLtools</code> package also has some tools to convert <code>.yaml</code>-based inputs into the python config for you.</p>"},{"location":"part3/simplifiedlikelihood/#run-a-likelihood-scan-with-the-sl","title":"Run a likelihood scan with the SL","text":"<p>If you have checked out the SLtools package, you can create a simple python script such as the one below to produce a scan of the simplified likelihood from your inputs.</p> <pre><code>#! /usr/bin/env python\nimport simplike as sl\n\nexec(open(\"mymodel.py\").read())\nslp1 = sl.SLParams(background, covariance, obs=data, sig=signal)\n\nimport numpy as np\nnpoints = 50\nmus = np.arange(-0.5, 2, (2+0.5)/npoints)\ntmus1 = [slp1.tmu(mu) for mu in mus]\nfrom matplotlib import pyplot as plt\nplt.plot(mus,tmus1)\nplt.show()\n</code></pre> <p>Where the <code>mymodel.py</code> config is a simple python file defined as:</p> <ul> <li><code>data</code> : A python array of observed data, one entry per bin.</li> <li><code>background</code> : A python array of expected background, one entry per bin.</li> <li><code>covariance</code> : A python array of the covariance between expected backgrounds. The format is a flat array which is converted into a 2D array inside the tool</li> <li><code>signal</code> : A python array of the expected signal, one entry per bin. 
This should be replaced with whichever signal model you are testing.</li> </ul> <p>This <code>model.py</code> can also just be the output of the previous section converted from the ROOT files for you.</p> <p>The example below is from the note CMS-NOTE-2017-001</p> Show example <pre><code>\nimport numpy\nimport array\n\nname = \"CMS-NOTE-2017-001 dummy model\"\nnbins = 8\ndata = array.array('d',[1964,877,354,182,82,36,15,11])\nbackground = array.array('d',[2006.4,836.4,350.,147.1,62.0,26.2,11.1,4.7])\nsignal = array.array('d',[47,29.4,21.1,14.3,9.4,7.1,4.7,4.3])\ncovariance = array.array('d', [ 18774.2, -2866.97, -5807.3, -4460.52, -2777.25, -1572.97, -846.653, -442.531, -2866.97, 496.273, 900.195, 667.591, 403.92, 222.614, 116.779, 59.5958, -5807.3, 900.195, 1799.56, 1376.77, 854.448, 482.435, 258.92, 134.975, -4460.52, 667.591, 1376.77, 1063.03, 664.527, 377.714, 203.967, 106.926, -2777.25, 403.92, 854.448, 664.527, 417.837, 238.76, 129.55, 68.2075, -1572.97, 222.614, 482.435, 377.714, 238.76, 137.151, 74.7665, 39.5247, -846.653, 116.779, 258.92, 203.967, 129.55, 74.7665, 40.9423, 21.7285, -442.531, 59.5958, 134.975, 106.926, 68.2075, 39.5247, 21.7285, 11.5732])\n</code>"},{"location":"part3/simplifiedlikelihood/#example-using-tutorial-datacard","title":"Example using tutorial datacard","text":"<p>For this example, we will use the tutorial datacard <code>data/tutorials/longexercise/datacard_part3.txt</code>. This datacard is of Type B since there are no control regions (all regions are signal regions). </p>\n<p>First, we will create the binary file (run <code>text2workspace</code>)\n<pre><code>text2workspace.py --X-allow-no-signal --X-allow-no-background data/tutorials/longexercise/datacard_part3.txt  -m 200\n</code></pre></p>\n<p>And next, we will generate the covariance between the bins of the background model. \n<pre><code>combine data/tutorials/longexercise/datacard_part3.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 10000 --saveOverall --preFitValue 0   -n SimpleTH1 -m 200\n\ncombine data/tutorials/longexercise/datacard_part3.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 1 --saveOverall --preFitValue 1   -n SimpleTH1_Signal1 -m 200\n</code></pre>\nWe will also want to compare our scan to that from the full likelihood, which we can get as usual from Combine. </p>\n<pre><code>combine -M MultiDimFit data/tutorials/longexercise/datacard_part3.root --rMin -0.5 --rMax 2 --algo grid -n SimpleTH1 -m 200\n</code></pre>\n<p>Next, since we do not plan to aggregate any of the bins, we will follow the instructions for this and pick out the right covariance matrix.</p>\n<pre><code>python test/simplifiedLikelihoods/makeLHInputs.py -i fitDiagnosticsSimpleTH1.root -o SLinput.root \n\npython test/simplifiedLikelihoods/makeLHInputs.py -i fitDiagnosticsSimpleTH1_Signal1.root -o SLinput_Signal1.root \n</code></pre>\n<p>We now have everything we need to provide the simplified likelihood inputs:</p>\n<pre><code>$ root -l SLinput.root\nroot [0] .ls\n\nAttaching file SLinput.root as _file0...\n(TFile *) 0x3667820\nroot [1] .ls\nTFile**         SLinput.root\n TFile*         SLinput.root\n  KEY: TDirectoryFile   shapes_fit_b;1  shapes_fit_b\n  KEY: TDirectoryFile   shapes_prefit;1 shapes_prefit\n  KEY: TDirectoryFile   shapes_fit_s;1  shapes_fit_s\n</code></pre>\n<p>We can convert this to a python module that we can use to run a scan with the <code>SLtools</code> package. Note, since we have a Type B datacard, we will be using the pre-fit covariance matrix. 
Also, this means we want to take the signal from the file where the prefit value of <code>r</code> was 1. </p>\n<pre><code>python test/simplifiedLikelihoods/convertSLRootToPython.py -O mymodel.py -s SLinput_Signal1.root:shapes_prefit/total_signal  -b SLinput.root:shapes_prefit/total_M1 -d SLinput.root:shapes_prefit/total_data -c SLinput.root:shapes_prefit/total_M2\n</code></pre>\n<p>We can compare the profiled likelihood scans from our simplified likelihood (using the python file we just created) and from the full likelihood (that we created with Combine). For the former, we first need to check out the <code>SLtools</code> package.</p>\n<pre><code>git clone https://gitlab.cern.ch/SimplifiedLikelihood/SLtools.git\nmv higgsCombineSimpleTH1.MultiDimFit.mH200.root SLtools/ \nmv mymodel.py SLtools/\ncd SLtools\n</code></pre>\n<p>The script below will create a plot of the comparison for us. </p>\n<p><pre><code>#! /usr/bin/env python\nimport simplike as sl\n\nexec(open(\"mymodel.py\").read())\n\nslp1 = sl.SLParams(background, covariance, obs=data, sig=signal)\n\nimport ROOT \nfi = ROOT.TFile.Open(\"higgsCombineSimpleTH1.MultiDimFit.mH200.root\")\ntr = fi.Get(\"limit\")\n\npoints = []\nfor i in range(tr.GetEntries()):\n  tr.GetEntry(i)\n  points.append([tr.r,2*tr.deltaNLL])\npoints.sort()\n\nmus2=[pt[0] for pt in points]\ntmus2=[pt[1] for pt in points]\n\nimport numpy as np\nnpoints = 50\nmus1 = np.arange(-0.5, 2, (2+0.5)/npoints)\ntmus1 = [slp1.tmu(mu) for mu in mus1]\n\nfrom matplotlib import pyplot as plt\nplt.plot(mus1,tmus1,label='simplified likelihood')\nplt.plot(mus2,tmus2,label='full likelihood')\nplt.legend()\nplt.xlabel(\"$\\mu$\")\nplt.ylabel(\"$-2\\Delta \\ln L$\")\n\nplt.savefig(\"compareLH.pdf\")\n</code></pre>\nThis will produce a figure like the one below. </p>\n<p></p>\n<p>It is also possible to include the third moment of each bin to improve the precision of the simplified likelihood [ JHEP 64 2019 ]. The necessary information is stored in the outputs from Combine, therefore you just need to include the option <code>-t SLinput.root:shapes_prefit/total_M3</code> in the options list for <code>convertSLRootToPython.py</code> to include this in the model file. The third moment information can be included in <code>SLtools</code> by using <code>sl.SLParams(background, covariance, third_moment, obs=data, sig=signal)</code>.</p>"},{"location":"part3/validation/","title":"Validating datacards","text":"<p>This section covers the main features of the datacard validation tool that helps you spot potential problems with your datacards at an early stage. The tool is implemented in the <code>CombineHarvester/CombineTools</code> subpackage. See the <code>combineTool</code>  section of the documentation for checkout instructions for the full tool, which is needed for this task.</p> <p>The datacard validation tool contains a number of checks. It is possible to call subsets of these checks when creating datacards within <code>CombineHarvester</code>. However, for now we will only describe the usage of the validation tool on already existing datacards. 
If you create your datacards with <code>CombineHarvester</code> and would like to include the checks at the datacard creation stage, please contact us via https://cms-talk.web.cern.ch/c/physics/cat/cat-stats/279.</p>"},{"location":"part3/validation/#how-to-use-the-tool","title":"How to use the tool","text":"<p>The basic syntax is:</p> <pre><code>ValidateDatacards.py datacard.txt\n</code></pre> <p>This will write the results of the checks to a json file (default: <code>validation.json</code>), and will print a summary to the screen, for example:</p> <pre><code>================================\n=======Validation results=======\n================================\n&gt;&gt;&gt;There were  7800 warnings of type  'up/down templates vary the yield in the same direction'\n&gt;&gt;&gt;There were  5323 warnings of type  'up/down templates are identical'\n&gt;&gt;&gt;There were no warnings of type  'At least one of the up/down systematic uncertainty templates is empty'\n&gt;&gt;&gt;There were  4406 warnings of type  'Uncertainty has normalisation effect of more than 10.0%'\n&gt;&gt;&gt;There were  8371 warnings of type  'Uncertainty probably has no genuine shape effect'\n&gt;&gt;&gt;There were no warnings of type 'Empty process'\n&gt;&gt;&gt;There were no warnings of type 'Bins of the template empty in background'\n&gt;&gt;&gt;INFO: there were  169  alerts of type  'Small signal process'\n</code></pre> <p>The meaning of each of these warnings/alerts is discussed below.</p> <p>The following arguments are possible: <pre><code>usage: ValidateDatacards.py [-h] [--printLevel PRINTLEVEL] [--readOnly]\n                            [--checkUncertOver CHECKUNCERTOVER]\n                            [--reportSigUnder REPORTSIGUNDER]\n                            [--jsonFile JSONFILE] [--mass MASS]\n                            cards\n\npositional arguments:\n  cards                 Specifies the full path to the datacards to check\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --printLevel PRINTLEVEL, -p PRINTLEVEL\n                        Specify the level of info printing (0-3, default:1)\n  --readOnly            If this is enabled, skip validation and only read the\n                        output json\n  --checkUncertOver CHECKUNCERTOVER, -c CHECKUNCERTOVER\n                        Report uncertainties which have a normalization effect\n                        larger than this fraction (default:0.1)\n  --reportSigUnder REPORTSIGUNDER, -s REPORTSIGUNDER\n                        Report signals contributing less than this fraction of\n                        the total in a channel (default:0.001)\n  --jsonFile JSONFILE   Path to the json file to read/write results from\n                        (default:validation.json)\n  --mass MASS           Signal mass to use (default:*)\n</code></pre> <code>printLevel</code> adjusts how much information is printed to the screen. When set to 0, the results are only written to the json file, but not to the screen. When set to 1 (default), the number of warnings/alerts of a given type is printed to the screen. Setting this option to 2 prints the same information as level 1, and additionally prints which uncertainties are affected (if the check is related to uncertainties) or which processes are affected (if the check is related only to processes). 
When <code>printLevel</code> is set to 3, the information from level 2 is printed, and additionaly for checks related to uncertainties it prints which processes are affected.</p> <p>To print information to screen, the script parses the json file that contains the results of the validation checks. Therefore, if you have already run the validation tool and produced this json file, you can simply change the <code>printLevel</code> by re-running the tool with <code>printLevel</code> set to a different value, and enabling the <code>--readOnly</code> option.</p> <p>The options <code>--checkUncertOver</code> and <code>--reportSigUnder</code> will be described in more detail in the section that discusses the checks for which they are relevant.</p> <p>Note: the <code>--mass</code> argument should only be set if you normally use it when running Combine, otherwise you can leave it at the default.</p> <p>The datacard validation tool is primarily intended for shape (histogram) based analyses. However, when running on a parametric model or counting experiment the checks for small signal processes, empty processes, and uncertainties with large normalization effects can still be performed. </p>"},{"location":"part3/validation/#details-on-checks","title":"Details on checks","text":""},{"location":"part3/validation/#uncertainties-with-large-normalization-effect","title":"Uncertainties with large normalization effect","text":"<p>This check highlights nuisance parameters that have a normalization effect larger than the fraction set by the option <code>--checkUncertOver</code>. The default value is 0.1, meaning that any uncertainties with a normalization effect larger than 10% are flagged up.</p> <p>The output file contains the following information for this check:</p> <pre><code>largeNormEff: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> <p>Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' normalization effects.</p>"},{"location":"part3/validation/#at-least-one-of-the-updown-systematic-templates-is-empty","title":"At least one of the Up/Down systematic templates is empty","text":"<p>For shape uncertainties, this check reports all cases where the up and/or down template(s) are empty, when the nominal template is not.</p> <p>The output file contains the following information for this check:</p> <p><pre><code>emptySystematicShape: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' normalization effects.</p>"},{"location":"part3/validation/#identical-updown-templates","title":"Identical Up/Down templates","text":"<p>This check applies to shape uncertainties only, and will highlight cases where the shape uncertainties have identical Up and Down templates (identical in shape and in normalization).</p> <p>The information given in the output file for this check is:</p> <p><pre><code>uncertTemplSame: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' 
normalization effects.</p>"},{"location":"part3/validation/#up-and-down-templates-vary-the-yield-in-the-same-direction","title":"Up and Down templates vary the yield in the same direction","text":"<p>Again, this check only applies to shape uncertainties - it highlights cases where the 'Up' template and the 'Down' template both have the effect of increasing or decreasing the normalization of a process.</p> <p>The information given in the output file for this check is:</p> <p><pre><code>uncertVarySameDirect: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' normalization effects.</p>"},{"location":"part3/validation/#uncertainty-probably-has-no-genuine-shape-effect","title":"Uncertainty probably has no genuine shape effect","text":"<p>In this check, applying only to shape uncertainties, the normalized nominal templates are compared with the normalized templates for the 'up' and 'down' systematic variations. The script calculates \\(\\Sigma_i \\frac{2|\\text{up}(i) - \\text{nominal}(i)|}{|\\text{up}(i)| + |\\text{nominal}(i)|}\\) and \\(\\Sigma_i \\frac{2|\\text{down}(i) - \\text{nominal}(i)|}{|\\text{down}(i)| + |\\text{nominal}(i)|}\\).</p> <p>where the sums run over all bins in the histograms, and 'nominal', 'up', and 'down' are the central template and up and down varied templates, all normalized.</p> <p>If both sums are smaller than 0.001, the uncertainty is flagged up as probably not having a genuine shape effect. This means a 0.1% variation in one bin is enough to avoid being reported, but many smaller variations can also sum to be large enough to pass the threshold. It should be noted that the chosen threshold is somewhat arbitrary: if an uncertainty is flagged up as probably having no genuine shape effect you should take this as a starting point to investigate. </p> <p>The information given in the output file for this check is:</p> <p><pre><code>smallShapeEff: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"diff_d\":&lt;value&gt;\n        \"diff_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>diff_d</code> and <code>diff_u</code> are the values of the sums described above for the 'down' variation and the 'up' variation.</p>"},{"location":"part3/validation/#empty-process","title":"Empty process","text":"<p>If a process is listed in the datacard, but the yield is 0, it is flagged up by this check. </p> <p>The information given in the output file for this check is:</p> <pre><code>emptyProcessShape: {\n  &lt;analysis category&gt;: {\n    &lt;process1&gt;,\n    &lt;process2&gt;,\n    &lt;process3&gt;\n  }\n}\n</code></pre>"},{"location":"part3/validation/#bins-that-have-signal-but-no-background","title":"Bins that have signal but no background","text":"<p>For shape-based analyses, this checks whether there are any bins in the nominal templates that have signal contributions, but no background contributions. 
</p> <p>The information given in the output file for this check is:</p> <pre><code>emptyBkgBin: {\n  &lt;analysis category&gt;: {\n    &lt;bin_nr1&gt;,\n    &lt;bin_nr2&gt;,\n    &lt;bin_nr3&gt;\n  }\n}\n</code></pre>"},{"location":"part3/validation/#small-signal-process","title":"Small signal process","text":"<p>This reports signal processes that contribute less than the fraction specified by <code>--reportSigUnder</code> (default 0.001 = 0.1%) of the total signal in a given category. This produces an alert, not a warning, as it does not hint at a potential problem. However, in analyses with many signal contributions and with long fitting times, it can be helpful to remove signals from a category in which they do not contribute a significant amount.</p> <p>The information given in the output file for this check is:</p> <pre><code>smallSignalProc: {\n  &lt;analysis category&gt;: {\n    &lt;process&gt;: {\n      \"sigrate_tot\":&lt;value&gt;\n      \"procrate\":&lt;value&gt;\n    } \n  }\n}\n</code></pre> <p>Where <code>sigrate_tot</code> is the total signal yield in the analysis category and <code>procrate</code> is the yield of signal process <code>&lt;process&gt;</code>.</p>"},{"location":"part3/validation/#what-to-do-in-case-of-a-warning","title":"What to do in case of a warning","text":"<p>These checks are mostly a tool to help you investigate your datacards: a warning does not necessarily mean there is a mistake in your datacard, but you should use it as a starting point to investigate. Empty processes and empty shape uncertainties connected to nonempty processes will most likely be unintended. The same holds for cases where the 'up' and 'down' shape templates are identical. If there are bins that contain signal but no background contributions, this should be corrected. See the FAQ for more information on that point.</p> <p>For other checks it depends on the situation whether there is a problem or not. Some examples:</p> <ul> <li>An analysis-specific nonclosure uncertainty could be larger than 10%. A theoretical uncertainty in the ttbar normalization probably not.</li> <li>In an analysis with a selection that requires the presence of exactly 1 jet, 'up' and 'down' variations in the jet energy uncertainty could both change the process normalization in the same direction. 
(But they do not have to!)</li> </ul> <p>As always: think about whether you expect a check to yield a warning in case of your analysis, and if not, investigate to make sure there are no issues.</p>"},{"location":"part4/usefullinks/","title":"Useful links and further reading","text":""},{"location":"part4/usefullinks/#tutorials-and-reading-material","title":"Tutorials and reading material","text":"<p>There are several tutorials that have been run over the last few years with instructions and examples for running the Combine tool.</p> <p>Tutorial Sessions:</p> <ul> <li>1st tutorial 17th Nov 2015.</li> <li>2nd tutorial 30th Nov 2016.</li> <li>3rd tutorial 29th Nov 2017</li> <li>4th tutorial 31st Oct 2018 - Latest for <code>81x-root606</code> branch.</li> <li>5th tutorial 2nd-4th Dec 2019</li> <li>6th tutorial 14th-16th Dec 2020 - Latest for <code>102x</code> branch</li> <li>7th tutorial 3rd Feb 2023 - Uses <code>113x</code> branch</li> </ul> <p>Worked examples from Higgs analyses using Combine:</p> <ul> <li>The CMS DAS at CERN 2014</li> <li>The CMS DAS at DESY 2018</li> </ul> <p>Higgs combinations procedures:</p> <ul> <li> <p>Conventions to be used when preparing inputs for Higgs combinations</p> </li> <li> <p>CMS AN-2011/298 Procedure for the LHC Higgs boson search combination in summer 2011. This describes in more detail some of the methods used in Combine.</p> </li> </ul>"},{"location":"part4/usefullinks/#citations","title":"Citations","text":"<p>The paper for the Combine tool is available here. In addition, you can cite the following papers for the methods used within the tool:</p> <ul> <li> <p>Summer 2011 public ATLAS-CMS note for any Frequentist limit setting procedures with toys or Bayesian limits, constructing likelihoods, descriptions of nuisance parameter options (like log-normals (<code>lnN</code>) or gamma (<code>gmN</code>)), and for definitions of test-statistics.</p> </li> <li> <p>CCGV paper if you use any of the asymptotic (e.g. with <code>-M AsymptoticLimits</code> or <code>-M Significance</code>) approximations for limits/p-values.</p> </li> <li> <p>If you use the Barlow-Beeston approach to MC stat (bin-by-bin) uncertainties, please cite their paper Barlow-Beeston. You should also cite this note if you use the <code>autoMCStats</code> directive to produce a single parameter per bin.</p> </li> <li> <p>If you use <code>shape</code> uncertainties for template (<code>TH1</code> or <code>RooDataHist</code>) based datacards, you can cite this note from J. 
Conway.</p> </li> <li> <p>If you are extracting uncertainties from LH scans - i.e. using \\(-2\\Delta\\ln L=1\\) etc. for the 1\\(\\sigma\\) intervals, you can cite either the ATLAS+CMS or CMS Higgs paper.</p> </li> <li> <p>There is also a long list of citation recommendations from the CMS Statistics Committee pages.</p> </li> </ul>"},{"location":"part4/usefullinks/#combine-based-packages","title":"Combine based packages","text":"<ul> <li> <p>SWGuideHiggs2TauLimits (Deprecated)</p> </li> <li> <p>ATGCRooStats</p> </li> <li> <p>CombineHarvester</p> </li> </ul>"},{"location":"part4/usefullinks/#contacts","title":"Contacts","text":"<ul> <li>CMStalk forum: https://cms-talk.web.cern.ch/c/physics/cat/cat-stats/279</li> </ul>"},{"location":"part4/usefullinks/#cms-statistics-committee","title":"CMS Statistics Committee","text":"<ul> <li>You can find much more statistics theory and recommendations on various statistical procedures in the CMS Statistics Committee Twiki Pages</li> </ul>"},{"location":"part4/usefullinks/#faq","title":"FAQ","text":"<ul> <li>Why does Combine have trouble with bins that have zero expected contents?<ul> <li>If you are computing only upper limits, and your zero-prediction bins are all empty in data, then you can just set the background to a very small value instead of zero as the computation is regular for background going to zero (e.g. a counting experiment with \\(B\\leq1\\) will have essentially the same expected limit and observed limit as one with \\(B=0\\)). If you are computing anything else, e.g. p-values, or if your zero-prediction bins are not empty in data, you're out of luck, and you should find a way to get a reasonable background prediction there (and set an uncertainty on it, as per the point above)</li> </ul> </li> <li>How can an uncertainty be added to a zero quantity?<ul> <li>You can put an uncertainty even on a zero event yield if you use a gamma distribution. That is in fact the more proper way of doing it if the prediction of zero comes from the limited size of your MC or data sample used to compute it.</li> </ul> </li> <li>Why does changing the observation in data affect my expected limit?<ul> <li>The expected limit (if using either the default behaviour of <code>-M AsymptoticLimits</code> or using the <code>LHC-limits</code> style limit setting with toys) uses the post-fit expectation of the background model to generate toys. This means that first the model is fit to the observed data before toy generation. See the sections on blind limits and toy generation to avoid this behavior. </li> </ul> </li> <li>How can I deal with an interference term which involves a negative contribution?<ul> <li>You will need to set up a specific PhysicsModel to deal with this, however you can see this section to implement such a model that can incorporate a negative contribution to the physics process.</li> </ul> </li> <li>How does Combine work?<ul> <li>That is not a question that can be answered without someone's head exploding; please try to formulate something specific.</li> </ul> </li> <li>What does fit status XYZ mean? <ul> <li>Combine reports the fit status in some routines (for example in the <code>FitDiagnostics</code> method). These are typically the status of the last call from Minuit. For details on the meanings of these status codes see the Minuit2Minimizer documentation page.</li> </ul> </li> <li>Why does my fit not converge? <ul> <li>There are several reasons why some fits may not converge. 
Often some indication can be obtained from the <code>RooFitResult</code> or status that you will see information from when using the <code>--verbose X</code> (with \\(X&gt;2\\)) option. Sometimes however, it can be that the likelihood for your data is very unusual. You can get a rough idea about what the likelihood looks like as a function of your parameters (POIs and nuisances) using <code>combineTool.py -M FastScan -w myworkspace.root</code> (use --help for options, see also here.</li> <li>We have often seen that fits in Combine using <code>RooCBShape</code> as a parametric function will fail. This is related to an optimization that fails. You can try to fix the problem as described in this issue: issues#347 (i.e add the option <code>--X-rtd ADDNLL_CBNLL=0</code>).</li> </ul> </li> <li>Why does the fit/fits take so long? <ul> <li>The minimization routines are common to many methods in Combine. You can tune the fits using the generic optimization command line options described here. For example, setting the default minimizer strategy to 0 can greatly improve the speed, since this avoids running HESSE. In calculations such as <code>AsymptoticLimits</code>, HESSE is not needed and hence this can be done, however, for <code>FitDiagnostics</code> the uncertainties and correlations are part of the output, so using strategy 0 may not be particularly accurate. </li> </ul> </li> <li>Why are the results for my counting experiment so slow or unstable? <ul> <li>There is a known issue with counting experiments with large numbers of events that will cause unstable fits or even the fit to fail. You can avoid this by creating a \"fake\" shape datacard (see this section from the setting up the datacards page). The simplest way to do this is to run <code>combineCards.py -S mycountingcard.txt &gt; myshapecard.txt</code>. You may still find that your parameter uncertainties are not correct when you have large numbers of events. This can be often fixed using the <code>--robustHesse</code> option. An example of this issue is detailed here. </li> </ul> </li> <li>Why do some of my nuisance parameters have uncertainties &gt; 1?<ul> <li>When running <code>-M FitDiagnostics</code> you may find that the post-fit uncertainties of the nuisances are \\(&gt; 1\\) (or larger than their pre-fit values). If this is the case, you should first check if the same is true when adding the option <code>--minos all</code>, which will invoke MINOS to scan the likelihood as a function of these parameters to determine the crossing at \\(-2\\times\\Delta\\log\\mathcal{L}=1\\) rather than relying on the estimate from HESSE. However, this is not guaranteed to succeed, in which case you can scan the likelihood yourself using <code>MultiDimFit</code> (see here ) and specifying the option <code>--poi X</code> where <code>X</code> is your nuisance parameter. </li> </ul> </li> <li>How can I avoid using the data? <ul> <li>For almost all methods, you can use toy data (or an Asimov dataset) in place of the real data for your results to be blind. You should be careful however as in some methods, such as <code>-M AsymptoticLimits</code> or <code>-M HybridNew --LHCmode LHC-limits</code> or any other method using the option <code>--toysFrequentist</code>, the data will be used to determine the most likely nuisance parameter values (to determine the so-called a-posteriori expectation). See the section on toy data generation for details on this. 
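As a minimal sketch (with an illustrative datacard name; the exact options you need depend on your analysis), a blind expected limit and a likelihood scan on a background-only Asimov dataset could be run as <pre><code># expected limit only, without using the observed data\ncombine -M AsymptoticLimits datacard.txt --run blind\n# likelihood scan on a background-only Asimov dataset (-t -1) instead of the data\ncombine -M MultiDimFit datacard.txt --algo grid -t -1 --expectSignal 0\n</code></pre> 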
</li> </ul> </li> <li>What if my nuisance parameters have correlations which are not 0 or 1?<ul> <li>Combine is designed under the assumption that each source of uncertainty (nuisance parameter) is uncorrelated with the other sources. If you have a case where some pair (or set) of nuisances have some known correlation structure, you can compute the eigenvectors of their correlation matrix and provide these diagonalised nuisances to Combine. You can also model partial correlations, between different channels or data taking periods, of a given nuisance parameter using the <code>combineTool</code> as described in this page. </li> </ul> </li> <li>My nuisances are (artificially) constrained and/or the impact plots show some strange behaviour, especially after including MC statistical uncertainties. What can I do?<ul> <li>Depending on the details of the analysis, several solutions can be adopted to mitigate these effects. We advise running the validation tools first, to identify possible redundant shape uncertainties that can be safely eliminated or replaced with lnN ones. Any remaining artificial constraints should be studied. Possible mitigating strategies can be to (a) smooth the templates or (b) adopt some rebinning in order to reduce statistical fluctuations in the templates. A description of possible strategies and effects can be found in this talk by Margaret Eminizer.</li> </ul> </li> <li>What do CLs, CLs+b and CLb in the code mean?<ul> <li>The names CLs+b and CLb that are found within some of the <code>RooStats</code> tools are rather outdated and should instead be referred to as p-values - \\(p_{\\mu}\\) and \\(1-p_{b}\\), respectively. We use the CLs criterion (which itself is not a p-value) often in high energy physics as it is designed to avoid excluding a signal model when the sensitivity is low (and protects against exclusion due to underfluctuations in the data). Typically, when excluding a signal model the p-value \\(p_{\\mu}\\) often refers to the p-value under the signal+background hypothesis, assuming a particular value of the signal strength (\\(\\mu\\)) while \\(p_{b}\\) is the p-value under the background only hypothesis. You can find more details and definitions of the CLs criterion and \\(p_{\\mu}\\) and \\(p_{b}\\) in section 39.4.2.4 of the 2016 PDG review. </li> </ul> </li> </ul>"},{"location":"part5/longexercise/","title":"Main Features of Combine (Long Exercises)","text":"<p>This exercise is designed to give a broad overview of the tools available for statistical analysis in CMS using the combine tool. Combine is a high-level tool for building <code>RooFit</code>/<code>RooStats</code> models and running common statistical methods. We will cover the typical aspects of setting up an analysis and producing the results, as well as look at ways in which we can diagnose issues and get a deeper understanding of the statistical model. This is a long exercise - expect to spend some time on it, especially if you are new to Combine. If you get stuck while working through this exercise or have questions specifically about the exercise, you can ask them on this mattermost channel. Finally, we also provide solutions to some of the questions that are asked as part of the exercise. These are available here.</p> <p>For the majority of this course we will work with a simplified version of a real analysis, which nonetheless has many features of the full analysis. The analysis is a search for an additional heavy neutral Higgs boson decaying to tau lepton pairs. 
Such a signature is predicted in many extensions of the standard model, in particular the minimal supersymmetric standard model (MSSM). You can read about the analysis in the paper here. The statistical inference makes use of a variable called the total transverse mass (\\(M_{\\mathrm{T}}^{\\mathrm{tot}}\\)) that provides good discrimination between the resonant high-mass signal and the main backgrounds, which have a falling distribution in this high-mass region. The events selected in the analysis are split into several categories that target the main di-tau final states as well as the two main production modes: gluon-fusion (ggH) and b-jet associated production (bbH). One example is given below for the fully-hadronic final state in the b-tag category which targets the bbH signal:</p> <p></p> <p>We will start with the simplest analysis possible: a one-bin counting experiment using just the high \\(M_{\\mathrm{T}}^{\\mathrm{tot}}\\) region of this distribution, and from there each section of this exercise will expand on this, introducing a shape-based analysis and adding control regions to constrain the backgrounds.</p>"},{"location":"part5/longexercise/#background","title":"Background","text":"<p>You can find a presentation with some more background on likelihoods and extracting confidence intervals here. A presentation that discusses limit setting in more detail can be found here. If you are not yet familiar with these concepts, or would like to refresh your memory, we recommend that you have a look at these presentations before you start with the exercise.</p>"},{"location":"part5/longexercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>; please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>Now we will move to the working directory for this tutorial, which contains all the inputs needed to run the exercises below: <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/longexercise/\n</code></pre></p>"},{"location":"part5/longexercise/#part-1-a-one-bin-counting-experiment","title":"Part 1: A one-bin counting experiment","text":"<p>Topics covered in this section:</p> <ul> <li>A: Computing limits using the asymptotic approximation</li> <li>Advanced section: B: Computing limits with toys</li> </ul> <p>We will begin with a simplified version of a datacard from the MSSM \\(\\phi\\rightarrow\\tau\\tau\\) analysis that has been converted to a one-bin counting experiment, as described above. While the full analysis considers a range of signal mass hypotheses, we will start by considering just one: \\(m_{\\phi}\\)=800 GeV. 
Click the text below to study the datacard (<code>datacard_part1.txt</code> in the <code>longexercise</code> directory):</p> Show datacard <pre><code>imax    1 number of bins\njmax    4 number of processes minus 1\nkmax    * number of nuisance parameters\n--------------------------------------------------------------------------------\n--------------------------------------------------------------------------------\nbin          signal_region\nobservation  10.0\n--------------------------------------------------------------------------------\nbin                      signal_region   signal_region   signal_region   signal_region   signal_region\nprocess                  ttbar           diboson         Ztautau         jetFakes        bbHtautau\nprocess                  1               2               3               4               0\nrate                     4.43803         3.18309         3.7804          1.63396         0.711064\n--------------------------------------------------------------------------------\nCMS_eff_b          lnN   1.02            1.02            1.02            -               1.02\nCMS_eff_t          lnN   1.12            1.12            1.12            -               1.12\nCMS_eff_t_highpt   lnN   1.1             1.1             1.1             -               1.1\nacceptance_Ztautau lnN   -               -               1.08            -               -\nacceptance_bbH     lnN   -               -               -               -               1.05\nacceptance_ttbar   lnN   1.005           -               -               -               -\nnorm_jetFakes      lnN   -               -               -               1.2             -\nxsec_diboson       lnN   -               1.05            -               -               -\n</code></pre> <p>The layout of the datacard is as follows:</p> <ul> <li>At the top are the numbers <code>imax</code>, <code>jmax</code> and <code>kmax</code> representing the number of bins, processes and nuisance parameters respectively. Here a \"bin\" can refer to a literal single event count as in this example, or a full distribution we are fitting, in general with many histogram bins, as we will see later. We will refer to both as \"channels\" from now on. It is possible to replace these numbers with <code>*</code> and they will be deduced automatically.</li> <li>The first line starting with <code>bin</code> gives a unique label to each channel, and the following line starting with <code>observation</code> gives the number of events observed in data.</li> <li>In the remaining part of the card there are several columns: each one represents one process in one channel. The first four lines labelled <code>bin</code>, <code>process</code>, <code>process</code> and <code>rate</code> give the channel label, the process label, a process identifier (<code>&lt;=0</code> for signal, <code>&gt;0</code> for background) and the number of expected events respectively.</li> <li>The remaining lines describe sources of systematic uncertainty. Each line gives the name of the uncertainty, (which will become the name of the nuisance parameter inside our RooFit model), the type of uncertainty (\"lnN\" = log-normal normalisation uncertainty) and the effect on each process in each channel. E.g. a 20% uncertainty on the yield is written as 1.20.</li> <li>It is also possible to add a hash symbol (<code>#</code>) at the start of a line, which Combine will then ignore when it reads the card.</li> </ul> <p>We can now run Combine directly using this datacard as input. 
The general format for running Combine is:</p> <pre><code>combine -M [method] [datacard] [additional options...]\n</code></pre>"},{"location":"part5/longexercise/#a-computing-limits-using-the-asymptotic-approximation","title":"A: Computing limits using the asymptotic approximation","text":"<p>As we are searching for a signal process that does not exist in the standard model, it's natural to set an upper limit on the cross section times branching fraction of the process (assuming our dataset does not contain a significant discovery of new physics). Combine has dedicated methods for calculating upper limits. The most commonly used one is <code>AsymptoticLimits</code>, which implements the CLs criterion and uses the modified profile likelihood ratio for upper limits as the default test statistic. As the name implies, the test statistic distributions are determined analytically in the asymptotic approximation, so there is no need for more time-intensive toy throwing and fitting. Try running the following command:</p> <pre><code>combine -M AsymptoticLimits datacard_part1.txt -n .part1A\n</code></pre> <p>You should see the results of the observed and expected limit calculations printed to the screen. Here we have added an extra option, <code>-n .part1A</code>, which is short for <code>--name</code>, and is used to label the output file Combine produces, which in this case will be called <code>higgsCombine.part1A.AsymptoticLimits.mH120.root</code>. The file name depends on the options we ran with, and is of the form: <code>higgsCombine[name].[method].mH[mass].root</code>. The file contains a TTree called <code>limit</code> which stores the numerical values returned by the limit computation. Note that in our case we did not set a signal mass when running Combine (i.e. <code>-m 800</code>), so the output file just uses the default value of <code>120</code>. This does not affect our result in any way though, just the label that is used on the output file.</p> <p>The limits are given on a parameter called <code>r</code>. This is the default parameter of interest (POI) that is added to the model automatically. It is a linear scaling of the normalization of all signal processes given in the datacard, i.e. if \\(s_{i,j}\\) is the nominal number of signal events in channel \\(i\\) for signal process \\(j\\), then the normalization of that signal in the model is given as \\(r\\cdot s_{i,j}(\\vec{\\theta})\\), where \\(\\vec{\\theta}\\) represents the set of nuisance parameters which may also affect the signal normalization. We therefore have some choice in the interpretation of r: for the measurement of a process with a well-defined SM prediction we may enter this as the nominal yield in the datacard, such that \\(r=1\\) corresponds to this SM expectation, whereas for setting limits on BSM processes we may choose the nominal yield to correspond to some cross section, e.g. 1 pb, such that we can interpret the limit as a cross section limit directly. In this example the signal has been normalised to a cross section times branching fraction of 1 fb.</p> <p>The expected limit is given under the background-only hypothesis. The median value under this hypothesis as well as the quantiles needed to give the 68% and 95% intervals are also calculated. 
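As a quick way to inspect these numbers beyond the screen printout (a minimal sketch, reusing the output file name quoted above), you can scan the <code>limit</code> tree interactively in ROOT; each entry corresponds to one quantile, with <code>quantileExpected</code> set to -1 for the observed limit: <pre><code>$ root -l higgsCombine.part1A.AsymptoticLimits.mH120.root\nroot [0] limit-&gt;Scan(\"quantileExpected:limit\")\n</code></pre> 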
These are all the ingredients needed to produce the standard limit plots you will see in many CMS results, for example the \\(\\sigma \\times \\mathcal{B}\\) limits for the \\(\\text{bb}\\phi\\rightarrow\\tau\\tau\\) process:</p> <p></p> <p>In this case we only computed the values for one signal mass hypothesis, indicated by a red dashed line.</p> <p>Tasks and questions:</p> <ul> <li>There are some important uncertainties missing from the datacard above. Add the uncertainty on the luminosity (name: <code>lumi_13TeV</code>) which has a 2.5% effect on all processes (except the <code>jetFakes</code>, which are taken from data), and uncertainties on the inclusive cross sections of the <code>Ztautau</code> and <code>ttbar</code> processes (with names <code>xsec_Ztautau</code> and <code>xsec_ttbar</code>) which are 4% and 6% respectively.</li> <li>Try changing the values of some uncertainties (up or down, or removing them altogether) - how do the expected and observed limits change?</li> <li>Now try changing the number of observed events. The observed limit will naturally change, but the expected does too - why might this be?</li> </ul> <p>There are other command line options we can supply to Combine which will change its behaviour when run. You can see the full set of supported options by doing <code>combine -h</code>. Many options are specific to a given method, but others are more general and are applicable to all methods. Throughout this tutorial we will highlight some of the most useful options you may need to use, for example:</p> <ul> <li>The range on the signal strength modifier: <code>--rMin=X</code> and <code>--rMax=Y</code>: In <code>RooFit</code> parameters can optionally have a range specified. The implication of this is that their values cannot be adjusted beyond the limits of this range. The min and max values can be adjusted though, and we might need to do this for our POI <code>r</code> if the order of magnitude of our measurement is different from the default range of <code>[0, 20]</code>. This will be discussed again later in the tutorial.</li> <li>Verbosity: <code>-v X</code>: By default combine does not usually produce much output on the screen other than the main result at the end. However, much more detailed information can be printed by setting the option <code>-v N</code> with N larger than zero. For example at <code>-v 3</code> the logs from the minimizer, Minuit, will also be printed. These are very useful for debugging problems with the fit.</li> </ul>"},{"location":"part5/longexercise/#advanced-section-b-computing-limits-with-toys","title":"Advanced section: B: Computing limits with toys","text":"<p>Now we will look at computing limits without the asymptotic approximation, so instead using toy datasets to determine the test statistic distributions under the signal+background and background-only hypotheses. This can be necessary if we are searching for signal in bins with a small number of events expected. In Combine we will use the <code>HybridNew</code> method to calculate limits using toys. This mode is capable of calculating limits with several different test statistics and with fine-grained control over how the toy datasets are generated internally. To calculate LHC-style profile likelihood limits (i.e. the same as we did with the asymptotic) we set the option <code>--LHCmode LHC-limits</code>. 
You can read more about the different options in the Combine documentation.</p> <p>Run the following command: <pre><code>combine -M HybridNew datacard_part1.txt --LHCmode LHC-limits -n .part1B --saveHybridResult\n</code></pre> In contrast to <code>AsymptoticLimits</code> this will only determine the observed limit, and will take a few minutes. There will not be much output to the screen while combine is running. You can add the option <code>-v 1</code> to get a better idea of what is going on. You should see Combine stepping around in <code>r</code>, trying to find the value for which CLs = 0.05, i.e. the 95% CL limit. The <code>--saveHybridResult</code> option will cause the test statistic distributions that are generated at each tested value of <code>r</code> to be saved in the output ROOT file.</p> <p>To get an expected limit add the option <code>--expectedFromGrid X</code>, where <code>X</code> is the desired quantile, e.g. for the median:</p> <pre><code>combine -M HybridNew datacard_part1.txt --LHCmode LHC-limits -n .part1B --saveHybridResult --expectedFromGrid 0.500\n</code></pre> <p>Calculate the median expected limit and the 68% range. The 95% range could also be done, but note it will take much longer to run the 0.025 quantile. While Combine is running you can move on to the next steps below.</p> <p>Tasks and questions:</p> <ul> <li>In contrast to <code>AsymptoticLimits</code>, with <code>HybridNew</code> each limit comes with an uncertainty. What is the origin of this uncertainty?</li> <li>How good is the agreement between the asymptotic and toy-based methods?</li> <li>Why does it take longer to calculate the lower expected quantiles (e.g. 0.025, 0.16)? Think about how the statistical uncertainty on the CLs value depends on Pmu and Pb.</li> </ul> <p>Next plot the test statistic distributions stored in the output file: <pre><code>python3 $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/test/plotTestStatCLs.py --input higgsCombine.part1B.HybridNew.mH120.root --poi r --val all --mass 120\n</code></pre></p> <p>This produces a new ROOT file <code>test_stat_distributions.root</code> containing the plots, to save them as pdf/png files run this small script and look at the resulting figures:</p> <pre><code>python3 printTestStatPlots.py test_stat_distributions.root\n</code></pre>"},{"location":"part5/longexercise/#advanced-section-b-asymptotic-approximation-limitations","title":"Advanced section: B: Asymptotic approximation limitations","text":"<p>These distributions can be useful in understanding features in the CLs limits, especially in the low statistics regime. To explore this, try reducing the observed and expected yields in the datacard by a factor of 10, and rerun the above steps to compare the observed and expected limits with the asymptotic approach, and plot the test statistic distributions.</p> <p>Tasks and questions:</p> <ul> <li>Is the asymptotic limit still a good approximation?</li> <li>You might notice that the test statistic distributions are not smooth but rather have several \"bump\" structures? Where might this come from? Try reducing the size of the systematic uncertainties to make them more pronounced.</li> </ul> <p>Note that for more complex models the fitting time can increase significantly, making it infeasible to run all the toy-based limits interactively like this. 
An alternative strategy is documented here</p>"},{"location":"part5/longexercise/#part-2-a-shape-based-analysis","title":"Part 2: A shape-based analysis","text":"<p>Topics covered in this section:</p> <ul> <li>A: Setting up the datacard</li> <li>B: Running Combine for a blind analysis</li> <li>C: Using FitDiagnostics</li> <li>D: MC statistical uncertainties</li> </ul>"},{"location":"part5/longexercise/#a-setting-up-the-datacard","title":"A: Setting up the datacard","text":"<p>Now we move to the next step: instead of a one-bin counting experiment we will fit a binned distribution. In a typical analysis we will produce TH1 histograms of some variable sensitive to the presence of signal: one for the data and one for each signal and background process. Then we add a few extra lines to the datacard to link the declared processes to these shapes which are saved in a ROOT file, for example:</p> Show datacard <pre><code>imax 1\njmax 1\nkmax *\n---------------\nshapes * * simple-shapes-TH1_input.root $PROCESS $PROCESS_$SYSTEMATIC\nshapes signal * simple-shapes-TH1_input.root $PROCESS$MASS $PROCESS$MASS_$SYSTEMATIC\n---------------\nbin bin1\nobservation 85\n------------------------------\nbin             bin1       bin1\nprocess         signal     background\nprocess         0          1\nrate            10         100\n--------------------------------\nlumi     lnN    1.10       1.0\nbgnorm   lnN    1.00       1.3\nalpha  shape    -          1\n</code></pre> <p>Note that as with the one-bin card, the total nominal rate of a given process must be specified in the <code>rate</code> line of the datacard. This should agree with the value returned by <code>TH1::Integral</code>. However, we can also put a value of <code>-1</code> and the Integral value will be substituted automatically.</p> <p>There are two other differences with respect to the one-bin card:</p> <ul> <li>A new block of lines at the top defining how channels and processes are mapped to the histograms (more than one line can be used)</li> <li>In the list of systematic uncertainties some are marked as shape instead of lnN</li> </ul> <p>The syntax of the \"shapes\" line is: <code>shapes [process] [channel] [file] [histogram] [histogram_with_systematics]</code>. It is possible to use the <code>*</code> wildcard to map multiple processes and/or channels with one line. The histogram entries can contain the <code>$PROCESS</code>, <code>$CHANNEL</code> and <code>$MASS</code> place-holders which will be substituted when searching for a given (process, channel) combination. The value of <code>$MASS</code> is specified by the <code>-m</code> argument when running combine. By default the observed data process name will be <code>data_obs</code>.</p> <p>Shape uncertainties can be added by supplying two additional histograms for a process, corresponding to the distribution obtained by shifting that parameter up and down by one standard deviation. These shapes will be interpolated (see the template shape uncertainties section for details) for shifts within \\(\\pm1\\sigma\\) and linearly extrapolated beyond. The normalizations are interpolated linearly in log scale just like we do for log-normal uncertainties.</p> <p></p> <p>The final argument of the \"shapes\" line above should contain the <code>$SYSTEMATIC</code> place-holder which will be substituted by the systematic name given in the datacard.</p> <p>In the list of uncertainties the interpretation of the values for <code>shape</code> lines is a bit different from <code>lnN</code>. 
The effect can be \"-\" or 0 for no effect, 1 for normal effect, and possibly something different from 1 to test larger or smaller effects (in that case, the unit Gaussian is scaled by that factor before using it as parameter for the interpolation).</p> <p>In this section we will use a datacard corresponding to the full distribution that was shown at the start of section 1, not just the high mass region. Have a look at <code>datacard_part2.txt</code>: this is still currently a one-bin counting experiment, however the yields are much higher since we now consider the full range of \\(M_{\\mathrm{T}}^{\\mathrm{tot}}\\). If you run the asymptotic limit calculation on this you should find the sensitivity is significantly worse than before.</p> <p>The first task is to convert this to a shape analysis: the file <code>datacard_part2.shapes.root</code> contains all the necessary histograms, including those for the relevant shape systematic uncertainties. Add the relevant <code>shapes</code> lines to the top of the datacard (after the <code>kmax</code> line) to map the processes to the correct TH1s in this file. Hint: you will need a different line for the signal process.</p> <p>Compared to the counting experiment we must also consider the effect of uncertainties that change the shape of the distribution. Some, like <code>CMS_eff_t_highpt</code>, were present before, as they have both a shape and a normalisation effect. Others are primarily shape effects so were not included before.</p> <p>Add the following shape uncertainties: <code>top_pt_ttbar_shape</code> affecting <code>ttbar</code>, the tau energy scale uncertainties <code>CMS_scale_t_1prong0pi0_13TeV</code>, <code>CMS_scale_t_1prong1pi0_13TeV</code> and <code>CMS_scale_t_3prong0pi0_13TeV</code> affecting all processes except <code>jetFakes</code>, and <code>CMS_eff_t_highpt</code> also affecting the same processes.</p> <p>Once this is done you can run the asymptotic limit calculation on this datacard. From now on we will convert the text datacard into a RooFit workspace ourselves instead of combine doing it internally every time we run. This is a good idea for more complex analyses since the conversion step can take a notable amount of time. For this we use the <code>text2workspace.py</code> command:</p> <p><pre><code>text2workspace.py datacard_part2.txt -m 800 -o workspace_part2.root\n</code></pre> And then we can use this as input to combine instead of the text datacard: <pre><code>combine -M AsymptoticLimits workspace_part2.root -m 800\n</code></pre> Tasks and questions:</p> <ul> <li>Verify that the sensitivity of the shape analysis is indeed improved over the counting analysis in the first part.</li> <li>Advanced task: You can open the workspace ROOT file interactively and print the contents: <code>w-&gt;Print();</code>. Each process is represented by a PDF object that depends on the shape morphing nuisance parameters. From the workspace, choose a process and shape uncertainty, and make a plot overlaying the nominal shape with different values of the shape morphing nuisance parameter. You can change the value of a parameter with <code>w-&gt;var(\"X\")-&gt;setVal(Y)</code>, and access a particular pdf with <code>w-&gt;pdf(\"Z\")</code>. PDF objects in RooFit have a createHistogram method that requires the name of the observable (the variable defining the x-axis) - this is called <code>CMS_th1x</code> in combine datacards. 
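If you would like a starting point, the sketch below shows one possible interactive session. Note that the pdf name used here is purely illustrative - use <code>w-&gt;Print()</code> to find the actual pdf and parameter names in your own workspace - and that the histograms returned by <code>createHistogram</code> reflect the pdf shape rather than the event yield, so it is the shapes that should be compared: <pre><code>root -l workspace_part2.root\nroot [0] w-&gt;Print()  // find the pdf and nuisance parameter names first\nroot [1] TH1 *nominal = w-&gt;pdf(\"shapeSig_signal_bin1_morph\")-&gt;createHistogram(\"CMS_th1x\")\nroot [2] w-&gt;var(\"CMS_scale_t_1prong0pi0_13TeV\")-&gt;setVal(1.0)\nroot [3] TH1 *shifted = w-&gt;pdf(\"shapeSig_signal_bin1_morph\")-&gt;createHistogram(\"CMS_th1x\")\nroot [4] nominal-&gt;Draw(\"HIST\")\nroot [5] shifted-&gt;SetLineColor(kRed); shifted-&gt;Draw(\"HIST SAME\")\n</code></pre> 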
Feel free to ask for help with this!</li> </ul>"},{"location":"part5/longexercise/#b-running-combine-for-a-blind-analysis","title":"B: Running combine for a blind analysis","text":"<p>Most analyses are developed and optimised while we are \"blind\" to the region of data where we expect our signal to be. With <code>AsymptoticLimits</code> we can choose just to run the expected limit (<code>--run expected</code>), so as not to calculate the observed. However the data is still used, even for the expected, since in the frequentist approach a background-only fit to the data is performed to define the Asimov dataset used to calculate the expected limits. To skip this fit to data and use the pre-fit state of the model the option <code>--run blind</code> or <code>--noFitAsimov</code> can be used. Task: Compare the expected limits calculated with <code>--run expected</code> and <code>--run blind</code>. Why are they different?</p> <p>A more general way of blinding is to use combine's toy and Asimov dataset generating functionality. You can read more about this here. These options can be used with any method in combine, not just <code>AsymptoticLimits</code>.</p> <p>Task: Calculate a blind limit by generating a background-only Asimov with the <code>-t -1</code> option instead of using the <code>AsymptoticLimits</code> specific options. You should find the observed limit is the same as the expected. Then see what happens if you inject a signal into the Asimov dataset using the <code>--expectSignal [X]</code> option.</p>"},{"location":"part5/longexercise/#c-using-fitdiagnostics","title":"C: Using FitDiagnostics","text":"<p>We will now explore one of the most commonly used modes of Combine: <code>FitDiagnostics</code> . As well as allowing us to make a measurement of some physical quantity (as opposed to just setting a limit on it), this method is useful to gain additional information about the model and the behaviour of the fit. It performs two fits:</p> <ul> <li>A \"background-only\" (b-only) fit: first POI (usually \"r\") fixed to zero</li> <li>A \"signal+background\" (s+b) fit: all POIs are floating</li> </ul> <p>With the s+b fit Combine will report the best-fit value of our signal strength modifier <code>r</code>. As well as the usual output file, a file named <code>fitDiagnosticsTest.root</code> is produced which contains additional information. In particular it includes two <code>RooFitResult</code> objects, one for the b-only and one for the s+b fit, which store the fitted values of all the nuisance parameters (NPs) and POIs as well as estimates of their uncertainties. The covariance matrix from both fits is also included, from which we can learn about the correlations between parameters. 
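For example, once you have produced this file (with the command below) you can open it interactively and query the correlation coefficient between any two parameters stored in the <code>RooFitResult</code>, something like: <pre><code>root -l fitDiagnosticsTest.root\nroot [1] fit_s-&gt;correlation(\"r\", \"lumi_13TeV\")\n</code></pre> 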
Run the <code>FitDiagnostics</code> method on our workspace:</p> <p><pre><code>combine -M FitDiagnostics workspace_part2.root -m 800 --rMin -20 --rMax 20\n</code></pre> Open the resulting <code>fitDiagnosticsTest.root</code> interactively and print the contents of the s+b RooFitResult:</p> <pre><code>root [1] fit_s-&gt;Print()\n</code></pre> Show output <pre><code>RooFitResult: minimized FCN value: -2.55338e-05, estimated distance to minimum: 7.54243e-06\n                covariance matrix quality: Full, accurate covariance matrix\n                Status : MINIMIZE=0 HESSE=0\n\n    Floating Parameter    FinalValue +/-  Error\n  --------------------  --------------------------\n             CMS_eff_b   -4.5380e-02 +/-  9.93e-01\n             CMS_eff_t   -2.6311e-01 +/-  7.33e-01\n      CMS_eff_t_highpt   -4.7146e-01 +/-  9.62e-01\n  CMS_scale_t_1prong0pi0_13TeV   -1.5989e-01 +/-  5.93e-01\n  CMS_scale_t_1prong1pi0_13TeV   -1.6426e-01 +/-  4.94e-01\n  CMS_scale_t_3prong0pi0_13TeV   -3.0698e-01 +/-  6.06e-01\n    acceptance_Ztautau   -3.1262e-01 +/-  8.62e-01\n        acceptance_bbH   -2.8676e-05 +/-  1.00e+00\n      acceptance_ttbar    4.9981e-03 +/-  1.00e+00\n            lumi_13TeV   -5.6366e-02 +/-  9.89e-01\n         norm_jetFakes   -9.3327e-02 +/-  2.56e-01\n                     r   -2.7220e+00 +/-  2.59e+00\n    top_pt_ttbar_shape    1.7586e-01 +/-  7.00e-01\n          xsec_Ztautau   -1.6007e-01 +/-  9.66e-01\n          xsec_diboson    3.9758e-02 +/-  1.00e+00\n            xsec_ttbar    5.7794e-02 +/-  9.46e-01\n</code></pre> <p>There are several useful pieces of information here. At the top the status codes from the fits that were performed is given. In this case we can see that two algorithms were run: <code>MINIMIZE</code> and <code>HESSE</code>, both of which returned a successful status code (0). Both of these are routines in the Minuit2 minimization package - the default minimizer used in RooFit. The first performs the main fit to the data, and the second calculates the covariance matrix at the best-fit point. It is important to always check this second step was successful and the message \"Full, accurate covariance matrix\" is printed, otherwise the parameter uncertainties can be very inaccurate, even if the fit itself was successful.</p> <p>Underneath this the best-fit values (\\(\\theta\\)) and symmetrised uncertainties for all the floating parameters are given. For all the constrained nuisance parameters a convention is used by which the nominal value (\\(\\theta_I\\)) is zero, corresponding to the mean of a Gaussian constraint PDF with width 1.0, such that the parameter values \\(\\pm 1.0\\) correspond to the \\(\\pm 1\\sigma\\) input uncertainties.</p> <p>A more useful way of looking at this is to compare the pre- and post-fit values of the parameters, to see how much the fit to data has shifted and constrained these parameters with respect to the input uncertainty. 
The script <code>diffNuisances.py</code> can be used for this:</p> <pre><code>python diffNuisances.py fitDiagnosticsTest.root --all\n</code></pre> Show output <pre><code>name                                              b-only fit            s+b fit         rho\nCMS_eff_b                                        -0.04, 0.99        -0.05, 0.99       +0.01\nCMS_eff_t                                     * -0.24, 0.73*     * -0.26, 0.73*       +0.06\nCMS_eff_t_highpt                              * -0.56, 0.94*     * -0.47, 0.96*       +0.02\nCMS_scale_t_1prong0pi0_13TeV                  * -0.17, 0.58*     * -0.16, 0.59*       -0.04\nCMS_scale_t_1prong1pi0_13TeV                  ! -0.12, 0.45!     ! -0.16, 0.49!       +0.20\nCMS_scale_t_3prong0pi0_13TeV                  * -0.31, 0.61*     * -0.31, 0.61*       +0.02\nacceptance_Ztautau                            * -0.31, 0.86*     * -0.31, 0.86*       -0.05\nacceptance_bbH                                   +0.00, 1.00        -0.00, 1.00       +0.05\nacceptance_ttbar                                 +0.01, 1.00        +0.00, 1.00       +0.00\nlumi_13TeV                                       -0.05, 0.99        -0.06, 0.99       +0.01\nnorm_jetFakes                                 ! -0.09, 0.26!     ! -0.09, 0.26!       -0.05\ntop_pt_ttbar_shape                            * +0.24, 0.69*     * +0.18, 0.70*       +0.22\nxsec_Ztautau                                     -0.16, 0.97        -0.16, 0.97       -0.02\nxsec_diboson                                     +0.03, 1.00        +0.04, 1.00       -0.02\nxsec_ttbar                                       +0.08, 0.95        +0.06, 0.95       +0.02\n</code></pre> <p>The numbers in each column are respectively \\(\\frac{\\theta-\\theta_I}{\\sigma_I}\\) (This is often called the pull, but note that this is a misnomer. In this tutorial we will refer to it as the fitted value of the nuisance parameter relative to the input uncertainty. The true pull is defined as discussed under <code>diffPullAsym</code> here ), where \\(\\sigma_I\\) is the input uncertainty; and the ratio of the post-fit to the pre-fit uncertainty \\(\\frac{\\sigma}{\\sigma_I}\\).</p> <p>Tasks and questions:</p> <ul> <li>Which parameter has the largest shift from the nominal value (0) in the fitted value of the nuisance parameter relative to the input uncertainty? Which has the tightest constraint?</li> <li>Should we be concerned when a parameter is more strongly constrained than the input uncertainty (i.e. \\(\\frac{\\sigma}{\\sigma_I}&lt;1.0\\))?</li> <li>Check the fitted values of the nuisance parameters and constraints on a b-only and s+b asimov dataset instead. This check is required for all analyses in the Higgs PAG. It serves both as a closure test (do we fit exactly what signal strength we input?) and a way to check whether there are any infeasibly strong constraints while the analysis is still blind (typical example: something has probably gone wrong if we constrain the luminosity uncertainty to 10% of the input!)</li> <li>Advanced task: Sometimes there are problems in the fit model that aren't apparent from only fitting the Asimov dataset, but will appear when fitting randomised data. Follow the exercise on toy-by-toy diagnostics here to explore the tools available for this.</li> </ul>"},{"location":"part5/longexercise/#d-mc-statistical-uncertainties","title":"D: MC statistical uncertainties","text":"<p>So far there is an important source of uncertainty we have neglected. 
Our estimates of the backgrounds come either from MC simulation or from sideband regions in data, and in both cases these estimates are subject to a statistical uncertainty on the number of simulated or data events. In principle we should include an independent statistical uncertainty for every bin of every process in our model. It's important to note that Combine/<code>RooFit</code> does not take this into account automatically - statistical fluctuations of the data are implicitly accounted for in the likelihood formalism, but statistical uncertainties in the model must be specified by us.</p> <p>One way to implement these uncertainties is to create a <code>shape</code> uncertainty for each bin of each process, in which the up and down histograms have the contents of the bin shifted up and down by the \\(1\\sigma\\) uncertainty. However this makes the likelihood evaluation computationally inefficient, and can lead to a large number of nuisance parameters in more complex models. Instead we will use a feature in Combine called <code>autoMCStats</code> that creates these automatically from the datacard, and uses a technique called \"Barlow-Beeston-lite\" to reduce the number of systematic uncertainties that are created. This works on the assumption that for high MC event counts we can model the uncertainty with a Gaussian distribution. Given that the statistical uncertainties of the different processes are independent, the total uncertainty in a particular bin is just the sum of \\(N\\) individual Gaussians, which is itself a Gaussian distribution. So instead of \\(N\\) nuisance parameters we need only one. This breaks down when the number of events is small and we are not in the Gaussian regime. The <code>autoMCStats</code> tool has a threshold setting on the number of events below which the Barlow-Beeston-lite approach is not used, and instead a Poisson PDF is used to model per-process uncertainties in that bin.</p> <p>After reading the full documentation on <code>autoMCStats</code> here, add the corresponding line to your datacard. Start by setting a threshold of 0, i.e. <code>[channel] autoMCStats 0</code>, to force the use of Barlow-Beeston-lite in all bins.</p> <p>Tasks and questions:</p> <ul> <li>Check how much the cross section measurement and uncertainties change using <code>FitDiagnostics</code>.</li> <li>It is also useful to check how the expected uncertainty changes using an Asimov dataset, say with <code>r=10</code> injected.</li> <li>Advanced task: See what happens if the Poisson threshold is increased. Based on your results, what threshold would you recommend for this analysis?</li> </ul>"},{"location":"part5/longexercise/#part-3-adding-control-regions","title":"Part 3: Adding control regions","text":"<p>Topics covered in this section:</p> <ul> <li>A: Use of rateParams</li> <li>B: Nuisance parameter impacts</li> <li>C: Post-fit distributions</li> <li>D: Calculating the significance</li> <li>E: Signal strength measurement and uncertainty breakdown</li> <li>F: Use of channel masking</li> </ul> <p>In a modern analysis it is typical for some or all of the backgrounds to be estimated using the data, instead of relying purely on MC simulation. This can take many forms, but a common approach is to use \"control regions\" (CRs) that are pure and/or have higher statistics for a given process. These are defined by event selections that are similar to, but non-overlapping with, the signal region. 
In our \\(\\phi\\rightarrow\\tau\\tau\\) example the \\(\\text{Z}\\rightarrow\\tau\\tau\\) background normalisation can be calibrated using a \\(\\text{Z}\\rightarrow\\mu\\mu\\) CR, and the \\(\\text{t}\\bar{\\text{t}}\\) background using an \\(e+\\mu\\) CR. By comparing the number of data events in these CRs to our MC expectation we can obtain scale factors to apply to the corresponding backgrounds in the signal region (SR). The idea is that the data will give us a more accurate prediction of the background with fewer systematic uncertainties. For example, we can remove the cross section and acceptance uncertainties in the SR, since we are no longer using the MC prediction (with a caveat discussed below). While we could simply derive these correction factors and apply them to our signal region datacard, a better way is to include these regions in our fit model and tie the normalisations of the backgrounds in the CR and SR together. This has a number of advantages:</p> <ul> <li>Automatically handles the statistical uncertainty due to the number of data events in the CR</li> <li>Allows for the presence of some signal contamination in the CR to be handled correctly</li> <li>The CRs are typically not 100% pure in the background they're meant to control - other backgrounds may be present, with their own systematic uncertainties, some of which may be correlated with the SR or other CRs. Propagating these effects through to the SR \"by hand\" can become very challenging.</li> </ul> <p>In this section we will continue to use the same SR as in the previous one, however we will switch to a lower signal mass hypothesis, \\(m_{\\phi}=200\\) GeV, as its sensitivity depends more strongly on the background prediction than the high mass signal, so it is better for illustrating the use of CRs. Here the nominal signal (<code>r=1</code>) has been normalised to a cross section of 1 pb.</p> <p>The SR datacard for the 200 GeV signal is <code>datacard_part3.txt</code>. Two further datacards are provided: <code>datacard_part3_ttbar_cr.txt</code> and <code>datacard_part3_DY_cr.txt</code> which represent the CRs for the Drell-Yan and \\(\\text{t}\\bar{\\text{t}}\\) processes as described above. The cross section and acceptance uncertainties for these processes have pre-emptively been removed from the SR card. However we cannot get away with neglecting acceptance effects altogether. We are still implicitly using the MC simulation to predict the ratio of events in the CR and SR, and this ratio will in general carry a theoretical acceptance uncertainty. However, if the CRs are well chosen, this uncertainty should be smaller than the direct acceptance uncertainty in the SR. The uncertainties <code>acceptance_ttbar_cr</code> and <code>acceptance_DY_cr</code> have been added to these datacards to cover this effect. Task: Calculate the ratio of CR to SR events for these two processes, as well as their CR purity to verify that these are useful CRs.</p> <p>The next step is to combine these datacards into one, which is done with the <code>combineCards.py</code> script:</p> <pre><code>combineCards.py signal_region=datacard_part3.txt ttbar_cr=datacard_part3_ttbar_cr.txt DY_cr=datacard_part3_DY_cr.txt &amp;&gt; part3_combined.txt\n</code></pre> <p>Each argument is of the form <code>[new channel name]=[datacard.txt]</code>. The new datacard is written to the screen by default, so we redirect the output into our new datacard file. 
The output looks like:</p> Show datacard <pre><code>imax 3 number of bins\njmax 8 number of processes minus 1\nkmax 15 number of nuisance parameters\n----------------------------------------------------------------------------------------------------------------------------------\nshapes *              DY_cr          datacard_part3_DY_cr.shapes.root DY_control_region/$PROCESS DY_control_region/$PROCESS_$SYSTEMATIC\nshapes *              signal_region  datacard_part3.shapes.root signal_region/$PROCESS signal_region/$PROCESS_$SYSTEMATIC\nshapes bbHtautau      signal_region  datacard_part3.shapes.root signal_region/bbHtautau$MASS signal_region/bbHtautau$MASS_$SYSTEMATIC\nshapes *              ttbar_cr       datacard_part3_ttbar_cr.shapes.root tt_control_region/$PROCESS tt_control_region/$PROCESS_$SYSTEMATIC\n----------------------------------------------------------------------------------------------------------------------------------\nbin          signal_region  ttbar_cr       DY_cr\nobservation  3416           79251          365754\n----------------------------------------------------------------------------------------------------------------------------------\nbin                                               signal_region  signal_region  signal_region  signal_region  signal_region  ttbar_cr       ttbar_cr       ttbar_cr       ttbar_cr       ttbar_cr       DY_cr          DY_cr          DY_cr          DY_cr          DY_cr          DY_cr\nprocess                                           bbHtautau      ttbar          diboson        Ztautau        jetFakes       W              QCD            ttbar          VV             Ztautau        W              QCD            Zmumu          ttbar          VV             Ztautau\nprocess                                           0              1              2              3              4              5              6              1              7              3              5              6              8              1              7              3\nrate                                              198.521        683.017        96.5185        742.649        2048.94        597.336        308.965        67280.4        10589.6        150.025        59.9999        141.725        305423         34341.1        5273.43        115.34\n----------------------------------------------------------------------------------------------------------------------------------\nCMS_eff_b               lnN                       1.02           1.02           1.02           1.02           -              -              -              -              -              -              -              -              -              -              -              -\nCMS_eff_e               lnN                       -              -              -              -              -              1.02           -              -              1.02           1.02           -              -              -              -              -              -\n...\n</code></pre> <p>The <code>[new channel name]=</code> part of the input arguments is not required, but it gives us control over how the channels in the combined card will be named, otherwise default values like <code>ch1</code>, <code>ch2</code> etc will be used.</p>"},{"location":"part5/longexercise/#a-use-of-rateparams","title":"A: Use of rateParams","text":"<p>We now have a combined datacard that we can run text2workspace.py on and start doing fits, however there is still one important ingredient missing. 
Right now the yields of the <code>Ztautau</code> process in the SR and <code>Zmumu</code> in the CR are not connected to each other in any way, and similarly for the <code>ttbar</code> processes. In the fit both would be adjusted by the nuisance parameters only, and constrained to the nominal yields. To remedy this we introduce <code>rateParam</code> directives to the datacard. A <code>rateParam</code> is a new free parameter that multiplies the yield of a given process, in the same way the signal strength <code>r</code> multiplies the signal yield. The syntax of a <code>rateParam</code> line in the datacard is</p> <pre><code>[name] rateParam [channel] [process] [init] [min,max]\n</code></pre> <p>where <code>name</code> is the chosen name for the parameter, <code>channel</code> and <code>process</code> specify which (channel, process) combination it should affect, <code>init</code> gives the initial value, and optionally <code>[min,max]</code> specifies the ranges on the RooRealVar that will be created. The <code>channel</code> and <code>process</code> arguments support the use of the wildcard <code>*</code> to match multiple entries. Task: Add two <code>rateParam</code>s with nominal values of <code>1.0</code> to the end of the combined datacard named <code>rate_ttbar</code> and <code>rate_Zll</code>. The former should affect the <code>ttbar</code> process in all channels, and the latter should affect the <code>Ztautau</code> and <code>Zmumu</code> processes in all channels. Set ranges of <code>[0,5]</code> for both. Note that a <code>rateParam</code> name can be repeated to apply it to multiple processes, e.g.:</p> <pre><code>rateScale rateParam * procA 1.0\nrateScale rateParam * procB 1.0\n</code></pre> <p>is perfectly valid and only one <code>rateParam</code> will be created. These parameters will allow the yields to float in the fit without prior constraint (unlike a regular <code>lnN</code> or <code>shape</code> systematic), with the yields in the CRs and SR tied together.</p> <p>Tasks and questions:</p> <ul> <li>Run <code>text2workspace.py</code> on this combined card (don't forget to set the mass and output name <code>-m 200 -o workspace_part3.root</code>) and then use <code>FitDiagnostics</code> on an Asimov dataset with <code>r=1</code> to get the expected uncertainty. Suggested command line options: <code>--rMin 0 --rMax 2</code></li> <li>Using the RooFitResult in the <code>fitDiagnosticsTest.root</code> file, check the post-fit value of the rateParams. To what level are the normalisations of the DY and ttbar processes constrained?</li> <li>To compare to the previous approach of fitting the SR only, with cross section and acceptance uncertainties restored, an additional card is provided: <code>datacard_part3_nocrs.txt</code>. Run the same fit on this card to verify the improvement of the SR+CR approach.</li> </ul>"},{"location":"part5/longexercise/#b-nuisance-parameter-impacts","title":"B: Nuisance parameter impacts","text":"<p>It is often useful to examine in detail the effects the systematic uncertainties have on the signal strength measurement. This is often referred to as calculating the \"impact\" of each uncertainty. What this means is to determine the shift in the signal strength, with respect to the best-fit, that is induced if a given nuisance parameter is shifted by its \\(\\pm1\\sigma\\) post-fit uncertainty values. If the signal strength shifts a lot, it tells us that it has a strong dependency on this systematic uncertainty. 
In fact, what we are measuring here is strongly related to the correlation coefficient between the signal strength and the nuisance parameter. The <code>MultiDimFit</code> method has an algorithm for calculating the impact for a given systematic: <code>--algo impact -P [parameter name]</code>, but it is typical to use a higher-level script, <code>combineTool.py</code>, to automatically run the impacts for all parameters. Full documentation on this is given here. There is a three step process for running this. First we perform an initial fit for the signal strength and its uncertainty:</p> <p><pre><code>combineTool.py -M Impacts -d workspace_part3.root -m 200 --rMin -1 --rMax 2 --robustFit 1 --doInitialFit\n</code></pre> Then we run the impacts for all the nuisance parameters: <pre><code>combineTool.py -M Impacts -d workspace_part3.root -m 200 --rMin -1 --rMax 2 --robustFit 1 --doFits\n</code></pre> This will take a little bit of time. When finished we collect all the output and convert it to a json file: <pre><code>combineTool.py -M Impacts -d workspace_part3.root -m 200 --rMin -1 --rMax 2 --robustFit 1 --output impacts.json\n</code></pre> We can then make a plot showing the fitted values of the nuisance parameters, relative to the input uncertainty, and parameter impacts, sorted by the largest impact: <pre><code>plotImpacts.py -i impacts.json -o impacts\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Identify the most important uncertainties using the impacts tool.</li> <li>In the plot, some parameters do not show a fitted value of the nuisance parameter relative to the input uncertainty, but rather just a numerical value - why?</li> </ul>"},{"location":"part5/longexercise/#c-post-fit-distributions","title":"C: Post-fit distributions","text":"<p>Another thing the <code>FitDiagnostics</code> mode can help us with is visualising the distributions we are fitting, and the uncertainties on those distributions, both before the fit is performed (\"pre-fit\") and after (\"post-fit\"). The pre-fit can give us some idea of how well our uncertainties cover any data-MC discrepancy, and the post-fit if discrepancies remain after the fit to data (as well as possibly letting us see the presence of a significant signal!).</p> <p>To produce these distributions add the <code>--saveShapes</code> and <code>--saveWithUncertainties</code> options when running <code>FitDiagnostics</code>:</p> <pre><code>combine -M FitDiagnostics workspace_part3.root -m 200 --rMin -1 --rMax 2 --saveShapes --saveWithUncertainties -n .part3B\n</code></pre> <p>Combine will produce pre- and post-fit distributions (for fit_s and fit_b) in the fitDiagnosticsTest.root output file:</p> <p></p> <p>Tasks and questions:</p> <ul> <li> <p>Make a plot showing the expected background and signal contributions using the output from <code>FitDiagnostics</code> - do this for both the pre-fit and post-fit. You will find a script <code>postFitPlot.py</code> in the <code>longexercise</code> directory that can help you get started.  The bin errors on the TH1s in the fitDiagnostics file are determined from the systematic uncertainties. 
In the post-fit these take into account the additional constraints on the nuisance parameters as well as any correlations.</p> </li> <li> <p>Why is the uncertainty on the post-fit so much smaller than on the pre-fit?</p> </li> </ul>"},{"location":"part5/longexercise/#d-calculating-the-significance","title":"D: Calculating the significance","text":"<p>In the event that you observe a deviation from your null hypothesis, in this case the b-only hypothesis, Combine can be used to calculate the p-value or significance. To do this using the asymptotic approximation simply do:</p> <p><pre><code>combine -M Significance workspace_part3.root -m 200 --rMin -1 --rMax 2\n</code></pre> To calculate the expected significance for a given signal strength we can just generate an Asimov dataset first:</p> <p><pre><code>combine -M Significance workspace_part3.root -m 200 --rMin -1 --rMax 5 -t -1 --expectSignal 1.5\n</code></pre> Note that the Asimov dataset generated this way uses the nominal values of all model parameters to define the dataset. Another option is to add <code>--toysFrequentist</code>, which causes a fit to the data to be performed first (with <code>r</code> frozen to the <code>--expectSignal</code> value) and then any subsequent Asimov datasets or toys are generated using the post-fit values of the model parameters. In general this will result in a different value for the expected significance due to changes in the background normalisation and shape induced by the fit to data:</p> <pre><code>combine -M Significance workspace_part3.root -m 200 --rMin -1 --rMax 5 -t -1 --expectSignal 1.5 --toysFrequentist\n</code></pre> <p>Tasks and questions:</p> <ul> <li>Note how much the expected significance changes with the --toysFrequentist option. Does the change make sense given the difference in the post-fit and pre-fit distributions you looked at in the previous section?</li> <li>Advanced task It is also possible to calculate the significance using toys with <code>HybridNew</code> (details here) if we are in a situation where the asymptotic approximation is not reliable or if we just want to verify the result. Why might this be challenging for a high significance, say larger than \\(5\\sigma\\)?</li> </ul>"},{"location":"part5/longexercise/#e-signal-strength-measurement-and-uncertainty-breakdown","title":"E: Signal strength measurement and uncertainty breakdown","text":"<p>We have seen that with <code>FitDiagnostics</code> we can make a measurement of the best-fit signal strength and uncertainty. In the asymptotic approximation we find an interval at the \\(\\alpha\\) CL around the best fit by identifying the parameter values at which our test statistic \\(q=\u22122\\Delta \\ln L\\) equals a critical value. This value is the \\(\\alpha\\) quantile of the \\(\\chi^2\\) distribution with one degree of freedom. In the expression for q we calculate the difference in the profile likelihood between some fixed point and the best-fit.</p> <p>Depending on what we want to do with the measurement, e.g. whether it will be published in a journal, we may want to choose a more precise method for finding these intervals. 
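As a quick cross-check of the critical values mentioned above, you can evaluate the \\(\\chi^2\\) quantiles directly in an interactive ROOT session (the values in the comments are approximate): <pre><code>root [0] TMath::ChisquareQuantile(0.683, 1)  // ~1.00, critical value of q for a 68.3% (1 sigma) interval\nroot [1] TMath::ChisquareQuantile(0.95, 1)   // ~3.84, critical value of q for a 95% interval\n</code></pre> 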
There are a number of ways that parameter uncertainties are estimated in combine, and some are more precise than others:</p> <ul> <li>Covariance matrix: calculated by the Minuit HESSE routine, this gives a symmetric uncertainty by definition and is only accurate when the profile likelihood for this parameter is symmetric and parabolic.</li> <li>Minos error: calculated by the Minuit MINOS routine - this performs a search for the upper and lower values of the parameter that give the critical value of \\(q\\) for the desired CL, and returns an asymmetric interval. This is what <code>FitDiagnostics</code> does by default, but only for the parameter of interest. Usually accurate, but prone to fail on more complex models, and the tolerance for terminating the search is not easy to control.</li> <li>RobustFit error: a custom implementation in combine similar to Minos that returns an asymmetric interval, but with more control over the precision. Enabled by adding <code>--robustFit 1</code> when running <code>FitDiagnostics</code>.</li> <li>Explicit scan of the profile likelihood on a chosen grid of parameter values. Interpolation between the points is used to find the parameter values corresponding to the critical value of \\(q\\) for the desired CL. It is a good idea to use this for important measurements since we can see by eye that there are no unexpected features in the shape of the likelihood curve.</li> </ul> <p>In this section we will look at the last approach, using the <code>MultiDimFit</code> mode of combine. By default this mode just performs a single fit to the data:</p> <pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E -m 200 --rMin -1 --rMax 2\n</code></pre> <p>You should see the best-fit value of the signal strength reported and nothing else. By adding the <code>--algo X</code> option combine will run an additional algorithm after this best fit. Here we will use <code>--algo grid</code>, which performs a scan of the likelihood with <code>r</code> fixed to a set of different values. 
The set of points will be equally spaced between the <code>--rMin</code> and <code>--rMax</code> values, and the number of points is controlled with <code>--points N</code>:</p> <pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E -m 200 --rMin -1 --rMax 2 --algo grid --points 30\n</code></pre> <p>The results of the scan are written into the output file, if opened interactively should see:</p> Show output <pre><code>root [1] limit-&gt;Scan(\"r:deltaNLL\")\n************************************\n*    Row   *         r *  deltaNLL *\n************************************\n*        0 * 0.5399457 *         0 *\n*        1 * -0.949999 * 5.6350698 *\n*        2 * -0.850000 * 4.9482779 *\n*        3 *     -0.75 * 4.2942519 *\n*        4 * -0.649999 * 3.6765284 *\n*        5 * -0.550000 * 3.0985388 *\n*        6 * -0.449999 * 2.5635135 *\n*        7 * -0.349999 * 2.0743820 *\n*        8 *     -0.25 * 1.6337506 *\n*        9 * -0.150000 * 1.2438088 *\n*       10 * -0.050000 * 0.9059833 *\n*       11 * 0.0500000 * 0.6215767 *\n*       12 * 0.1500000 * 0.3910581 *\n*       13 *      0.25 * 0.2144184 *\n*       14 * 0.3499999 * 0.0911308 *\n*       15 * 0.4499999 * 0.0201983 *\n*       16 * 0.5500000 * 0.0002447 *\n*       17 * 0.6499999 * 0.0294311 *\n*       18 *      0.75 * 0.1058298 *\n*       19 * 0.8500000 * 0.2272539 *\n*       20 * 0.9499999 * 0.3912534 *\n*       21 * 1.0499999 * 0.5952836 *\n*       22 * 1.1499999 * 0.8371513 *\n*       23 *      1.25 * 1.1142146 *\n*       24 * 1.3500000 * 1.4240909 *\n*       25 * 1.4500000 * 1.7644306 *\n*       26 * 1.5499999 * 2.1329684 *\n*       27 * 1.6499999 * 2.5273966 *\n*       28 *      1.75 * 2.9458723 *\n*       29 * 1.8500000 * 3.3863399 *\n*       30 * 1.9500000 * 3.8469560 *\n************************************\n</code></pre> <p>To turn this into a plot run: <pre><code>python plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root -o single_scan\n</code></pre> This script will also perform a spline interpolation of the points to give accurate values for the uncertainties.</p> <p>In the next step we will split this total uncertainty into two components. It is typical to separate the contribution from statistics and systematics, and sometimes even split the systematic part into different components. This gives us an idea of which aspects of the uncertainty dominate. The statistical component is usually defined as the uncertainty we would have if all the systematic uncertainties went to zero. We can emulate this effect by freezing all the nuisance parameters when we do the scan in <code>r</code>, such that they do not vary in the fit. This is achieved by adding the <code>--freezeParameters allConstrainedNuisances</code> option. It would also work if the parameters are specified explicitly, e.g. <code>--freezeParameters CMS_eff_t,lumi_13TeV,...,</code> but the <code>allConstrainedNuisances</code> option is more concise. 
Run the scan again with the systematics frozen, and use the plotting script to overlay this curve with the previous one:</p> <pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E.freezeAll -m 200 --rMin -1 --rMax 2 --algo grid --points 30 --freezeParameters allConstrainedNuisances\npython plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root --others 'higgsCombine.part3E.freezeAll.MultiDimFit.mH200.root:FreezeAll:2' -o freeze_first_attempt\n</code></pre> <p></p> <p>This doesn't look quite right - the best-fit has been shifted because unfortunately the <code>--freezeParameters</code> option acts before the initial fit, whereas we only want to add it for the scan after this fit. To remedy this we can use a feature of Combine that lets us save a \"snapshot\" of the best-fit parameter values, and reuse this snapshot in subsequent fits. First we perform a single fit, adding the <code>--saveWorkspace</code> option:</p> <p><pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E.snapshot -m 200 --rMin -1 --rMax 2 --saveWorkspace\n</code></pre> The output file will now contain a copy of our workspace from the input, and this copy will contain a snapshot of the best-fit parameter values. We can now run the frozen scan again, but instead using this copy of the workspace as input, and restoring the snapshot that was saved:</p> <pre><code>combine -M MultiDimFit higgsCombine.part3E.snapshot.MultiDimFit.mH200.root -n .part3E.freezeAll -m 200 --rMin -1 --rMax 2 --algo grid --points 30 --freezeParameters allConstrainedNuisances --snapshotName MultiDimFit\npython plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root --others 'higgsCombine.part3E.freezeAll.MultiDimFit.mH200.root:FreezeAll:2' -o freeze_second_attempt --breakdown Syst,Stat\n</code></pre> <p>Now the plot should look correct:</p> <p></p> <p>We added the <code>--breakdown Syst,Stat</code> option to the plotting script to make it calculate the systematic component, which is defined simply as \\(\\sigma_{\\text{syst}} = \\sqrt{\\sigma^2_{\\text{tot}} - \\sigma^2_{\\text{stat}}}\\). To split the systematic uncertainty into different components we just need to run another scan with a subset of the systematics frozen. For example, say we want to split this into experimental and theoretical uncertainties, we would calculate the uncertainties as:</p> <p>\\(\\sigma_{\\text{theory}} = \\sqrt{\\sigma^2_{\\text{tot}} - \\sigma^2_{\\text{fr.theory}}}\\)</p> <p>\\(\\sigma_{\\text{expt}} = \\sqrt{\\sigma^2_{\\text{fr.theory}} - \\sigma^2_{\\text{fr.theory+expt}}}\\)</p> <p>\\(\\sigma_{\\text{stat}} = \\sigma_{\\text{fr.theory+expt}}\\)</p> <p>where fr.=freeze.</p> <p>While it is perfectly fine to just list the relevant nuisance parameters in the <code>--freezeParameters</code> argument for the \\(\\sigma_{\\text{fr.theory}}\\) scan, a convenient way can be to define a named group of parameters in the text datacard and then freeze all parameters in this group with <code>--freezeNuisanceGroups</code>. The syntax for defining a group is:</p> <pre><code>[group name] group = uncertainty_1 uncertainty_2 ... 
uncertainty_N\n</code></pre> <p>Tasks and questions:</p> <ul> <li>Take our stat+syst split one step further and separate the systematic part into two: one part for hadronic tau uncertainties and one for all others.</li> <li>Do this by defining a <code>tauID</code> group in the datacard including the following parameters: <code>CMS_eff_t</code>, <code>CMS_eff_t_highpt</code>, and the three <code>CMS_scale_t_X</code> uncertainties.</li> <li>To plot this and calculate the split via the relations above you can just add further arguments to the <code>--others</code> option in the <code>plot1DScan.py</code> script. Each is of the form: <code>'[file]:[label]:[color]'</code>. The <code>--breakdown</code> argument should also be extended to three terms.</li> <li>How important are these tau-related uncertainties compared to the others?</li> </ul>"},{"location":"part5/longexercise/#f-use-of-channel-masking","title":"F: Use of channel masking","text":"<p>We will now return briefly to the topic of blinding. We've seen that we can compute expected results by performing any Combine method on an Asimov dataset generated using <code>-t -1</code>. This is useful, because we can optimise our analysis without introducing any accidental bias that might come from looking at the data in the signal region. However our control regions have been chosen specifically to be signal-free, and it would be useful to use the data here to set the normalisation of our backgrounds even while the signal region remains blinded. Unfortunately there's no easy way to generate a partial Asimov dataset just for the signal region, but instead we can use a feature called \"channel masking\" to remove specific channels from the likelihood evaluation. One useful application of this feature is to make post-fit plots of the signal region from a control-region-only fit.</p> <p>To use the masking we first need to rerun <code>text2workspace.py</code> with an extra option that will create variables named like <code>mask_[channel]</code> in the workspace:</p> <pre><code>text2workspace.py part3_combined.txt -m 200 -o workspace_part3_with_masks.root --channel-masks\n</code></pre> <p>These parameters have a default value of 0 which means the channel is not masked. By setting it to 1 the channel is masked from the likelihood evaluation. Task: Run the same <code>FitDiagnostics</code> command as before to save the post-fit shapes, but add an option <code>--setParameters mask_signal_region=1</code>. Note that the s+b fit will probably fail in this case, since we are no longer fitting a channel that contains signal, however the b-only fit should work fine. Task: Compare the expected background distribution and uncertainty to the pre-fit, and to the background distribution from the full fit you made before.</p>"},{"location":"part5/longexercise/#part-4-physics-models","title":"Part 4: Physics models","text":"<p>Topics covered in this section:</p> <ul> <li>A: Writing a simple physics model</li> <li>B: Performing and plotting 2D likelihood scans</li> </ul> <p>With Combine we are not limited to parametrising the signal with a single scaling parameter <code>r</code>. In fact we can define any arbitrary scaling using whatever functions and parameters we would like. For example, when measuring the couplings of the Higgs boson to the different SM particles we would introduce a POI for each coupling parameter, for example \\(\\kappa_{\\text{W}}\\), \\(\\kappa_{\\text{Z}}\\), \\(\\kappa_{\\tau}\\) etc. 
We would then generate scaling terms for each \\(i\\rightarrow \\text{H}\\rightarrow j\\) process in terms of how the cross section (\\(\\sigma_i(\\kappa)\\)) and branching ratio (\\(\\frac{\\Gamma_i(\\kappa)}{\\Gamma_{\\text{tot}}(\\kappa)}\\)) scale relative to the SM prediction.</p> <p>This parametrisation of the signal (and possibly backgrounds too) is specified in a physics model. This is a python class that is used by <code>text2workspace.py</code> to construct the model in terms of RooFit objects. There is documentation on using phyiscs models here.</p>"},{"location":"part5/longexercise/#a-writing-a-simple-physics-model","title":"A: Writing a simple physics model","text":"<p>An example physics model that just implements a single parameter <code>r</code> is given in <code>DASModel.py</code>:</p> Show DASModel.py <pre><code>from HiggsAnalysis.CombinedLimit.PhysicsModel import PhysicsModel\n\n\nclass DASModel(PhysicsModel):\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"r[0,0,10]\")\n        self.modelBuilder.doSet(\"POI\", \",\".join([\"r\"]))\n\n    def getYieldScale(self, bin, process):\n        \"Return the name of a RooAbsReal to scale this yield by or the two special values 1 and 0 (don't scale, and set to zero)\"\n        if self.DC.isSignal[process]:\n            print(\"Scaling %s/%s by r\" % (bin, process))\n            return \"r\"\n        return 1\n\n\ndasModel = DASModel()\n</code></pre> <p>In this we override two methods of the basic <code>PhysicsModel</code> class: <code>doParametersOfInterest</code> and <code>getYieldScale</code>. In the first we define our POI variables, using the doVar function which accepts the RooWorkspace factory syntax for creating variables, and then define all our POIs in a set via the doSet function. The second function will be called for every process in every channel (bin), and using the corresponding strings we have to specify how that process should be scaled. Here we check if the process was declared as signal in the datacard, and if so scale it by <code>r</code>, otherwise if it is a background no scaling is applied (<code>1</code>). To use the physics model with <code>text2workspace.py</code> first copy it to the python directory in the Combine package: <pre><code>cp DASModel.py $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/python/\n</code></pre></p> <p>In this section we will use the full datacards from the MSSM analysis. Have a look in <code>part4/200/combined.txt</code>. You will notice that there are now two signal processes declared: <code>ggH</code> and <code>bbH</code>. In the MSSM these cross sections can vary independently depending on the exact parameters of the model, so it is useful to be able to measure them independently too. 
First run <code>text2workspace.py</code> as follows, adding the <code>-P</code> option to specify the physics model, then verify the result of the fit:</p> <pre><code>text2workspace.py part4/200/combined.txt -P HiggsAnalysis.CombinedLimit.DASModel:dasModel -m 200 -o workspace_part4.root\ncombine -M MultiDimFit workspace_part4.root -n .part4A -m 200 --rMin 0 --rMax 2\n</code></pre> <p>Tasks and questions:</p> <ul> <li>Modify the physics model to scale the ggH and bbH processes by <code>r_ggH</code> and <code>r_bbH</code> separately.</li> <li>Then rerun the <code>MultiDimFit</code> command - you should see the result for both signal strengths printed.</li> </ul>"},{"location":"part5/longexercise/#b-performing-and-plotting-2d-likelihood-scans","title":"B: Performing and plotting 2D likelihood scans","text":"<p>For a model with two POIs it is often useful to look at how well we are able to measure both simultaneously. A natural extension of determining 1D confidence intervals on a single parameter like we did in part 3D is to determine confidence level regions in 2D. To do this we also use combine in a similar way, with <code>-M MultiDimFit --algo grid</code>. When two POIs are found, Combine will scan a 2D grid of points instead of a 1D array.</p> <p>Tasks and questions:</p> <ul> <li>Run a 2D likelihood scan in <code>r_ggH</code> and <code>r_bbH</code>. You can start with around 100 points but may need to increase this later to see more detail in the resulting plot.</li> <li>Have a look at the output limit tree; it should have branches for each POI as well as the usual deltaNLL value. You can use TTree::Draw to plot a 2D histogram of deltaNLL with <code>r_ggH</code> and <code>r_bbH</code> on the axes.</li> </ul>"},{"location":"part5/longexerciseanswers/","title":"Answers to tasks and questions","text":""},{"location":"part5/longexerciseanswers/#part-1-a-one-bin-counting-experiment","title":"Part 1: A one-bin counting experiment","text":""},{"location":"part5/longexerciseanswers/#a-computing-limits-using-the-asymptotic-approximation","title":"A: Computing limits using the asymptotic approximation","text":"<p>Tasks and questions:</p> <ul> <li>There are some important uncertainties missing from the datacard above. Add the uncertainty on the luminosity (name: <code>lumi_13TeV</code>) which has a 2.5% effect on all processes (except the <code>jetFakes</code>, which are taken from data), and uncertainties on the inclusive cross sections of the <code>Ztautau</code> and <code>ttbar</code> processes (with names <code>xsec_Ztautau</code> and <code>xsec_ttbar</code>) which are 4% and 6% respectively.</li> <li>Try changing the values of some uncertainties (up or down, or removing them altogether) - how do the expected and observed limits change?</li> </ul> Show answer  Larger uncertainties make the limits worse (i.e., higher values of the limit); smaller uncertainties improve the limit (lower values of the limit).  <ul> <li>Now try changing the number of observed events. The observed limit will naturally change, but the expected does too - why might this be?</li> </ul> Show answer  This is because the expected limit relies on a background-only Asimov dataset that is created  after  a background-only fit to the data. 
By changing the observed the pulls on the NPs in this fit also change, and therefore so does the expected sensitivity."},{"location":"part5/longexerciseanswers/#advanced-section-b-computing-limits-with-toys","title":"Advanced section: B: Computing limits with toys","text":"<p>Tasks and questions:</p> <ul> <li>In contrast to <code>AsymptoticLimits</code>, with <code>HybridNew</code> each limit comes with an uncertainty. What is the origin of this uncertainty?</li> </ul> Show answer  The uncertainty is caused by the limited number of toys: the values of Pmu and Pb come from counting the number of toys in the tails of the test statistic distributions. The number of toys used can be adjusted with the option <code> --toysH </code> <ul> <li>How good is the agreement between the asymptotic and toy-based methods?</li> </ul> Show answer  The agreement should be pretty good in this example, but will generally break down once we get to the level of 0-5 events.  <ul> <li>Why does it take longer to calculate the lower expected quantiles (e.g. 0.025, 0.16)? Think about how the statistical uncertainty on the CLs value depends on Pmu and Pb.</li> </ul> Show answer  For this we need the definition of CLs = Pmu / (1-Pb). The 0.025 expected quantile is by definition where Pb = 0.025, so for a 95% CL limit we have CLs = 0.05, implying we are looking for the value of r where Pmu = 0.00125. With 1000 s+b toys we would then only expect `1000 * 0.00125 = 1.25` toys in the tail region we have to integrate over. Contrast this to the median limit where 25 toys would be in this region. This means we have to generate a much larger numbers of toys to get the same statistical power."},{"location":"part5/longexerciseanswers/#advanced-section-b-asymptotic-approximation-limitations","title":"Advanced section: B: Asymptotic approximation limitations","text":"<p>Tasks and questions:</p> <ul> <li>Is the asymptotic limit still a good approximation?</li> </ul> Show answer  A \"good\" approximation is not well defined, but the difference is clearly larger here.  <ul> <li>You might notice that the test statistic distributions are not smooth but rather have several \"bump\" structures? Where might this come from? Try reducing the size of the systematic uncertainties to make them more pronounced.</li> </ul> Show answer  This bump structure comes from the discrete-ness of the Poisson sampling of the toy datasets. Systematic uncertainties then smear these bumps out, but without systematics we would see delta functions corresponding to the possible integer number of events that could be observed. Once we go to more typical multi-bin analyses with more events and systematic uncertainties these discrete-ness washes out very quickly."},{"location":"part5/longexerciseanswers/#part-2-a-shape-based-analysis","title":"Part 2: A shape-based analysis","text":""},{"location":"part5/longexerciseanswers/#a-setting-up-the-datacard","title":"A: Setting up the datacard","text":"<p>Only tasks, no questions in this section</p>"},{"location":"part5/longexerciseanswers/#b-running-combine-for-a-blind-analysis","title":"B: Running combine for a blind analysis","text":"<p>Tasks and questions:</p> <ul> <li>Compare the expected limits calculated with --run expected and --run blind. Why are they different?</li> </ul> Show answer  When using --run blind combine will create a background-only Asimov dataset without performing a fit to data first. 
With --run expected, the observed limit isn't shown, but the background-only Asimov dataset used for the limit calculation is still created after a background-only fit to the data.  <ul> <li>Calculate a blind limit by generating a background-only Asimov with the -t option instead of using the AsymptoticLimits specific options. You should find the observed limit is the same as the expected. Then see what happens if you inject a signal into the Asimov dataset using the --expectSignal [X] option.</li> </ul> Show answer  You should see that with a signal injected the observed limit is worse (has a higher value) than the expected limit: for the expected limit the b-only Asimov dataset is still used, but the observed limit is now calculated on the signal + background Asimov dataset, with a signal at the specified cross section [X]."},{"location":"part5/longexerciseanswers/#c-using-fitdiagnostics","title":"C: Using FitDiagnostics","text":"<p>Tasks and questions:</p> <ul> <li>Which parameter has the largest shift from the nominal value? Which has the tightest constraint?</li> </ul> Show answer <code> CMS_eff_t_highpt </code> should have the largest shift from the nominal value (around 0.47), <code> norm_jetFakes </code> has the tightest constraint (to 25% of the input uncertainty).  <ul> <li>Should we be concerned when a parameter is more strongly constrained than the input uncertainty (i.e. \\(\\frac{\\sigma}{\\sigma_I}&lt;1.0\\))?</li> </ul> Show answer  This is still a hot topic in CMS analyses today, and there isn't a right or wrong answer. Essentially we have to judge if our analysis should really be able to provide more information about this parameter than the external measurement that gave us the input uncertainty. So we would not expect to be able to constrain the luminosity uncertainty for example, but uncertainties specific to the analysis might legitimately be constrained."},{"location":"part5/longexerciseanswers/#d-mc-statistical-uncertainties","title":"D: MC statistical uncertainties","text":"<p>Tasks and questions:</p> <ul> <li>Check how much the cross section measurement and uncertainties change using <code>FitDiagnostics</code>.</li> </ul> Show answer  Without autoMCStats we find: <code> Best fit r: -2.73273  -2.13428/+3.38185</code>, with autoMCStats: <code> Best fit r: -3.07825  -3.17742/+3.7087 </code> <ul> <li>It is also useful to check how the expected uncertainty changes using an Asimov dataset, say with <code>r=10</code> injected.</li> </ul> Show answer  Without autoMCStats we find: <code> Best fit r: 9.99978  -4.85341/+6.56233 </code>, with autoMCStats: <code> Best fit r: 9.99985  -5.24634/+6.98266 </code> <ul> <li>Advanced task: See what happens if the Poisson threshold is increased. Based on your results, what threshold would you recommend for this analysis?</li> </ul> Show answer  At first the uncertainties increase, as the threshold increases, and at some point they stabilise. A Poisson threshold at 10 is probably reasonable for this analysis."},{"location":"part5/longexerciseanswers/#part-3-adding-control-regions","title":"Part 3: Adding control regions","text":""},{"location":"part5/longexerciseanswers/#a-use-of-rateparams","title":"A: Use of rateParams","text":"<p>Tasks and questions:</p> <ul> <li>Run <code>text2workspace.py</code> on this combined card and then use <code>FitDiagnostics</code> on an Asimov dataset with <code>r=1</code> to get the expected uncertainty. 
Suggested command line options: <code>--rMin 0 --rMax 2</code></li> </ul> Show answer  As expected uncertainty you should get <code> -0.417238/+0.450593 </code> <ul> <li>Using the RooFitResult in the <code>fitDiagnosticsTest.root</code> file, check the post-fit value of the rateParams. To what level are the normalisations of the DY and ttbar processes constrained?</li> </ul> Show answer  They are constrained to around 1-2%  <ul> <li>To compare to the previous approach of fitting the SR only, with cross section and acceptance uncertainties restored, an additional card is provided: <code>datacard_part3_nocrs.txt</code>. Run the same fit on this card to verify the improvement of the SR+CR approach</li> </ul> Show answer  The expected uncertainty is larger with only the SR: <code> -0.465799/+0.502088 </code> compared with <code> -0.417238/+0.450593 </code> in the SR+CR approach."},{"location":"part5/longexerciseanswers/#b-nuisance-parameter-impacts","title":"B: Nuisance parameter impacts","text":"<p>Tasks and questions:</p> <ul> <li>Identify the most important uncertainties using the impacts tool.</li> </ul> Show answer  The most important uncertainty is <code>norm_jetFakes</code>, followed by two MC statistical uncerainties (<code>prop_binsignal_region_bin8</code> and <code>prop_binsignal_region_bin9</code>). <ul> <li>In the plot, some parameters do not show a plotted point for the fitted value, but rather just a numerical value - why?</li> </ul> Show answer  These are freely floating parameters (<code> rate_ttbar </code> and <code> rate_Zll </code>). They have no prior constraint (and so no shift from the nominal value relative to the input uncertainty) - we show the best-fit value + uncertainty directly."},{"location":"part5/longexerciseanswers/#c-post-fit-distributions","title":"C: Post-fit distributions","text":"<p>Tasks and questions:</p> <p>The bin errors on the TH1s in the fitdiagnostics file are determined from the systematic uncertainties. In the post-fit these take into account the additional constraints on the nuisance parameters as well as any correlations.</p> <ul> <li>Why is the uncertainty on the post-fit so much smaller than on the pre-fit?</li> </ul> Show answer  There are two effects at play here: the nuisance parameters get constrained, and there are anti-correlations between the parameters which also have the effect of reducing the total uncertainty. Note: the post-fit uncertainty could become larger when rateParams are present as they are not taken into account in the pre-fit uncertainty but do enter in the post-fit uncertainty."},{"location":"part5/longexerciseanswers/#d-calculating-the-significance","title":"D: Calculating the significance","text":"<p>Tasks and questions:</p> <ul> <li>Advanced task It is also possible to calculate the significance using toys with <code>HybridNew</code> (details here) if we are in a situation where the asymptotic approximation is not reliable or if we just want to verify the result. 
Why might this be challenging for a high significance, say larger than \\(5\\sigma\\)?</li> </ul> Show answer  A significance of $5\\sigma$ corresponds to a p-value of around $3\\cdot 10^{-7}$ - so we need to populate the very tail of the test statistic distribution and this requires generating a large number of toys."},{"location":"part5/longexerciseanswers/#e-signal-strength-measurement-and-uncertainty-breakdown","title":"E: Signal strength measurement and uncertainty breakdown","text":"<p>** Tasks and questions: **</p> <ul> <li>Take our stat+syst split one step further and separate the systematic part into two: one part for hadronic tau uncertainties and one for all others. Do this by defining a <code>tauID</code> group in the datacard including the following parameters: <code>CMS_eff_t</code>, <code>CMS_eff_t_highpt</code>, and the three <code>CMS_scale_t_X</code> uncertainties.</li> </ul> Show datacard line  You should add this line to the end of the datacard: <pre><code>tauID group = CMS_eff_t CMS_eff_t_highpt CMS_scale_t_1prong0pi0_13TeV CMS_scale_t_1prong1pi0_13TeV CMS_scale_t_3prong0pi0_13TeV\n</code></pre> <ul> <li>To plot this and calculate the split via the relations above you can just add further arguments to the <code>--others</code> option in the <code>plot1DScan.py</code> script. Each is of the form: <code>'[file]:[label]:[color]'</code>. The <code>--breakdown</code> argument should also be extended to three terms.</li> </ul> Show code  This can be done as: <pre><code>python plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root --others 'higgsCombine.part3E.freezeTauID.MultiDimFit.mH200.root:FreezeTauID:4' 'higgsCombine.part3E.freezeAll.MultiDimFit.mH200.root:FreezeAll:2' -o freeze_third_attempt --breakdown TauID,OtherSyst,Stat\n</code></pre> <ul> <li>How important are these tau-related uncertainties compared to the others?</li> </ul> Show answer  They are smaller than both the statistical uncertainty and the remaining systematic uncertainties"},{"location":"part5/roofit/","title":"RooFit Basics","text":"<p><code>RooFit</code> is a OO analysis environment built on <code>ROOT</code>. It has a collection of classes designed to augment root for data modeling.</p> <p>This section covers a few of the basics of <code>RooFit</code>. There are many more tutorials available at this link: https://root.cern.ch/root/html600/tutorials/roofit/index.html</p>"},{"location":"part5/roofit/#objects","title":"Objects","text":"<p>In <code>RooFit</code>, any variable, data point, function, PDF (etc.) is represented by a c++ object The most basic of these is the <code>RooRealVar</code>. We will create one that will represent the mass of some hypothetical particle, we name it and give it an initial starting value and range.</p> <p><pre><code>RooRealVar MH(\"MH\",\"mass of the Hypothetical Boson (H-boson) in GeV\",125,120,130);\nMH.Print();\n</code></pre> <pre><code>RooRealVar::MH = 125  L(120 - 130)\n</code></pre></p> <p>Ok, great. This variable is now an object we can play around with. We can access this object and modify its properties, such as its value. </p> <pre><code>MH.setVal(130);\nMH.getVal();\n</code></pre> <p>In particle detectors we typically do not observe this particle mass, but usually define some observable which is sensitive to this mass. We will assume we can detect and reconstruct the decay products of the H-boson and measure the invariant mass of those particles. 
We need to make another variable that represents that invariant mass.</p> <pre><code>RooRealVar mass(\"m\",\"m (GeV)\",100,80,200);\n</code></pre> <p>In the perfect world we would perfectly measure the exact mass of the particle in every single event. However, our detectors are usually far from perfect so there will be some resolution effect. We will assume the resolution of our measurement of the invariant mass is 10 GeV and call it \"sigma\"</p> <pre><code>RooRealVar sigma(\"resolution\",\"#sigma\",10,0,20);\n</code></pre> <p>More exotic variables can be constructed out of these <code>RooRealVar</code>s using <code>RooFormulaVars</code>. For example, suppose we wanted to make a function out of the variables that represented the relative resolution as a function of the hypothetical mass MH. </p> <pre><code>RooFormulaVar func(\"R\",\"@0/@1\",RooArgList(sigma,mass));\nfunc.Print(\"v\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes: \n  Address: 0x10e878068\n  Clients: \n  Servers: \n    (0x10dcd47b0,V-) RooRealVar::resolution \"#sigma\"\n    (0x10dcd4278,V-) RooRealVar::m \"m (GeV)\"\n  Proxies: \n    actualVars -&gt; \n      1)  resolution\n      2)           m\n--- RooAbsReal ---\n\n  Plot label is \"R\"\n    --- RooFormula ---\n    Formula: \"@0/@1\"\n    (resolution,m)\n</code></pre> <p>Notice how there is a list of the variables we passed (the servers or \"actual vars\"). We can now plot the function. <code>RooFit</code> has a special plotting object <code>RooPlot</code> which keeps track of the objects (and their normalisations) that we want to draw. Since <code>RooFit</code> does not know the difference between objects that are and are not dependent, we need to tell it. </p> <p>Right now, we have the relative resolution as \\(R(m,\\sigma)\\), whereas we want to plot  \\(R(m,\\sigma(m))\\)!</p> <p><pre><code>TCanvas *can = new TCanvas();\n\n//make the x-axis the \"mass\"\nRooPlot *plot = mass.frame(); \nfunc.plotOn(plot);\n\nplot-&gt;Draw();\ncan-&gt;Draw();\n</code></pre> </p> <p>The main objects we are interested in using from <code>RooFit</code> are probability denisty functions or (PDFs). We can construct the PDF,</p> \\[ f(m|M_{H},\\sigma) \\] <p>as a simple Gaussian shape for example or a <code>RooGaussian</code> in <code>RooFit</code> language (think McDonald's logic, everything is a <code>RooSomethingOrOther</code>)</p> <pre><code>RooGaussian gauss(\"gauss\",\"f(m|M_{H},#sigma)\",mass,MH,sigma);\ngauss.Print(\"V\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes: \n  Address: 0x10ecf4188\n  Clients: \n  Servers: \n    (0x10dcd4278,V-) RooRealVar::m \"m (GeV)\"\n    (0x10a08a9d8,V-) RooRealVar::MH \"mass of the Hypothetical Boson (H-boson) in GeV\"\n    (0x10dcd47b0,V-) RooRealVar::resolution \"#sigma\"\n  Proxies: \n    x -&gt; m\n    mean -&gt; MH\n    sigma -&gt; resolution\n--- RooAbsReal ---\n\n  Plot label is \"gauss\"\n--- RooAbsPdf ---\nCached value = 0\n</code></pre> <p>Notice how the gaussian PDF, like the <code>RooFormulaVar</code> depends on our <code>RooRealVar</code> objects, these are its servers.  Its evaluation will depend on their values. </p> <p>The main difference between PDFs and Functions in RooFit is that PDFs are automatically normalised to unitiy, hence they represent a probability density, you don't need to normalise yourself. 
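<p>As a quick cross-check of this automatic normalisation, the following small sketch (using the PyROOT bindings and mirroring the C++ objects defined above) integrates the normalised Gaussian over its observable:</p> <pre><code>import ROOT\n\n# Recreate the same variables and PDF via PyROOT\nmass = ROOT.RooRealVar(\"m\", \"m (GeV)\", 100, 80, 200)\nMH = ROOT.RooRealVar(\"MH\", \"mass of the Hypothetical Boson (H-boson) in GeV\", 125, 120, 130)\nsigma = ROOT.RooRealVar(\"resolution\", \"#sigma\", 10, 0, 20)\ngauss = ROOT.RooGaussian(\"gauss\", \"f(m|M_{H},#sigma)\", mass, MH, sigma)\n\n# Integrate the PDF over the observable, using the same set for the normalisation\nnorm_set = ROOT.RooArgSet(mass)\nintegral = gauss.createIntegral(norm_set, ROOT.RooFit.NormSet(norm_set))\nprint(integral.getVal())  # prints ~1.0, whatever the values of MH and sigma\n</code></pre>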
Lets plot it for the different values of \\(m\\).</p> <pre><code>plot = mass.frame();\n\ngauss.plotOn(plot);\n\nMH.setVal(120);\ngauss.plotOn(plot,RooFit::LineColor(kBlue));\n\nMH.setVal(125);\ngauss.plotOn(plot,RooFit::LineColor(kRed));\n\nMH.setVal(135);\ngauss.plotOn(plot,RooFit::LineColor(kGreen));\n\nplot-&gt;Draw();\n\ncan-&gt;Update();\ncan-&gt;Draw();\n</code></pre> <p></p> <p>Note that as we change the value of <code>MH</code>, the PDF gets updated at the same time.</p> <p>PDFs can be used to generate Monte Carlo data. One of the benefits of <code>RooFit</code> is that to do so only uses a single line of code! As before, we have to tell <code>RooFit</code> which variables to generate in (e.g which are the observables for an experiment). In this case, each of our events will be a single value of \"mass\" \\(m\\). The arguments for the function are the set of observables, follwed by the number of events,</p> <pre><code>RooDataSet *gen_data = (RooDataSet*) gauss.generate(RooArgSet(mass),500); \n</code></pre> <p>Now we can plot the data as with other RooFit objects.</p> <pre><code>plot = mass.frame();\n\ngen_data-&gt;plotOn(plot);\ngauss.plotOn(plot);\ngauss.paramOn(plot);\n\nplot-&gt;Draw();\ncan-&gt;Update();\ncan-&gt;Draw();\n</code></pre> <p></p> <p>Of course we are not in the business of generating MC events, but collecting real data!. Next we will look at using real data in <code>RooFit</code>.</p>"},{"location":"part5/roofit/#datasets","title":"Datasets","text":"<p>A dataset is essentially just a collection of points in N-dimensional (N-observables) space. There are two basic implementations in <code>RooFit</code>, </p> <p>1) an \"unbinned\" dataset - <code>RooDataSet</code></p> <p>2) a \"binned\" dataset - <code>RooDataHist</code></p> <p>both of these use the same basic structure as below</p> <p></p> <p>We will create an empty dataset where the only observable is the mass. Points can be added to the dataset one by one ...</p> <pre><code>RooDataSet mydata(\"dummy\",\"My dummy dataset\",RooArgSet(mass)); \n// We've made a dataset with one observable (mass)\n\nmass.setVal(123.4);\nmydata.add(RooArgSet(mass));\nmass.setVal(145.2);\nmydata.add(RooArgSet(mass));\nmass.setVal(170.8);\nmydata.add(RooArgSet(mass));\n\nmydata.Print();\n</code></pre> <pre><code>RooDataSet::dummy[m] = 3 entries\n</code></pre> <p>There are also other ways to manipulate datasets in this way as shown in the diagram below </p> <p></p> <p>Luckily there are also Constructors for a <code>RooDataSet</code> from a <code>TTree</code> and for a <code>RooDataHist</code> from a <code>TH1</code> so its simple to convert from your usual ROOT objects.</p> <p>We will take an example dataset put together already. The file <code>tutorial.root</code> can be downloaded here.</p> <pre><code>TFile *file = TFile::Open(\"tutorial.root\");\nfile-&gt;ls();\n</code></pre> Show file contents <pre><code>TFile**     tutorial.root\n TFile*     tutorial.root\n  KEY: RooWorkspace workspace;1 Tutorial Workspace\n  KEY: TProcessID   ProcessID0;1    48737500-e7e5-11e6-be6f-0d0011acbeef\n</code></pre> <p>Inside the file, there is something called a <code>RooWorkspace</code>. This is just the <code>RooFit</code> way of keeping a persistent link between the objects for a model. It is a very useful way to share data and PDFs/functions etc among CMS collaborators.</p> <p>We will now take a look at it. It contains a <code>RooDataSet</code> and one variable. 
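<p>Before we look at its contents in detail, note that the <code>TTree</code> and <code>TH1</code> constructors mentioned above are just as easy to use. For example, a binned dataset can be built from a histogram with a short sketch like the following (PyROOT, with a hypothetical histogram filled with the same three example values used earlier, just for illustration):</p> <pre><code>import ROOT\n\n# Observable and a small toy histogram standing in for your own TH1\nmass = ROOT.RooRealVar(\"m\", \"m (GeV)\", 100, 80, 200)\nhist = ROOT.TH1F(\"h_mass\", \"h_mass\", 60, 80, 200)\nfor val in (123.4, 145.2, 170.8):\n    hist.Fill(val)\n\n# A single constructor call turns the TH1 into a binned RooDataHist\ndhist = ROOT.RooDataHist(\"dhist\", \"binned dataset from a TH1\", ROOT.RooArgList(mass), hist)\ndhist.Print()\n</code></pre>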
This time we called our variable (or observable) <code>CMS_hgg_mass</code>, we will assume that this is the invariant mass of photon pairs where we assume our H-boson decays to photons.  </p> <pre><code>RooWorkspace *wspace = (RooWorkspace*) file-&gt;Get(\"workspace\");\nwspace-&gt;Print(\"v\");\n</code></pre> Show <pre><code>RooWorkspace(workspace) Tutorial Workspace contents\n\nvariables\n---------\n(CMS_hgg_mass)\n\ndatasets\n--------\nRooDataSet::dataset(CMS_hgg_mass)\n</code></pre> <p>Now we will have a look at the data. The <code>RooWorkspace</code> has several accessor functions, we will use the <code>RooWorkspace::data</code> one.  There are also <code>RooWorkspace::var</code>, <code>RooWorkspace::function</code> and <code>RooWorkspace::pdf</code> with (hopefully) obvious purposes.</p> <p><pre><code>RooDataSet *hgg_data = (RooDataSet*) wspace-&gt;data(\"dataset\");\nRooRealVar *hgg_mass = (RooRealVar*) wspace-&gt;var(\"CMS_hgg_mass\");\n\nplot = hgg_mass-&gt;frame();\n\nhgg_data-&gt;plotOn(plot,RooFit::Binning(160)); \n// Here we've picked a certain number of bins just for plotting purposes \n\nTCanvas *hggcan = new TCanvas();\nplot-&gt;Draw();\nhggcan-&gt;Update();\nhggcan-&gt;Draw();\n</code></pre> </p>"},{"location":"part5/roofit/#likelihoods-and-fitting-to-data","title":"Likelihoods and Fitting to data","text":"<p>The data we have in our file does not look like a Gaussian distribution. Instead, we could probably use something like an exponential to describe it. </p> <p>There is an exponential PDF already in <code>RooFit</code> (yes, you guessed it) <code>RooExponential</code>. For a PDF, we only need one parameter which is the exponential slope \\(\\alpha\\) so our pdf is,  </p> \\[ f(m|\\alpha) = \\dfrac{1}{N} e^{-\\alpha m}\\] <p>Where of course, \\(N = \\int_{110}^{150} e^{-\\alpha m} dm\\) is the normalisation constant.</p> <p>You can find several available <code>RooFit</code> functions here: https://root.cern.ch/root/html/ROOFIT_ROOFIT_Index.html</p> <p>There is also support for a generic PDF in the form of a <code>RooGenericPdf</code>, check this link: https://root.cern.ch/doc/v608/classRooGenericPdf.html</p> <p>Now we will create an exponential PDF for our background, </p> <pre><code>RooRealVar alpha(\"alpha\",\"#alpha\",-0.05,-0.2,0.01);\nRooExponential expo(\"exp\",\"exponential function\",*hgg_mass,alpha);\n</code></pre> <p>We can use <code>RooFit</code> to tell us to estimate the value of \\(\\alpha\\) using this dataset. You will learn more about parameter estimation, but for now we will just assume you know about maximizing likelihoods. This maximum likelihood estimator is common in HEP and is known to give unbiased estimates for things like distribution means etc. </p> <p>This also introduces the other main use of PDFs in <code>RooFit</code>. They can be used to construct likelihoods easily.</p> <p>The likelihood \\(\\mathcal{L}\\) is defined for a particluar dataset (and model) as being proportional to the probability to observe the data assuming some pdf. 
For our data, the probability to observe an event with a value in an interval bounded by a and b is given by,</p> \\[ P\\left(m~\\epsilon~[a,b] \\right) = \\int_{a}^{b} f(m|\\alpha)dm  \\] <p>As that interval shrinks we can say this probability just becomes equal to \\(f(m|\\alpha)dm\\).</p> <p>The probability to observe the dataset we have is given by the product of such probabilities for each of our data points, so that </p> \\[\\mathcal{L}(\\alpha) \\propto \\prod_{i} f(m_{i}|\\alpha)\\] <p>Note that for a specific dataset, the \\(dm\\) factors which should be there are constnant. They can therefore be absorbed into the constant of proportionality!</p> <p>The maximum likelihood esitmator for \\(\\alpha\\), usually written as \\(\\hat{\\alpha}\\), is found by maximising \\(\\mathcal{L}(\\alpha)\\).</p> <p>Note that this will not depend on the value of the constant of proportionality so we can ignore it. This is true in most scenarios because usually only the ratio of likelihoods is needed, in which the constant factors out. </p> <p>Obviously this multiplication of exponentials can lead to very large (or very small) numbers which can lead to numerical instabilities. To avoid this, we can take logs of the likelihood. Its also common to multiply this by -1 and minimize the resulting Negative Log Likelihood : \\(\\mathrm{-Log}\\mathcal{L}(\\alpha)\\).</p> <p><code>RooFit</code> can construct the NLL for us.</p> <pre><code>RooNLLVar *nll = (RooNLLVar*) expo.createNLL(*hgg_data);\nnll-&gt;Print(\"v\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes:\n  Address: 0x7fdddbe46200\n  Clients:\n  Servers:\n    (0x11eab5638,V-) RooRealVar::alpha \"#alpha\"\n  Proxies:\n    paramSet -&gt;\n      1)  alpha\n--- RooAbsReal ---\n\n  Plot label is \"nll_exp_dataset\"\n</code></pre> <p>Notice that the NLL object knows which RooRealVar is the parameter because it doesn't find that one in the dataset. This is how <code>RooFit</code> distiguishes between observables and parameters.</p> <p><code>RooFit</code> has an interface to Minuit via the <code>RooMinimizer</code> class which takes the NLL as an argument. To minimize, we just call the <code>RooMinimizer::minimize()</code> function. <code>Minuit2</code> is the program and <code>migrad</code> is the minimization routine which uses gradient descent.</p> <pre><code>RooMinimizer minim(*nll);\nminim.minimize(\"Minuit2\",\"migrad\");  \n</code></pre> Show <pre><code> **********\n **    1 **SET PRINT           1\n **********\n **********\n **    2 **SET NOGRAD\n **********\n PARAMETER DEFINITIONS:\n    NO.   NAME         VALUE      STEP SIZE      LIMITS\n     1 alpha       -5.00000e-02  2.10000e-02   -2.00000e-01  1.00000e-02\n **********\n **    3 **SET ERR         0.5\n **********\n **********\n **    4 **SET PRINT           1\n **********\n **********\n **    5 **SET STR           1\n **********\n NOW USING STRATEGY  1: TRY TO BALANCE SPEED AGAINST RELIABILITY\n **********\n **    6 **MIGRAD         500           1\n **********\n FIRST CALL TO USER FUNCTION AT NEW START POINT, WITH IFLAG=4.\n START MIGRAD MINIMIZATION.  STRATEGY  1.  CONVERGENCE WHEN EDM .LT. 1.00e-03\n FCN=3589.52 FROM MIGRAD    STATUS=INITIATE        4 CALLS           5 TOTAL\n                     EDM= unknown      STRATEGY= 1      NO ERROR MATRIX\n  EXT PARAMETER               CURRENT GUESS       STEP         FIRST\n  NO.   
NAME      VALUE            ERROR          SIZE      DERIVATIVE\n   1  alpha       -5.00000e-02   2.10000e-02   2.24553e-01  -9.91191e+01\n                               ERR DEF= 0.5\n MIGRAD MINIMIZATION HAS CONVERGED.\n MIGRAD WILL VERIFY CONVERGENCE AND ERROR MATRIX.\n COVARIANCE MATRIX CALCULATED SUCCESSFULLY\n FCN=3584.68 FROM MIGRAD    STATUS=CONVERGED      18 CALLS          19 TOTAL\n                     EDM=1.4449e-08    STRATEGY= 1      ERROR MATRIX ACCURATE\n  EXT PARAMETER                                   STEP         FIRST\n  NO.   NAME      VALUE            ERROR          SIZE      DERIVATIVE\n   1  alpha       -4.08262e-02   2.91959e-03   1.33905e-03  -3.70254e-03\n                               ERR DEF= 0.5\n EXTERNAL ERROR MATRIX.    NDIM=  25    NPAR=  1    ERR DEF=0.5\n  8.527e-06\n</code></pre> <p><code>RooFit</code> has found the best fit value of alpha for this dataset. It also estimates an uncertainty on alpha using the Hessian matrix from the fit.</p> <p><pre><code>alpha.Print(\"v\");\n</code></pre> <pre><code>--- RooAbsArg ---\n  Value State: clean\n  Shape State: clean\n  Attributes:\n  Address: 0x11eab5638\n  Clients:\n    (0x11eab5978,V-) RooExponential::exp \"exponential function\"\n    (0x7fdddbe46200,V-) RooNLLVar::nll_exp_dataset \"-log(likelihood)\"\n    (0x7fdddbe95600,V-) RooExponential::exp \"exponential function\"\n    (0x7fdddbe5a400,V-) RooRealIntegral::exp_Int[CMS_hgg_mass] \"Integral of exponential function\"\n  Servers:\n  Proxies:\n--- RooAbsReal ---\n\n  Plot label is \"alpha\"\n--- RooAbsRealLValue ---\n  Fit range is [ -0.2 , 0.01 ]\n--- RooRealVar ---\n  Error = 0.00291959\n</code></pre></p> <p>We will plot the resulting exponential on top of the data. Notice that the value of \\(\\hat{\\alpha}\\) is used for the exponential. </p> <pre><code>expo.plotOn(plot);\nexpo.paramOn(plot);\nplot-&gt;Draw();\nhggcan-&gt;Update();\nhggcan-&gt;Draw();\n</code></pre> <p></p> <p>It looks like there could be a small region near 125 GeV for which our fit does not quite go through the points. Maybe our hypothetical H-boson is not so hypothetical after all!</p> <p>We will now see what happens if we include some resonant signal into the fit. We can take our Gaussian function again and use that as a signal model. A reasonable value for the resolution of a resonant signal with a mass around 125 GeV decaying to a pair of photons is around a GeV.</p> <pre><code>sigma.setVal(1.);\nsigma.setConstant();\n\nMH.setVal(125);\nMH.setConstant();\n\nRooGaussian hgg_signal(\"signal\",\"Gaussian PDF\",*hgg_mass,MH,sigma);\n</code></pre> <p>By setting these parameters constant, <code>RooFit</code> knows (either when creating the NLL by hand or when using <code>fitTo</code>) that there is not need to fit for these parameters. </p> <p>We need to add this to our exponential model and fit a \"Sigmal+Background model\" by creating a <code>RooAddPdf</code>. In <code>RooFit</code> there are two ways to add PDFs, recursively where the fraction of yields for the signal and background is a parameter or absolutely where each PDF has its own normalization. 
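<p>Schematically, for a signal pdf \\(f_{s}\\) and a background pdf \\(f_{b}\\), the two options correspond to</p> \\[ f_{\\mathrm{rec}}(m) = c\\, f_{s}(m) + (1-c)\\, f_{b}(m), \\qquad f_{\\mathrm{ext}}(m) \\propto N_{s}\\, f_{s}(m) + N_{b}\\, f_{b}(m), \\] <p>where in the recursive case the fraction \\(c\\) is the parameter, while in the absolute (extended) case the yields \\(N_{s}\\) and \\(N_{b}\\) are the parameters and their sum sets the total expected number of events.</p>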
We're going to use the second one.</p> <pre><code>RooRealVar norm_s(\"norm_s\",\"N_{s}\",10,100);\nRooRealVar norm_b(\"norm_b\",\"N_{b}\",0,1000);\n\nconst RooArgList components(hgg_signal,expo);\nconst RooArgList coeffs(norm_s,norm_b);\n\nRooAddPdf model(\"model\",\"f_{s+b}\",components,coeffs);\nmodel.Print(\"v\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes: \n  Address: 0x11ed5d7a8\n  Clients: \n  Servers: \n    (0x11ed5a0f0,V-) RooGaussian::signal \"Gaussian PDF\"\n    (0x11ed5d058,V-) RooRealVar::norm_s \"N_{s}\"\n    (0x11eab5978,V-) RooExponential::exp \"exponential function\"\n    (0x11ed5d398,V-) RooRealVar::norm_b \"N_{b}\"\n  Proxies: \n    !refCoefNorm -&gt; \n    !pdfs -&gt; \n      1)  signal\n      2)     exp\n    !coefficients -&gt; \n      1)  norm_s\n      2)  norm_b\n--- RooAbsReal ---\n\n  Plot label is \"model\"\n--- RooAbsPdf ---\nCached value = 0\n</code></pre> <p>Ok, now we will fit the model. Note that this time we add the option <code>Extended()</code>, which tells <code>RooFit</code> that we care about the overall number of observed events in the data \\(n\\) too. It will add an additional Poisson term to the likelihood to account for this, so our likelihood this time looks like,</p> \\[L_{s+b}(N_{s},N_{b},\\alpha) = \\dfrac{ (N_{s}+N_{b})^{n} e^{-(N_{s}+N_{b})} }{n!} \\cdot \\prod_{i}^{n} \\left[ c f_{s}(m_{i}|M_{H},\\sigma)+ (1-c)f_{b}(m_{i}|\\alpha)  \\right] \\] <p>where \\(c = \\dfrac{ N_{s} }{ N_{s} + N_{b} }\\),   \\(f_{s}(m|M_{H},\\sigma)\\) is the Gaussian signal pdf and \\(f_{b}(m|\\alpha)\\) is the exponential pdf. Remember that \\(M_{H}\\) and \\(\\sigma\\) are fixed so that they are no longer parameters of the likelihood.</p> <p>There is a simpler interface for maximum-likelihood fits, which is the <code>RooAbsPdf::fitTo</code> method. With this method, <code>RooFit</code> will construct the negative log-likelihood function from the pdf and minimize all of the free parameters in one step.</p> <pre><code>model.fitTo(*hgg_data,RooFit::Extended());\n\nmodel.plotOn(plot,RooFit::Components(\"exp\"),RooFit::LineColor(kGreen));\nmodel.plotOn(plot,RooFit::LineColor(kRed));\nmodel.paramOn(plot);\n\nhggcan-&gt;Clear();\nplot-&gt;Draw();\nhggcan-&gt;Update();\nhggcan-&gt;Draw();\n</code></pre> <p></p> <p>What if we also fit for the mass (\\(M_{H}\\))? We can easily do this by removing the constant setting on MH.</p> <pre><code>MH.setConstant(false);\nmodel.fitTo(*hgg_data,RooFit::Extended());\n</code></pre> Show output <pre><code>[#1] INFO:Minization -- RooMinimizer::optimizeConst: activating const optimization\n[#1] INFO:Minization --  The following expressions will be evaluated in cache-and-track mode: (signal,exp)\n **********\n **    1 **SET PRINT           1\n **********\n **********\n **    2 **SET NOGRAD\n **********\n PARAMETER DEFINITIONS:\n    NO.   
NAME         VALUE      STEP SIZE      LIMITS\n     1 MH           1.25000e+02  1.00000e+00    1.20000e+02  1.30000e+02\n     2 alpha       -4.08793e-02  2.96856e-03   -2.00000e-01  1.00000e-02\n     3 norm_b       9.67647e+02  3.25747e+01    0.00000e+00  1.00000e+03\n MINUIT WARNING IN PARAMETR\n ============== VARIABLE3 BROUGHT BACK INSIDE LIMITS.\n     4 norm_s       3.22534e+01  1.16433e+01    1.00000e+01  1.00000e+02\n **********\n **    3 **SET ERR         0.5\n **********\n **********\n **    4 **SET PRINT           1\n **********\n **********\n **    5 **SET STR           1\n **********\n NOW USING STRATEGY  1: TRY TO BALANCE SPEED AGAINST RELIABILITY\n **********\n **    6 **MIGRAD        2000           1\n **********\n FIRST CALL TO USER FUNCTION AT NEW START POINT, WITH IFLAG=4.\n START MIGRAD MINIMIZATION.  STRATEGY  1.  CONVERGENCE WHEN EDM .LT. 1.00e-03\n FCN=-2327.53 FROM MIGRAD    STATUS=INITIATE       10 CALLS          11 TOTAL\n                     EDM= unknown      STRATEGY= 1      NO ERROR MATRIX       \n  EXT PARAMETER               CURRENT GUESS       STEP         FIRST   \n  NO.   NAME      VALUE            ERROR          SIZE      DERIVATIVE \n   1  MH           1.25000e+02   1.00000e+00   2.01358e-01   1.12769e+01\n   2  alpha       -4.08793e-02   2.96856e-03   3.30048e-02  -1.22651e-01\n   3  norm_b       9.67647e+02   3.25747e+01   2.56674e-01  -1.96463e-02\n   4  norm_s       3.22534e+01   1.16433e+01   3.10258e-01  -8.97036e-04\n                               ERR DEF= 0.5\n MIGRAD MINIMIZATION HAS CONVERGED.\n MIGRAD WILL VERIFY CONVERGENCE AND ERROR MATRIX.\n COVARIANCE MATRIX CALCULATED SUCCESSFULLY\n FCN=-2327.96 FROM MIGRAD    STATUS=CONVERGED      65 CALLS          66 TOTAL\n                     EDM=1.19174e-05    STRATEGY= 1      ERROR MATRIX ACCURATE \n  EXT PARAMETER                                   STEP         FIRST   \n  NO.   NAME      VALUE            ERROR          SIZE      DERIVATIVE \n   1  MH           1.24628e+02   3.98153e-01   2.66539e-03   2.46327e-02\n   2  alpha       -4.07708e-02   2.97195e-03   1.10093e-03   8.33780e-02\n   3  norm_b       9.66105e+02   3.25772e+01   5.96627e-03   1.83523e-03\n   4  norm_s       3.39026e+01   1.17380e+01   9.60816e-03  -2.32681e-03\n                               ERR DEF= 0.5\n EXTERNAL ERROR MATRIX.    NDIM=  25    NPAR=  4    ERR DEF=0.5\n  1.589e-01 -3.890e-05  1.462e-01 -1.477e-01 \n -3.890e-05  8.836e-06 -2.020e-04  2.038e-04 \n  1.462e-01 -2.020e-04  1.073e+03 -1.072e+02 \n -1.477e-01  2.038e-04 -1.072e+02  1.420e+02 \n PARAMETER  CORRELATION COEFFICIENTS  \n       NO.  GLOBAL      1      2      3      4\n        1  0.04518   1.000 -0.033  0.011 -0.031\n        2  0.03317  -0.033  1.000 -0.002  0.006\n        3  0.27465   0.011 -0.002  1.000 -0.275\n        4  0.27610  -0.031  0.006 -0.275  1.000\n **********\n **    7 **SET ERR         0.5\n **********\n **********\n **    8 **SET PRINT           1\n **********\n **********\n **    9 **HESSE        2000\n **********\n COVARIANCE MATRIX CALCULATED SUCCESSFULLY\n FCN=-2327.96 FROM HESSE     STATUS=OK             23 CALLS          89 TOTAL\n                     EDM=1.19078e-05    STRATEGY= 1      ERROR MATRIX ACCURATE \n  EXT PARAMETER                                INTERNAL      INTERNAL  \n  NO.   
NAME      VALUE            ERROR       STEP SIZE       VALUE   \n   1  MH           1.24628e+02   3.98106e-01   5.33077e-04  -7.45154e-02\n   2  alpha       -4.07708e-02   2.97195e-03   2.20186e-04   5.42722e-01\n   3  norm_b       9.66105e+02   3.26003e+01   2.38651e-04   1.20047e+00\n   4  norm_s       3.39026e+01   1.17445e+01   3.84326e-04  -4.87967e-01\n                               ERR DEF= 0.5\n EXTERNAL ERROR MATRIX.    NDIM=  25    NPAR=  4    ERR DEF=0.5\n  1.588e-01 -3.888e-05  1.304e-01 -1.304e-01 \n -3.888e-05  8.836e-06 -1.954e-04  1.954e-04 \n  1.304e-01 -1.954e-04  1.074e+03 -1.082e+02 \n -1.304e-01  1.954e-04 -1.082e+02  1.421e+02 \n PARAMETER  CORRELATION COEFFICIENTS  \n       NO.  GLOBAL      1      2      3      4\n        1  0.04274   1.000 -0.033  0.010 -0.027\n        2  0.03314  -0.033  1.000 -0.002  0.006\n        3  0.27694   0.010 -0.002  1.000 -0.277\n        4  0.27806  -0.027  0.006 -0.277  1.000\n[#1] INFO:Minization -- RooMinimizer::optimizeConst: deactivating const optimization\n</code></pre> <p>Notice the result for the fitted MH is not 125 and is included in the list of fitted parameters.  We can get more information about the fit, via the <code>RooFitResult</code>, using the option <code>Save()</code>. </p> <pre><code>RooFitResult *fit_res = (RooFitResult*) model.fitTo(*hgg_data,RooFit::Extended(),RooFit::Save());\n</code></pre> <p>For example, we can get the Correlation Matrix from the fit result... Note that the order of the parameters are the same as listed in the \"Floating Parameter\" list above</p> <p><pre><code>TMatrixDSym cormat = fit_res-&gt;correlationMatrix();\ncormat.Print();\n</code></pre> <pre><code>4x4 matrix is as follows\n\n     |      0    |      1    |      2    |      3    |\n---------------------------------------------------------\n   0 |          1    -0.03282    0.009538    -0.02623 \n   1 |   -0.03282           1   -0.001978    0.005439 \n   2 |   0.009538   -0.001978           1     -0.2769 \n   3 |   -0.02623    0.005439     -0.2769           1 \n</code></pre></p> <p>A nice feature of <code>RooFit</code> is that once we have a PDF, data and results like this, we can import this new model into our <code>RooWorkspace</code> and show off our new discovery to our LHC friends (if we weren't already too late!). We can also save the \"state\" of our parameters for later, by creating a snapshot of the current values. 
</p> <pre><code>wspace-&gt;import(model);  \nRooArgSet *params = model.getParameters(*hgg_data);\nwspace-&gt;saveSnapshot(\"nominal_values\",*params);\n\nwspace-&gt;Print(\"V\");\n</code></pre> Show output <pre><code>RooWorkspace(workspace) Tutorial Workspace contents\n\nvariables\n---------\n(CMS_hgg_mass,MH,alpha,norm_b,norm_s,resolution)\n\np.d.f.s\n-------\nRooExponential::exp[ x=CMS_hgg_mass c=alpha ] = 0.00248636\nRooAddPdf::model[ norm_s * signal + norm_b * exp ] = 0.00240205\nRooGaussian::signal[ x=CMS_hgg_mass mean=MH sigma=resolution ] = 5.34013e-110\n\ndatasets\n--------\nRooDataSet::dataset(CMS_hgg_mass)\n\nparameter snapshots\n-------------------\nnominal_values = (MH=124.627 +/- 0.398094,resolution=1[C],norm_s=33.9097 +/- 11.7445,alpha=-0.040779 +/- 0.00297195,norm_b=966.109 +/- 32.6025)\n</code></pre> <p>This is exactly what needs to be done when you want to use shape based datacards in Combine with parametric models.</p>"},{"location":"part5/roofit/#a-likelihood-for-a-counting-experiment","title":"A likelihood for a counting experiment","text":"<p>An introductory presentation about likelihoods and interval estimation is available here.</p> <p>Note: We will use python syntax in this section; you should use a .py script. Make sure to do <code>import ROOT</code> at the top of your script</p> <p>We have seen how to create variables and PDFs, and how to fit a PDF to data. But what if we have a counting experiment, or a histogram template shape? And what about systematic uncertainties?  We are going to build a likelihood  for this:</p> <p>\\(\\mathcal{L} \\propto p(\\text{data}|\\text{parameters})\\)</p> <p>where our parameters are parameters of interest, \\(\\mu\\), and nuisance parameters, \\(\\nu\\). The nuisance parameters are constrained by external measurements, so we add constraint terms \\(\\pi(\\vec{\\nu}_0|\\vec{\\nu})\\)</p> <p>So we have \\(\\mathcal{L} \\propto p(\\text{data}|\\mu,\\vec{\\nu})\\cdot \\pi(\\vec{\\nu}_0|\\vec{\\nu})\\)</p> <p>now we will try to build the likelihood by hand for a 1-bin counting experiment. The data is the number of observed events \\(N\\), and the probability is just a Poisson probability \\(p(N|\\lambda) = \\frac{\\lambda^N e^{-\\lambda}}{N!}\\), where \\(\\lambda\\) is the number of events expected in our signal+background model: \\(\\lambda = \\mu\\cdot s(\\vec{\\nu}) + b(\\vec{\\nu})\\). </p> <p>In the expression, s and b are the numbers of expected signal and background events, which both depend on the nuisance parameters. We will start by building a simple likelihood function with one signal process and one background process. We will assume there are no nuisance parameters for now. The number of observed events in data is 15, the expected number of signal events is 5 and the expected number of background events 8.1.</p> <p>It is easiest to use the <code>RooFit</code> workspace factory to build our model (this tutorial has more information on the factory syntax).</p> <p><pre><code>import ROOT\nw = ROOT.RooWorkspace(\"w\")\n</code></pre> We need to create an expression for the number of events in our model, \\(\\mu s +b\\):</p> <p><pre><code>w.factory('expr::n(\"mu*s +b\", mu[1.0,0,4], s[5],b[8.1])')\n</code></pre> Now we can build the likelihood, which is just our Poisson PDF: <pre><code>w.factory('Poisson::poisN(N[15],n)')\n</code></pre></p> <p>To find the best fit value for our parameter of interest \\(\\mu\\) we need to maximize the likelihood. 
In practice it is actually easier to minimize the Negative log of the likelihood, or NLL:</p> <pre><code>w.factory('expr::NLL(\"-log(@0)\",poisN)')\n</code></pre> <p>We can now use the <code>RooMinimizer</code> to find the minimum of the NLL</p> <p><pre><code>nll = w.function(\"NLL\")\nminim = ROOT.RooMinimizer(nll)\nminim.setErrorLevel(0.5)\nminim.minimize(\"Minuit2\",\"migrad\")\nbestfitnll = nll.getVal()\n</code></pre> Notice that we need to set the error level to 0.5 to get the uncertainties (relying on Wilks' theorem!) - note that there is a more reliable way of extracting the confidence interval (explicitly rather than relying on migrad). We will discuss this a bit later in this section.</p> <p>Now we will add a nuisance parameter, lumi, which represents the luminosity uncertainty. It has a 2.5% effect on both the signal and the background. The parameter will be log-normally distributed: when it's 0, the normalization of the signal and background are not modified; at \\(+1\\sigma\\) the signal and background normalizations will be multiplied by 1.025 and at \\(-1\\sigma\\) they will be divided by 1.025.  We should modify the expression for the number of events in our model:</p> <pre><code>w.factory('expr::n(\"mu*s*pow(1.025,lumi) +b*pow(1.025,lumi)\", mu[1.0,0,4], s[5],b[8.1],lumi[0,-4,4])')\n</code></pre> <p>And we add a unit gaussian constraint  <pre><code>w.factory('Gaussian::lumiconstr(lumi,0,1)')\n</code></pre></p> <p>Our full likelihood will now be <pre><code>w.factory('PROD::likelihood(poisN,lumiconstr)')\n</code></pre> and the NLL <pre><code>w.factory('expr::NLL(\"-log(@0)\",likelihood)')\n</code></pre></p> <p>Which we can minimize in the same way as before. </p> <p>Now we will extend our model a bit. </p> <ul> <li>Expanding on what was demonstrated above, build the likelihood for \\(N=15\\), a signal process s with expectation 5 events, a background ztt with expectation 3.7 events and a background tt with expectation 4.4 events. The luminosity uncertainty applies to all three processes. The signal process is further subject to a 5% log-normally distributed uncertainty sigth, tt is subject to a 6% log-normally distributed uncertainty ttxs, and ztt is subject to a 4% log-normally distributed uncertainty zttxs. Find the best-fit value and the associated uncertainty</li> <li>Also perform an explicit scan of the \\(\\Delta\\) NLL ( = log of profile likelihood ratio) and make a graph of the scan. Some example code can be found below to get you started. Hint: you'll need to perform fits for different values of mu, where mu is fixed. In <code>RooFit</code> you can set a variable to be constant as <code>var(\"VARNAME\").setConstant(True)</code></li> <li>From the curve that you have created by performing an explicit scan, we can extract the 68% CL interval. You can do so by eye or by writing some code to find the relevant intersections of the curve. </li> </ul> <pre><code>gr = ROOT.TGraph()\n\nnpoints = 0\nfor i in range(0,60):\n  npoints+=1\n  mu=0.05*i\n  ...\n  [perform fits for different values of mu with mu fixed]\n  ...\n  deltanll = ...\n  gr.SetPoint(npoints,mu,deltanll)\n\n\ncanv = ROOT.TCanvas()\ngr.Draw(\"ALP\")\ncanv.SaveAs(\"likelihoodscan.pdf\")\n</code></pre> <p>Well, this is doable - but we were only looking at a simple one-bin counting experiment. This might become rather cumbersome for large models... 
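<p>To make the exercise above concrete, here is one possible (untested) sketch of how the extended counting model and the explicit \\(\\Delta\\) NLL scan could be put together; the nuisance-parameter names follow the text above, and choices such as the step size and number of scan points are arbitrary:</p> <pre><code>import ROOT\n\nw = ROOT.RooWorkspace(\"w\")\n\n# Expected yield: signal plus the two backgrounds, each with its log-normal modifiers\nw.factory('expr::n(\"mu*s*pow(1.025,lumi)*pow(1.05,sigth) + ztt*pow(1.025,lumi)*pow(1.04,zttxs) + tt*pow(1.025,lumi)*pow(1.06,ttxs)\", mu[1.0,0,4], s[5], ztt[3.7], tt[4.4], lumi[0,-4,4], sigth[0,-4,4], zttxs[0,-4,4], ttxs[0,-4,4])')\nw.factory('Poisson::poisN(N[15],n)')\n\n# Unit Gaussian constraint for each nuisance parameter\nfor nuis in [\"lumi\", \"sigth\", \"zttxs\", \"ttxs\"]:\n    w.factory('Gaussian::%sconstr(%s,0,1)' % (nuis, nuis))\nw.factory('PROD::likelihood(poisN,lumiconstr,sigthconstr,zttxsconstr,ttxsconstr)')\nw.factory('expr::NLL(\"-log(@0)\",likelihood)')\n\nnll = w.function(\"NLL\")\nmu = w.var(\"mu\")\n\n# Global fit: gives the best-fit mu and the minimum NLL used as the reference\nminim = ROOT.RooMinimizer(nll)\nminim.setErrorLevel(0.5)\nminim.minimize(\"Minuit2\", \"migrad\")\nnll_min = nll.getVal()\n\n# Explicit scan: fix mu at each point, refit the nuisance parameters, store deltaNLL\ngr = ROOT.TGraph()\nnpoints = 0\nfor i in range(0, 60):\n    mu.setVal(0.05 * i)\n    mu.setConstant(True)\n    ROOT.RooMinimizer(nll).minimize(\"Minuit2\", \"migrad\")\n    gr.SetPoint(npoints, mu.getVal(), nll.getVal() - nll_min)\n    npoints += 1\n    mu.setConstant(False)\n\ncanv = ROOT.TCanvas()\ngr.Draw(\"ALP\")\ncanv.SaveAs(\"likelihoodscan.pdf\")\n</code></pre>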
</p> <p>For the next set of tutorials, we will switch to working with Combine, which helps to build the statistical model and perform the statistical analysis, instead of building the likelihood by hand with <code>RooFit</code>.</p> <p>Info</p> <p><code>RooFit</code> does have additional functionality to help with statistical model building, but we will not go into detail in these tutorials.</p>"},{"location":"tutorial2023/parametric_exercise/","title":"Parametric Models in Combine","text":""},{"location":"tutorial2023/parametric_exercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>; please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>Now let's move to the working directory for this tutorial, which contains all of the inputs and scripts needed to run the parametric fitting exercise: <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/parametric_exercise\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#session-structure","title":"Session structure","text":"<p>The exercise is split into six parts which cover:</p> <p>1) Parametric model building</p> <p>2) Simple fits</p> <p>3) Systematic uncertainties</p> <p>4) Toy generation</p> <p>5) Discrete profiling</p> <p>6) Multi-signal hypothesis</p> <p>Throughout the tutorial there are a number of questions and exercises for you to complete. These are shown by the bullet points in this markdown file.</p> <p>All the code required to run the different parts is available in python scripts. We have purposely commented out the code to encourage you to open the scripts and take a look at what is inside. Each block is separated by a divider and a blank line. When you are happy that you understand the code, you can uncomment it (block by block) and then run the scripts (using python3), e.g.: <pre><code>python3 construct_models_part1.py\n</code></pre> A number of scripts will produce plots (as .png files). The default path to store these plots is the current working directory. You can change this (e.g. pipe to an eos webpage) by changing the <code>plot_dir</code> variable in the <code>config.py</code> script.</p> <p>There is also a set of combine (.txt) datacards which will help you get through the various parts of the exercise. The exercises should help you become familiar with the structure of parametric fitting datacards.</p> <p>Finally, this exercise is heavily based on the <code>RooFit</code> package. So if you find yourself using the python interpreter for any checks, don't forget to... <pre><code>import ROOT\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#jupyter-notebooks","title":"Jupyter notebooks","text":"<p>Alternatively, we have provided <code>Jupyter</code> notebooks to run the different parts of the exercise, e.g. <code>part1.ipynb</code>. You will have already downloaded these notebooks when cloning the tutorial gitlab repo. To open Jupyter notebooks on lxplus within a CMSSW environment, you can add the following option when you <code>ssh</code> into lxplus: <pre><code>ssh -X -Y username@lxplus.cern.ch -L8xxx:localhost:8xxx\n</code></pre> where you should replace <code>xxx</code> with some three digit number. 
Then <code>cd</code> into the <code>combinetutorial-2023-parametric</code> directory and set up the CMSSW environment with: <pre><code>cmsenv\n</code></pre> You can then open the Jupyter notebook inside the environment with: <pre><code>jupyter notebook --no-browser --port 8xxx\n</code></pre> replacing <code>xxx</code> with the same three digit number. You should now be able to copy the url it provides into a browser and access the various exercise notebooks.</p>"},{"location":"tutorial2023/parametric_exercise/#analysis-overview","title":"Analysis overview","text":"<p>In this exercise we will look at one of the most famous parametric fitting analyses at the LHC: the Higgs boson decaying to two photons (H \\(\\rightarrow \\gamma\\gamma\\)). This decay channel is key in understanding the properties of the Higgs boson due to its clean final state topology. The excellent energy resolution- of the CMS electromagnetic calorimeter leads to narrow signal peak in the diphoton invariant mass spectrum, \\(m_{\\gamma\\gamma}\\), above a smoothly falling background continuum. The mass spectrum for the legacy Run 2 analysis is shown below.</p> <p></p> <p>In the analysis, we construct parametric models (analytic functions) of both signal and background events to fit the \\(m_{\\gamma\\gamma}\\) spectrum in data. From the fit we can extract measurements of Higgs boson properties including its rate of production, its mass (\\(m_H\\)), its coupling behaviour, to name a few. This exercise will show how to construct parametric models using RooFit, and subsequently how to use combine to extract the results.</p>"},{"location":"tutorial2023/parametric_exercise/#part-1-parametric-model-building","title":"Part 1: Parametric model building","text":"<p>As with any fitting exercise, the first step is to understand the format of the input data, explore its contents and construct a model. The python script which performs the model construction is <code>construct_models_part1.py</code>. This section will explain what the various lines of code are doing. If you are not very familiar with <code>RooFit</code>, you may want to refer to our <code>RooFit</code> Basics tutorial  here.</p>"},{"location":"tutorial2023/parametric_exercise/#signal-modelling","title":"Signal modelling","text":"<p>Firstly, we will construct a model to fit the signal (H \\(\\rightarrow\\gamma\\gamma\\)) mass peak using a Monte Carlo simulation sample of gluon-gluon fusion production (ggH) events with \\(m_H=125\\) GeV. This production mode has the largest cross section in the SM, and the LO Feynman diagram is shown below.</p> <p></p> <p>There has already been a dedicated selection performed on the events to increase the signal-to-background ratio (e.g. using some ML event classifier). Events passing this selection enter the analysis category, Tag0. Events entering Tag0 are used for the parametric fitting of the \\(m_{\\gamma\\gamma}\\) spectrum.</p> <p>The events are stored in a ROOT <code>TTree</code>, where the diphoton mass, <code>CMS_hgg_mass</code>, and the event weight, <code>weight</code>, are saved. 
Let's begin by loading the MC, and converting the <code>TTree</code> data into <code>RooDataSet</code>: <pre><code>import ROOT\nROOT.gROOT.SetBatch(True)\n\nf = ROOT.TFile(\"mc_part1.root\",\"r\")\n# Load TTree\nt = f.Get(\"ggH_Tag0\")\n\n# Define mass and weight variables\nmass = ROOT.RooRealVar(\"CMS_hgg_mass\", \"CMS_hgg_mass\", 125, 100, 180)\nweight = ROOT.RooRealVar(\"weight\",\"weight\",0,0,1)\n\n# Convert to RooDataSet\nmc = ROOT.RooDataSet(\"ggH_Tag0\",\"ggH_Tag0\", t, ROOT.RooArgSet(mass,weight), \"\", \"weight\" )\n\n# Lets plot the signal mass distribution\ncan = ROOT.TCanvas()\nplot = mass.frame()\nmc.plotOn(plot)\nplot.Draw()\ncan.Update()\ncan.SaveAs(\"part1_signal_mass.png\")\n</code></pre></p> <p></p> <p>The plot shows a peak centred on the Higgs mass at 125 GeV. Let's use a simple Gaussian to model the peak. <pre><code># Introduce a RooRealVar into the workspace for the Higgs mass\nMH = ROOT.RooRealVar(\"MH\", \"MH\", 125, 120, 130 )\nMH.setConstant(True)\n\n# Signal peak width\nsigma = ROOT.RooRealVar(\"sigma_ggH_Tag0\", \"sigma_ggH_Tag0\", 2, 1, 5)\n\n# Define the Gaussian with mean=MH and width=sigma\nmodel = ROOT.RooGaussian( \"model_ggH_Tag0\", \"model_ggH_Tag0\", mass, MH, sigma )\n\n# Fit Gaussian to MC events and plot\nmodel.fitTo(mc,ROOT.RooFit.SumW2Error(True))\n\ncan = ROOT.TCanvas()\nplot = mass.frame()\nmc.plotOn(plot)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(2) )\nplot.Draw()\ncan.Update()\ncan.Draw()\ncan.SaveAs(\"part1_signal_model_v0.png\")\n</code></pre></p> <p></p> <p>It looks like a good fit!</p> <p>Tasks and questions:</p> <ul> <li>Run the code above for yourself (or uncomment the relevant sections in <code>python3 construct_models_part1.py</code>) to produce the plots of the signal mass distribution and the signal model.</li> <li>Do you understand the output from the <code>fitTo</code> command (i.e the mimimization)? From now on we will add the option <code>ROOT.RooFit.PrintLevel(-1)</code> when fitting the models to surpress the minimizer output.</li> </ul> <p>But what if the mean of the model does not correspond directly to the Higgs boson mass i.e. there are some reconstruction effects. Let's instead define the mean of the model as:</p> \\[\\mu = m_H + \\delta\\] <p>and we can fit for \\(\\delta\\) in the model construction. For this we introduce a <code>RooFormulaVar</code>. <pre><code>dMH = ROOT.RooRealVar(\"dMH_ggH_Tag0\", \"dMH_ggH_Tag0\", 0, -1, 1 )\nmean = ROOT.RooFormulaVar(\"mean_ggH_Tag0\", \"mean_ggH_Tag0\", \"(@0+@1)\", ROOT.RooArgList(MH,dMH))\nmodel = ROOT.RooGaussian( \"model_ggH_Tag0\", \"model_ggH_Tag0\", mass, mean, sigma )\n\n# Fit the new model with a variable mean\nmodel.fitTo(mc,ROOT.RooFit.SumW2Error(True),ROOT.RooFit.PrintLevel(-1))\n\n# Model is parametric in MH. Let's show this by plotting for different values of MH\ncan = ROOT.TCanvas()\nplot = mass.frame()\nMH.setVal(120)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(2) )\nMH.setVal(125)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(3) )\nMH.setVal(130)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(4) )\nplot.Draw()\ncan.Update()\ncan.SaveAs(\"part1_signal_model_v1.png\")\n</code></pre></p> <p></p> <p>Tasks and questions:</p> <ul> <li>Run the code above (or uncomment the relevant sections in the script) to produce the plots.</li> <li>This choice of setting the shape parameters to constant means we believe our MC will perfectly model the Higgs boson events in data. Is this the case? How could we account for the MC mis-modelling in the fit? 
(See part 3).</li> <li>Let's now save the model inside a <code>RooWorkspace</code>. Combine will load this model when performing the fits. Crucially, we need to freeze the fit parameters of the signal model, otherwise they will be freely floating in the final results extraction. Run the code below to save the model to a workspace.</li> </ul> <p><pre><code>MH.setVal(125)\ndMH.setConstant(True)\nsigma.setConstant(True)\n\nf_out = ROOT.TFile(\"workspace_sig.root\", \"RECREATE\")\nw_sig = ROOT.RooWorkspace(\"workspace_sig\",\"workspace_sig\")\ngetattr(w_sig, \"import\")(model)\nw_sig.Print()\nw_sig.Write()\nf_out.Close()\n</code></pre> We have successfully constructed a parametric model to fit the shape of the signal peak. But we also need to know the yield/normalisation of the ggH signal process. In the SM, the ggH event yield in Tag0 is equal to:</p> \\[ N = \\sigma_{ggH} \\cdot \\mathcal{B}^{\\gamma\\gamma} \\cdot \\epsilon \\cdot \\mathcal{L}\\] <p>Where \\(\\sigma_{ggH}\\) is the SM ggH cross section, \\(\\mathcal{B}^{\\gamma\\gamma}\\) is the SM branching fraction of the Higgs boson to two photons, \\(\\epsilon\\) is the efficiency factor and corresponds to the fraction of the total ggH events landing in the Tag0 analysis category. Finally \\(\\mathcal{L}\\) is the integrated luminosity.</p> <p>In this example, the ggH MC events are normalised before any selection is performed to \\(\\sigma_{ggH} \\cdot \\mathcal{B}^{\\gamma\\gamma}\\), taking the values from the LHCHWG twiki. Note this does not include the lumi scaling, which may be different to what you have in your own analyses! We can then calculate the efficiency factor, \\(\\epsilon\\), by taking the sum of weights in the MC dataset and dividing through by \\(\\sigma_{ggH} \\cdot \\mathcal{B}^{\\gamma\\gamma}\\). This will tell us what fraction of ggH events land in Tag0. <pre><code># Define SM cross section and branching fraction values\nxs_ggH = 48.58 #in [pb]\nbr_gamgam = 2.7e-3\n\n# Calculate the efficiency and print output\nsumw = mc.sumEntries()\neff = sumw/(xs_ggH*br_gamgam)\nprint(\"Efficiency of ggH events landing in Tag0 is: %.2f%%\"%(eff*100))\n\n# Calculate the total yield (assuming full Run 2 lumi) and print output\nlumi = 138000\nN = xs_ggH*br_gamgam*eff*lumi\nprint(\"For 138fb^-1, total normalisation of signal is: N = xs * br * eff * lumi = %.2f events\"%N)\n</code></pre> Gives the output: <pre><code>Efficiency of ggH events landing in Tag0 is: 1.00%\nFor 138fb^-1, total normalisation of signal is: N = xs * br * eff * lumi = 181.01 events\n</code></pre> So we find 1% of all ggH events enter Tag0. And the total expected yield of ggH events in Tag0 (with lumi scaling) is <code>181.01</code>. Lets make a note of this for later!</p>"},{"location":"tutorial2023/parametric_exercise/#background-modelling","title":"Background modelling","text":"<p>In the H \\(\\rightarrow\\gamma\\gamma\\) analysis we construct the background model directly from data. To avoid biasing our background estimate, we remove the signal region from the model construction and fit the mass sidebands. Let's begin by loading the data <code>TTree</code> and converting to a <code>RooDataSet</code>. We will then plot the mass sidebands. 
<pre><code>f = ROOT.TFile(\"data_part1.root\",\"r\")\nt = f.Get(\"data_Tag0\")\n\n# Convert TTree to a RooDataSet\ndata = ROOT.RooDataSet(\"data_Tag0\", \"data_Tag0\", t, ROOT.RooArgSet(mass), \"\", \"weight\")\n\n# Define mass sideband ranges on the mass variable: 100-115 and 135-180\nn_bins = 80\nbinning = ROOT.RooFit.Binning(n_bins,100,180)\nmass.setRange(\"loSB\", 100, 115 )\nmass.setRange(\"hiSB\", 135, 180 )\nmass.setRange(\"full\", 100, 180 )\nfit_range = \"loSB,hiSB\"\n\n# Plot the data in the mass sidebands\ncan = ROOT.TCanvas()\nplot = mass.frame()\ndata.plotOn( plot, ROOT.RooFit.CutRange(fit_range), binning )\nplot.Draw()\ncan.Update()\ncan.Draw()\ncan.SaveAs(\"part1_data_sidebands.png\")\n</code></pre></p> <p></p> <p>By eye, it looks like an exponential function would fit the data sidebands well.</p> <p>Tasks and questions:</p> <ul> <li>Run the code above to produce the plot.</li> <li>Construct the background model using a <code>RooExponential</code> and fit the data sidebands. You can use something like the code below to do this (or uncomment the relevant section of the script).</li> </ul> <pre><code>alpha = ROOT.RooRealVar(\"alpha\", \"alpha\", -0.05, -0.2, 0 )\nmodel_bkg = ROOT.RooExponential(\"model_bkg_Tag0\", \"model_bkg_Tag0\", mass, alpha )\n\n# Fit model to data sidebands\nmodel_bkg.fitTo( data, ROOT.RooFit.Range(fit_range),  ROOT.RooFit.PrintLevel(-1))\n\n# Let's plot the model fit to the data\ncan = ROOT.TCanvas()\nplot = mass.frame()\n# We have to be careful with the normalisation as we only fit over sidebands\n# First do an invisible plot of the full data set\ndata.plotOn( plot, binning, ROOT.RooFit.MarkerColor(0), ROOT.RooFit.LineColor(0) )\nmodel_bkg.plotOn( plot, ROOT.RooFit.NormRange(fit_range), ROOT.RooFit.Range(\"full\"), ROOT.RooFit.LineColor(2))\ndata.plotOn( plot, ROOT.RooFit.CutRange(fit_range), binning )\nplot.Draw()\ncan.Update()\ncan.Draw()\ncan.SaveAs(\"part1_bkg_model.png\")\n</code></pre> <p></p> <p>Tasks and questions:</p> <ul> <li>As the background model is extracted from data, we want to introduce a freely floating normalisation term in the fit that enters the likelihood as an additional Poisson term (this is known as performing an extended maximum likelihood fit). We use the total number of data events (including in the signal region) as the initial prefit value of this normalisation object, i.e. assuming no signal in the data. The syntax to name this normalisation object is <code>{model}_norm</code>, which will then be picked up automatically by combine. Note that we also allow the shape parameter to float in the final fit to data (by not setting it to constant). Run the code below to include this parameter in the workspace.</li> </ul> <p><pre><code>norm = ROOT.RooRealVar(\"model_bkg_Tag0_norm\", \"Number of background events in Tag0\", data.numEntries(), 0, 3*data.numEntries() )\nalpha.setConstant(False)\n</code></pre> Let's then save the background model, the normalisation object, and the data distribution to a new <code>RooWorkspace</code>: <pre><code>f_out = ROOT.TFile(\"workspace_bkg.root\", \"RECREATE\")\nw_bkg = ROOT.RooWorkspace(\"workspace_bkg\",\"workspace_bkg\")\ngetattr(w_bkg, \"import\")(data)\ngetattr(w_bkg, \"import\")(norm)\ngetattr(w_bkg, \"import\")(model_bkg)\nw_bkg.Print()\nw_bkg.Write()\nf_out.Close()\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#datacard","title":"Datacard","text":"<p>The model workspaces have now been constructed.
But before we can run any fits in combine we need to build the so-called datacard. This is a text file which defines the different processes entering the fit and their expected yields, and maps these processes to the corresponding (parametric) models. We also store information on the systematic uncertainties in the datacard (see part 3). Given the low complexity of this example, the datacard is reasonably short. The datacard for this section is titled <code>datacard_part1.txt</code>. Take some time to understand the different lines, in particular the values used for the process normalisations.</p> <p>Tasks and questions:</p> <ul> <li>Where does the signal (ggH) normalisation come from?</li> <li>Why do we use a value of 1.0 for the background model normalisation in this analysis?</li> </ul> <p><pre><code># Datacard example for combine tutorial 2023 (part 1)\n---------------------------------------------\nimax 1\njmax 1\nkmax *\n---------------------------------------------\n\nshapes      ggH          Tag0      workspace_sig.root      workspace_sig:model_ggH_Tag0\nshapes      bkg_mass     Tag0      workspace_bkg.root      workspace_bkg:model_bkg_Tag0\nshapes      data_obs     Tag0      workspace_bkg.root      workspace_bkg:data_Tag0\n\n---------------------------------------------\nbin             Tag0\nobservation     -1\n---------------------------------------------\nbin             Tag0         Tag0\nprocess         ggH          bkg_mass\nprocess         0            1\nrate            181.01       1.0\n---------------------------------------------\n</code></pre> To compile the datacard we run the following command, using a value of the Higgs mass of 125.0: <pre><code>text2workspace.py datacard_part1.txt -m 125\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Run the command above. This compiles the datacard into a RooWorkspace, effectively building the likelihood function. Try opening the compiled workspace (<code>root datacard_part1.root</code>) and printing the contents (use <code>w-&gt;Print()</code>).</li> <li>Do you understand what all the different objects are? What does the variable <code>r</code> correspond to? Try (verbose) printing with <code>w-&gt;var(\"r\")-&gt;Print(\"v\")</code>.</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-signal-normalisation-object","title":"Extension: signal normalisation object","text":"<p>In the example above, the signal model normalisation is input by hand in the datacard. We can instead define the signal normalisation components in the model in a similar fashion to the background model normalisation object. Let's build the cross section (ggH), branching fraction (H-&gt;gamgam), and efficiency variables.
It's important to set these terms to be constant for the final fit to data: <pre><code>xs_ggH = ROOT.RooRealVar(\"xs_ggH\", \"Cross section of ggH in [pb]\", 48.58 )\nbr_gamgam = ROOT.RooRealVar(\"BR_gamgam\", \"Branching ratio of Higgs to gamma gamma\", 0.0027 )\neff_ggH_Tag0 = ROOT.RooRealVar(\"eff_ggH_Tag0\", \"Efficiency for ggH events to land in Tag0\", eff )\n\nxs_ggH.setConstant(True)\nbr_gamgam.setConstant(True)\neff_ggH_Tag0.setConstant(True)\n</code></pre> The normalisation component is then defined as the product of these three variables: <pre><code>norm_sig = ROOT.RooProduct(\"model_ggH_Tag0_norm\", \"Normalisation term for ggH in Tag 0\", ROOT.RooArgList(xs_ggH,br_gamgam,eff_ggH_Tag0))\n</code></pre></p> <p>Again the syntax <code>{model}_norm</code> has been used so that combine will automatically assign this object as the normalisation for the model (<code>model_ggH_Tag0</code>). Firstly, we need to save a new version of the signal model workspace with the normalisation term included. <pre><code>f_out = ROOT.TFile(\"workspace_sig_with_norm.root\", \"RECREATE\")\nw_sig = ROOT.RooWorkspace(\"workspace_sig\",\"workspace_sig\")\ngetattr(w_sig, \"import\")(model)\ngetattr(w_sig, \"import\")(norm_sig)\nw_sig.Print()\nw_sig.Write()\nf_out.Close()\n</code></pre> We then need to modify the datacard to account for this normalisation term. Importantly, the <code>{model}_norm</code> term in our updated signal model workspace does not contain the integrated luminosity. Therefore, the <code>rate</code> term in the datacard must be set equal to the integrated luminosity in [pb^-1] (as the cross section was defined in [pb]). The total normalisation for the signal model is then the product of the <code>{model}_norm</code> and the <code>rate</code> value.</p> <p>Tasks and questions:</p> <ul> <li>You can find the example datacard, with the signal normalisation object included, here: <code>datacard_part1_with_norm.txt</code>. Check that it compiles successfully using <code>text2workspace</code>. If so, try printing out the contents of the workspace. Can you see the normalisation component?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-unbinned-vs-binned","title":"Extension: unbinned vs binned","text":"<p>In a parametric analysis, the fit can be performed using a binned or unbinned likelihood function. The consequences of binned vs unbinned likelihoods were discussed in the morning session. In combine, we can simply toggle between binned and unbinned fits by changing how the data set is stored in the workspace. In the example above, the data was saved as a <code>RooDataSet</code>. This means that an unbinned maximum likelihood function would be used.</p> <p>To switch to a binned maximum likelihood fit, we need to store the data set in the workspace as a <code>RooDataHist</code>.</p> <p>Tasks and questions:</p> <ul> <li>First load the data as a <code>RooDataSet</code> as before:</li> </ul> <p><pre><code>f = ROOT.TFile(\"data_part1.root\",\"r\")\nt = f.Get(\"data_Tag0\")\n\n# Convert TTree to a RooDataSet\ndata = ROOT.RooDataSet(\"data_Tag0\", \"data_Tag0\", t, ROOT.RooArgSet(mass, weight), \"\", \"weight\")\n</code></pre>   -   Now set the number of bins in the observable and convert the data to a <code>RooDataHist</code> (as below). In the example below we will use 320 bins over the full mass range (0.25 GeV per bin). It is important that the binning is sufficiently granular so that we do not lose information in the data by switching to a binned likelihood fit.
When fitting a signal peak over a background we want the bin width to be sufficiently smaller than the signal model mass resolution. Try reducing the number of bins: at what point do you start to see differences in the fit results?</p> <pre><code># Set bin number for mass variables\nmass.setBins(320)\ndata_hist = ROOT.RooDataHist(\"data_hist_Tag0\", \"data_hist_Tag0\", mass, data)\n\n# Save the background model with the RooDataHist instead\nf_out = ROOT.TFile(\"workspace_bkg_binned.root\", \"RECREATE\")\nw_bkg = ROOT.RooWorkspace(\"workspace_bkg\",\"workspace_bkg\")\ngetattr(w_bkg, \"import\")(data_hist)\ngetattr(w_bkg, \"import\")(norm)\ngetattr(w_bkg, \"import\")(model_bkg)\nw_bkg.Print()\nw_bkg.Write()\nf_out.Close()\n</code></pre>"},{"location":"tutorial2023/parametric_exercise/#part-2-simple-fits","title":"Part 2: Simple fits","text":"<p>Now that the parametric models have been constructed and the datacard has been compiled, we are ready to start using combine to run fits. In CMS analyses we begin by blinding ourselves to the data in the signal region, and looking only at the expected results based on toy datasets (Asimov or pseudo-experiments). In this exercise, we will look straight away at the observed results. Note, the python commands in this section are taken from the script called <code>simple_fits.py</code>.</p> <p>Tasks and questions:</p> <ul> <li>To run a simple best-fit for the signal strength, <code>r</code>, fixing the Higgs mass to 125 GeV, you can run the following command in the terminal: <pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH --saveWorkspace -n .bestfit\n</code></pre> We obtain a best-fit signal strength of <code>r = 1.548</code>, i.e. the observed signal yield is 1.548 times the SM prediction.</li> </ul> <p>The option <code>--saveWorkspace</code> stores a snapshot of the postfit workspace in the output file (<code>higgsCombine.bestfit.MultiDimFit.mH125.root</code>). We can load the postfit workspace and look at how the values of all the fit parameters change (compare the <code>clean</code> and <code>MultiDimFit</code> parameter snapshots): <pre><code>import ROOT\n\nf = ROOT.TFile(\"higgsCombine.bestfit.MultiDimFit.mH125.root\")\nw = f.Get(\"w\")\nw.Print(\"v\")\n</code></pre> We can even plot the postfit signal-plus-background model using the workspace snapshot: <pre><code>n_bins = 80\nbinning = ROOT.RooFit.Binning(n_bins,100,180)\n\ncan = ROOT.TCanvas()\nplot = w.var(\"CMS_hgg_mass\").frame()\nw.data(\"data_obs\").plotOn( plot, binning )\n\n# Load the S+B model\nsb_model = w.pdf(\"model_s\").getPdf(\"Tag0\")\n\n# Prefit\nsb_model.plotOn( plot, ROOT.RooFit.LineColor(2), ROOT.RooFit.Name(\"prefit\") )\n\n# Postfit\nw.loadSnapshot(\"MultiDimFit\")\nsb_model.plotOn( plot, ROOT.RooFit.LineColor(4), ROOT.RooFit.Name(\"postfit\") )\nr_bestfit = w.var(\"r\").getVal()\n\nplot.Draw()\n\nleg = ROOT.TLegend(0.55,0.6,0.85,0.85)\nleg.AddEntry(\"prefit\", \"Prefit S+B model (r=1.00)\", \"L\")\nleg.AddEntry(\"postfit\", \"Postfit S+B model (r=%.2f)\"%r_bestfit, \"L\")\nleg.Draw(\"Same\")\n\ncan.Update()\ncan.SaveAs(\"part2_sb_model.png\")\n</code></pre></p> <p></p>"},{"location":"tutorial2023/parametric_exercise/#confidence-intervals","title":"Confidence intervals","text":"<p>We not only want to find the best-fit value of the signal strength, r, but also the confidence intervals.
The <code>singles</code> algorithm will find the 68% CL intervals: <pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH -n .singles --algo singles\n</code></pre> To perform a likelihood scan (i.e. calculate 2NLL at fixed values of the signal strength, profiling the other parameters), we use the <code>grid</code> algorithm. We can control the number of points in the scan using the <code>--points</code> option. Also, it is important to set a suitable range for the signal strength parameter. The <code>singles</code> algorithm has shown us that the 1 stdev interval on r is around +/-0.2.</p> <p>Tasks and questions:</p> <ul> <li>Use these intervals to define a suitable range for the scan, and change <code>lo,hi</code> in the following options accordingly: <code>--setParameterRanges r=lo,hi</code>.</li> </ul> <p><pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH -n .scan --algo grid --points 20 --setParameterRanges r=lo,hi\n</code></pre> We can use the <code>plot1DScan.py</code> script from CombineTools to plot the likelihood scan: <pre><code>plot1DScan.py higgsCombine.scan.MultiDimFit.mH125.root -o part2_scan\n</code></pre> </p> <ul> <li>Do you understand what the plot is showing? What information about the signal strength parameter can be inferred from the plot?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-expected-fits","title":"Extension: expected fits","text":"<p>To run expected fits we simply add <code>-t N</code> to the combine command. For <code>N&gt;0</code>, this will generate N random toys from the model and fit each one independently. For <code>N=-1</code>, this will generate an Asimov toy in which all statistical fluctuations from the model are suppressed.</p> <p>You can use the <code>--expectSignal 1</code> option to set the signal strength parameter to 1 when generating the toy. Alternatively, <code>--expectSignal 0</code> will generate a toy from the background-only model. For multiple parameter models you can set the initial values when generating the toy(s) using the <code>--setParameters</code> option of combine. For example, if you want to throw a toy where the Higgs mass is at 124 GeV and the background slope parameter <code>alpha</code> is equal to -0.05, you would add <code>--setParameters MH=124.0,alpha=-0.05</code>.</p> <p>Tasks and questions:</p> <ul> <li>Try running the Asimov likelihood scan for <code>r=1</code> and <code>r=0</code>, and plotting them using the <code>plot1DScan.py</code> script (see the example commands sketched below).</li> </ul>
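<p>As a starting point, the commands below sketch how such Asimov scans might look. The <code>-n</code> labels, output names and scan ranges for <code>r</code> are illustrative choices (not taken from the tutorial scripts), so adjust them to your own setup: <pre><code># Asimov likelihood scan with an injected signal of r=1\ncombine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH -n .scan.asimov_r1 -t -1 --expectSignal 1 --algo grid --points 20 --setParameterRanges r=0.5,2.5\n\n# Asimov likelihood scan generated from the background-only model (r=0)\ncombine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH -n .scan.asimov_r0 -t -1 --expectSignal 0 --algo grid --points 20 --setParameterRanges r=-1,1\n\n# Plot the r=1 Asimov scan (check the exact output file name produced by combine, which may include the toy seed)\nplot1DScan.py higgsCombine.scan.asimov_r1.MultiDimFit.mH125.root -o part2_scan_asimov_r1\n</code></pre></p>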
"},{"location":"tutorial2023/parametric_exercise/#extension-goodness-of-fit-tests","title":"Extension: goodness-of-fit tests","text":"<p>The goodness-of-fit tests available in combine are only well-defined for binned maximum likelihood fits. Therefore, to perform a goodness-of-fit test with a parametric datacard, make sure to save the data object as a <code>RooDataHist</code>, as in <code>workspace_bkg_binned.root</code>.</p> <p>Tasks and questions:</p> <ul> <li>Try editing the <code>datacard_part1_with_norm.txt</code> file to pick up the correct binned workspace file, and the <code>RooDataHist</code>. The goodness-of-fit method requires at least one nuisance parameter in the model to run successfully. Append the following line to the end of the datacard: <pre><code>lumi_13TeV      lnN          1.01         -\n</code></pre></li> <li>Does the datacard compile with the <code>text2workspace.py</code> command?</li> </ul> <p>We use the <code>GoodnessOfFit</code> method in combine to evaluate how compatible the observed data are with the model pdf. There are three types of GoF algorithm within combine; this example will use the <code>saturated</code> algorithm. You can find more information about the other algorithms here.</p> <p>Firstly, we want to calculate the value of the test statistic for the observed data: <pre><code>combine -M GoodnessOfFit datacard_part1_binned.root --algo saturated -m 125 --freezeParameters MH -n .goodnessOfFit_data\n</code></pre></p> <p>Now let's calculate the test statistic value for many toys thrown from the model: <pre><code>combine -M GoodnessOfFit datacard_part1_binned.root --algo saturated -m 125 --freezeParameters MH -n .goodnessOfFit_toys -t 1000\n</code></pre></p> <p>To make a plot of the GoF test-statistic distribution you can run the following commands, which first collect the values of the test-statistic into a json file, and then plot the distribution from the json file: <pre><code>combineTool.py -M CollectGoodnessOfFit --input higgsCombine.goodnessOfFit_data.GoodnessOfFit.mH125.root higgsCombine.goodnessOfFit_toys.GoodnessOfFit.mH125.123456.root -m 125.0 -o gof.json\n\nplotGof.py gof.json --statistic saturated --mass 125.0 -o part2_gof\n</code></pre></p> <p></p> <ul> <li>What does the plot tell us? Does the model fit the data well?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#part-3-systematic-uncertainties","title":"Part 3: Systematic uncertainties","text":"<p>In this section, we will learn how to add systematic uncertainties to a parametric fit analysis. The python commands are taken from the <code>systematics.py</code> script.</p> <p>For uncertainties which only affect the process normalisation, we can simply implement these as <code>lnN</code> uncertainties in the datacard. The file <code>mc_part3.root</code> contains the systematic-varied trees, i.e. Monte-Carlo events where some systematic uncertainty source <code>{photonID,JEC,scale,smear}</code> has been varied up and down by \\(1\\sigma\\).
<pre><code>import ROOT\n\nf = ROOT.TFile(\"mc_part3.root\")\nf.ls()\n</code></pre> Gives the output: <pre><code>TFile**     mc_part3.root\n TFile*     mc_part3.root\n  KEY: TTree    ggH_Tag0;1  ggH_Tag0\n  KEY: TTree    ggH_Tag0_photonIDUp01Sigma;1    ggH_Tag0_photonIDUp01Sigma\n  KEY: TTree    ggH_Tag0_photonIDDown01Sigma;1  ggH_Tag0_photonIDDown01Sigma\n  KEY: TTree    ggH_Tag0_scaleUp01Sigma;1   ggH_Tag0_scaleUp01Sigma\n  KEY: TTree    ggH_Tag0_scaleDown01Sigma;1 ggH_Tag0_scaleDown01Sigma\n  KEY: TTree    ggH_Tag0_smearUp01Sigma;1   ggH_Tag0_smearUp01Sigma\n  KEY: TTree    ggH_Tag0_smearDown01Sigma;1 ggH_Tag0_smearDown01Sigma\n  KEY: TTree    ggH_Tag0_JECUp01Sigma;1 ggH_Tag0_JECUp01Sigma\n  KEY: TTree    ggH_Tag0_JECDown01Sigma;1   ggH_Tag0_JECDown01Sigma\n</code></pre> Let's first load the systematic-varied trees as RooDataSets and store them in a python dictionary, <code>mc</code>:</p> <p>Tasks and questions:</p> <ul> <li>Run the code below (or uncomment the relevant section of the script for this part).</li> </ul> <pre><code># Define mass and weight variables\nmass = ROOT.RooRealVar(\"CMS_hgg_mass\", \"CMS_hgg_mass\", 125, 100, 180)\nweight = ROOT.RooRealVar(\"weight\",\"weight\",0,0,1)\n\nmc = {}\n\n# Load the nominal dataset\nt = f.Get(\"ggH_Tag0\")\nmc['nominal'] = ROOT.RooDataSet(\"ggH_Tag0\",\"ggH_Tag0\", t, ROOT.RooArgSet(mass,weight), \"\", \"weight\" )\n\n# Load the systematic-varied datasets\nfor syst in ['JEC','photonID','scale','smear']:\n    for direction in ['Up','Down']:\n        key = \"%s%s01Sigma\"%(syst,direction)\n        name = \"ggH_Tag0_%s\"%(key)\n        t = f.Get(name)\n        mc[key] = ROOT.RooDataSet(name, name, t, ROOT.RooArgSet(mass,weight), \"\", \"weight\" )\n</code></pre> <p>The jet energy scale (JEC) and photon identification (photonID) uncertainties do not affect the shape of the \\(m_{\\gamma\\gamma}\\) distribution, i.e. they only affect the signal yield estimate. We can calculate their impact by comparing the sum of weights to the nominal dataset. Note that the photonID uncertainty changes the weight of the events in the tree, whereas the JEC-varied trees contain a different set of events, generated by shifting the jet energy scale in the simulation.
In either case, the procedure for calculating the yield variation is the same: <pre><code>for syst in ['JEC','photonID']:\n    for direction in ['Up','Down']:\n        yield_variation = mc['%s%s01Sigma'%(syst,direction)].sumEntries()/mc['nominal'].sumEntries()\n        print(\"Systematic varied yield (%s,%s): %.3f\"%(syst,direction,yield_variation))\n</code></pre> <pre><code>Systematic varied yield (JEC,Up): 1.056\nSystematic varied yield (JEC,Down): 0.951\nSystematic varied yield (photonID,Up): 1.050\nSystematic varied yield (photonID,Down): 0.950\n</code></pre> We can write these yield variations in the datacard with the lines: <pre><code>CMS_scale_j           lnN      0.951/1.056      -\nCMS_hgg_phoIdMva      lnN      1.05             -\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Run the code (or uncomment the relevant lines of code in the script) to produce the systematic variations in the workspace and add the datacard lines above to the datacard.</li> <li>Why is the photonID uncertainty expressed as one number, whereas the JEC uncertainty is defined by two?</li> </ul> <p>Note in this analysis there are no systematic uncertainties affecting the background estimate (<code>-</code> in the datacard), as the background model has been derived directly from data.</p>"},{"location":"tutorial2023/parametric_exercise/#parametric-shape-uncertainties","title":"Parametric shape uncertainties","text":"<p>What about systematic uncertainties which affect the shape of the mass distribution?</p> <p>In a parametric analysis, we need to build the dependence directly into the model parameters. The example uncertainty sources in this tutorial are the photon energy scale and smearing uncertainties. From the names alone we can expect that the scale uncertainty will affect the mean of the signal Gaussian, and the smear uncertainty will impact the resolution (sigma). Let's first take a look at the <code>scaleUp01Sigma</code> dataset:</p> <p><pre><code># Build the model to fit the systematic-varied datasets\nmean = ROOT.RooRealVar(\"mean\", \"mean\", 125, 124, 126)\nsigma = ROOT.RooRealVar(\"sigma\", \"sigma\", 2, 1.5, 2.5)\ngaus = ROOT.RooGaussian(\"model\", \"model\", mass, mean, sigma)\n\n# Run the fits twice (the second time starting from the best fit of the first) to obtain more reliable results\ngaus.fitTo(mc['scaleUp01Sigma'], ROOT.RooFit.SumW2Error(True),ROOT.RooFit.PrintLevel(-1))\ngaus.fitTo(mc['scaleUp01Sigma'], ROOT.RooFit.SumW2Error(True),ROOT.RooFit.PrintLevel(-1))\nprint(\"Mean = %.3f +- %.3f GeV, Sigma = %.3f +- %.3f GeV\"%(mean.getVal(),mean.getError(),sigma.getVal(),sigma.getError()) )\n</code></pre> Gives the output: <pre><code>Mean = 125.370 +- 0.009 GeV, Sigma = 2.011 +- 0.006 GeV\n</code></pre> Now let's compare the values to the nominal fit for all systematic-varied trees. We observe a significant variation in the mean for the scale uncertainty, and a significant variation in sigma for the smear uncertainty.
<pre><code># First fit the nominal dataset\ngaus.fitTo(mc['nominal'], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1) )\ngaus.fitTo(mc['nominal'], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1) )\n# Save the mean and sigma values and errors to python dicts\nmean_values, sigma_values = {}, {}\nmean_values['nominal'] = [mean.getVal(),mean.getError()]\nsigma_values['nominal'] = [sigma.getVal(),sigma.getError()]\n\n# Next for the systematic varied datasets\nfor syst in ['scale','smear']:\n    for direction in ['Up','Down']:\n        key = \"%s%s01Sigma\"%(syst,direction)\n        gaus.fitTo(mc[key] , ROOT.RooFit.SumW2Error(True),  ROOT.RooFit.PrintLevel(-1))\n        gaus.fitTo(mc[key], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1))\n        mean_values[key] = [mean.getVal(), mean.getError()]\n        sigma_values[key] = [sigma.getVal(), sigma.getError()]\n\n# Print the variations in mean and sigma\nfor key in mean_values.keys():\n    print(\"%s: mean = %.3f +- %.3f GeV, sigma = %.3f +- %.3f GeV\"%(key,mean_values[key][0],mean_values[key][1],sigma_values[key][0],sigma_values[key][1]))\n</code></pre> Prints the output: <pre><code>nominal: mean = 125.001 +- 0.009 GeV, sigma = 1.996 +- 0.006 GeV\nscaleUp01Sigma: mean = 125.370 +- 0.009 GeV, sigma = 2.011 +- 0.006 GeV\nscaleDown01Sigma: mean = 124.609 +- 0.009 GeV, sigma = 2.005 +- 0.006 GeV\nsmearUp01Sigma: mean = 125.005 +- 0.009 GeV, sigma = 2.097 +- 0.007 GeV\nsmearDown01Sigma: mean = 125.007 +- 0.009 GeV, sigma = 1.912 +- 0.006 GeV\n</code></pre> The values tell us that the scale uncertainty (at \\(\\pm 1 \\sigma\\)) varies the signal peak mean by around 0.3%, and the smear uncertainty (at \\(\\pm 1 \\sigma\\)) varies the signal width (sigma) by around 4.5% (average of up and down variations).</p> <p>Now we need to bake these effects into the parametric signal model. The mean of the Gaussian was previously defined as:</p> \\[ \\mu = m_H + \\delta\\] <p>We introduce the nuisance parameter <code>nuisance_scale</code> = \\(\\eta\\) to account for a shift in the signal peak mean using:</p> \\[ \\mu = (m_H + \\delta) \\cdot (1+0.003\\eta)\\] <p>At \\(\\eta = +1 (-1)\\) the signal peak mean will shift up (down) by 0.3%. 
To build this into the RooFit signal model we simply define a new parameter, \\(\\eta\\), and update the definition of the mean formula variable: <pre><code># Building the workspace with systematic variations\nMH = ROOT.RooRealVar(\"MH\", \"MH\", 125, 120, 130 )\nMH.setConstant(True)\n\n# Define formula for mean of Gaussian\ndMH = ROOT.RooRealVar(\"dMH_ggH_Tag0\", \"dMH_ggH_Tag0\", 0, -5, 5 )\neta = ROOT.RooRealVar(\"nuisance_scale\", \"nuisance_scale\", 0, -5, 5)\neta.setConstant(True)\nmean_formula = ROOT.RooFormulaVar(\"mean_ggH_Tag0\", \"mean_ggH_Tag0\", \"(@0+@1)*(1+0.003*@2)\", ROOT.RooArgList(MH,dMH,eta))\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Update the workspace with the new mean formula variable (using the code above, or uncomment in the script)</li> <li>Why do we set the nuisance parameter to constant at this stage?</li> </ul> <p>Similarly for the width, we introduce a nuisance parameter, \\(\\chi\\):</p> \\[ \\sigma = \\sigma_{nominal} \\cdot (1+0.045\\chi)\\] <p><pre><code>sigma = ROOT.RooRealVar(\"sigma_ggH_Tag0_nominal\", \"sigma_ggH_Tag0_nominal\", 2, 1, 5)\nchi = ROOT.RooRealVar(\"nuisance_smear\", \"nuisance_smear\", 0, -5, 5)\nchi.setConstant(True)\nsigma_formula = ROOT.RooFormulaVar(\"sigma_ggH_Tag0\", \"sigma_ggH_Tag0\", \"@0*(1+0.045*@1)\", ROOT.RooArgList(sigma,chi))\n</code></pre> Let's now fit the new model to the signal Monte-Carlo dataset, build the normalisation object and save the workspace. <pre><code># Define Gaussian\nmodel = ROOT.RooGaussian( \"model_ggH_Tag0\", \"model_ggH_Tag0\", mass, mean_formula, sigma_formula )\n\n# Fit model to MC\nmodel.fitTo( mc['nominal'], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1) )\n\n# Build signal model normalisation object\nxs_ggH = ROOT.RooRealVar(\"xs_ggH\", \"Cross section of ggH in [pb]\", 48.58 )\nbr_gamgam = ROOT.RooRealVar(\"BR_gamgam\", \"Branching ratio of Higgs to gamma gamma\", 0.0027 )\neff = mc['nominal'].sumEntries()/(xs_ggH.getVal()*br_gamgam.getVal())\neff_ggH_Tag0 = ROOT.RooRealVar(\"eff_ggH_Tag0\", \"Efficiency for ggH events to land in Tag0\", eff )\n# Set values to be constant\nxs_ggH.setConstant(True)\nbr_gamgam.setConstant(True)\neff_ggH_Tag0.setConstant(True)\n# Define normalisation component as product of these three variables\nnorm_sig = ROOT.RooProduct(\"model_ggH_Tag0_norm\", \"Normalisation term for ggH in Tag 0\", ROOT.RooArgList(xs_ggH,br_gamgam,eff_ggH_Tag0))\n\n# Set shape parameters of model to be constant (i.e. fixed in fit to data)\ndMH.setConstant(True)\nsigma.setConstant(True)\n\n# Build new signal model workspace with signal normalisation term.\nf_out = ROOT.TFile(\"workspace_sig_with_syst.root\", \"RECREATE\")\nw_sig = ROOT.RooWorkspace(\"workspace_sig\",\"workspace_sig\")\ngetattr(w_sig, \"import\")(model)\ngetattr(w_sig, \"import\")(norm_sig)\nw_sig.Print()\nw_sig.Write()\nf_out.Close()\n</code></pre> The final step is to add the parametric uncertainties as Gaussian-constrained nuisance parameters into the datacard. This syntax means that the Gaussian constraint term in the likelihood function will have a mean of 0 and a width of 1. <pre><code>nuisance_scale        param    0.0    1.0\nnuisance_smear        param    0.0    1.0\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Run the python code above to include the smearing uncertainty too.</li> <li>Try adding these lines to <code>datacard_part1_with_norm.txt</code>, along with the lines for the JEC and photonID yield uncertainties above, and compiling with the <code>text2workspace</code> command.
Open the workspace and look at its contents. You will need to change the signal process workspace file name in the datacard to point to the new workspace (<code>workspace_sig_with_syst.root</code>).</li> <li>Can you see the new objects in the compiled datacard that have been created for the systematic uncertainties? What do they correspond to?</li> </ul> <p>We can now run a fit with the systematic uncertainties included. The option <code>--saveSpecifiedNuis</code> can be called to save the postfit nuisance parameter values in the combine output limit tree. <pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH --saveWorkspace -n .bestfit.with_syst --saveSpecifiedNuis CMS_scale_j,CMS_hgg_phoIdMva,nuisance_scale,nuisance_smear\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>What do the postfit values of the nuisances tell us here? You can check them by opening the output file (<code>root higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root</code>) and running <code>limit-&gt;Show(0)</code>.</li> <li>Try plotting the postfit mass distribution (as detailed in part 2). Do you notice any difference?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#uncertainty-breakdown","title":"Uncertainty breakdown","text":"<p>A more complete datacard with additional nuisance parameters is stored in <code>datacard_part3.txt</code>. We will use this datacard for the rest of part 3. Open the text file and have a look at the contents.</p> <p>The following line has been appended to the end of the datacard to define the set of theory nuisance parameters. This will come in handy when calculating the uncertainty breakdown. <pre><code>theory group = BR_hgg QCDscale_ggH pdf_Higgs_ggH alphaS_ggH UnderlyingEvent PartonShower\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li> <p>Compile the datacard and run an observed <code>MultiDimFit</code> likelihood scan over the signal strength, r: <pre><code>text2workspace.py datacard_part3.txt -m 125\n\ncombine -M MultiDimFit datacard_part3.root -m 125 --freezeParameters MH -n .scan.with_syst --algo grid --points 20 --setParameterRanges r=0.5,2.5\n</code></pre></p> </li> <li> <p>Our aim is to break down the total uncertainty into the systematic and statistical components. To get the statistical-uncertainty-only scan it should be as simple as freezing the nuisance parameters in the fit... right? Try it by adding <code>,allConstrainedNuisances</code> to the <code>--freezeParameters</code> option. This will freeze all (constrained) nuisance parameters in the fit. You can also feed in regular expressions with wildcards using <code>rgx{.*}</code>. For instance to freeze only the <code>nuisance_scale</code> and <code>nuisance_smear</code> you could run with <code>--freezeParameters MH,rgx{nuisance_.*}</code>.</p> </li> </ul> <p><pre><code>combine -M MultiDimFit datacard_part3.root -m 125 --freezeParameters MH,allConstrainedNuisances -n .scan.with_syst.statonly --algo grid --points 20 --setParameterRanges r=0.5,2.5\n</code></pre>   -   Plot the two likelihood scans on the same axis. You can use the plotting script provided or write your own. 
<pre><code>plot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label \"With systematics\" --main-color 1 --others higgsCombine.scan.with_syst.statonly.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_v0\n</code></pre></p> <p></p> <ul> <li>Can you spot the problem?</li> </ul> <p>The nuisance parameters introduced into the model have pulled the best-fit signal strength point! Therefore we cannot simply subtract the uncertainties in quadrature to get an estimate for the systematic/statistical uncertainty breakdown.</p> <p>The correct approach is to freeze the nuisance parameters to their respective best-fit values in the stat-only scan.</p> <ul> <li>We can do this by first saving a postfit workspace with all nuisance parameters profiled in the fit. Then we load the postfit snapshot values of the nuisance parameters (with the option <code>--snapshotName MultiDimFit</code>) from the combine output of the previous step, and then freeze the nuisance parameters for the stat-only scan. <pre><code>combine -M MultiDimFit datacard_part3.root -m 125 --freezeParameters MH -n .bestfit.with_syst --setParameterRanges r=0.5,2.5 --saveWorkspace\n\ncombine -M MultiDimFit higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root -m 125 --freezeParameters MH,allConstrainedNuisances -n .scan.with_syst.statonly_correct --algo grid --points 20 --setParameterRanges r=0.5,2.5 --snapshotName MultiDimFit\n</code></pre></li> <li>Adding the option <code>--breakdown syst,stat</code> to the <code>plot1DScan.py</code> command will automatically calculate the uncertainty breakdown for you. <pre><code>plot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label \"With systematics\" --main-color 1 --others higgsCombine.scan.with_syst.statonly_correct.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_v1 --breakdown syst,stat\n</code></pre></li> </ul> <p></p> <p>We can also freeze groups of nuisance parameters defined in the datacard with the option <code>--freezeNuisanceGroups</code>. Let's run a scan freezing only the theory uncertainties (using the nuisance group we defined in the datacard): <pre><code>combine -M MultiDimFit higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root -m 125 --freezeParameters MH --freezeNuisanceGroups theory -n .scan.with_syst.freezeTheory --algo grid --points 20 --setParameterRanges r=0.5,2.5 --snapshotName MultiDimFit\n</code></pre> To break down the total uncertainty into the theory, experimental and statistical components we can then use: <pre><code>plot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label Total --main-color 1 --others higgsCombine.scan.with_syst.freezeTheory.MultiDimFit.mH125.root:\"Freeze theory\":4 higgsCombine.scan.with_syst.statonly_correct.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_v2 --breakdown theory,exp,stat\n</code></pre></p> <p></p> <p>These methods are not limited to this particular grouping of systematics. We can use the above procedure to assess the impact of any nuisance parameter(s) on the signal strength confidence interval.</p> <p>Tasks and questions:</p> <ul> <li>Try to calculate the contribution to the total uncertainty from the luminosity estimate using this approach (a sketch of the commands is given below).</li> </ul>
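<p>For example, the luminosity contribution could be estimated along the following lines. This sketch assumes the luminosity nuisance parameter in <code>datacard_part3.txt</code> is called <code>lumi_13TeV</code> (check the datacard for the actual name); the <code>-n</code> label, output name and breakdown labels are illustrative choices: <pre><code># Scan with the luminosity nuisance parameter frozen to its postfit value (loaded from the snapshot)\ncombine -M MultiDimFit higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root -m 125 --freezeParameters MH,lumi_13TeV -n .scan.with_syst.freezeLumi --algo grid --points 20 --setParameterRanges r=0.5,2.5 --snapshotName MultiDimFit\n\n# Break the total uncertainty into luminosity, other-systematic and statistical components\nplot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label Total --main-color 1 --others higgsCombine.scan.with_syst.freezeLumi.MultiDimFit.mH125.root:\"Freeze lumi\":4 higgsCombine.scan.with_syst.statonly_correct.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_lumi --breakdown lumi,otherSyst,stat\n</code></pre></p>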
"},{"location":"tutorial2023/parametric_exercise/#impacts","title":"Impacts","text":"<p>It is often useful/required to check the impacts of the nuisance parameters (NP) on the parameter of interest, r. The impact of a NP is defined as the shift \\(\\Delta r\\) induced as the NP, \\(\\theta\\), is fixed to its \\(\\pm1\\sigma\\) values, with all other parameters profiled as normal. More information can be found in the combine documentation via this link.</p> <p>Let's calculate the impacts for our analysis. We can use <code>combineTool.py</code> to automate the procedure. The impacts are calculated in a few stages:</p> <p>1) Do an initial fit for the parameter of interest, adding the <code>--robustFit 1</code> option: <pre><code>combineTool.py -M Impacts -d datacard_part3.root -m 125 --freezeParameters MH -n .impacts --setParameterRanges r=0.5,2.5 --doInitialFit --robustFit 1\n</code></pre></p> <p>2) Next perform a similar scan for each NP with the <code>--doFits</code> option. This may take a few minutes: <pre><code>combineTool.py -M Impacts -d datacard_part3.root -m 125 --freezeParameters MH -n .impacts --setParameterRanges r=0.5,2.5 --doFits --robustFit 1\n</code></pre></p> <p>3) Collect the outputs from the previous step and write the results to a json file: <pre><code>combineTool.py -M Impacts -d datacard_part3.root -m 125 --freezeParameters MH -n .impacts --setParameterRanges r=0.5,2.5 -o impacts_part3.json\n</code></pre></p> <p>4) Produce a plot summarising the nuisance parameter values and impacts: <pre><code>plotImpacts.py -i impacts_part3.json -o impacts_part3\n</code></pre></p> <p></p> <p>Tasks and questions:</p> <ul> <li>Run the commands 1-4 above. There is a lot of information in these plots, which can be of invaluable use to analysers in understanding the fit. Do you understand everything that the plot is showing?</li> <li>Which NP has the highest impact on the signal strength measurement?</li> <li>Which NP is pulled the most in the fit to data? What does this information imply about the signal model mean in relation to the data?</li> <li>Which NP is the most constrained in the fit to the data? What does it mean for a nuisance parameter to be constrained?</li> <li>Try adding the option <code>--summary</code> to the impacts plotting command. This is a nice new feature in combine!</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#part-4-toy-generation-and-bias-studies","title":"Part 4: Toy generation and bias studies","text":"<p>With combine we can generate toy datasets from the compiled datacard workspace. Please read this section in the combine manual before proceeding.</p> <p>An interesting use case of toy generation is when performing bias studies. In the Higgs to two photon (Hgg) analysis, the background is fit with some functional form. However (due to the complexities of QCD) the exact form of this function is unknown. Therefore, we need to understand how our choice of background function may impact the fitted signal strength. This is performed using a bias study, which will indicate how much potential bias is present given a certain choice of functional form.</p> <p>In the classical bias studies we begin by building a set of workspaces which correspond to different background function choices. In addition to the <code>RooExponential</code> constructed in Section 1, let's also try a (4th order) <code>RooChebychev</code> polynomial and a simple power law function to fit the background \\(m_{\\gamma\\gamma}\\) distribution.</p> <p>The script used to fit the different functions and build the workspaces is <code>construct_models_bias_study_part4.py</code>. Take some time to look at the script and understand what the code is doing.
In particular notice how we have saved the data as a <code>RooDataHist</code> in the workspace. This means we are now performing binned maximum likelihood fits (this is useful for part 4 to speed up fitting the many toys). If the binning is sufficiently granular, then there will be no noticeable difference in the results to the unbinned likelihood fits. Run the script with: <pre><code>python3  construct_models_bias_study_part4.py\n</code></pre></p> <p>The outputs are a set of workspaces which correspond to different choices of background model functions, and a plot showing fits of the different functions to the data mass sidebands.</p> <p></p> <p>The datacards for the different background model functions are saved as <code>datacard_part4_{pdf}.txt</code> where <code>pdf = {exp,poly,pow}</code>. Have a look inside the .txt files and understand what changes have been made to pick up the different functions. Compile the datacards with: <pre><code>for pdf in {exp,poly,pow}; do text2workspace.py datacard_part4_${pdf}.txt -m 125; done\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#bias-studies","title":"Bias studies","text":"<p>For the bias studies we want to generate (\"throw\") toy datasets with some choice of background function and fit back with another. The toys are thrown with a known value of the signal strength (r=1 in this example), which we will call \\(r_{truth}\\). The fitted value of r is defined as \\(r_{fit}\\), with some uncertainty \\(\\sigma_{fit}\\). A pull value, \\(P\\), is calculated for each toy dataset according to,</p> \\[ P = (r_{truth}-r_{fit})/\\sigma_{fit}\\] <p>By repeating the process for many toys we can build up a pull distribution. If there is no bias present then we would expect to obtain a normal distribution centred at 0, with a standard deviation of 1. Let's calculate the bias for our analysis.</p> <p>Firstly,  we generate N=1000 toys from each of the background function choices and save them in a ROOT file. For this we use the <code>GenerateOnly</code> method of combine. We will inject signal in the toys by setting <code>r=1</code> using the <code>--expectSignal 1</code> option.</p> <p>Tasks and questions:</p> <ul> <li>Repeat the bias studies as outlined above with <code>--expectSignal 0</code>. This will inform us of the potential bias in the signal strength measurement given that there is no true signal.</li> </ul> <p>The following commands show the example of throwing 1000 toys from the exponential function, and then fitting back with the 4th-order Chebychev polynomial. We use the <code>singles</code> algorithm to obtain a value for \\(r_{fit}\\) and \\(\\sigma_{fit}\\) simultaneously. <pre><code>combine -M GenerateOnly datacard_part4_exp.root -m 125 --freezeParameters MH -t 1000 -n .generate_exp --expectSignal 1 --saveToys\n\ncombine -M MultiDimFit datacard_part4_poly.root -m 125 --freezeParameters MH -t 1000 -n .bias_truth_exp_fit_poly --expectSignal 1 --toysFile higgsCombine.generate_exp.GenerateOnly.mH125.123456.root --algo singles\n</code></pre> The script <code>plot_bias_pull.py</code> will plot the pull distribution and fit a Gaussian to it: <pre><code>python3 plot_bias_pull.py\n</code></pre></p> <p></p> <p>The potential bias is defined as the (fitted) mean of the pull distribution.</p> <p>Tasks and questions:</p> <ul> <li>What is our bias value? Have we generated enough toys to be confident of the bias value? 
You could try generating more toys if not.</li> <li>What threshold do we use to define \"acceptable\" bias?</li> </ul> <p>From the pull definition, we see the bias value is defined relative to the total uncertainty in the signal strength (denominator of \\(\\sigma_{fit}\\)). Some analyses use 0.14 as the threshold because a bias below this value would change the total uncertainty (when added in quadrature) by less than 1% (see equation below). Other analyses use 0.2 as this will change the total uncertainty by less than 2%. We should define the threshold before performing the bias study.</p> \\[ \\sqrt{ 1^2 + 0.14^2} = 1.0098 \\] <ul> <li>How does our bias value compare to the thresholds? If the bias is outside the acceptable region we should account for this using a spurious signal method (see advanced exercises TBA).</li> <li>Repeat the bias study for each possible combination of truth and fitted background functions (see the example loop sketched after this list). Do the bias values induced by the choice of background function merit adding a spurious signal component into the fit?</li> <li>What would you expect the bias value to be for a background function that does not fit the data well? Should we be worried about such functions? What test could we use to reject such functions from the study beforehand?</li> </ul>
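<p>One way to cover all of the combinations is with a simple shell loop over the truth and fitted functions, extending the example commands above. The <code>-n</code> labels follow the same pattern as before, and <code>123456</code> is the default seed that appears in the generated toy file names: <pre><code># Loop over the truth function used to generate toys, and the function used to fit them back\nfor truth in exp poly pow; do\n    combine -M GenerateOnly datacard_part4_${truth}.root -m 125 --freezeParameters MH -t 1000 -n .generate_${truth} --expectSignal 1 --saveToys\n    for fit in exp poly pow; do\n        combine -M MultiDimFit datacard_part4_${fit}.root -m 125 --freezeParameters MH -t 1000 -n .bias_truth_${truth}_fit_${fit} --expectSignal 1 --toysFile higgsCombine.generate_${truth}.GenerateOnly.mH125.123456.root --algo singles\n    done\ndone\n</code></pre></p>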
"},{"location":"tutorial2023/parametric_exercise/#part-5-discrete-profiling","title":"Part 5: Discrete-profiling","text":"<p>If multiple pdfs exist to fit some distribution, we can store all pdfs in a single workspace by using a <code>RooMultiPdf</code> object. The script <code>construct_models_multipdf_part5.py</code> shows how to store the exponential, (4th order) Chebychev polynomial and the power law function from the previous section in a <code>RooMultiPdf</code> object. This requires a <code>RooCategory</code> index, which controls the pdf that is active at any one time. Look at the contents of the script and then run with: <pre><code>python3 construct_models_multipdf_part5.py\n</code></pre> The file <code>datacard_part5.txt</code> will load the multipdf as the background model. Notice the line at the end of the datacard (see below). This tells combine about the <code>RooCategory</code> index. <pre><code>pdfindex_Tag0         discrete\n</code></pre> Compile the datacard with: <pre><code>text2workspace.py datacard_part5.txt -m 125\n</code></pre></p> <p>The <code>RooMultiPdf</code> is a handy object for performing bias studies as all functions can be stored in a single workspace. You can then set which function is used for generating the toys with the <code>--setParameters pdfindex_Tag0=i</code> option, and which function is used for fitting with <code>--setParameters pdfindex_Tag0=j --freezeParameters pdfindex_Tag0</code> options.</p> <p>Tasks and questions:</p> <ul> <li>It would be a useful exercise to repeat the bias studies from part 4 but using the RooMultiPdf workspace. What happens when you do not freeze the index in the fitting step?</li> </ul> <p>But simpler bias studies are not the only benefit of using the <code>RooMultiPdf</code>! It also allows us to apply the discrete profiling method in our analysis. In this method, the index labelling which pdf is active (a discrete nuisance parameter) is left floating in the fit, and will be profiled by looping through all the possible index values and finding the pdf which gives the best fit. In this manner, we are able to account for the uncertainty in the choice of the background function.</p> <p>Note that, by default, the multipdf will tell combine to add 0.5 to the NLL for each parameter in the pdf. This is known as the penalty term (or correction factor) for the discrete profiling method. You can toggle this term when building the workspace with the command <code>multipdf.setCorrectionFactor(0.5)</code>. You may need to change the value of this term to obtain an acceptable bias in your fit!</p> <p>Let's run a likelihood scan using the compiled datacard with the <code>RooMultiPdf</code>: <pre><code>combine -M MultiDimFit datacard_part5.root -m 125 --freezeParameters MH -n .scan.multidimfit --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveSpecifiedIndex pdfindex_Tag0 --setParameterRanges r=0.5,2.5\n</code></pre> The option <code>--cminDefaultMinimizerStrategy 0</code> is required to prevent HESSE being called as this cannot handle discrete nuisance parameters. HESSE is the full calculation of the second derivative matrix (Hessian) of the likelihood using finite difference methods.</p> <p>The option <code>--saveSpecifiedIndex pdfindex_Tag0</code> saves the value of the index at each point in the likelihood scan. Let's have a look at how the index value changes as a function of the signal strength. You can make the following plot by running: <pre><code>python3 plot_pdfindex.py\n</code></pre></p> <p></p> <p>By floating the discrete nuisance parameter <code>pdfindex_Tag0</code>, at each point in the likelihood scan the pdfs will be iterated over and the one which gives the max likelihood (lowest 2NLL) including the correction factor will be used. The plot above shows that the <code>pdfindex_Tag0=0</code> (exponential) is chosen for the majority of r values, but this switches to <code>pdfindex_Tag0=1</code> (Chebychev polynomial) at the lower edge of the r range. We can see the impact on the likelihood scan by fixing the pdf to the exponential: <pre><code>combine -M MultiDimFit datacard_part5.root -m 125 --freezeParameters MH,pdfindex_Tag0 --setParameters pdfindex_Tag0=0 -n .scan.multidimfit.fix_exp --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveSpecifiedIndex pdfindex_Tag0 --setParameterRanges r=0.5,2.5\n</code></pre> Plotting the two scans on the same axis: <pre><code>plot1DScan.py higgsCombine.scan.multidimfit.MultiDimFit.mH125.root --main-label \"Pdf choice floating\" --main-color 1 --others higgsCombine.scan.multidimfit.fix_exp.MultiDimFit.mH125.root:\"Pdf fixed to exponential\":2 -o part5_scan --y-cut 35 --y-max 35\n</code></pre></p> <p></p> <p>The impact on the likelihood scan is evident at the lower edge, where the scan in which the index is floating flattens out. In this example, neither the \\(1\\sigma\\) nor the \\(2\\sigma\\) intervals are affected. But this is not always the case! Ultimately, this method allows us to account for the uncertainty in the choice of background function in the signal strength measurement.</p> <p>Coming back to the bias studies: do you now understand what you are testing if you do not freeze the index in the fitting stage? In this case you are fitting the toys back with the discrete profiling method. This is the standard approach for the bias studies when we use the discrete-profiling method in an analysis.</p> <p>There are a number of options which can be added to the combine command to improve the performance when using discrete nuisance parameters.
These are detailed at the end of this section in the combine manual.</p>"},{"location":"tutorial2023/parametric_exercise/#part-6-multi-signal-model","title":"Part 6: Multi-signal model","text":"<p>In reality, there are multiple Higgs boson processes which contribute to the total signal model, not only ggH. This section will explain how we can add an additional signal process (VBF) into the fit. Following this, we will add a second analysis category (Tag1), which has a higher purity of VBF events. To put this in context, the selection for Tag1 may require two jets with a large pseudorapidity separation and high invariant mass, which are typical properties of the VBF topology. By including this additional category with a different relative yield of VBF to ggH production, we are able to simultaneously constrain the rate of the two production modes.</p> <p>In the SM, the VBF process has a cross section which is roughly 10 times smaller than the ggH cross section. This explains why we need to use certain features of the event to boost the purity of VBF events. The LO Feynman diagram for VBF production is shown below.</p> <p></p>"},{"location":"tutorial2023/parametric_exercise/#building-the-models","title":"Building the models","text":"<p>Firstly, let's build the necessary inputs for this section using <code>construct_models_part6.py</code>. This script uses everything we have learnt in the previous sections: 1) Signal models (Gaussians) are built separately for each process (ggH and VBF) in each analysis category (Tag0 and Tag1). This uses separate <code>TTrees</code> for each contribution in the <code>mc_part6.root</code> file. The mean and width of the Gaussians include the effect of the parametric shape uncertainties, <code>nuisance_scale</code> and <code>nuisance_smear</code>. Each signal model is normalised according to the following equation, where \\(\\epsilon_{ij}\\) labels the fraction of process, \\(i\\) (=ggH,VBF), landing in analysis category, \\(j\\) (=Tag0,Tag1), and \\(\\mathcal{L}\\) is the integrated luminosity (defined in the datacard).</p> \\[ N_{ij} = \\sigma_i \\cdot \\mathcal{B}^{\\gamma\\gamma} \\cdot \\epsilon_{ij} \\cdot \\mathcal{L}\\] <p>2) A background model is constructed for each analysis category by fitting the mass sidebands in data. The input data is stored in the <code>data_part6.root</code> file. The models are <code>RooMultiPdfs</code> which contain an exponential, a 4th-order Chebychev polynomial and a power law function. The shape parameters and normalisation terms of the background models are freely floating in the final fit.</p> <p>Tasks and questions:</p> <ul> <li>Have a look through the <code>construct_models_part6.py</code> script and try to understand all parts of the model construction. When you are happy, go ahead and construct the models with: <pre><code>python3 construct_models_part6.py\n</code></pre></li> </ul> <p>The datacards for the two analysis categories are saved separately as <code>datacard_part6_Tag0.txt</code> and <code>datacard_part6_Tag1.txt</code>.</p> <ul> <li>Do you understand the changes made to include multiple signal processes in the datacard? What value in the <code>process</code> line is used to label VBF as a signal?</li> <li>Try compiling the individual datacards. What are the prefit ggH and VBF yields in each analysis category? You can find these by opening the workspace and printing the contents.</li> <li>Run the best fits and plot the prefit and postfit S+B models along with the data (see code in part 2).
How does the absolute number of data events in Tag1 compare to Tag0? What about the signal-to-background ratio, S/B?</li> </ul> <p>In order to combine the two categories into a single datacard, we make use of the <code>combineCards.py</code> script: <pre><code>combineCards.py datacard_part6_Tag0.txt datacard_part6_Tag1.txt &gt; datacard_part6_combined.txt\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#running-the-fits","title":"Running the fits","text":"<p>If we use the default <code>text2workspace</code> command on the combined datacard, then this will introduce a single signal strength modifier which modifies the rate of all signal processes (ggH and VBF) by the same factor.</p> <p>Tasks and questions:</p> <ul> <li>Try compiling the combined datacard and running a likelihood scan. Does the sensitivity to the global signal strength improve by adding the additional analysis category \"Tag1\"?</li> </ul> <p>If we want to measure the independent rates of both processes simultaneously, then we need to introduce a separate signal strength for ggH and VBF. To do this we use the <code>multiSignalModel</code> physics model in combine by adding the following options to the <code>text2workspace</code> command: <pre><code>text2workspace.py datacard_part6_combined.txt -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO \"map=.*/ggH:r_ggH[1,0,2]\" --PO \"map=.*/VBF:r_VBF[1,0,3]\" -o datacard_part6_combined_multiSignalModel.root\n</code></pre> The syntax for the parameter to process mapping is <code>map=category/process/POI[default,min,max]</code>. We have used the wildcard <code>.*</code> to tell combine that the POI (parameter of interest) should scale all cases of that process, regardless of the analysis category. The output of this command tells us what is scaled by the two signal strengths: <pre><code>Will scale  ch1/ggH  by  r_ggH\nWill scale  ch1/VBF  by  r_VBF\nWill scale  ch1/bkg_mass  by  1\nWill scale  ch2/ggH  by  r_ggH\nWill scale  ch2/VBF  by  r_VBF\nWill scale  ch2/bkg_mass  by  1\nWill scale  ch1/ggH  by  r_ggH\nWill scale  ch1/VBF  by  r_VBF\nWill scale  ch1/bkg_mass  by  1\nWill scale  ch2/ggH  by  r_ggH\nWill scale  ch2/VBF  by  r_VBF\nWill scale  ch2/bkg_mass  by  1\n</code></pre> Exactly what we require!</p> <p>To run a 1D \"profiled\" likelihood scan for ggH we use the following command: <pre><code>combine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .scan.part6_multiSignalModel_ggH --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveInactivePOI 1 -P r_ggH --floatOtherPOIs 1\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>\"Profiled\" here means we are profiling over the other parameter of interest, <code>r_VBF</code>, in the fit. In other words, we are treating <code>r_VBF</code> as an additional nuisance parameter. The option <code>--saveInactivePOI 1</code> stores the value of <code>r_VBF</code> in the combine output. Take a look at the fit output. Does the value of <code>r_VBF</code> depend on <code>r_ggH</code>? Are the two parameters of interest correlated? Remember, to look at the contents of the TTree you can use <code>limit-&gt;Show(i)</code>, where i is an integer labelling the point in the likelihood scan.</li> <li>Run the profiled scan for the VBF signal strength. Plot the <code>r_ggH</code> and <code>r_VBF</code> likelihood scans using the <code>plot1DScan.py</code> script (an example is sketched after this list). You will need to change some of the input options, in particular the <code>--POI</code> option. You can list the full set of options by running: <pre><code>plot1DScan.py --help\n</code></pre></li> </ul>
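<p>For reference, the VBF scan and the plotting steps could look something like the following sketch. The <code>-n</code> label and output names are illustrative choices; the <code>--POI</code> option selects which parameter of interest is plotted, as mentioned above: <pre><code># Profiled scan for the VBF signal strength\ncombine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .scan.part6_multiSignalModel_VBF --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveInactivePOI 1 -P r_VBF --floatOtherPOIs 1\n\n# Plot the two 1D likelihood scans\nplot1DScan.py higgsCombine.scan.part6_multiSignalModel_ggH.MultiDimFit.mH125.root -o part6_scan_r_ggH --POI r_ggH\nplot1DScan.py higgsCombine.scan.part6_multiSignalModel_VBF.MultiDimFit.mH125.root -o part6_scan_r_VBF --POI r_VBF\n</code></pre></p>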
You will need to change some of the input options, in particular the <code>--POI</code> option. You can list the full set of options by running: <pre><code>plot1DScan.py --help\n</code></pre></li> </ul>"},{"location":"tutorial2023/parametric_exercise/#two-dimensional-likelihood-scan","title":"Two-dimensional likelihood scan","text":"<p>We can also run the fit at fixed points in (<code>r_ggH</code>,<code>r_VBF</code>) space. By using a sufficient number of points, we are able to map out the 2D likelihood surface. Let's change the ranges of the parameters of interest to match what we have found in the profiled scans: <pre><code>combine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .scan2D.part6_multiSignalModel --algo grid --points 800 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --setParameterRanges r_ggH=0.5,2.5:r_VBF=-1,2\n</code></pre> To plot the output you can use the <code>plot_2D_scan.py</code> script: <pre><code>python3 plot_2D_scan.py\n</code></pre> This script interpolates the 2NLL value between the points run in the scan so that the plot shows a smooth likelihood surface. You may find that in some cases the number of scanned points and interpolation parameters need to be tuned to get a sensible looking surface. This basically depends on how complicated the likelihood surface is.</p> <p></p> <p>Tasks and questions:</p> <ul> <li>The plot shows that the data is in agreement with the SM within the \\(2\\sigma\\) CL. Here, the \\(1\\sigma\\) and \\(2\\sigma\\) confidence interval contours correspond to 2NLL values of 2.3 and 5.99, respectively. Do you understand why this is? Think about Wilks' theorem.</li> <li>Does the plot show any correlation between the ggH and VBF signal strengths? Are the two positively or negatively correlated? Does this make sense for this pair of parameters given the analysis setup? Try repeating the 2D likelihood scan using the \"Tag0\" only datacard. How does the correlation behaviour change?</li> <li>How can we read off the \"profiled\" 1D likelihood scan constraints from this plot?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#correlations-between-parameters","title":"Correlations between parameters","text":"<p>For template-based analyses we can use the <code>FitDiagnostics</code> method in combine to extract the covariance matrix for the fit parameters. Unfortunately, this method is not compatible with discrete nuisance parameters (<code>RooMultiPdf</code>). Instead, we can use the <code>robustHesse</code> method to find the Hessian matrix by finite difference methods. The matrix is then inverted to get the covariance. Subsequently, we can use the covariance to extract the correlations between fit parameters. <pre><code>combine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .robustHesse.part6_multiSignalModel --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --setParameterRanges r_ggH=0.5,2.5:r_VBF=-1,2 --robustHesse 1 --robustHesseSave 1 --saveFitResult\n</code></pre> The output file <code>robustHesse.robustHesse.part6_multiSignalModel.root</code> stores the correlation matrix (<code>h_correlation</code>). This contains the correlations between all parameters including the nuisances. 
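</p> <p>Rather than guessing the bin numbers by hand, you can also look them up by label with a few lines of PyROOT. This is just a convenience sketch, assuming the output file and histogram names produced by the command above; it is not part of the tutorial scripts: <pre><code>import ROOT\n\n# Open the robustHesse output and retrieve the correlation matrix (a TH2)\nf = ROOT.TFile.Open(\"robustHesse.robustHesse.part6_multiSignalModel.root\")\nh = f.Get(\"h_correlation\")\n\n# Map each parameter name to its bin index on the x-axis\nindex = {h.GetXaxis().GetBinLabel(i): i for i in range(1, h.GetXaxis().GetNbins() + 1)}\n\n# Print the correlation between the two signal strengths\nprint(h.GetBinContent(index[\"r_VBF\"], index[\"r_ggH\"]))\n</code></pre> The interactive ROOT session below shows the same lookup done by hand.</p> <p>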
So if we are interested in the correlation between <code>r_ggH</code> and <code>r_VBF</code>, we first need to find which bin corresponds to these parameters: <pre><code>root robustHesse.robustHesse.part6_multiSignalModel.root\n\nroot [1] h_correlation-&gt;GetXaxis()-&gt;GetBinLabel(19)\n(const char *) \"r_VBF\"\nroot [2] h_correlation-&gt;GetYaxis()-&gt;GetBinLabel(20)\n(const char *) \"r_ggH\"\nroot [3] h_correlation-&gt;GetBinContent(19,20)\n(double) -0.19822058\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>The two parameters of interest have a correlation coefficient of -0.198. This means the two parameters are somewhat anti-correlated. Does this match what we see in the 2D likelihood scan?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#impacts_1","title":"Impacts","text":"<p>We extract the impacts for each parameter of interest using the following commands: <pre><code>combineTool.py -M Impacts -d datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .impacts_part6_multiSignal --robustFit 1 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --doInitialFit\n\ncombineTool.py -M Impacts -d datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .impacts_part6_multiSignal --robustFit 1 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --doFits\n\ncombineTool.py -M Impacts -d datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .impacts_part6_multiSignal --robustFit 1 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF -o impacts_part6.json\n\nplotImpacts.py -i impacts_part6.json -o impacts_part6_r_ggH --POI r_ggH\nplotImpacts.py -i impacts_part6.json -o impacts_part6_r_VBF --POI r_VBF\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Look at the output PDF files. How does the ranking of the nuisance parameters change for the different signal strengths?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#advanced-exercises-to-be-added","title":"Advanced exercises (to be added)","text":"<p>The combine experts will include additional exercises here in due course. These will include:</p> <ul> <li>Convolution of model pdfs: <code>RooAddPdf</code></li> <li>Application of the spurious signal method</li> <li>Advanced physics models including parametrised signal strengths e.g. SMEFT</li> <li>Mass fits</li> <li>Two-dimensional parametric models</li> </ul>"},{"location":"tutorial2023_unfolding/unfolding_exercise/","title":"Likelihood Based Unfolding Exercise in Combine","text":""},{"location":"tutorial2023_unfolding/unfolding_exercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>, please follow the instructions from the home page. 
Make sure to use the latest recommended release.</p> <p>After setting up <code>Combine</code>, you can access the working directory for this tutorial which contains all of the inputs and scripts needed to run the unfolding fitting exercise:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/tutorial_unfolding_2023/\n</code></pre>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#exercise-outline","title":"Exercise outline","text":"<p>The hands-on exercise is split into seven parts: </p> <p>1) \"Simple\" Unfolding Experiment</p> <p>2) Producing the Migration matrix from the datacards</p> <p>3) Advanced Unfolding with more detector-level information and control regions</p> <p>4) Extracting the expected intervals</p> <p>5) Producing Impacts for multiple POIs</p> <p>6) Unfold to the generator-level quantities </p> <p>7) Extracting POI correlations from the FitDiagnostics output</p> <p>Throughout the tutorial there are a number of questions and exercises for you to complete. These are shown in the boxes like this one. </p> <p>Note that some additional information on unfolding in <code>Combine</code> is available here, which also includes some information on regularization, which is not discussed in this tutorial. </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#analysis-overview","title":"Analysis overview","text":"<p>In this tutorial we will look at the cross section measurement of one of the SM Higgs processes, VH, in the \\(H\\to b\\bar{b}\\) (VHbb) final state. </p> <p>The measurement is performed within the Simplified Template Cross Section (STXS) framework, which provides the prediction in the bins of generator-level quantities \\(p_{T}(V)\\) and number of additional jets. The maximum likelihood based unfolding is performed to measure the cross section in the generator-level bins defined by the STXS scheme. At the detector-level we define appropriate categories to match the STXS bins as closely as possible so that there is a good correspondence between the detector-level observable and the underlying generator-level quantity we are interested in.  </p> <p> </p> <p>Note that for this STXS measurement, as well as measuring the cross-section as a function of the \\(p_{T}\\) of the vector boson, the measurement includes some information on the number of additional jets and is performed over multiple different production modes, for different production processes. However, it is common to focus on a single distribution (e.g. \\(p_{T}\\)) for a single process (e.g. \\(t\\bar{t}\\)).</p> <p>In this tutorial we will focus on the ZH production, with the Z boson decaying to charged leptons, and the Higgs boson reconstructed with the resolved \\(b\\bar{b}\\) pair. We will also use only a part of the Run 2 categories, so we will not achieve the same sensitivity as the full analysis. Note that ggZH and ZH production modes are combined in the fit, since it is not possible to resolve them at this stage of the analysis. The STXS categories are defined independently of the Higgs decay channel, to streamline the combinations of the cross section measurement. </p> <p>In the first part of the tutorial, we will set up a relatively simple unfolding, where there is a single detector-level bin for every generator-level bin we are trying to measure. We will then perform a blind analysis using this setup to see the expected sensitivity. </p> <p>In this simple version of the analysis, we use a series of datacards, one for each detector-level bin, implemented as a counting experiment. 
We then combine the datacards for the full measurement. It is also possible to implement the same analysis as a single datacard, passing a histogram with each of the detector-level bins. Either method can be used, depending on which is more practical for the analysis being considered. </p> <p>In the second part of the tutorial we will perform the same measurement with a more advanced setup, making use of differential distributions per generator-level bin we are trying to measure, as well as control regions. By providing this additional information to the fit, we are able to achieve a better and more robust unfolding result. After checking the expected sensitivity, we will take a look at the impacts and pulls of the nuisance parameters. Then we will unblind and look at the results of the measurement, produce generator-level plots and provide the correlation matrix for our measured observables.</p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#simplified-unfolding","title":"Simplified unfolding","text":"<p>When determining the detector-level binning for any differential analysis the main goal is to choose a binning that distinguishes contributions from the various generator-level bins well. In the simplest case it can be done with the cut-based approach, i.e. applying the same binning for the detector-level observables as is being applied to the generator-level quantities being measured. In this case, that means binning in \\(p_{T}(Z)\\) and \\(n_{\\text{add. jets}}\\).  Due to the good lepton \\(p_{T}\\) resolution we can follow the original STXS scheme quite closely with the detector-level selection, with one exception: it is not possible to access the very low transverse momentum bin \\(p_{T}(Z)&lt;75\\) GeV.  </p> <p>In the <code>counting/regions</code> directory you can find the datacards with five detector-level categories, each targeting a corresponding generator-level bin. Below you can find an example of the datacard for the detector-level bin with \\(p_{T}(Z)&gt;400\\) GeV. 
</p> <pre><code>imax    1 number of bins\njmax    9 number of processes minus 1\nkmax    * number of nuisance parameters\n--------------------------------------------------------------------------------\n--------------------------------------------------------------------------------\nbin          vhbb_Zmm_gt400_13TeV\nobservation  12.0\n--------------------------------------------------------------------------------\nbin                                   vhbb_Zmm_gt400_13TeV   vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV   vhbb_Zmm_gt400_13TeV     vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV\nprocess                               ggZH_lep_PTV_GT400_hbb ZH_lep_PTV_GT400_hbb ZH_lep_PTV_250_400_hbb ggZH_lep_PTV_250_400_hbb Zj1b            Zj0b_c          Zj0b_udsg       VVLF            Zj2b            VVHF\nprocess                               -3                     -2                   -1                     0                        1               2               3               4               5               6\nrate                                  0.0907733              0.668303             0.026293               0.00434588               3.78735         2.58885         4.09457         0.413716        7.02731         0.642605\n--------------------------------------------------------------------------------\n</code></pre> <p>You can see the contributions from various background processes, namely Z+jets, \\(t\\bar{t}\\) and the single top, as well as the signal processes (ggZH and ZH) corresponding to the STXS scheme discussed above. Note that for each generator-level bin being measured, we assign a different process in combine. This is so that the signal strengths for each of their contributions can float independently in the measurement. Also note, that due to migrations, each detector-level bin will receive contributions from multiple generator-level bins.</p> <p>One of the most important stages in the analysis design, is to make sure that the detector-level categories are well-chosen to target the corresponding generator-level processes.</p> <p>To explicitly check the correspondance between detector- and generator-level, one can plot the contributions of each of the generator-level bins in all of the detector-level bins. You can use the script provided in the tutorial git-lab page. This script uses <code>CombineHarvester</code> to loop over detector-level bins, and get the rate at which each of the signal processes (generator-level bins) contributes to that detector-level bin; which is then used to plot the migration matrix. </p> <p><pre><code>python scripts/get_migration_matrix.py counting/combined_ratesOnly.txt\n</code></pre> </p> <p>The migration matrix shows the generator-level bins on the x-axis and the corresponding detector-level bins on the y-axis. The entries are normalized such that the sum of all contributions for a given generator-level bin sum up to 1. With this convention, the numbers in each bin represent the probability that an event from a given generator-level bin is reconstructed in a given detector-level bin if it is reconstructed at all within the considered bins.</p> <p>Now that we checked the response matrix we can attempt the maximum likelihood unfolding. 
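</p> <p>To make explicit what the unfolding fit assumes, the sketch below shows, with made-up numbers and numpy, how expected detector-level signal yields are built from generator-level yields, the migration probabilities and one signal-strength parameter per generator-level bin. This is only an illustration of the idea, not code used by the tutorial: <pre><code>import numpy as np\n\n# Toy migration matrix M[det_bin, gen_bin]: probability for an event from a\n# generator-level bin to be reconstructed in a given detector-level bin\nM = np.array([[0.8, 0.1],\n              [0.2, 0.9]])\n\nn_gen = np.array([50.0, 30.0])  # expected generator-level signal yields\nr = np.array([1.0, 1.0])        # one POI per generator-level bin\n\n# Expected detector-level signal yields entering the likelihood\nn_det = M @ (r * n_gen)\nprint(n_det)\n</code></pre> Scaling each generator-level bin by its own POI is what the <code>multiSignalModel</code> mapping described next sets up inside <code>Combine</code>.</p> <p>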
We can use the <code>multiSignalModel</code> physics model available in <code>Combine</code>, which assigns a parameter of interest <code>poi</code> to a process <code>p</code> within a bin <code>b</code> using the syntax <code>--PO 'map=b/p:poi[init, min, max]'</code> to linearly scale the normalisation of this process under the parameter of interest (POI) variations. To create the workspace we can run the following command:  <pre><code>text2workspace.py -m 125  counting/combined_ratesOnly.txt -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/.*ZH_lep_PTV_75_150_hbb:r_zh_75_150[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_150_250_0J_hbb:r_zh_150_250noj[1,-5,5]'  --PO 'map=.*/.*ZH_lep_PTV_150_250_GE1J_hbb:r_zh_150_250wj[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_250_400_hbb:r_zh_250_400[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_GT400_hbb:r_zh_gt400[1,-5,5]' -o ws_counting.root\n</code></pre> In the example given above a signal POI is assigned to each generator-level bin independently of the detector-level bin. This allows the measurement to take into account migrations. </p> <p>To extract the measurement let's run the initial fit first using the <code>MultiDimFit</code> method implemented in <code>Combine</code> to extract the best-fit values and uncertainties on all floating parameters:  </p> <pre><code>combineTool.py -M MultiDimFit --datacard ws_counting.root --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 -t -1 \n</code></pre> <p>With the option <code>-t -1</code> we set <code>Combine</code> to fit the Asimov dataset instead of actual data. The <code>--setParameters &lt;param&gt;=&lt;value&gt;</code> option sets the initial value of the parameter named <code>&lt;param&gt;</code>. <code>--redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400</code> sets the POIs to the given comma-separated list, instead of the default one, <code>r</code>.</p> <p>While the uncertainties on the parameters of interest (POIs) can be extracted in multiple ways, the most robust way is to run the likelihood scans for a POI corresponding to each generator-level bin, since this allows you to spot discontinuities in the likelihood shape in case of problems with the fit or the model. </p> <p><pre><code>combineTool.py -M MultiDimFit --datacard ws_counting.root -t -1 --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --algo=grid --points=100 -P r_zh_75_150 --floatOtherPOIs=1 -n scan_r_zh_75_150\n</code></pre> Now we can plot the likelihood scan and extract the expected intervals.</p> <p><pre><code>python scripts/plot1DScan.py higgsCombinescan_r_zh_75_150.MultiDimFit.mH120.root -o r_zh_75_150 --POI r_zh_75_150\n</code></pre> * Repeat for all POIs</p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#shape-analysis-with-control-regions","title":"Shape analysis with control regions","text":"<p>One of the advantages of the maximum likelihood unfolding is the flexibility to choose the analysis observable and include more information on the event kinematics, consequently improving the analysis sensitivity. This analysis benefits from the shape information of the DNN output trained to differentiate the VH(bb) signal from the SM backgrounds. 
</p> <p>The datacards for this part of the exercise are located in <code>full_model_datacards/</code>, where you can find a separate datacard for each region within the <code>full_model_datacards/regions</code> directory and also a combined datacard <code>full_model_datacards/comb_full_model.txt</code>. In this case, each of the detector-level bins being used in the unfolding above is now split into multiple bins according to the DNN output score. This provides extra discrimination power to separate the signal from background and improve the measurement.</p> <p>As you will find, the datacards also contain several background processes. To control them properly we will also add regions enriched in the respective backgrounds. Then we can define a common set of rate parameters for signal and control regions to scale the rates or other parameters affecting their shape.  </p> <p>For the shape datacards one has to specify the mapping of histograms and channels/processes as described below:</p> <p><pre><code>shapes [process] [channel] [file] [nominal] [systematics_templates]\n</code></pre> Then the <code>shape</code> nuisance parameters can be defined in the systematics block in the datacard. More details can be found in <code>Combine</code> documentation pages.</p> <p>In many CMS analyses there are hundreds of nuisance parameters corresponding to various sources of systematics. </p> <p>When we unfold to the generator-level quantities we should remove the nuisances affecting the rate of the generator-level bins, i.e. when measuring a given cross-section such as \\(\\sigma_{\\textrm{gen1}}\\), the nuisance parameters should not change the value of that parameter itself; they should only change the relationship between that parameter and the observations.  This means that, for example, effects of renormalization and factorization scales on the generator-level cross section within each bin need to be removed. Only their effects on the detector-level distribution through changes of shape within each bin as well as acceptances and efficiencies should be considered. </p> <p>For this analysis, that means removing the <code>lnN</code> nuisance parameters: <code>THU_ZH_mig*</code> and  <code>THU_ZH_inc</code>; keeping only the acceptance <code>shape</code> uncertainties: <code>THU_ZH_acc</code> and <code>THU_ggZH_acc</code>, which do not scale the inclusive cross sections by construction. In this analysis the normalisation effects in the <code>THU_ZH_acc</code> and <code>THU_ggZH_acc</code> templates were already removed from the shape histograms. Removing the normalization effects can be achieved by removing them from the datacard. Alternatively, they can be removed by freezing the respective nuisance parameters with the option <code>--freezeParameters par_name1,par_name2</code>. Or you can create a group following the syntax given below at the end of the combined datacard, and freeze the parameters with the <code>--freezeNuisanceGroups group_name</code> option.</p> <pre><code>[group_name] group = uncertainty_1 uncertainty_2 ... 
uncertainty_N\n</code></pre> <p>Now we can create the workspace using the same <code>multiSignalModel</code> as before:</p> <pre><code>text2workspace.py -m 125  full_model_datacards/comb_full_model.txt -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/.*ZH_lep_PTV_75_150_hbb:r_zh_75_150[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_150_250_0J_hbb:r_zh_150_250noj[1,-5,5]'  --PO 'map=.*/.*ZH_lep_PTV_150_250_GE1J_hbb:r_zh_150_250wj[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_250_400_hbb:r_zh_250_400[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_GT400_hbb:r_zh_gt400[1,-5,5]' --for-fits --no-wrappers --X-pack-asympows --optimize-simpdf-constraints=cms --use-histsum -o ws_full.root\n</code></pre> <p>As you might have noticed we are using a few extra options <code>--for-fits --no-wrappers --X-pack-asympows --optimize-simpdf-constraints=cms --use-histsum</code> to create the workspace. They are needed to construct a more optimised pdf using the <code>CMSHistSum</code> class implemented in Combine to significantly lower the memory consumption.</p> <ul> <li>Following the instructions given earlier, create the workspace and run the initial fit with <code>-t -1</code>. </li> </ul> <p>Since this time the datacards include shape uncertainties as well as additional categories to improve the background description the fit might take much longer, but we can submit jobs to a batch system by using the combine tool and have results ready to look at in a few minutes.  </p> <pre><code>combineTool.py -M MultiDimFit -d ws_full.root --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400  -t -1 --X-rtd FAST_VERTICAL_MORPH --algo=grid --points=50 --floatOtherPOIs=1 -n .scans_blinded --job-mode condor --task-name scans_zh  --split-points 1 --generate P:n::r_zh_gt400,r_zh_gt400:r_zh_250_400,r_zh_250_400:r_zh_150_250wj,r_zh_150_250wj:r_zh_150_250noj,r_zh_150_250noj:r_zh_75_150,r_zh_75_150\n</code></pre> <p>The option <code>--X-rtd FAST_VERTICAL_MORPH</code> is added here and for all <code>combineTool.py -M MultiDimFit ...</code> to speed up the minimisation. </p> <p>The job submission is handled by <code>CombineHarvester</code>; the combination of options <code>--job-mode condor --task-name scans_zh  --split-points 1 --generate P:n::r_zh_gt400,r_zh_gt400:r_zh_250_400,r_zh_250_400:r_zh_150_250wj,r_zh_150_250wj:r_zh_150_250noj,r_zh_150_250noj:r_zh_75_150,r_zh_75_150</code> will submit the jobs to HTCondor for each POI.  The <code>--generate</code> option is being used to automatically generate jobs attaching the options <code>-P &lt;POI&gt; -n &lt;name&gt;</code> with each of the pairs of values <code>&lt;POI&gt;,&lt;name&gt;</code> specified between the colons. You can add the <code>--dry-run</code> option to create the submission files first and check them, and then submit the jobs with <code>condor_submit condor_scans_zh.sub</code>. </p> <p>If you are running the tutorial from a cluster where HTCondor is not available you can also submit the jobs to a Slurm system; just change the <code>--job-mode condor</code> to <code>--job-mode slurm</code>. 
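</p> <p>If no batch system is available at all, the same scans can also just be run sequentially, although this will take noticeably longer. A minimal sketch, assuming the workspace and parameter names used above, which simply shells out to <code>combineTool.py</code> once per POI: <pre><code>import subprocess\n\npois = [\"r_zh_75_150\", \"r_zh_150_250noj\", \"r_zh_150_250wj\", \"r_zh_250_400\", \"r_zh_gt400\"]\nset_pars = \",\".join(p + \"=1\" for p in pois)\n\nbase = (\"combineTool.py -M MultiDimFit -d ws_full.root -t -1 --X-rtd FAST_VERTICAL_MORPH \"\n        \"--algo=grid --points=50 --floatOtherPOIs=1 \"\n        \"--setParameters \" + set_pars + \" --redefineSignalPOIs \" + \",\".join(pois))\n\nfor poi in pois:\n    # One profiled likelihood scan per POI, using the same options as the batch submission above\n    subprocess.run(base + \" -P \" + poi + \" -n .scan_blinded_\" + poi, shell=True, check=True)\n</code></pre> 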
</p> <p>After all jobs are completed we can combine the files for each POI: </p> <pre><code>for p in r_zh_75_150 r_zh_150_250noj r_zh_150_250wj r_zh_250_400 r_zh_gt400\ndo\n    hadd -k -f scan_${p}_blinded.root higgsCombine.scans_blinded.${p}.POINTS.*.MultiDimFit.mH120.root\ndone\n</code></pre> <p>And finally plot the likelihood scans </p> <p><pre><code>python scripts/plot1DScan.py scan_r_zh_75_150_blinded.root  -o scan_r_zh_75_150_blinded --POI r_zh_75_150 --json summary_zh_stxs_blinded.json\n</code></pre> </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#impacts","title":"Impacts","text":"<p>One of the important tests before we move to the unblinding stage is to check the impacts of nuisance parameters on each POI. For this we can run the <code>combineTool.py</code> with <code>-M Impacts</code> method. We start with the initial fit, which should take about 20 minutes (good time to have a coffee break!):</p> <pre><code>combineTool.py -M Impacts -d ws_full.root -m 125 --robustFit 1 --doInitialFit --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --X-rtd FAST_VERTICAL_MORPH\n</code></pre> <p>Note that it is important to add the option <code>--redefineSignalPOIs [list of parameters]</code>, to produce the impacts for all POIs we defined when the workspace was created with the <code>multiSignalModel</code>.</p> <p>After the initial fit is completed we can perform the likelihood scans for each nuisance parameter.  We will submit the jobs to the HTCondor to speed up the process.</p> <pre><code>combineTool.py -M Impacts -d ws_full.root -m 125 --robustFit 1 --doFits --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --job-mode condor --task-name impacts_zh --X-rtd FAST_VERTICAL_MORPH \n</code></pre> <p>Now we can combine the results into the <code>.json</code> format and use it to produce the impact plots.</p> <pre><code>combineTool.py -M Impacts -d ws_full.root -m 125 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --output impacts.json \n\nplotImpacts.py -i impacts.json -o impacts_r_zh_75_150 --POI r_zh_75_150\n</code></pre> <p> * Do you observe differences in impacts plots for different POIs, do these differences make sense to you? </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#unfolded-measurements","title":"Unfolded measurements","text":"<p>Now that we studied the nuisance parameter impacts for each POI, we can finally perform the measurement. Note that for the purposes of the tutorial, we are skipping further checks and validation that you should do on your analysis. Namely the goodness of fit test and the post-fit plots of folded observables. Both of these checks were detailed in the previous exercises, which you can find under the following link. </p> <p>At this stage we'll run the <code>MultiDimFit</code> again scanning each POI to calculate the intervals, but this time we'll remove the <code>-t -1</code> option to extract the unblinded results. </p> <p>Also since we want to unfold the measurements to the generator-level observables, i.e. extract the cross sections, we remove the theoretical uncertainties affecting the rates of signal processes,  we can do this be freezing them <code>--freezeNuisanceGroups &lt;group_name&gt;</code>, using the <code>group_name</code> you assigned earlier in the tutorial. </p> <p>Now plot the scans and collect the measurements in the json file <code>summary_zh_stxs.json</code>. 
</p> <pre><code>python scripts/plot1DScan.py scan_r_zh_75_150.root -o r_zh_75_150 --POI r_zh_75_150 --json summary_zh_stxs.json  \n</code></pre> <p></p> <p>Repeat the same command for other POIs to fill the <code>summary_zh_stxs.json</code>, which can then be used to make the cross section plot by multiplying the standard model cross sections by the signal strengths' best-fit values as shown below. </p> <p><pre><code>python scripts/make_XSplot.py summary_zh_stxs.json\n</code></pre> </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#poi-correlations","title":"POI correlations","text":"<p>In addition to the cross-section measurements it is very important to publish covariance or correlation information of the measured cross sections.  This allows the measurement to be properly intepreted or reused in combined fits.  </p> <p>The correlation matrix or covariance matrix can be extracted from the results after the fit. Here we can use the <code>FitDiagnostics</code> or <code>MultiDimFit</code> method.</p> <pre><code>combineTool.py -M FitDiagnostics --datacard ws_full.root --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400  --robustHesse 1 -n .full_model --X-rtd FAST_VERTICAL_MORPH\n</code></pre> <p>Then the <code>RooFitResult</code>, containing correlations matrix, can be found in the <code>fitDiagnostics.full_model.root</code> file under the name <code>fit_s</code>. The script <code>plotCorrelations_pois.py</code> from the exercise git-lab repository can help to plot the correlation matrix.</p> <p><pre><code>python scripts/plotCorrelations_pois.py -i fitDiagnostics.full_model.root:fit_s -p r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400\n</code></pre> </p>"},{"location":"tutorial_stat_routines/stat_routines/","title":"Understanding Statistical Routines in Combine","text":""},{"location":"tutorial_stat_routines/stat_routines/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>, please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>After setting up <code>Combine</code>, you can access the working directory for this tutorial which contains all of the inputs and scripts needed to run the unfolding fitting exercise:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/statistical_routines_tutorial\n</code></pre>"},{"location":"tutorial_stat_routines/stat_routines/#the-model","title":"The model","text":"<p>This tutorial will go through various statistical routines in combine in detail using a very simple counting experiment model. 
There is a single channel with contributions from Higgs production and WW production, and three nuisance parameters.</p> <p>The model details can be seen in the <code>datacard.txt</code> file in the tutorial directory.</p> <p>The parameter of interest (POI) for this model is a single signal strength parameter (called <code>r</code> or \\(\\mu\\)) which scales the total yield of the signal (Higgs) process.</p> <p>We will use this model to run statistical tests such as estimating the Higgs cross section, attempting to discover the Higgs, and setting limits on the cross section.</p>"},{"location":"tutorial_stat_routines/stat_routines/#estimating-a-best-fit-value","title":"Estimating a best fit value","text":"<p>The most commonly used statistical routines in combine are frequentist maximum-likelihood based routines. For these routines, \"best-fit\" values of a parameter, \\(\\mu\\), are denoted \\(\\hat{\\mu}\\) and they are determined by finding the value of that parameter which maximizes the likelihood, \\(\\mathrm{L}(\\mu)\\).</p> <p>In combine you can find the best-fit value of your parameter of interest with the <code>MultiDimFit</code> routine:</p> <pre><code>combine -M MultiDimFit datacard.txt\n</code></pre> <p>you should get some output, which prints the best fit signal strength</p> <pre><code>Doing initial fit: \n\n --- MultiDimFit ---\nbest fit parameter values: \n   r :    +0.266\n</code></pre>"},{"location":"tutorial_stat_routines/stat_routines/#uncertainty-intervals","title":"Uncertainty Intervals","text":"<p>to get more information, you can add the <code>--algo singles</code> flag which will also calculate the uncertainty on the parameter. In order to get the full uncertainty, let's also change the limits on <code>r</code>, which are \\([0,20]\\) by default:</p> <pre><code>combine -M MultiDimFit datacard.txt --algo singles --rMin -10 --rMax 10\n</code></pre> <p>now the output should contain the uncertainties as well as the best fit value</p> <pre><code>Doing initial fit: \n\n --- MultiDimFit ---\nbest fit parameter values and profile-likelihood uncertainties: \n   r :    +0.260   -1.004/+1.265 (68%)\nDone in 0.00 min (cpu), 0.00 min (real)\n</code></pre> <p>These uncertainty intervals have been computed internally as part of the fit. What do they mean, and how are they determined?</p> <p>These are frequentist confidence intervals, which means that if our statistical model is good enough and we were to perform repeated experiments of this type, we would expect that 68% of the confidence intervals we produce would contain the true value of our signal strength parameter, <code>r</code>.</p> <p>These can be constructed from first principles using the Neyman construction, but in practice they are usually constructed assuming Wilks' theorem. Wilks' theorem tells us what the expected distribution of the likelihood ratio \\(\\Lambda = \\frac{\\mathrm{L}(r)}{\\mathrm{L}(\\hat{r})}\\) is, and from this we can construct confidence intervals. In practice, we use the log-likelihood ratio \\(t_r \\equiv -2 \\ln( \\Lambda )\\), rather than the likelihood ratio itself. 
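</p> <p>For reference, under Wilks' theorem the threshold on \\(t_r\\) for a given confidence level is a quantile of a \\(\\chi^2\\) distribution with one degree of freedom (for a single parameter of interest). A quick way to compute the commonly used values, assuming <code>scipy</code> is available: <pre><code>from scipy.stats import chi2\n\n# Thresholds on t_r = -2 ln(Lambda) for one parameter of interest\nprint(chi2.ppf(0.68, df=1))   # ~0.99 (exactly 1 at the 1 sigma level, 68.27%)\nprint(chi2.ppf(0.95, df=1))   # ~3.84\n</code></pre> These are the numbers used in the interval construction described next.</p> <p>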
The confidence interval is constructed by finding all values of <code>r</code> for which \\(-2 \\ln(\\Lambda)\\) is below a threshold value which depends on the confidence level we are using.</p> <p>We can also calculate the best fit value and confidence interval using the <code>FitDiagnostics</code> routine:</p> <pre><code>combine -M FitDiagnostics datacard.txt --rMin -10 --rMax 10\n</code></pre> <p>which should give you a compatible result.  The <code>FitDiagnostics</code> routine also produces an output file called <code>fitDiagnosticsTest.root</code> which contains the full result of fits of both the background-only and signal + background models.</p> <p>You can see the results of the signal + background model fit by opening the file and checking the fit result:</p> <pre><code>root -l fitDiagnosticsTest.root\nroot [1]&gt; fit_s-&gt;Print()\n  RooFitResult: minimized FCN value: -2.92406e-05, estimated distance to minimum: 7.08971e-07\n                covariance matrix quality: Full, accurate covariance matrix\n                Status : MINIMIZE=0 HESSE=0 \n\n    Floating Parameter    FinalValue +/-  Error   \n  --------------------  --------------------------\n                  lumi    3.1405e-04 +/-  1.00e+00\n                     r    2.6039e-01 +/-  1.12e+00\n                 xs_WW    8.6964e-04 +/-  1.00e+00\n                xs_ggH    7.3756e-04 +/-  1.00e+00\n</code></pre> <p>Notice that in this case, the uncertainty interval for <code>r</code> is reported as a symmetric interval.  What's the difference between this interval and the asymmetric one?</p> <p>In both cases, the interval is found by determining the values of <code>r</code> for which \\(-2 \\ln(\\Lambda)\\) is below the threshold value, which in this case for the 68% interval is 1. Both algorithms take the best fit value \\(\\hat{r}\\) for which \\(-2 \\ln(\\Lambda)\\) will always be 0, and then try to find the interval by estimating the crossing points where \\(-2 \\ln(\\Lambda) = 1\\).</p> <p>However, the different intervals estimate this value in different ways. The asymmetric intervals are \"minos errors\", which means that the crossing points were determined by explicitly scanning the likelihood as a function of <code>r</code> to look for the crossing, while minimizing other parameters at each step (profiling). The symmetric intervals are \"hesse errors\", which means that the crossing points were determined by taking the matrix of second-order partial derivatives (Hessian) at the minimum, and inverting it to estimate the crossing assuming all other derivatives vanish.</p> <p>The information printed under the <code>Status</code> section of the <code>RooFitResult</code> is showing that the minimization succeeded and that the hessian was positive definite, i.e. that all the second derivatives are positive, as they should be at the minimum of a function.  
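</p> <p>These status codes can also be read back programmatically, which is convenient when running many fits. A small PyROOT sketch, assuming the <code>fitDiagnosticsTest.root</code> file produced above: <pre><code>import ROOT\n\nf = ROOT.TFile.Open(\"fitDiagnosticsTest.root\")\nfit = f.Get(\"fit_s\")  # RooFitResult of the signal + background fit\n\n# status() == 0 means the minimization converged;\n# covQual() == 3 means a full, accurate covariance matrix\nprint(\"status:\", fit.status())\nprint(\"covariance quality:\", fit.covQual())\n</code></pre> </p> <p>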
If the HESSE status is not 0 or the covariance matrix quality indicates it had to be forced positive definite, this indicates that there are problems with the fit.</p>"},{"location":"tutorial_stat_routines/stat_routines/#running-an-explicit-likelihood-scan","title":"Running an explicit likelihood scan","text":"<p>You can see that the minos errors should match the crossing points of the likelihood-ratio by explicitly scanning the likelihood function with <code>MultiDimFit</code>, using the <code>--algo grid</code> option and specifying the range and how many points to scan:</p> <pre><code>combine -M MultiDimFit datacard.txt --algo grid --points 100 --rMin -2 --rMax 6\n</code></pre> <p>The results can then be plotted using the <code>plot1DScan.py</code> script, using the file <code>higgsCombineTest.MultiDimFit.mH120.root</code> which was output by the scan:</p> <pre><code>python3 ../../../scripts/plot1DScan.py --POI r higgsCombineTest.MultiDimFit.mH120.root\n</code></pre> <p>it should produce output pdf and png files which look like the one shown below. You can see the best fit values, as well as the crossing points for the \\(1\\sigma\\) and \\(2\\sigma\\) intervals.</p> <p></p>"},{"location":"tutorial_stat_routines/stat_routines/#uncertainty-intervals-from-first-principles","title":"Uncertainty intervals from first principles","text":"<p>All the methods mentioned above rely on Wilks' theorem, which only holds under certain conditions. In some cases, particularly those with low statistics, or for example when the yield depends quadratically on the parameter of interest, Wilks' theorem will not be a good approximation.</p> <p>One thing you can do is check the uncertainty intervals explicitly, following the Neyman Construction. In order to do this you would scan your signal strength parameter <code>r</code>, at each value generating a set of pseudodata toys to determine the expected distribution of the test statistic \\(t_\\mu\\) under the hypothesis that the true signal strength is \\(\\mu\\). Then, you can check the distribution of \\(t_\\mu\\) and find the critical value of the test statistic, \\(t'\\), such that:</p> \\[ \\int_{t'}^{\\infty} p(t_{\\mu}) \\mathrm{d}t_{\\mu} = (1 - \\mathrm{CL}) \\] <p>where \\(\\mathrm{CL}\\) is the confidence level you are trying to reach (e.g. 68%).</p> <p>For a given value of <code>r</code>, we can check the test statistic distribution explicitly and determine the crossing point of the interval. Let's do that for the upper end of our confidence interval, using the <code>MultiDimFit</code> method.</p> <p>We do it by generating a number of toy datasets with <code>r = 1.525</code>, which is our upper bound value.  Then we calculate the test statistic: </p> \\[ t_{\\mu=1.525} = -2 \\ln (\\frac{\\mathrm{L}(r=1.525)}{\\mathrm{L}(r=\\hat{r})}) \\] <p>on each of these toy datasets, and fill a histogram with the results to determine the expected distribution under the null hypothesis (in this case that <code>r</code> = 1.525). We could do this all in one command using the <code>-t &lt;n_toys&gt;</code> functionality of combine, but let's split it into two steps to make the logic more clear.</p> <p>First, let's generate a set of 1000 toys from our model with <code>r</code> = 1.525. 
Because we want to generate frequentist toys (since we are calculating a frequentist confidence interval), we also need the <code>--toysFrequentist</code> option.</p> <pre><code>combine -M GenerateOnly datacard.txt -t 1000 --toysFrequentist --setParameters r=1.525 --saveToys \n</code></pre> <p>Now we can tell <code>MultiDimFit</code> to run over these toys by using the output from the previous step, with the command line argument <code>--toysFile &lt;output_file_from_toy_generation&gt;</code>. To calculate the test statistic with MultiDimFit we will use <code>--algo fixed --fixedPointPOIs r=1.525</code> to tell <code>MultiDimFit</code> to calculate the log-likelihood ratio using that point in the numerator. The full command is then:</p> <pre><code>combine -M MultiDimFit datacard.txt --rMin -10 --rMax 10 --algo fixed --fixedPointPOIs r=1.525 -t 500 --toysFrequentist   --toysFile higgsCombineTest.GenerateOnly.mH120.123456.root\n</code></pre> <p>We can inspect the results of all of our toy fits by opening the <code>higgsCombineTest.MultiDimFit.mH120.123456.root</code> file our command created, and looking at the <code>limit</code> tree contained in it. The log-likelihood ratio \\(-\\ln(\\Lambda)\\) is stored in the <code>deltaNLL</code> branch of the tree. For the <code>fixed</code> algorithm, there are two entries stored in the tree for every dataset: one for the best fit point, and one for the fixed point passed as the argument to <code>--fixedPointPOIs</code>. In order to select only the values we are interested in we can pass the requirement <code>quantileExpected &gt;= 0</code> to our TTree selection, because combine uses the value <code>-1</code> for <code>quantileExpected</code> to indicate best fit points.</p> <p>You can draw the \\(t_{\\mu}\\) distribution with:</p> <pre><code>root -l higgsCombineTest.MultiDimFit.mH120.123456.root\nroot [1] &gt; limit-&gt;Draw(\"2*deltaNLL\",\"quantileExpected &gt;= 0\")\n</code></pre> <p>To test whether or not this point should be rejected, we first define the confidence level of our rejection, say \\(1\\sigma\\) (approximately 68%), then we use the empirical distribution of the test statistic to estimate the cut-off value of the test statistic. This is done for you in the script <code>get_quantile.py</code>, which you can run:</p> <pre><code>python3 get_quantile.py --input higgsCombineTest.MultiDimFit.mH120.123456.root\n</code></pre> <ul> <li>How does the value estimated from this method compare to the value using Wilks' theorem and the methods above?</li> <li>How does the value change if you generate more toys?</li> <li>Check the observed value of the test statistic on the data; how does it compare to the threshold value for this point? Is the point accepted or rejected?</li> </ul> <p>You can do the toy data generation and the fits in one step for a given parameter value with the command:</p> <pre><code>combine -M MultiDimFit datacard.txt --rMin -10 --rMax 10 --algo fixed --fixedPointPOIs r=&lt;r_val&gt; --setParameters r=&lt;r_val&gt; -t &lt;ntoys&gt; --toysFrequentist\n</code></pre> <p>Test out a few values of <code>r</code> and see if they all give you the same result.  What happens for <code>r</code> less than about -1? Can you explain why? (hint: look at the values in the datacard)</p>"},{"location":"tutorial_stat_routines/stat_routines/#significance-testing","title":"Significance Testing","text":"<p>For significance testing, we want to test the compatibility of our model with the background-only hypothesis \\(\\mu = 0\\). 
However, when performing significance testing we are typically only interested in rejecting the null hypothesis if the confidence level is very high (e.g. \\(5\\sigma\\)). Furthermore, we typically use a modified test-statistic \\(q_0\\) which is equal to 0 whenever the best-fit signal strength is less than 0, to avoid rejecting the null hypothesis due to a deficit of events.</p> <p>A typical significance test can be run with combine using the <code>Significance</code> method:</p> <pre><code>combine -M Significance datacard.txt\n</code></pre> <p>for this datacard, we get a very modest significance of about <code>0.24</code>, meaning we fail to reject the null hypothesis. This method is run using the asymptotic approximation, which relies on Wilks' theorem, similar to as it was used above. Under this approximation the significance is directly related to our test statistic, \\(q_0\\) by: Significance = \\(\\sqrt{q_0}\\). So for positive values of \\(\\hat{r}\\) we can read the Significance from the likelihood scan, by checking the value at the origin.</p> <pre><code>combine -M MultiDimFit datacard.txt --algo grid --points 100 --rMin -0.1 --rMax 1\npython3 ../../../scripts/plot1DScan.py --POI r higgsCombineTest.MultiDimFit.mH120.root --y-max 0.5\n</code></pre> <p>This will produce the same likelihood scan as before, but where we've restricted the range to be able to see the value of the curve at <code>r</code> = 0 more clearly. As expected, the crossing happens at around \\(0.24^2\\)</p> <p></p>"},{"location":"tutorial_stat_routines/stat_routines/#going-beyond-the-asymptotic-approximation-with-hybrid-new","title":"Going beyond the Asymptotic Approximation with Hybrid New","text":"<p>We could move beyond the asymptotic approximation as we did before by generating toys and explicitly calculating the test statistic. In order to do this, we would simply run <code>MultiDimFit</code> using:</p> <pre><code>combine -M MultiDimFit datacard.txt --rMin -10 --rMax 10 --algo fixed --fixedPointPOIs r=0 --setParameters r=0 -t 500 --toysFrequentist\n</code></pre> <p>and then calculate the value of \\(q_0\\) for every toy, check their distribution and compare the observed value in data to the distribution from the toys.</p> <p>However, we can also use the <code>HybridNew</code> method, which has a built-in routine to do this for us, and save us the work of calculating the test-statistic values ourselves.</p> <pre><code>combine -M HybridNew --LHCmode LHC-significance datacard.txt\n</code></pre> <p>We see that in this case, the value is a little different from the asymptotic approximation, though not drastically so.</p> <pre><code> -- Hybrid New -- \nSignificance: 0.306006  -0.0127243/+0.012774\nNull p-value: 0.3798 +/- 0.00485337\n</code></pre>"},{"location":"tutorial_stat_routines/stat_routines/#limit-setting","title":"Limit Setting","text":"<p>NOTE: This section explores several methods which are not recommended to be used in limit setting, in order to better understand their limitations before getting to the commonly used procedure</p> <p>One might be tempted to set limits by simply setting a confidence level (e.g. 95%), using the profile-likelihood ratio test statistic, \\(t_\\mu\\), and finding the values of the signal strength which are rejected. 
This is not what is typically done for setting limits, but let's try to set some limits this way as an exercise.</p> <p>Under the asymptotic approximation, then, we can read off the values which we would reject from our earlier likelihood scan; they are all the values above about 4.</p> <p>Let's see what happens if we were to have observed 12 events instead of 6. There is a modified datacard <code>datacard_obs12.txt</code> with this already set for you.</p> <pre><code>combine -M MultiDimFit datacard_obs12.txt --algo grid --points 100 --rMin -2 --rMax 8\npython3 ../../../scripts/plot1DScan.py --POI r higgsCombineTest.MultiDimFit.mH120.root\n</code></pre> <p></p> <p>In this case we would reject values of <code>r</code> above about 6.7, but also values of <code>r</code> below about 0.3 at the 95% CL. However, despite rejecting <code>r</code> = 0, our 95% CL is far below typical particle physics standards for claiming discovery. We therefore prefer to set only an upper bound, which we can do by modifying the test statistic to be 0 for all values below the best fit value.</p> <p>However, even with such a modification, there is another problem: with a large enough under-fluctuation of the background we will set our limit below <code>r</code> = 0. You can check this with the <code>HybridNew</code> method, and the slightly modified datacard <code>datacard_underfluctuation.txt</code>:</p> <pre><code>combine -M HybridNew --frequentist --testStat=Profile datacard_underfluctuation.txt  --rule Pmu --rMin -5 --rMax 10\n</code></pre> <p>The above command is telling combine to calculate the limit, but we have to pass the non-standard arguments <code>--testStat=Profile --rule Pmu</code> to tell combine to use the profile likelihood ratio test statistic \\(t_{\\mu}\\) directly, and not to use the \\(\\mathrm{CL}_\\mathrm{s}\\) criterion which is normally applied.</p> <p>Usually at the LHC, for upper limits we use the modified test statistic \\(\\tilde{q}_{\\mu}\\) which is set to 0 for \\(\\mu &lt; \\hat{\\mu}\\) but also replaces \\(\\hat{\\mu}\\) with 0 if \\(\\hat{\\mu} &lt; 0\\), so that upper limits are always positive.</p> <p>If we use the standard LHC test statistic we will get a positive limit:</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule Pmu  datacard_underfluctuation.txt  \n</code></pre> <p>gives the result:</p> <pre><code> -- Hybrid New -- \nLimit: r &lt; 0.0736126 +/- 0.0902187 @ 95% CL\n</code></pre> <p>But this is an extremely tight bound on our signal strength, given that a signal strength of 1 is still within the statistical uncertainty of the background.</p>"},{"location":"tutorial_stat_routines/stat_routines/#the-cl_s-criterion","title":"the CL_s criterion","text":"<p>With the limit setting procedure above we set a limit of <code>r</code> &lt; 0.07, due to an under-fluctuation in the observed data. However, if we had designed our experiment better so that the expected background were lower, we never would have been able to set a limit that strong. We can see this with the datacard <code>datacard_lowbackground.txt</code> where the expected background is 1 event, but 0 events are observed.  
In this case, we only manage to set a limit of <code>r</code> &lt; 0.5:</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule Pmu  datacard_lowbackground.txt  \n</code></pre> <pre><code> -- Hybrid New -- \nLimit: r &lt; 0.509028 +/- 0.0188724 @ 95% CL\n</code></pre> <p>The CL_s criterion takes into account how likely the data are under the background only hypothesis as well as under the signal + background hypothesis. This has the effect of increasing the limit if the data are unlikely under the background-only hypothesis as well as the signal hypothesis. This prevents setting limits below the expected sensitivity of the experiment, as we can see by rerunning our cards with the option <code>--rule CLs</code>, which is actually the default value.</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule CLs datacard_lowbackground.txt  \n</code></pre> <p>gives a limit around 1.2, whereas</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule CLs datacard_underfluctuation.txt \n</code></pre> <p>sets a limit around 2.7. This is reasonable, given that we should expect a better limit when we have a better experimental design which manages to reduce backgrounds without any change in the signal acceptance.</p> <p>These are the default settings for setting limits at the LHC and the arguments <code>--frequentist --testStat LHC --rule CLs</code> can be replaced by <code>--LHCmode LHC-limits</code>.</p>"},{"location":"tutorial_stat_routines/stat_routines/#asymptotic-limits","title":"Asymptotic Limits","text":"<p>The <code>HybridNew</code> method generates pseudodata to estimate the distributions of the test statistics and set a limit, however often the asymptotic distributions, which have analytically known forms, can be used. This is computationally much faster than running <code>HybridNew</code> and can be run with:</p> <pre><code>combine -M AsymptoticLimits &lt;datacard&gt;\n</code></pre> <ul> <li>Try comparing the asymptotic limits to the results with HybridNew computed above,  how do they compare for the two cases?</li> </ul> <p>As well as the observed limit, the <code>AsymptoticLimits</code> method will automatically tell you the 2.5%, 16.0%, 50.0%, 84.0% and 97.5% expected limits. These are calculated by taking the appropriate quantiles from the distribution of the test-statistic under the background-only hypothesis.</p>"},{"location":"tutorial_stat_routines/stat_routines/#the-limit-setting-algorithm","title":"The limit setting algorithm","text":"<p>Because the \\(CL_s\\) value as a function of \\(r\\) is not known analytically, the limit setting algorithm follows an iterative process. It starts by picking a value of the signal strength, then it calculates the expected distributions of the test-statistics for that signal strength \\(\\tilde{q}_{\\mu}\\) under both the background-only and signal+background hypotheses.</p> <p>We can try this ourselves by running the <code>HybridNew</code> method with the <code>--singlePoint</code> option. 
We can also use the <code>--saveHybridResult</code> argument to save the distribution of the test-statistic:</p> <pre><code>combine -M HybridNew --LHCmode LHC-limits --singlePoint r=2 --saveHybridResult datacard.txt\n</code></pre> <p>Then the test-statistic distributions can be plotted:</p> <pre><code>python3  ../../../test/plotTestStatCLs.py --input higgsCombineTest.HybridNew.mH120.root --poi r --val all --mass 120\n</code></pre> <p></p> <p>This plot shows the expected distribution of the test statistic under the signal hypothesis (blue histogram) and the background-only hypothesis (red histogram), as well as the actual observed value in the data (black arrow). The p-values of each of the hypotheses are calculated, as is the CLs value.</p> <p>This particular point is not rejected, and so to set the limit, we'd need to increase the value of <code>r</code>, find the expected distributions, observed value and calculate CLs again. Repeating this many times, we could find the value of <code>r</code> at which the CLs value crosses our threshold (in this case 0.05, for a 95% CL).</p> <p>When we run without the <code>--singlePoint</code> option, combine does this all for us internally, but running individual points manually can be helpful for debugging or splitting up fits of large models across parallel jobs.</p> <p>You can see the various <code>r</code> values at which combine calculated the CLs and the interpolation it performs to find the crossing by using the <code>--plot</code> option:</p> <pre><code>combine -M HybridNew --LHCmode LHC-limits --plot r_scan.png datacard.txt\n</code></pre> <p></p> <ul> <li>Where do the uncertainties on the CLs value at each value of <code>r</code> come from in the plot?</li> <li>How could the precision of the limit be increased?</li> </ul>"},{"location":"tutorial_stat_routines/stat_routines/#debugging","title":"Debugging","text":"<p>If you see some strange or unexpected behaviour in your limits, you can look at the distributions of the test statistic, or the various CLs values computed in order to better understand where the problem might be coming from.</p> <p>One situation you might encounter is observing the discrete nature of the counts when you are in the low statistics regime.</p> <pre><code>combine -M HybridNew --LHCmode LHC-limits --singlePoint r=1 --saveHybridResult datacard_lowbackground.txt\npython3  ../../../test/plotTestStatCLs.py --input higgsCombineTest.HybridNew.mH120.root --poi r --val all --mass 120\n</code></pre> <p></p> <p>There is nothing wrong with this distribution, but noting its features may help you understand the results you are seeing and whether they are reasonable or there might be an issue with the fit. In a case like this, we can certainly expect the asymptotic approximation not to be very reliable. With low backgrounds, the shapes of the background-only and signal+background hypothesis distributions can also start to look very similar. In such cases, some of the quantiles of the expected limits may be very compressed, and statistical fluctuations in the empirical distributions may be more apparent.</p>"},{"location":"what_combine_does/fitting_concepts/","title":"Likelihood based fitting","text":"<p>\"Fitting\" simply means estimating some parameters of a model (or really a set of models) based on data. Likelihood-based fitting does this through the likelihood function.</p> <p>In frequentist frameworks, this typically means doing maximum likelihood estimation. 
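</p> <p>As a concrete, deliberately over-simplified illustration of maximum likelihood estimation, consider a single counting experiment with expected yield \\(r\\cdot s + b\\). The sketch below scans the Poisson log-likelihood in <code>r</code> and picks the maximum; the numbers are invented and this is only meant to illustrate the idea, not how <code>Combine</code> works internally: <pre><code>import numpy as np\nfrom scipy.stats import poisson\n\nn_obs, s, b = 8, 5.0, 4.0  # observed counts, expected signal and background yields\n\nr_values = np.linspace(0.0, 3.0, 301)\nlog_l = poisson.logpmf(n_obs, r_values * s + b)\n\nr_hat = r_values[np.argmax(log_l)]  # maximum likelihood estimate of r\nprint(r_hat)                        # 0.8, i.e. (n_obs - b) / s\n</code></pre> </p> <p>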
In bayesian frameworks, usually posterior distributions of the parameters are calculated from the likelihood.</p>"},{"location":"what_combine_does/fitting_concepts/#fitting-frameworks","title":"Fitting Frameworks","text":"<p>Likelihood fits typically either follow a frequentist framework of maximum likelihood estimation, or a bayesian framework of updating estimates to find posterior distributions given the data.</p>"},{"location":"what_combine_does/fitting_concepts/#maximum-likelihood-fits","title":"Maximum Likelihood fits","text":"<p>A maximum likelihood fit means finding the values of the model parameters \\((\\vec{\\mu}, \\vec{\\nu})\\) which maximize the likelihood, \\(\\mathcal{L}(\\vec{\\mu},\\vec{\\nu})\\) The values which maximize the likelihood, are the parameter estimates, denoted with a \"hat\" (\\(\\hat{}\\)):</p> \\[(\\vec{\\hat{\\mu}}, \\vec{\\hat{\\nu}}) \\equiv \\underset{\\vec{\\mu},\\vec{\\nu}}{\\operatorname{argmax}} \\mathcal{L}(\\vec{\\mu}, \\vec{\\nu})\\] <p>These values provide point estimates for the parameter values.</p> <p>Because the likelihood is equal to the probability of observing the data given the model, the maximum likelihood estimate finds the parameter values for which the data is most probable.</p>"},{"location":"what_combine_does/fitting_concepts/#bayesian-posterior-calculation","title":"Bayesian Posterior Calculation","text":"<p>In a bayesian framework, the likelihood represents the probability of observing the data given the model and some prior probability distribution over the model parameters.</p> <p>The prior probability of the parameters, \\(\\pi(\\vec{\\Phi})\\), are updated based on the data to provide a posterior distributions</p> \\[ p(\\vec{\\Phi};\\mathrm{data}) = \\frac{ p(\\mathrm{data};\\vec{\\Phi}) \\pi(\\vec{\\Phi}) }{\\int p(\\mathrm{data};\\vec{\\Phi}') \\pi(\\vec{\\Phi}') \\mathrm{d}\\vec{\\Phi}' } = \\frac{ \\mathcal{L}(\\vec{\\Phi}) \\pi(\\vec{\\Phi}) }{ \\int \\mathcal{L}(\\vec{\\Phi}') \\pi(\\vec{\\Phi}') \\mathrm{d}\\vec{\\Phi}' }\\] <p>The posterior distribution \\(p(\\vec{\\Phi};\\mathrm{data})\\) defines the updated belief about the parameters \\(\\vec{\\Phi}\\).</p>"},{"location":"what_combine_does/fitting_concepts/#methods-for-considering-subsets-of-models","title":"Methods for considering subsets of models","text":"<p>Often, one is interested in some particular aspect of a model. This may be for example information related to the parameters of interest, but not the nuisance parameters. In this case, one needs a method for specifying precisely what is meant by a model considering only those parameters of interest.</p> <p>There are several methods for considering sub models which each have their own interpretations and use cases.</p>"},{"location":"what_combine_does/fitting_concepts/#conditioning","title":"Conditioning","text":"<p>Conditional Sub-models can be made by simply restricting the values of some parameters. 
The conditional likelihood of the parameters \\(\\vec{\\mu}\\) conditioned on particular values of the parameters \\(\\vec{\\nu}\\) is:</p> \\[ \\mathcal{L}(\\vec{\\mu},\\vec{\\nu}) \\xrightarrow{\\mathrm{conditioned\\ on\\ } \\vec{\\nu} = \\vec{\\nu}_0} \\mathcal{L}(\\vec{\\mu}) = \\mathcal{L}(\\vec{\\mu},\\vec{\\nu}_0) \\]"},{"location":"what_combine_does/fitting_concepts/#profiling","title":"Profiling","text":"<p>The profiled likelihood \\(\\mathcal{L}(\\vec{\\mu})\\) is defined from the full likelihood, \\(\\mathcal{L}(\\vec{\\mu},\\vec{\\nu})\\), such that for every point \\(\\vec{\\mu}\\) it is equal to the full likelihood at \\(\\vec{\\mu}\\) maximized over \\(\\vec{\\nu}\\).</p> \\[ \\mathcal{L}(\\vec{\\mu},\\vec{\\nu}) \\xrightarrow{\\mathrm{profiling\\ } \\vec{\\nu}} \\mathcal{L}({\\vec{\\mu}}) = \\max_{\\vec{\\nu}} \\mathcal{L}(\\vec{\\mu},\\vec{\\nu})\\] <p>In some sense, the profiled likelihood is the best estimate of the likelihood at every point \\(\\vec{\\mu}\\), it is sometimes also denoted with a double hat notation \\(\\mathcal{L}(\\vec{\\mu},\\vec{\\hat{\\hat{\\nu}}}(\\vec{\\mu}))\\).</p>"},{"location":"what_combine_does/fitting_concepts/#marginalization","title":"Marginalization","text":"<p>Marginalization is a procedure for producing a probability distribution \\(p(\\vec{\\mu};\\mathrm{data})\\) for a set of parameters \\(\\vec{\\mu}\\), which are only a subset of the parameters in the full distribution \\(p(\\vec{\\mu},\\vec{\\nu};\\mathrm{data})\\). The marginal probability density \\(p(\\vec{\\mu})\\) is defined such that for every point \\(\\vec{\\mu}\\) it is equal to the probability at \\(\\vec{\\mu}\\) integrated over \\(\\vec{\\nu}\\).</p> \\[ p(\\vec{\\mu},\\vec{\\nu}) \\xrightarrow{\\mathrm{marginalizing\\ } \\vec{\\nu}} p({\\vec{\\mu}}) = \\int_{\\vec{\\nu}} p(\\vec{\\mu},\\vec{\\nu})\\] <p>The marginalized probability \\(p(\\vec{\\mu})\\) is the probability for the parameter values \\(\\vec{\\mu}\\) taking into account all possible values of \\(\\vec{\\nu}\\).</p> <p>Marginalized likelihoods can also be defined, by their relationship to the probability distributions.</p>"},{"location":"what_combine_does/fitting_concepts/#parameter-uncertainties","title":"Parameter Uncertainties","text":"<p>Parameter uncertainties describe regions of parameter values which are considered reasonable parameter values, rather than single estimates. These can be defined either in terms of frequentist confidence regions or bayesian credibility regions.</p> <p>In both cases the region is defined by a confidence or credibility level \\(CL\\), which quantifies the meaning of the region. For frequentist confidence regions, the confidence level \\(CL\\) describes how often the confidence region will contain the true parameter values if the model is a sufficiently accurate approximation of the truth. For bayesian credibility regions, the credibility level \\(CL\\) describes the bayesian probability that the true parameter value is in that region for under the given model.</p> <p>The confidence or credibility regions are described by a set of points \\(\\{ \\vec{\\mu} \\}_{\\mathrm{CL}}\\) which meet some criteria. 
In most situations of interest, the credibility region or confidence region for a single parameter, \\(\\mu\\), is effectively described by an interval:</p> \\[ \\{ \\mu \\}_{\\mathrm{CL}} = [ \\mu^{-}_{\\mathrm{CL}}, \\mu^{+}_{\\mathrm{CL}} ] \\] <p>Typically indicated as:</p> \\[ \\mu = X^{+\\mathrm{up}}_{-\\mathrm{down}} \\] <p>or, if symmetric intervals are used:</p> \\[ \\mu = X \\pm \\mathrm{unc.} \\]"},{"location":"what_combine_does/fitting_concepts/#frequentist-confidence-regions","title":"Frequentist Confidence Regions","text":"<p>Frequentist confidence regions are random variables of the observed data. These are very often the construction used to define the uncertainties reported on a parameter.</p> <p>If the same experiment is repeated multiple times, different data will be observed each time, and a different confidence set \\(\\{ \\vec{\\mu}\\}_{\\mathrm{CL}}^{i}\\) will be found for each experiment. If the data are generated by the model with some set of values \\(\\vec{\\mu}_{\\mathrm{gen}}\\), then the fraction of the regions \\(\\{ \\vec{\\mu}\\}_{\\mathrm{CL}}^{i}\\) which contain the values \\(\\vec{\\mu}_{\\mathrm{gen}}\\) will be equal to the confidence level \\({\\mathrm{CL}}\\). The fraction of intervals which contain the generating parameter value is referred to as the \"coverage\".</p> <p>From first principles, the intervals can be constructed using the Neyman construction.</p> <p>In practice, the likelihood can be used to construct confidence regions for a set of parameters \\(\\vec{\\mu}\\) by using the profile likelihood ratio:</p> \\[ \\Lambda \\equiv \\frac{\\mathcal{L}(\\vec{\\mu},\\vec{\\hat{\\nu}}(\\vec{\\mu}))}{\\mathcal{L}(\\vec{\\hat{\\mu}},\\vec{\\hat{\\nu}})} \\] <p>i.e. the ratio of the profile likelihood at point \\(\\vec{\\mu}\\) to the maximum likelihood. For technical reasons, the negative logarithm of this quantity is typically used in practice.</p> <p>Each point \\(\\vec{\\mu}\\) can be tested to see if it is in the confidence region, by checking the value of the likelihood ratio at that point and comparing it to the expected distribution if that point were the true generating value of the data.</p> \\[ \\{ \\vec{\\mu} \\}_{\\mathrm{CL}} = \\{ \\vec{\\mu} : -\\log(\\Lambda) \\lt  \\gamma_{\\mathrm{CL}}(\\vec{\\mu}) \\} \\] <p>The cutoff value \\(\\gamma_{\\mathrm{CL}}\\) must be chosen to match the desired coverage of the confidence set.</p> <p>Under some conditions, the value of \\(\\gamma_{\\mathrm{CL}}\\) is known analytically for any desired confidence level, and is independent of \\(\\vec{\\mu}\\), which greatly simplifies estimating confidence regions.</p> Constructing Frequentist Confidence Regions in Practice <p>When a single fit is performed by some numerical minimization program and parameter values are reported along with some uncertainty values, they are usually reported as frequentist intervals. The MINUIT minimizer which evaluates likelihood functions has two methods for estimating parameter uncertainties.</p> <p>These two methods are the most commonly used methods for estimating confidence regions in a fit; they are the Minos method and the Hessian method. In both cases, Wilks' theorem is assumed to hold at all points in parameter space, such that \\(\\gamma_{\\mathrm{CL}}\\) is independent of \\(\\vec{\\mu}\\).</p> <p>When \\(\\gamma_{\\mathrm{CL}}\\) is independent of \\(\\vec{\\mu}\\) the problem simplifies to finding the boundaries where \\(-\\log(\\Lambda) = \\gamma_{\\mathrm{CL}}\\). 
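<p>As a toy numerical illustration of this (a sketch with an invented parabolic likelihood, not combine output), the snippet below scans \\(-\\log(\\Lambda)\\) for a single parameter and keeps the points below \\(\\gamma_{\\mathrm{CL}} = 0.5\\), the threshold corresponding to a 68% confidence interval for one parameter when Wilks' theorem applies.</p> <pre><code>import numpy as np\n\n# toy parabolic -log(likelihood ratio) with best fit value 1.2 and width 0.4 (invented)\ndef nll_ratio(mu):\n    return 0.5 * ((mu - 1.2) / 0.4) ** 2\n\ngamma_cl = 0.5  # threshold for a 68% CL interval on one parameter (Wilks' theorem)\nmu_grid = np.linspace(0.0, 3.0, 2001)\ninside = nll_ratio(mu_grid) &lt;= gamma_cl  # points inside the confidence region\nprint(\"68% CL interval:\", round(mu_grid[inside].min(), 3), \"to\", round(mu_grid[inside].max(), 3))\n</code></pre>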
Such a boundary point is referred to as the \"crossing\", i.e. where \\(-\\log(\\Lambda)\\) crosses the threshold value.</p>"},{"location":"what_combine_does/fitting_concepts/#the-minos-method-for-estimating-confidence-regions","title":"The Minos method for estimating confidence regions","text":"<p>In the Minos method, once the best fit point \\(\\vec{\\hat{\\mu}}\\) is determined, the confidence region for any parameter \\(\\mu_i\\) can be found by moving away from its best fit value \\(\\hat{\\mu}_i\\). At each value of \\(\\mu_i\\), the other parameters are profiled, and \\(-\\log{\\Lambda}\\) is calculated.</p> <p>Following this procedure, \\(\\mu_i\\) is varied until the boundary of the confidence region is found, where \\(-\\log{\\Lambda} = \\gamma_{\\mathrm{CL}}\\).</p> <p>The search is performed in both directions, away from the best fit value of the parameter, and the two crossings are taken as the borders of the confidence region.</p> <p>This procedure has to be followed separately for each parameter \\(\\mu_i\\) for which a confidence interval is calculated.</p>"},{"location":"what_combine_does/fitting_concepts/#the-hessian-method-for-estimating-confidence-regions","title":"The Hessian method for estimating confidence regions","text":"<p>The Hessian method relies on the second derivatives (i.e. the Hessian) of the likelihood at the best fit point.</p> <p>By assuming that the shape of the likelihood function is well described by its second-order approximation, the values at which \\(-\\log(\\Lambda) = \\gamma_{\\mathrm{CL}}\\) can be calculated analytically without the need for a search:</p> \\[ \\mu_i^{\\mathrm{crossing}} - \\hat{\\mu} \\propto \\left(\\frac{\\partial^2{\\mathcal{L}(\\vec{\\hat{\\mu}})}}{\\partial\\mu_i^2}\\right)^{-1/2} \\] <p>By computing and then inverting the full Hessian matrix, all individual confidence regions and the full covariance matrix are determined. By construction, this method always reports symmetric confidence intervals, as it assumes that the likelihood is well described by a second order expansion.</p>"},{"location":"what_combine_does/fitting_concepts/#bayesian-credibility-regions","title":"Bayesian Credibility Regions","text":"<p>Often the full posterior probability distribution is summarized in terms of some credible region which contains some specified portion of the posterior probability of the parameter.</p> \\[ \\{ \\vec{\\mu} \\}_{\\mathrm{CL}} =  \\{ \\vec{\\mu} : \\vec{\\mu} \\in \\Omega, \\int_{\\Omega} p(\\vec{\\mu};\\mathrm{data}) \\mathrm{d}\\vec{\\mu} = \\mathrm{CL}  \\}\\] <p>The credible region represents a region in which the bayesian probability of the parameter being in that region is equal to the chosen Credibility Level.</p>"},{"location":"what_combine_does/introduction/","title":"Introduction And Capabilities","text":"<p>Combine is a tool for performing statistical analyses based on a model of expected observations and a dataset. Example statistical analyses are claiming discovery of a new particle or process, setting limits on the existence of new physics, and measuring cross sections.</p> <p>The package has no physics-specific knowledge; it is completely agnostic to the interpretation of the analysis being performed, but its usage and development are based around common cases in High Energy Physics. 
This documentation is a description of what combine does and how you can use it to run your analyses.</p> <p>Roughly, combine does three things:</p> <ol> <li>Helps you to build a statistical model of expected observations;</li> <li>Runs statistical tests on the model and observed data;</li> <li>Provides tools for validating, inspecting, and understanding the model and the statistical tests.</li> </ol> <p>Combine can be used for analyses in HEP ranging from simple counting experiments to unfolded measurements, new physics searches, combinations of measurements, and EFT fits.</p>"},{"location":"what_combine_does/introduction/#model-building","title":"Model Building","text":"<p>Combine provides a powerful, human-readable, and lightweight interface for building likelihood models for both binned and unbinned data. The likelihood definition allows the user to define many processes which contribute to the observation, as well as multiple channels which may be fit simultaneously.</p> <p>Furthermore, combine provides a powerful and intuitive interface for combining models, as it was originally developed for combinations of Higgs boson analyses at the CMS experiment.</p> <p>The interface simplifies many common tasks, while providing many options for customization. Common nuisance parameter types are defined for easy use, while user-defined functions can also be provided. Input histograms defining the model can be provided in ROOT format, or in other tabular formats compatible with pandas.</p> <p>Custom physics models can be defined in Python which determine how the parameters of interest alter the model, and a number of predefined models are provided by default.</p> <p>A number of tools are also provided for run-time alterations of the model, allowing for straightforward comparisons of alternative models.</p>"},{"location":"what_combine_does/introduction/#statistical-tests","title":"Statistical Tests","text":"<p>Combine can be used for statistical tests in frequentist or bayesian frameworks, as well as to perform some hybrid frequentist-bayesian analysis tasks.</p> <p>Combine implements various methods for commonly used statistical tests in high energy physics, including for discovery, limit setting, and parameter estimation. Statistical tests can be customized to use various test statistics and confidence levels, as well as providing different output formats.</p> <p>A number of asymptotic methods, relying on Wilks' theorem and valid in appropriate conditions, are implemented for fast evaluation. Generation of pseudo-data from the model can also be performed, and tests are implemented to automatically run over empirical distributions without relying on asymptotic approximations. 
Pseudo-data generation and fitting over the pseudo-data can be customized in a number of ways.</p>"},{"location":"what_combine_does/introduction/#validation-and-inspection","title":"Validation and Inspection","text":"<p>Combine provides tools for inspecting the model for things like potentially problematic input templates.</p> <p>Various methods are provided for inspecting the likelihood function and the performance of the fits.</p> <p>Methods are provided for comparing pre-fit and postfit results of all values including nuisance parameters, and summaries of the results can produced.</p> <p>Plotting utilities allow the pre- and post-fit model expectations and their uncertainties to be plotted, as well as plotted summaries of debugging steps such as the nuisance parameter values and likelihood scans.</p>"},{"location":"what_combine_does/model_and_likelihood/","title":"Observation Models and Likelihoods","text":""},{"location":"what_combine_does/model_and_likelihood/#the-observation-model","title":"The Observation Model","text":"<p>The observation model, \\(\\mathcal{M}( \\vec{\\Phi})\\) defines the probability for any set of observations given specific values of the input parameters of the model \\(\\vec{\\Phi}\\). The probability for any observed data is denoted:</p> \\[ p_{\\mathcal{M}}(\\mathrm{data}; \\vec{\\Phi} ) \\] <p>where the subscript \\(\\mathcal{M}\\) is given here to remind us that these are the probabilities according to this particular model (though usually we will omit it for brevity).</p> <p>Combine is designed for counting experiments, where the number of events with particular features are counted. The events can either be binned, as in histograms, or unbinned, where continuous values are stored for each event. The event counts are assumed to be of independent events, such as individual proton-proton collisions, which are not correlated with each other.</p> <p>The event-count portion of the model consists of a sum over different processes. The expected observations, \\(\\vec{\\lambda}\\), are then the sum of the expected observations for each of the processes, \\(\\vec{\\lambda} =\\sum_{p} \\vec{\\lambda}_{p}\\).</p> <p>The model can also be composed of multiple channels, in which case the expected observation is the set of all expected observations from the various channels \\(\\vec{\\lambda} = \\{ \\vec{\\lambda}_{c1}, \\vec{\\lambda}_{c2}, .... \\vec{\\lambda}_{cN}\\}\\).</p> <p>The model can also include data and parameters related to non-count values, such as the observed luminosity or detector calibration constant. These non-count data are usually considered as auxiliary information which are used to constrain our expectations about the observed event counts.</p> <p>The full model therefore defines the probability of any given observations over all the channels, given all the processes and model parameters.</p> <p>Combining full models is possible by combining their channels, assuming that the channels are mutually independent.</p> A Simple Example <p>Consider performing an analysis searching for a Higgs boson by looking for events where the Higgs decays into two photons.</p> <p>The event count data may be binned histograms of the number of events with two photons with different bins of invariant mass of the photons. The expected counts would include signal contributions from processes where a Higgs boson is produced, as well as background contributions from processes where two photons are produced through other mechanisms, like radiation off a quark. 
The expected counts may also depend on parameters such as the energy resolution of the measured photons and the total luminosity of collisions being considered in the dataset; these can be parameterized in the model as auxiliary information.</p> <p>The analysis itself might be split into multiple channels, targeting different Higgs production modes with different event selection criteria. Furthermore, the analysis may eventually be combined with other analyses, such as a measurement targeting Higgs production where the Higgs boson decays into four leptons, rather than two photons.</p> <p>Combine provides the functionality for building the statistical models and combining all the channels or analyses together into one common analysis.</p>"},{"location":"what_combine_does/model_and_likelihood/#sets-of-observation-models","title":"Sets of Observation Models","text":"<p>We are typically not interested in a single model, but in a set of models, parameterized by a set of real numbers representing possible versions of the model.</p> <p>Model parameters include the parameters of interest (\\(\\vec{\\mu}\\), those being measured such as a cross section) as well as nuisance parameters (\\(\\vec{\\nu}\\)), which may not be of interest but still affect the model expectation.</p> <p>Combine provides tools and interfaces for defining the model as pre-defined or user-defined functions of the input parameters. In practice, however, there are a number of commonly used functional forms which define how the expected events depend on the model parameters. These are discussed in detail in the context of the full likelihood below.</p>"},{"location":"what_combine_does/model_and_likelihood/#the-likelihood","title":"The Likelihood","text":"<p>For any given model, \\(\\mathcal{M}(\\vec{\\Phi})\\), the likelihood defines the probability of observing a given dataset. It is numerically equal to the probability of observing the data, given the model.</p> \\[ \\mathcal{L}_\\mathcal{M}(\\vec{\\Phi}) = p_{\\mathcal{M}}(\\mathrm{data};\\vec{\\Phi}) \\] <p>Note, however, that the likelihood is a function of the model parameters, not the data, which is why we distinguish it from the probability itself.</p> <p>The likelihood in combine takes the general form:</p> \\[ \\mathcal{L} =  \\mathcal{L}_{\\textrm{primary}} \\cdot \\mathcal{L}_{\\textrm{auxiliary}} \\] <p>Where \\(\\mathcal{L}_{\\mathrm{primary}}\\) is equal to the probability of observing the event count data for a given set of model parameters, and \\(\\mathcal{L}_{\\mathrm{auxiliary}}\\) represents some external constraints on the parameters. The constraint term may be constraints from previous measurements (such as Jet Energy Scales) or prior beliefs about the value some parameter in the model should have.</p> <p>Both \\(\\mathcal{L}_{\\mathrm{primary}}\\) and \\(\\mathcal{L}_{\\mathrm{auxiliary}}\\) can be composed of many sublikelihoods, for example for observations of different bins and constraints on different nuisance parameters.</p> <p>This form is entirely general. However, as with the model itself, there are typical forms that the likelihood takes which will cover most use cases, and for which combine is primarily designed.</p>"},{"location":"what_combine_does/model_and_likelihood/#primary-likelihoods-for-binned-data","title":"Primary Likelihoods for binned data","text":"<p>For a binned likelihood, the probability of observing a certain number of counts, given a model, takes on a simple form. 
For each bin:</p> \\[ \\mathcal{L}_{\\mathrm{bin}}(\\vec{\\Phi}) = \\mathrm{Poiss}(n_{\\mathrm{obs}}; n_{\\mathrm{exp}}(\\vec{\\Phi})) \\] <p>i.e. it is a poisson distribution with the mean given by the expected number of events in that bin. The full primary likelihood for binned data is simply the product of each of the bins' likelihoods:</p> \\[ \\mathcal{L}_\\mathrm{primary} = \\prod_\\mathrm{bins} \\mathcal{L}_\\mathrm{bin}. \\] <p>This is the underlying likelihood model used for every binned analysis. The freedom in the analysis comes in how \\(n_\\mathrm{exp}\\) depends on the model parameters, and the constraints that are placed on those parameters.</p>"},{"location":"what_combine_does/model_and_likelihood/#primary-likelihoods-for-unbinned-data","title":"Primary Likelihoods for unbinned data","text":"<p>For unbinned likelihood models, a likelihood can be given to each data point. It is proportional to the probability density function at that point, \\(\\vec{x}\\). For the full set of observed data points, information about the total number of data points is also included:</p> \\[ \\mathcal{L}_\\mathrm{data} = \\mathrm{Poiss}(n_{\\mathrm{obs}} ; n_{\\mathrm{exp}}(\\vec{\\Phi})) \\prod_{i}^{N_{\\mathrm{obs}}} \\mathrm{pdf}(\\vec{x}_i ; \\vec{\\Phi} ) \\] <p>Where \\(n_{\\mathrm{obs}}\\) and \\(n_{\\mathrm{exp}}\\) are the total number of observed and expected events, respectively. This is sometimes referred to as an 'extended' likelihood, as the probability density has been 'extended' to include information about the total number of observations.</p>"},{"location":"what_combine_does/model_and_likelihood/#auxiliary-likelihoods","title":"Auxiliary Likelihoods","text":"<p>The auxiliary likelihood terms encode the probability of model nuisance parameters taking on a certain value, without regards to the primary data. In frequentist frameworks, this usually represents the result of a previous measurement (such as of the jet energy scale). We will write in a mostly frequentist framework, though combine can be used for either frequentist or bayesian analyses[^1].</p> <p>[^1]: see: the first paragraphs of the PDGs statistics review for more information on these two frameworks</p> <p>In this framework, each auxiliary term represents the likelihood of some parameter, \\(\\nu\\), given some previous observation \\(y\\); the quantity \\(y\\) is sometimes referred to as a \"global observable\".</p> \\[ \\mathcal{L}_{\\mathrm{auxiliary}}( \\nu ) = p( y ; \\nu ) \\] <p>In principle the form of the likelihood can be any function where the corresponding \\(p\\) is a valid probability distribution. In practice, most of the auxiliary terms are gaussian, and the definition of \\(\\nu\\) is chosen such that the central observation \\(y = 0\\) , and the width of the gaussian is one.</p> <p>Note that on its own, the form of the auxiliary term is not meaningful; what is meaningful is the relationship between the auxiliary term and how the model expectation is altered by the parameter. Any co-ordinate transformation of the parameter values can be absorbed into the definition of the parameter. A reparameterization would change the mathematical form of the auxiliary term, but would also simultaneously change how the model depends on the parameter in such a way that the total likelihood is unchanged. e.g. 
if you define  \\(\\nu = \\sigma(tt)\\) or \\(\\nu = \\sigma(tt) - \\sigma_0\\) you will change the form of the constraint term, but the you will not change the overall likelihood.</p>"},{"location":"what_combine_does/model_and_likelihood/#likelihoods-implemented-in-combine","title":"Likelihoods implemented in Combine","text":"<p>Combine builds on the generic forms of the likelihood for counting experiments given above to provide specific functional forms which are commonly most useful in high energy physics, such as separating contributions between different processes.</p>"},{"location":"what_combine_does/model_and_likelihood/#binned-likelihoods-using-templates","title":"Binned Likelihoods using Templates","text":"<p>Binned likelihood models can be defined by the user by providing simple inputs such as a set of histograms and systematic uncertainties. These likelihood models are referred to as template-based because they rely heavily on histograms as templates for building the full likelihood function.</p> <p>Here, we describe the details of the mathematical form of these likelihoods. As already mentioned, the likelihood can be written as a product of two parts:</p> \\[ \\mathcal{L} =  \\mathcal{L}_\\mathrm{primary} \\cdot \\mathcal{L}_\\mathrm{auxiliary} = \\prod_{c=1}^{N_c} \\prod_{b=1}^{N_b^c} \\mathrm{Poiss}(n_{cb}; n^\\mathrm{exp}_{cb}(\\vec{\\mu},\\vec{\\nu})) \\cdot \\prod_{e=1}^{N_E}  p_e(y_e ; \\nu_e) \\] <p>Where \\(c\\) indexes the channel, \\(b\\) indexes the histogram bin, and \\(e\\) indexes the nuisance parameter.</p>"},{"location":"what_combine_does/model_and_likelihood/#model-of-expected-event-counts-per-bin","title":"Model of expected event counts per bin","text":"<p>The generic model of the expected event count in a given bin, \\(n^\\mathrm{exp}_{cb}\\), implemented in combine for template based analyses is given by:</p> \\[n^\\mathrm{exp}_{cb} = \\mathrm{max}(0, \\sum_{p} M_{cp}(\\vec{\\mu})N_{cp}(\\nu_G, \\vec{\\nu}_L,\\vec{\\nu}_S,\\vec{\\nu}_{\\rho})\\omega_{cbp}(\\vec{\\nu}_S) + E_{cb}(\\vec{\\nu}_B) ) \\] <p>where here:</p> <ul> <li>\\(p\\) indexes the processes contributing to the channel;</li> <li>\\(\\nu_{G}, \\vec{\\nu}_L, \\vec{\\nu}_S, \\vec{\\nu}_{\\rho}\\) and \\(\\vec{\\nu}_B\\) are different types of nuisance parameters which modify the processes with different functional forms;<ul> <li>\\(\\nu_{G}\\) is a gamma nuisances,</li> <li>\\(\\vec{\\nu}_{L}\\) are log-normal nuisances,</li> <li>\\(\\vec{\\nu}_{S}\\) are \"shape\" nuisances,</li> <li>\\(\\vec{\\nu}_{\\rho}\\) are user defined rate parameters, and</li> <li>\\(\\vec{\\nu}_{B}\\) are nuisance parameters related to the statistical uncertainties in the simulation used to build the model.</li> </ul> </li> <li>\\(M\\) defines the effect of the parameters of interest on the signal process;</li> <li>\\(N\\) defines the overall normalization effect of the nuisance parameters;</li> <li>\\(\\omega\\) defines the shape effects (i.e. 
bin-dependent effects) of the nuisance parameters; and</li> <li>\\(E\\) defines the impact of statistical uncertainties from the samples used to derive the histogram templates used to build the model.</li> </ul>"},{"location":"what_combine_does/model_and_likelihood/#parameter-of-interest-model","title":"Parameter of Interest Model","text":"<p>The function \\(M\\) can take on custom functional forms, as defined by the user, but in the most common case, the parameter of interest \\(\\mu\\) simply scales the contributions from signal processes:</p> \\[\\label{eq:sig_param} M_{cp}(\\mu) = \\begin{cases}     \\mu  &amp;\\mathrm{if\\ } p \\in \\mathrm{signal} \\\\     1    &amp;\\mathrm{otherwise} \\end{cases} \\] <p>However, combine supports many more models beyond this. As well as built-in support for models with multiple parameters of interest, combine comes with many pre-defined models which go beyond simple process normalization, which are targetted at various types of searches and measurements.</p>"},{"location":"what_combine_does/model_and_likelihood/#normalization-effects","title":"Normalization Effects","text":"<p>The overall normalization \\(N\\) is affected differently by the different types of nuisances parameters, and takes the general form</p> \\[N = \\prod_X \\prod_i f_X(\\vec{\\nu}_{X}^{i})\\mathrm{,}\\] <p>With \\(X\\) identifying a given nuisance parameter type; i.e. \\(N\\) multiplies together the morphings from each of the individual nuisance parameters from each of the nuisance types.</p> Normalization Parameterization Details <p>The full functional form of the normalization term is given by:</p> \\[ N_{cp} = N_{\\mathrm{0}}(\\nu_{G})\\prod_{n} {\\kappa_{n}}^{\\nu_{L,n}}\\prod_{a} {\\kappa^{\\mathrm{A}}_{a}(\\nu_{L(S)}^{a},\\kappa^{+}_{a}, \\kappa^{-}_{a})}^{\\nu_{L(S)}^{a}} \\prod_{r}F_{r}(\\nu_\\rho) \\] <p>where:</p> <ul> <li>\\(N_{\\mathrm{0}}(\\nu_{G}) \\equiv \\frac{\\nu_{G}}{y_{G}}\\), is the normalization effect of a gamma uncertainty. \\(y_{G}\\) is taken as the observed number of events in some external control region and \\(\\nu_{G}\\) has a constraint pdf \\(\\mathrm{Poiss}(\\nu; y)\\)</li> <li>\\(\\kappa_{n}^{\\nu_{L,n}}\\), are log-normal uncertainties specified by a fixed value \\(\\kappa\\);</li> <li>\\(\\kappa^{\\mathrm{A}}_{a}(\\nu_{L(S)}^{a},\\kappa^{+}_{a}, \\kappa^{-}_{a})^{\\nu_{L(S)}^{a}}\\) are asymmetric log-normal uncertainties, in which the value of \\(\\kappa^{\\mathrm{A}}\\) depends on the nuisance parameter and two fixed values \\(\\kappa^{+}_{a}\\) and \\(\\kappa^{-}_{a}\\). 
The functions, \\(\\kappa^A\\), define a smooth interpolation for the asymmetric uncertainty; and</li> <li>\\(F_{r}(\\vec{\\nu}_\\rho)\\) are user-defined functions of the user defined nuisance parameters which may have uniform or gaussian constraint terms.</li> </ul> <p>The function for the asymmetric normalization modifier, \\(\\kappa^A\\) is</p> \\[     \\kappa^{\\mathrm{A}}(\\nu,\\kappa^{+}, \\kappa^{-}) =     \\begin{cases}         \\kappa^{+}, &amp;\\mathrm{for\\,} \\nu \\geq 0.5 \\\\         \\frac{1}{\\kappa^{-}}, &amp;\\mathrm{for\\,} \\nu \\leq -0.5 \\\\         \\exp\\left(\\frac{1}{2} \\left( (\\ln{\\kappa^{+}}-\\ln{\\kappa^{-}}) + \\frac{1}{4}(\\ln{\\kappa^{+}}+\\ln{\\kappa^{-}})I(\\nu)\\right)\\right), &amp;\\mathrm{otherwise}\\end{cases} \\] <p>where \\(I(\\nu) = 48\\nu^5 - 40\\nu^3 + 15\\nu\\), which ensures \\(\\kappa^{\\mathrm{A}}\\) and its first and second derivatives are continuous for all values of \\(\\nu\\).</p> <p>and the \\(\\kappa^{+}\\) and \\(\\kappa^{-}\\) are the relative normalizations of the two systematics variations; i.e.:</p> \\[ \\kappa^{\\pm}_{s} = \\frac{\\sum_{b}\\omega_{b}^{s,\\pm}}{\\sum_{b}\\omega_{b}^{0}}. \\] <p>where \\(\\omega_{b}^{s,\\pm}\\) is the bin yield as defined by the two shifted values  \\(\\nu_{S} = \\nu_{S}^{\\pm}\\), and \\(\\omega_{b}^{0}\\) is the bin yield when \\(\\nu_{S} = \\omega_{S}\\).</p>"},{"location":"what_combine_does/model_and_likelihood/#shape-morphing-effects","title":"Shape Morphing Effects","text":"<p>The number of events in a given bin \\(b\\), \\(\\omega_{cbp}\\), is a function of the shape parameters \\(\\vec{\\nu}_{S}\\). The shape interpolation works with the fractional yields in each bin, where the interpolation can be performed either directly on the fractional yield, or on the logarithm of the fraction yield, which is then exponentiated again.</p> Shape parameterization Details <p>In the following, the channel and process labels \\(c\\) and \\(p\\) apply to every term, and so are omitted.</p> <p>The fixed nominal number of events is denoted \\(\\omega_{b}^{0}\\). For each applicable shape uncertainty \\(s\\), two additional predictions are specified, \\(\\omega_{b}^{s,+}\\) and \\(\\omega_{b}^{s,-}\\), typically corresponding to the \\(+1\\sigma\\) and \\(-1\\sigma\\) variations, respectively. These may change both the shape and normalization of the process. The two effects are separated; the shape transformation is constructed in terms of the fractional event counts in the templates via a smooth vertical interpolation, and the normalization is treated as an asymmetric log-normal uncertainty, as described above in the description of the \\(N\\) term in the likelihood.</p> <p>For a given process, the shape may be interpolated either directly in terms of the fractional bin yields, \\(f_b = \\omega_b / \\sum \\omega_{b}\\) or their logarithms, \\(\\ln(f_b)\\). 
The transformed yield is then given as, respectively,</p> \\[ \\omega_{b}(\\vec{\\nu}) = \\begin{cases} \\max\\left(0, y^{0}\\left(f^{0}_{b} + \\sum_{s} F(\\nu_{s}, \\delta^{s,+}_{b}, \\delta^{s,-}_{b}, \\epsilon_{s})\\right)\\right) &amp; \\text{(direct),}\\\\ \\max\\left(0, y^{0}\\exp\\left(\\ln(f^{0}_{b}) + \\sum_{s} F(\\nu_{s}, \\Delta^{s,+}_{b}, \\Delta^{s,-}_{b}, \\epsilon_{s})\\right) \\right) &amp; \\text{(logarithmic)}, \\end{cases} \\] <p>where \\(\\omega^{0} = \\sum \\omega_{b}^{0}\\), \\(\\delta^{\\pm} = f^{\\pm}_{i} - f^{0}_{i}\\), and \\(\\Delta^{\\pm} = \\ln\\left(\\frac{f^{\\pm}_{i}}{f^{0}_{i}}\\right)\\).</p> <p>The smooth interpolating function \\(F\\), defined below, depends on a set of coefficients, \\(\\epsilon_{s}\\). These are assumed to be unity by default, but may be set to different values, for example if the \\(\\omega_{b}^{s,\\pm}\\) correspond to the \\(\\pm X\\sigma\\) variations, then \\(\\epsilon_{s} = 1/X\\) is typically set. The minimum value of \\(\\epsilon\\) over the shape uncertainties for a given process is  \\(q = \\min({{\\epsilon_{s}}})\\). The function \\({F}\\) is then defined as</p> \\[ F(\\nu, \\delta^{+}, \\delta^{-}, \\epsilon) = \\begin{cases} \\frac{1}{2}\\nu^{'} \\left( (\\delta^{+}-\\delta^{-}) + \\frac{1}{8}(\\delta^{+}+\\delta^{-})(3\\bar{\\nu}^5 - 10\\bar{\\nu}^3 + 15\\bar{\\nu}) \\right), &amp; \\text{for } -q &lt; \\nu' &lt; q; \\\\ \\nu^{'}\\delta^{+}, &amp; \\text{for } \\nu' \\ge q;\\\\ -\\nu^{'}\\delta^{-}, &amp; \\text{for } \\nu' \\le -q;\\\\ \\end{cases} \\] <p>where \\(\\nu^{'} = \\nu\\epsilon\\), \\(\\bar{\\nu} = \\nu^{'} / q\\), and the label \\(s\\) has been omitted. This function ensures the yield and its first and second derivatives are continuous for all values of \\(\\nu\\).</p>"},{"location":"what_combine_does/model_and_likelihood/#statistical-uncertainties-in-the-simulation-used-to-build-the-model","title":"Statistical Uncertainties in the Simulation used to build the Model","text":"<p>Since the histograms used in a binned shape analysis are typically created from simulated samples, the yields in each bin are also subject to statistical uncertainties on the bin yields. These are taken into account by either assigning one nuisance parameter per bin, or as many parameters as contributing processes per bin.</p> Model Statistical Uncertainty Details <p>If the uncertainty in each bin is modelled as a single nuisance parameter it takes the form:</p> \\[ E_{cb}(\\vec{\\mu},\\vec{\\nu},\\nu) = \\nu\\left(\\sum_{p} (e_{cpb}N_{cp}M_{cp}(\\vec{\\mu},\\vec{\\nu}))^{2}\\right)^{\\frac{1}{2}}. \\] <p>where \\(e_{cbp}\\) is the uncertainty in the bin content for the histogram defining process \\(p\\) in the channel \\(c\\).</p> <p>Alternatively, one parameter is assigned per process, which may be modelled with either a Poisson or Gaussian constraint pdf:</p> \\[     E_{cb}(\\vec{\\mu},\\vec{\\nu},\\vec{\\nu}_{\\alpha},\\vec{\\nu}_{\\beta}) = \\sum_{\\alpha}^{\\text{Poisson}} \\left(\\frac{\\nu_{\\alpha}}{\\omega_{\\alpha}} - 1\\right)\\omega_{c\\alpha b}N_{c\\alpha}(\\vec{\\nu})M_{c\\alpha}(\\vec{\\mu},\\vec{\\nu}) + \\sum_{\\beta}^{\\text{Gaussian}} \\nu_{\\beta}e_{c\\beta b}N_{c\\beta}(\\vec{\\nu})M_{c\\beta}(\\vec{\\mu},\\vec{\\nu}), \\] <p>where the indices \\(\\alpha\\) and \\(\\beta\\) runs over the Poisson- and Gaussian-constrained processes, respectively. 
The parameters \\(\\omega_{\\alpha}\\) represent the nominal unweighted numbers of events and are treated as the external measurements; \\(N_{cp}\\) and \\(\\omega_{c\\alpha b}\\) are defined as above.</p>"},{"location":"what_combine_does/model_and_likelihood/#customizing-the-form-of-the-expected-event-counts","title":"Customizing the form of the expected event counts","text":"<p>Although the above likelihood defines some specific functional forms, users are also able to implement custom functional forms for \\(M\\), \\(N\\), and \\(\\omega_{cbp}\\). In practice, this makes the functional form much more general than the default forms used above.</p> <p>However, some constraints do exist, such as the requirement that bin contents be positive, and that the function \\(M\\) only depends on \\(\\vec{\\mu}\\), whereas \\(N\\) and \\(\\omega_{cbp}\\) only depend on \\(\\vec{\\nu}\\).</p>"},{"location":"what_combine_does/model_and_likelihood/#auxiliary-likelihood-terms","title":"Auxiliary Likelihood terms","text":"<p>The auxiliary constraint terms implemented in combine are Gaussian, Poisson or Uniform:</p> \\[ p_{e} \\propto \\exp{\\left(-0.5 \\left(\\frac{(\\nu_{e} - y_{e})}{\\sigma}\\right)^2 \\right)}\\mathrm{;~} \\\\ p_{e} = \\mathrm{Poiss}( \\nu_{e}; y_{e} ) \\mathrm{;\\ or~} \\\\ p_{e} \\propto \\mathrm{constant\\ (on\\ some\\ interval\\ [a,b])}. \\] <p>Which form they have depends on the type of nuisance parameter:</p> <ul> <li>The shape (\\(\\vec{\\nu}_{S}\\)) and log-normal (\\(\\vec{\\nu}_{L}\\)) nuisance parameters always use Gaussian constraint terms;</li> <li>The gamma (\\(\\vec{\\nu}_{G}\\)) nuisance parameters always use Poisson constraints;</li> <li>The rate parameters (\\(\\vec{\\nu}_{\\rho}\\)) may have either Gaussian or Uniform constraints; and</li> <li>The model statistical uncertainties (\\(\\vec{\\nu}_{B}\\)) may use Gaussian or Poisson constraints.</li> </ul> <p>While combine does not provide functionality for user-defined auxiliary pdfs, the effect of nuisance parameters is highly customizable through the form of the dependence of \\(n^\\mathrm{exp}_{cb}\\) on the parameter.</p>"},{"location":"what_combine_does/model_and_likelihood/#overview-of-the-template-based-likelihood-model-in-combine","title":"Overview of the template-based likelihood model in Combine","text":"<p>An overview of the binned likelihood model built by combine is given below. Note that \\(M_{cp}\\) can be chosen by the user from a set of predefined models, or defined by the user themselves.</p> <p></p>"},{"location":"what_combine_does/model_and_likelihood/#parametric-likelihoods-in-combine","title":"Parametric Likelihoods in Combine","text":"<p>As with the template likelihood, the parametric likelihood implemented in combine supports multiple processes and multiple channels. Unlike the template likelihoods, the parametric likelihoods are defined using custom probability density functions, which are functions of continuous observables, rather than discrete, binned counts. Because the pdfs are functions of a continuous variable, the likelihood can be evaluated over unbinned data. 
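<p>As a standalone sketch of evaluating an extended unbinned likelihood of the general form given above (invented data and pdf, not the combine implementation), the snippet below computes the negative log-likelihood for a single channel with one exponential pdf.</p> <pre><code>import numpy as np\n\n# toy unbinned data: observed values of a continuous variable x on [0, 10] (invented)\nx_obs = np.array([0.3, 1.1, 2.4, 0.7, 5.2, 3.3, 0.9, 1.8])\n\ndef extended_nll(n_exp, slope):\n    # exponential pdf truncated to [0, 10], normalized to unit integral\n    norm = (1.0 - np.exp(-10.0 * slope)) / slope\n    pdf_vals = np.exp(-slope * x_obs) / norm\n    n_obs = len(x_obs)\n    # Poisson term for the total yield (up to a constant) plus the per-event pdf terms\n    return (n_exp - n_obs * np.log(n_exp)) - np.sum(np.log(pdf_vals))\n\nprint(extended_nll(n_exp=8.0, slope=0.5))\n</code></pre>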
They can still, also, be used for analysis on binned data.</p> <p>The unbinned model implemented in combine is given by:</p> \\[ \\mathcal{L} = \\mathcal{L}_\\mathrm{primary} \\cdot \\mathcal{L}_\\mathrm{auxiliary}  = \\\\ \\left(\\prod_c \\mathrm{Poiss}(n_{c,\\mathrm{tot}}^{\\mathrm{obs}} ; n_{c,\\mathrm{tot}}^{\\mathrm{exp}}(\\vec{\\mu},\\vec{\\nu})) \\prod_{i}^{n_c^{\\mathrm{obs}}} \\sum_p f_{cp}^{\\mathrm{exp}} \\mathrm{pdf}_{cp}(\\vec{x}_i ; \\vec{\\mu}, \\vec{\\nu} ) \\right) \\cdot \\prod_e p_e( y_e ; \\nu_e) \\] <p>where \\(c\\) indexes the channel, \\(p\\) indexes the process, and \\(e\\) indexes the nuisance parameter.</p> <ul> <li>\\(n_{c,\\mathrm{tot}}\\) is the total number of expected events in channel \\(c\\);</li> <li>\\(\\mathrm{pdf}_{cp}\\) are user defined probability density functions, which may take on the form of any valid probability density; and</li> <li>\\(f_{cp}^{\\mathrm{exp}}\\) is the fraction of the total events in channel \\(c\\) from process \\(p\\), \\(f_{cp} = \\frac{n_{cp}}{\\sum_p n_{cp}}\\).</li> </ul> <p>for parametric likelihoods on binned data, the data likelihood is first converted into the binned data likelihood format before evaluation. i.e.</p> \\[ \\mathcal{L} = \\prod_c \\prod_b  \\mathrm{Poiss}(n_{cb}^{\\mathrm{obs}}; n_{cb}^{\\mathrm{exp}})  \\prod_e p_e( y_e ; \\nu_e) \\] <p>where \\(n^\\mathrm{exp}\\) is calculated from the input pdf and normalization, based on the model parameters.</p>"},{"location":"what_combine_does/model_and_likelihood/#model-of-expected-event-counts","title":"Model of expected event counts","text":"<p>The total number of expected events is modelled as:</p> \\[n_{c,\\mathrm{tot}}^\\mathrm{exp} = \\mathrm{max}(0, \\sum_{p} n^{cp}_0 M_{cp}(\\vec{\\mu})N_{cp}(\\nu_{G},\\vec{\\nu}_L,\\vec{\\nu}_{\\rho})) \\] <p>where, \\(n^{cp}_0\\)  is a default normalization for the process; and as for the binned likelihoods \\(\\nu_G, \\vec{\\nu}_L\\), and \\(\\vec{\\nu}_{\\rho}\\)  are different types of nuisance parameters which modify the processes normalizations with different functional forms, as in the binned case;</p> Details of Process Normalization <p>As in the template-based case, the different types of nuisance parameters affecting the process normalizations are:</p> <ul> <li>\\(\\nu_{G}\\) is a gamma nuisance, with linear normalization effects and a poisson constraint term.</li> <li>\\(\\vec{\\nu}_{L}\\) are log-normal nuisances, with log-normal normalization effects and gaussian constraint terms.</li> <li>\\(\\vec{\\nu}_{\\rho}\\) are user defined rate parameters, with user-defined normalization effects and gaussian or uniform constraint terms.</li> <li>\\(N\\) defines the overall normalization effect of the nuisance parameters;</li> </ul> <p>and \\(N\\) is defined as in the template-based case, except that there are no \\(\\vec{\\nu}_S\\) uncertainties.</p> \\[ N_{cp} = N_{\\mathrm{0}}(\\nu_{G})\\prod_{n} {\\kappa_{n}}^{\\nu_{L,n}}\\prod_{a} {\\kappa^{\\mathrm{A}}_{a}(\\nu_{L}^{a},\\kappa^{+}_{a}, \\kappa^{-}_{a})}^{\\nu_{L}^{a}} \\prod_{r}F_{r}(\\nu_\\rho) \\] <p>The function \\(F_{r}\\) is any user-defined mathematical expression. The functions \\(\\kappa(\\nu,\\kappa^+,\\kappa^-)\\) are defined to create smooth asymmetric log-normal uncertainties. 
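<p>For illustration only, the sketch below transcribes the smooth asymmetric interpolation \\(\\kappa^{\\mathrm{A}}\\) quoted in the normalization details above into code (a transcription of the formula with invented inputs, not the combine source code), and evaluates the resulting normalization factor \\((\\kappa^{\\mathrm{A}})^{\\nu}\\).</p> <pre><code>import math\n\ndef kappa_asym(nu, kplus, kminus):\n    # smooth asymmetric interpolation kappa^A(nu, kappa+, kappa-) as written above\n    if nu &gt;= 0.5:\n        return kplus\n    if nu &lt;= -0.5:\n        return 1.0 / kminus\n    i_nu = 48.0 * nu**5 - 40.0 * nu**3 + 15.0 * nu\n    log_k = 0.5 * ((math.log(kplus) - math.log(kminus)) + 0.25 * (math.log(kplus) + math.log(kminus)) * i_nu)\n    return math.exp(log_k)\n\n# normalization factor (kappa^A)**nu for an uncertainty with kappa+ = 1.10, kappa- = 0.95 (invented)\nfor nu in (-1.0, -0.3, 0.0, 0.3, 1.0):\n    print(nu, round(kappa_asym(nu, 1.10, 0.95) ** nu, 4))\n</code></pre>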
The details of the interpolations which are used are found in the section on normalization effects in the binned model.</p>"},{"location":"what_combine_does/model_and_likelihood/#parameter-of-interest-model_1","title":"Parameter of Interest Model","text":"<p>As in the template-based case, the parameter of interest model, \\(M_{cp}(\\vec{\\mu})\\), can take on different forms defined by the user. The default model is one where \\(\\vec{\\mu}\\) simply scales the signal processes' normalizations.</p>"},{"location":"what_combine_does/model_and_likelihood/#shape-morphing-effects_1","title":"Shape Morphing Effects","text":"<p>The user may define any number of nuisance parameters which morph the shape of the pdf according to functional forms defined by the user. These nuisance parameters are included as \\(\\vec{\\nu}_\\rho\\) uncertainties, which may have gaussian or uniform constraints, and include user-defined process normalization effects.</p>"},{"location":"what_combine_does/model_and_likelihood/#combining-template-based-and-parametric-likelihoods","title":"Combining template-based and parametric Likelihoods","text":"<p>While we presented the likelihoods for the template and parameteric models separately, they can also be combined into a single likelihood, by treating them each as separate channels. When combining the models, the data likelihoods of the binned and unbinned channels are multiplied.</p> \\[ \\mathcal{L}_{\\mathrm{combined}} = \\mathcal{L}_{\\mathrm{primary}} \\cdot \\mathcal{L}_\\mathrm{auxiliary} =  \\left(\\prod_{c_\\mathrm{template}} \\mathcal{L}_{\\mathrm{primary}}^{c_\\mathrm{template}}\\right) \\left(\\prod_{c_\\mathrm{parametric}} \\mathcal{L}_{\\mathrm{primary}}^{c_\\mathrm{parametric}}\\right)\\cdot \\mathcal{L}_{\\mathrm{auxiliary}} \\]"},{"location":"what_combine_does/model_and_likelihood/#references-and-external-literature","title":"References and External Literature","text":"<ul> <li>See the Particle Data Group's Review of Statistics for various fundamental concepts used here.</li> <li>The Particle Data Group's Review of Probability also has definitions of commonly used distributions, some of which are used here.</li> </ul>"},{"location":"what_combine_does/statistical_tests/","title":"Statistical Tests","text":"<p>Combine is a likelihood based statistical tool. That means that it uses the likelihood function to define statistical tests.</p> <p>Combine provides a number of customization options for each test; as always it is up to the user to chose an appropriate test and options.</p>"},{"location":"what_combine_does/statistical_tests/#general-framework","title":"General Framework","text":""},{"location":"what_combine_does/statistical_tests/#statistical-tests_1","title":"Statistical tests","text":"<p>Combine implements a number of different customizable statistical tests. These tests can be used for purposes such as determining the significance of some new physics model over the standard model, setting limits, estimating parameters, and checking goodness of fit.</p> <p>These tests are all performed on a given model (null hypothesis), and often require additional specification of an alternative model. 
The statistical test then typically requires defining some \"test statistic\", \\(t\\), which is simply any real-valued function of the observed data:</p> \\[ t(\\mathrm{data}) \\in \\mathbb{R} \\] <p>For example, in a simple coin-flipping experiment, the number of heads could be used as the test statistic.</p> <p>The distribution of the test statistic should be estimated under the null hypothesis (and the alternative hypothesis, if applicable). Then the value of the test statistic on the actual observed data, \\(t^{\\mathrm{obs}}\\) is compared with its expected value under the relevant hypotheses.</p> <p>This comparison, which depends on the test in question, defines the results of the test, which may be simple binary results (e.g. this model point is rejected at a given confidence level), or continuous (e.g. defining the degree to which the data are considered surprising, given the model). Often, as either a final result or as an intermediate step, the p-value of the observed test statistic under a given hypothesis is calculated.</p> How p-values are calculated <p>The distribution of the test statistic, \\(t\\) under some model hypothesis \\(\\mathcal{M}\\) is:</p> \\[t \\stackrel{\\mathcal{M}}{\\sim} D_{\\mathcal{M}}\\] <p>And the observed value of the test statistic is \\(t_{\\mathrm{obs}}\\). The p-value of the observed result gives the probability of having observed a test statistic at least as extreme as the actual observation. For example, this may be:</p> \\[p = \\int_{t_{\\mathrm{min}}}^{t_\\mathrm{obs}} D_{\\mathcal{M}} \\mathrm{d}t\\] <p>In some cases, the bounds of the integral may be modified, such as \\(( t_{\\mathrm{obs}}, t_{\\mathrm{max}} )\\) or \\((-t_{\\mathrm{obs}}, t_{\\mathrm{obs}} )\\), depending on the details of the test being performed. And specifically, for the distribution in question, whether an observed value in the right tail, left tail, or either tail of the distribution is considered as unexpected.</p> <p>The p-values using the left-tail and right tail are related to each other via \\(p_{\\mathrm{left}} = 1 - p_{\\mathrm{right}}\\).</p>"},{"location":"what_combine_does/statistical_tests/#test-statistics","title":"Test Statistics","text":"<p>The test statistic can be any real valued function of the data. While in principle, many valid test statistics can be used, the choice of tests statistic is very important as it influences the power of the statistical test.</p> <p>By associating a single real value with every observation, the test statistic allows us to recast the question \"how likely was this observation?\" in the form of a quantitative question about the value of the test statistic. 
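<p>To make the idea concrete, the sketch below revisits the coin-flip example: the number of heads is the test statistic, its distribution under the null hypothesis of a fair coin is built from pseudo-experiments, and a right-tail p-value is computed for an invented observation. This illustrates the logic only; it is not a combine workflow.</p> <pre><code>import numpy as np\n\nrng = np.random.default_rng(42)\n\nn_flips, t_obs = 100, 63  # invented observation: 63 heads in 100 flips\n# distribution of the test statistic under the null hypothesis (fair coin), from pseudo-experiments\ntoys = rng.binomial(n_flips, 0.5, size=100000)\n# right-tail p-value: fraction of pseudo-experiments at least as extreme as the observation\np_value = np.mean(toys &gt;= t_obs)\nprint(\"p-value =\", p_value)\n</code></pre>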
Ideally a good test statistic should return different values for likely outcomes as compared to unlikely outcomes and the expected distributions under the null and alternate hypotheses should be well-separated.</p> <p>In many situations, extremely useful test statistics, sometimes optimal ones for particular tasks, can be constructed from the likelihood function itself:</p> \\[ t(\\mathrm{data}) = f(\\mathcal{L}) \\] <p>Even for a given statistical test, several likelihood-based test-statistics may be suitable, and for some tests combine implements multiple test-statistics from which the user can choose.</p>"},{"location":"what_combine_does/statistical_tests/#tests-with-likelihood-ratio-test-statistics","title":"Tests with Likelihood Ratio Test Statistics","text":"<p>The likelihood function itself often forms a good basis for building test statistics.</p> <p>Typically the absolute value of the likelihood itself is not very meaningful as it depends on many fixed aspects we are usually not interested in on their own, like the size of the parameter space and the number of observations. However, quantities such as the ratio of the likelihood at two different points in parameter space are very informative about the relative merits of those two models.</p>"},{"location":"what_combine_does/statistical_tests/#the-likelihood-ratio-and-likelihood-ratio-based-test-statistics","title":"The likelihood ratio and likelihood ratio based test statistics","text":"<p>A very useful test statistic is the likelihood ratio of two models:</p> \\[ \\Lambda \\equiv \\frac{\\mathcal{L}_{\\mathcal{M}}}{\\mathcal{L}_{\\mathcal{M}'}} \\] <p>For technical and convenience reasons, often the negative logarithm of the likelihood ratio is used:</p> \\[t \\propto -\\log(\\Lambda) = \\log(\\mathcal{L}_{\\mathcal{M}'}) - \\log(\\mathcal{L}_{\\mathcal{M}})\\] <p>With different proportionality constants being most convenient in different circumstances. The negative sign is used by convention since usually the ratios are constructed so that the larger likelihood value must be in the denominator. This way, \\(t\\) is positive, and larger values of \\(t\\) represent larger differences between the likelihoods of the two models.</p>"},{"location":"what_combine_does/statistical_tests/#sets-of-test-statistics","title":"Sets of test statistics","text":"<p>If the parameters of both likelihoods in the ratio are fixed to a single value, then that defines a single test statistic. Often, however, we are interested in testing \"sets\" of models, parameterized by some set of values \\((\\vec{\\mu}, \\vec{\\nu})\\).</p> <p>This is important in limit setting for example, where we perform statistical tests to exclude entire ranges of the parameter space.</p> <p>In these cases, the likelihood ratio (or a function of it) can be used to define a set of test statistics parameterized by the model parameters. For example, a very useful set of test statistics is:</p> \\[ t_{\\vec{\\mu}} \\propto -\\log\\left(\\frac{\\mathcal{L}(\\vec{\\mu})}{\\mathcal{L}(\\vec{\\hat{\\mu}})}\\right) \\] <p>Where the likelihood parameters in the bottom are fixed to their maximum likelihood values, but the parameter \\(\\vec{\\mu}\\) indexing the test statistic appears in the numerator of the likelihood ratio.</p> <p>When calculating the p-values for these statistical tests, the p-values are calculated at each point in parameter space using the test statistic for that point. 
In other words, the observed and expected distributions of the test statistics are computed separately at each parameter point \\(\\vec{\\mu}\\) being considered.</p>"},{"location":"what_combine_does/statistical_tests/#expected-distributions-of-likelihood-ratio-test-statistics","title":"Expected distributions of likelihood ratio test statistics","text":"<p>Under appropriate conditions, the distribution of \\(t_\\vec{\\mu}\\) can be approximated analytically, via Wilks' Theorem or other extensions of that work. Then, the p-value of the observed test statistic can be calculated from the known form of the expected distribution. This is also true for a number of the other test statistics derived from the likelihood ratio, where asymptotic approximations have been derived.</p> <p>Combine provides asymptotic methods, for limit setting, significance tests, and computing confidence intervals which make used of these approximations for fast calculations.</p> <p>In the general case, however, the distribution of the test statistic is not known, and it must be estimated. Typically it is estimated by generating many sets of pseudo-data from the model and using the emprirical distribution of the test statistic.</p> <p>Combine also provides methods for limit setting, significance tests, and computing confidence intervals which use pseudodata generation to estimate the expected test-statistic distributions, and therefore don't depend on the asymptotic approximation. Methods are also provided for generating pseudodata without running a particular test, which can be saved and used for estimating expected distributions.</p>"},{"location":"what_combine_does/statistical_tests/#parameter-estimation-using-the-likelihood-ratio","title":"Parameter Estimation using the likelihood ratio","text":"<p>A common use case for likelihood ratios is estimating the values of some parameters, such as the parameters of interest, \\(\\vec{\\mu}\\). The point estimate for the parameters is simply the maximum likelihood estimate, but the likelihood ratio can be used for estimating the uncertainty as a confidence region.</p> <p>A confidence region for the parameters \\(\\vec{\\mu}\\) can be defined by using an appropriate test statistic. Typically, we use the profile likelihood ratio:</p> \\[ t_{\\vec{\\mu}} \\propto -\\log\\left(\\frac{\\mathcal{L}(\\vec{\\mu},\\vec{\\hat{\\nu}}(\\vec{\\mu}))}{\\mathcal{L}(\\vec{\\hat{\\mu}},\\vec{\\hat{\\nu}})}\\right) \\] <p>Where the likelihood in the top is the value of the likelihood at a point \\(\\vec{\\mu}\\) profiled over \\(\\vec{\\nu}\\); and the likelihood on the bottom is at the best fit point.</p> <p>Then the confidence region can be defined as the region where the p-value of the observed test-statistic is less than the confidence level:</p> \\[ \\{ \\vec{\\mu}_{\\mathrm{CL}} \\} =  \\{ \\vec{\\mu} : p_{\\vec{\\mu}} \\lt \\mathrm{CL} \\}.\\] <p>This construction will satisfy the frequentist coverage property that the confidence region contains the parameter values used to generate the data in \\(\\mathrm{CL}\\) fraction of cases.</p> <p>In many cases, Wilks' theorem can be used to calculate the p-value and the criteria on \\(p_{\\vec{\\mu}}\\) can be converted directly into a criterion on \\(t_{\\vec{\\mu}}\\) itself, \\(t_{\\vec{\\mu}} \\lt \\gamma_{\\mathrm{CL}}\\). 
Where \\(\\gamma_{\\mathrm{CL}}\\) is a known function of the confidence level which depends on the parameter space being considered.</p>"},{"location":"what_combine_does/statistical_tests/#discoveries-using-the-likelihood-ratio","title":"Discoveries using the likelihood ratio","text":"<p>A common method for claiming discovery is based on a likelihood ratio test by showing that the new physics model has a \"significantly\" larger likelihood than the standard model.</p> <p>This could be done by using the standard profile likelihood ratio test statistic:</p> \\[ t_{\\mathrm{NP}} = -2\\log\\left(\\frac{\\mathcal{L}(\\mu_{\\mathrm{NP}} = 0, \\vec{\\hat{\\nu}}(\\mu_{\\mathrm{NP}} = 0))}{\\mathcal{L}(\\hat{\\mu}_{\\mathrm{NP}},\\vec{\\hat{\\nu}})}\\right) \\] <p>Where \\(\\mu_{\\mathrm{NP}}\\) represents the strength of some new physics quantity, such as the cross section for creation of a new particle. However, this would also allow for claiming \"discovery\" in cases where  the best fit value is negative, i.e. \\(\\hat{\\mu} \\lt 0\\), which in particle physics is often an unphysical model, such as a negative cross section. In order to avoid such a situation, we typically use a modified test statistic:</p> \\[ q_{0} = \\begin{cases}     0 &amp; \\hat{\\mu} \\lt 0 \\\\     -2\\log\\left(\\frac{\\mathcal{L}(\\mathrm{\\mu}_{\\mathrm{NP}} = 0)}{\\mathcal{L}(\\hat{\\mu}_{\\mathrm{NP}})}\\right) &amp; \\hat{\\mu} \\geq 0 \\end{cases} \\] <p>which excludes the possibility of claiming discovery when the best fit value of \\(\\mu\\) is negative.</p> <p>As with the likelihood ratio test statistic, \\(t\\), defined above, under suitable conditions, analytic expressions for the distribution of \\(q_0\\) are known.</p> <p>Once the value \\(q_{0}(\\mathrm{data})\\) is calculated, it can be compared to the expected distribution of \\(q_{0}\\) under the standard model hypothesis to calculate the p-value. If the p-value is below some threshold, discovery is often claimed. In high-energy physics the standard threshold is \\(\\sim 5\\times10^{-7}\\).</p>"},{"location":"what_combine_does/statistical_tests/#limit-setting-using-the-likelihood-ratio","title":"Limit Setting using the likelihood ratio","text":"<p>Various test statistics built from likelihood ratios can be used for limit setting, i.e. excluding some parameter values.</p> <p>One could set limits on a parameter \\(\\mu\\) by finding the values of \\(\\mu\\) that are outside the confidence regions defined above by using the likelihood ratio test statistic:</p> \\[ t_{\\mu} = -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\hat{\\mu})}\\right) \\] <p>However, this could \"exclude\" \\(\\mu = 0\\) or small values of \\(\\mu\\) at a typical limit setting confidence level, such as 95%, while still not claiming a discovery. This is considered undesirable, and often we only want to set upper limits on the value of \\(\\mu\\), rather than excluding any possible set of parameters outside our chosen confidence interval.</p> <p>This can be done using a modified test statistic:</p> \\[ \\tilde{t}_{\\mu} = -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\min(\\mu,\\hat{\\mu}))}\\right) = \\begin{cases}     -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\hat{\\mu})}\\right)&amp; \\hat{\\mu} \\lt \\mu  \\\\     0 &amp;  \\mu \\leq \\hat{\\mu} \\end{cases} \\] <p>However, this can also have undesirable properties when the best fit value, \\(\\hat{\\mu}\\), is less than 0. In that case, we may set limits below 0. 
In order to avoid these situations, another modified test statistic can be used:</p> \\[ \\tilde{q}_{\\mu} = \\begin{cases}     -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\mu = 0)}\\right)&amp; \\hat{\\mu} \\lt 0  \\\\     -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\hat{\\mu})}\\right)&amp; 0 \\lt \\hat{\\mu} \\lt \\mu  \\\\     0&amp;  \\mu \\lt \\hat{\\mu} \\end{cases} \\] <p>Which also has a known distribution under appropriate conditions, or can be estimated from pseudo-experiments. One can then set a limit at a given confidence level, \\(\\mathrm{CL}\\), by finding the smallest value of \\(\\mu\\) for which \\(p_{\\mu} \\equiv P( t_{\\mu} \\gt t_{\\mu}(\\mathrm{data});\\mathcal{M}_{\\mu}) = 1 - \\mathrm{CL}\\). Larger values of \\(\\mu\\) will have smaller p-values and are considered excluded at the given confidence level.</p> <p>However, this procedure is rarely used, in almost every case we use a modified test procedure which uses the \\(\\mathrm{CL}_{s}\\) criterion, explained below.</p>"},{"location":"what_combine_does/statistical_tests/#the-cls-criterion","title":"The CLs criterion","text":"<p>Regardless of which of these test statistics is used, the standard test-methodology has some undesirable properties for limit setting.</p> <p>Even for an experiment with almost no sensitivity to new physics, 5% of the time the experiment is performed we expect the experimenter to find \\(p_{\\mu} \\lt 0.05\\) for small values of \\(\\mu\\) and set limits on parameter values to which the experiment is not sensitive!</p> <p>In order to avoid such situations the \\(\\mathrm{CL}_{s}\\) criterion was developped, as explained in these two papers. Rather than requiring \\(p_{\\mu} \\lt (1-\\mathrm{CL})\\) to exclude \\(\\mu\\), as would be done in the general framework described above, the \\(\\mathrm{CL}_{s}\\) criterion requires:</p> \\[ \\frac{p_{\\mu}}{1-p_{b}} \\lt (1-\\mathrm{CL}) \\] <p>Where \\(p_{\\mu}\\) is the usual probability of observing the observed value of the test statistic under the signal + background model with signal strength \\(\\mu\\), and \\(p_{b}\\) is the p-value for the background-only hypothesis, with the p-value defined using the opposite tail from the definition of \\(p_{\\mu}\\).</p> <p>Using the \\(\\mathrm{CL}_{s}\\) criterion fixes the issue of setting limits much stricter than the experimental sensitivity, because for values of \\(\\mu\\) to which the experiment is not sensitive the distribution of the test statistic under the signal hypothesis is nearly the same as under the background hypothesis. Therefore, given the use of opposite tails in the p-value definition, \\(p_{\\mu} \\approx 1-p_{b}\\), and the ratio approaches 1.</p> <p>Note that this means that a limit set using the \\(\\mathrm{CL}_{s}\\) criterion at a given \\(\\mathrm{CL}\\) will exclude the true parameter value \\(\\mu\\) with a frequency less than the nominal rate of \\(1-\\mathrm{CL}\\). The actual frequency at which it is excluded depends on the sensitivity of the experiment to that parameter value.</p>"},{"location":"what_combine_does/statistical_tests/#goodness-of-fit-tests-using-the-likelihood-ratio","title":"Goodness of fit tests using the likelihood ratio","text":"<p>The likelihood ratio can also be used as a measure of goodness of fit, i.e. 
describing how well the data match the model for binned data.</p> <p>A standard likelihood-based measure of the goodness of fit is determined by using the log likelihood ratio with the likelihood in the denominator coming from the saturated model.</p> \\[ t_{\\mathrm{saturated}} \\propto -\\log\\left(\\frac{\\mathcal{L}_{\\mathcal{M}}}{\\mathcal{L}_{\\mathcal{M}_\\mathrm{saturated}}}\\right) \\] <p>Here \\(\\mathcal{M}\\) is whatever model one is testing the goodness of fit for, and the saturated model is a model for which the prediction matches the observed value in every bin. Typically, the saturated model would be one in which there are as many free parameters as bins.</p> <p>This ratio therefore compares how well the actual data are fit to a hypothetical optimal fit.</p> <p>Unfortunately, the distribution of \\(t_{\\mathrm{saturated}}\\) usually is not known a priori and has to be estimated by generating pseudodata from the model \\(\\mathcal{M}\\) and calculating the empirical distribution of the statistic.</p> <p>Once the distribution is determined, a p-value for the statistic can be derived which indicates the probability of observing data with that quality of fit given the model, and therefore serves as a measure of the goodness of fit.</p>"},{"location":"what_combine_does/statistical_tests/#channel-compatibility-test-using-the-likelihood-ratio","title":"Channel Compatibility test using the likelihood ratio","text":"<p>When performing an analysis across many different channels (for example, different Higgs decay modes), it is often interesting to check the level of compatibility of the various channels.</p> <p>Combine implements a channel compatibility test, by considering a model, \\(\\mathcal{M}_{\\mathrm{c-independent}}\\), in which the signal is independent in every channel. As a test statistic, this test uses the likelihood ratio between the best fit of the nominal model and the best fit of the model with independent signal strengths for each channel:</p> \\[ t = -\\log\\left(\\frac{\\mathcal{L}_{\\mathcal{M}}(\\vec{\\hat{\\mu}},\\vec{\\hat{\\nu}})}{\\mathcal{L}_{\\mathcal{M}_{\\mathrm{c-indep}}}(\\vec{\\hat{\\mu}}_{c1}, \\vec{\\hat{\\mu}}_{c2}, ..., \\vec{\\hat{\\nu}})}\\right) \\] <p>The distribution of the test statistic is not known a priori, and needs to be calculated by generating pseudo-data samples.</p>"},{"location":"what_combine_does/statistical_tests/#other-statistical-tests","title":"Other Statistical Tests","text":"<p>While combine is a likelihood based statistical framework, it does not require that all statistical tests use the likelihood ratio.</p>"},{"location":"what_combine_does/statistical_tests/#other-goodness-of-fit-tests","title":"Other Goodness of Fit Tests","text":"<p>As well as the saturated goodness of fit test, defined above, combine implements Kolmogorov-Smirnov and Anderson-Darling goodness of fit tests.</p> <p>For the Kolmogorov-Smirnov (KS) test, the test statistic is the maximum absolute difference between the cumulative distribution functions of the data and the model:</p> \\[ D = \\max_{x} | F_{\\mathcal{M}}(x) - F_{\\mathrm{data}}(x) | \\] <p>Where \\(F(x)\\) is the Cumulative Distribution Function (i.e. cumulative sum) of the model or data at point \\(x\\).</p> <p>For the Anderson-Darling (AD) test, the test statistic is based on the integral of the square of the difference between the two cumulative distribution functions. 
The squared difference is modified by a weighting function which gives more importance to differences in the tails:</p> \\[ A^2 = \\int_{x_{\\mathrm{min}}}^{x_{\\mathrm{max}}} \\frac{ (F_{\\mathcal{M}}(x) - F_{\\mathrm{data}}(x))^2}{ F_\\mathcal{M}(x) (1 - F_{\\mathcal{M}}(x)) } \\mathrm{d}F_\\mathcal{M}(x) \\] <p>Notably, both the Anderson-Darling and Kolmogorov-Smirnov tests rely on the cumulative distribution. Because the ordering of different channels of a model is not well defined, the tests themselves are not unambiguously defined over multiple channels.</p>"}]}
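<p>As a concrete illustration of the goodness-of-fit statistics described above, the following minimal sketch (illustration only, not Combine's internal implementation; it uses numpy and invented toy bin contents) computes the saturated and Kolmogorov-Smirnov test statistics for a single binned channel.</p> <pre><code># Minimal sketch (not Combine's implementation) of two binned goodness-of-fit statistics.
import numpy as np

n_obs = np.array([12.0, 15.0, 9.0, 4.0, 1.0])   # observed counts per bin (toy numbers)
n_exp = np.array([10.0, 14.0, 11.0, 5.0, 2.0])  # model expectation per bin (toy numbers)

# Saturated statistic: -2 times the log of the Poisson likelihood ratio between the model
# and a "saturated" model whose prediction equals the observation in every bin.
with np.errstate(divide="ignore", invalid="ignore"):
    log_term = np.where(n_obs.astype(bool), n_obs * np.log(n_obs / n_exp), 0.0)
t_saturated = 2.0 * np.sum(n_exp - n_obs + log_term)

# Kolmogorov-Smirnov statistic: maximum distance between normalised cumulative sums.
F_model = np.cumsum(n_exp) / n_exp.sum()
F_data = np.cumsum(n_obs) / n_obs.sum()
D = np.max(np.abs(F_model - F_data))

print(f"t_saturated = {t_saturated:.3f}, KS D = {D:.3f}")
# The p-value for t_saturated would then be estimated from pseudo-experiments,
# as described above, rather than from an analytic distribution.
</code></pre>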
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"<p> These pages document the RooStats / RooFit - based software tool used for statistical analysis within the CMS experiment - Combine. Note that while this tool was originally developed in the Higgs Physics Analysis Group (PAG), its usage is now widespread within CMS. </p> <p>Combine provides a command-line interface to many different statistical techniques, available inside RooFit/RooStats, that are used widely inside CMS.</p> <p>The package exists on GitHub under https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit</p> <p>For more information about Git, GitHub and its usage in CMS, see http://cms-sw.github.io/cmssw/faq.html</p> <p>The code can be checked out from GitHub and compiled on top of a CMSSW release that includes a recent RooFit/RooStats, or via standalone compilation without CMSSW dependencies. See the instructions for installation of Combine below.</p>"},{"location":"#installation-instructions","title":"Installation instructions","text":"<p>Installation instructions and recommended versions can be found below.  Since v9.0.0, the versioning follows the semantic versioning 2.0.0 standard. Earlier versions are not guaranteed to follow the standard.</p>"},{"location":"#within-cmssw-recommended-for-cms-users","title":"Within CMSSW (recommended for CMS users)","text":"<p>The instructions below are for installation within a CMSSW environment. For end users that do not need to commit or do any development, the following recipes should be sufficient. To choose a release version, you can find the latest releases on github under https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/releases</p>"},{"location":"#combine-v10-recommended-version","title":"Combine v10 - recommended version","text":"<p>The nominal installation method is inside CMSSW. The current release targets the CMSSW <code>14_1_X</code> series because of the recent switch to el9 at lxplus machines.</p> <p><pre><code>cmsrel CMSSW_14_1_0_pre4\ncd CMSSW_14_1_0_pre4/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag is v10.0.2: see release notes</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v10.0.2\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#combine-v9","title":"Combine v9","text":"<p>The nominal installation method is inside CMSSW. The current release targets the CMSSW <code>11_3_X</code> series because this release has both python2 and python3 ROOT bindings, allowing a more gradual migration of user code to python3. Combine is fully python3-compatible and, with some adaptations, can also work in 12_X releases. 
</p> <p>CMSSW <code>11_3_X</code> runs on slc7, which can be setup using apptainer (see detailed instructions): <pre><code>cmssw-el7\ncmsrel CMSSW_11_3_4\ncd CMSSW_11_3_4/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag is v9.2.1: see release notes</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v9.2.1\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#combine-v8-cmssw_10_2_x-release-series","title":"Combine v8: <code>CMSSW_10_2_X</code> release series","text":"<p>Setting up the environment (once):</p> <p><pre><code>cmssw-el7\ncmsrel CMSSW_10_2_13\ncd CMSSW_10_2_13/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag is v8.2.0: see release notes</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v8.2.0\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#slc6cc7-release-cmssw_8_1_x","title":"SLC6/CC7 release <code>CMSSW_8_1_X</code>","text":"<p>Setting up OS using apptainer (see detailed instructions):</p> <p><pre><code># For CC7:\ncmssw-el7\n# For SLC6:\ncmssw-el6\n\ncmsrel CMSSW_8_1_0\ncd CMSSW_8_1_0/src\ncmsenv\ngit clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n</code></pre> Update to a recommended tag - currently the recommended tag for CMSSW_8_1_X is v7.0.13:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit\ngit fetch origin\ngit checkout v7.0.13\nscramv1 b clean; scramv1 b # always make a clean build\n</code></pre>"},{"location":"#oustide-of-cmssw-recommended-for-non-cms-users","title":"Oustide of CMSSW (recommended for non-CMS users)","text":"<p>Pre-compiled versions of the tool are available as container images from the CMS cloud. These containers can be downloaded and run using Docker. If you have docker running you can pull and run the image using, </p> <p><pre><code>docker run --name combine -it gitlab-registry.cern.ch/cms-cloud/combine-standalone:&lt;tag&gt;\n</code></pre> where you must replace <code>&lt;tag&gt;</code> with a particular version of the tool - eg - <code>v9.2.1</code>. See the top of this page for the latest recommended versions. </p> <p>You will now have the compiled Combine binary available as well as the complete package of tool.  The container can be re-started using <code>docker start -i combine</code>. </p>"},{"location":"#standalone-compilation","title":"Standalone compilation","text":"<p>The standalone version can be easily compiled using cvmfs as it relies on dependencies that are already installed at <code>/cvmfs/cms.cern.ch/</code>. Access to <code>/cvmfs/cms.cern.ch/</code> can be obtained from lxplus machines or via <code>CernVM</code>. See CernVM for further details on the latter. 
In case you do not want to use the <code>cvmfs</code> area, you will need to adapt the locations of the dependencies listed in both the <code>Makefile</code> and <code>env_standalone.sh</code> files.</p> <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit/ \n# git checkout &lt;some release&gt;\n. env_standalone.sh\nmake -j 4\n</code></pre> <p>You will need to source <code>env_standalone.sh</code> each time you want to use the package, or add it to your login environment.</p>"},{"location":"#compilation-of-slc7-compatible-versions","title":"Compilation of slc7 compatible versions","text":"<p>For Combine versions before the v10 release you will need to do the compilation in an slc7 environment using apptainer. You can then source the standalone script outside of the apptainer. On lxplus this can be done as follows:</p> <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit/ \n# git checkout &lt;some release&gt;\ncmssw-el7\n. env_standalone.sh\nmake -j 4\nexit\nsource env_standalone.sh\n</code></pre>"},{"location":"#standalone-compilation-with-lcg","title":"Standalone compilation with LCG","text":"<p>For compilation outside of CMSSW, for example to use ROOT versions not yet available in CMSSW, one can compile against LCG releases. The current default is to compile with LCG_102, which contains ROOT 6.26: <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\nsource env_lcg.sh \nmake LCG=1 -j 8\n</code></pre> To change the LCG version, edit <code>env_lcg.sh</code>. </p> <p>The resulting binaries can be moved for use in a batch job if the following files are included in the job tarball: <pre><code>tar -zcf Combine_LCG_env.tar.gz build interface src/classes.h --exclude=obj\n</code></pre></p>"},{"location":"#standalone-compilation-with-conda","title":"Standalone compilation with <code>conda</code>","text":"<p>This recipe will work for both linux and MacOS <pre><code>git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit\ncd HiggsAnalysis/CombinedLimit\n\nconda install --name base mamba # faster conda\nmamba env create -f conda_env.yml\n\nconda activate combine\nsource set_conda_env_vars.sh\n# Need to reactivate\nconda deactivate\nconda activate combine\n\nmake CONDA=1 -j 8\n</code></pre></p> <p>Using Combine from then on should only require sourcing the conda environment  <pre><code>conda activate combine\n</code></pre></p> <p>Note: on OS X, Combine can only accept workspaces, so run <code>text2workspace.py</code> first. This is due to an issue with child processes and <code>LD_LIBRARY_PATH</code> (see note in Makefile)</p>"},{"location":"#standalone-compilation-with-cernvm","title":"Standalone compilation with CernVM","text":"<p>Combine, either standalone or not, can be compiled via CVMFS using access to <code>/cvmfs/cms.cern.ch/</code>  obtained using a virtual machine - <code>CernVM</code>. To use <code>CernVM</code> you should have access to CERN IT resources. If you are a CERN user you can use your account, otherwise you can request a lightweight account. 
If you have a CERN user account, we strongly suggest you simply run one of the other standalone installations, which are simpler and faster than using a VM.</p> <p>You should have a working VM on your local machine, compatible with CernVM, such as <code>VirtualBox</code>. All the required software can be downloaded here. At least 2GB of disk space should be reserved on the virtual machine for Combine to work properly and the machine must be contextualized to add the <code>CMS</code> group to CVMFS. A minimal working setup is described below.</p> <ol> <li> <p>Download the CernVM-launcher for your operating system, following the instructions available [<code>here</code>] for your operating system (https://cernvm.readthedocs.io/en/stable/cpt-launch.html#installation</p> </li> <li> <p>Prepare a CMS context. You can use the CMS open data one already available on gitHub:  <code>wget https://raw.githubusercontent.com/cernvm/public-contexts/master/cms-opendata-2011.context)</code></p> </li> <li> <p>Launch the virtual machine <code>cernvm-launch create --name combine --cpus 2 cms-opendata-2011.context</code></p> </li> <li> <p>In the VM, proceed with an installation of combine</p> </li> </ol> <p>Installation through CernVM is maintained on a best-effort basis and these instructions may not be up to date. </p>"},{"location":"#what-has-changed-between-tags","title":"What has changed between tags?","text":"<p>You can generate a diff of any two tags (eg for <code>v9.2.1</code> and <code>v9.2.0</code>) by using the following url:</p> <p>https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/compare/v9.2.0...v9.2.1</p> <p>Replace the tag names in the url to any tags you would like to compare.</p>"},{"location":"#for-developers","title":"For developers","text":"<p>We use the Fork and Pull model for development: each user creates a copy of the repository on GitHub, commits their requests there, and then sends pull requests for the administrators to merge.</p> <p>Prerequisites</p> <ol> <li> <p>Register on GitHub, as needed anyway for CMSSW development: http://cms-sw.github.io/cmssw/faq.html</p> </li> <li> <p>Register your SSH key on GitHub: https://help.github.com/articles/generating-ssh-keys </p> </li> <li> <p>Fork the repository to create your copy of it: https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit/fork (more documentation at https://help.github.com/articles/fork-a-repo )</p> </li> </ol> <p>You will now be able to browse your fork of the repository from https://github.com/your-github-user-name/HiggsAnalysis-CombinedLimit</p> <p>We strongly encourage you to contribute any developments you make back to the main repository.  See contributing.md for details about contributing. </p>"},{"location":"#combineharvestercombinetools","title":"CombineHarvester/CombineTools","text":"<p>CombineHarvester/CombineTools is a package for the creation of datacards/workspaces used with Combine v10 for a number of analyses in CMS. See the <code>CombineHarvester</code> documentation pages for more details on using this tool and additional features available in the full package.</p> <p>This package also comes with useful features for Combine such as the automated datacard validation (see instructions). The repository can be checked out and compiled using, </p> <pre><code>git clone https://github.com/cms-analysis/CombineHarvester.git CombineHarvester\nscram b\n</code></pre> <p>See the <code>CombineHarvester</code> documentation for full instructions and reccomended versions. 
</p> <p>Info</p> <p>Starting with Combine v10, specific CombineTool functionalities for job submission and parallelization (<code>combineTool.py</code>) as well as many plotting functions have been integrated into the Combine package. For these tasks you no longer have to follow the instructions above.</p>"},{"location":"#citation","title":"Citation","text":"<p>If you use Combine, please cite the following CMS publication here. </p> Show BibTex Entry <pre><code>@unpublished{\n    CMS:2024onh,\n    author = \"Hayrapetyan, Aram and others\",\n    collaboration = \"CMS\",\n    title = \"The {CMS} statistical analysis and combination tool: {\\textsc{Combine}}\",\n    eprint = \"2404.06614\",\n    archivePrefix = \"arXiv\",\n    primaryClass = \"physics.data-an\",\n    reportNumber = \"CMS-CAT-23-001, CERN-EP-2024-078\",\n    year = \"2024\",\n    note = \"Submitted to \\textit{Comput. Softw. Big Sci.}\"\n}\n</code></pre>"},{"location":"CernVM/","title":"CernVM","text":""},{"location":"CernVM/#standalone-use-inside-cernvm","title":"Standalone use inside CernVM","text":"<p>Combine can be used standalone inside CernVM by adding the <code>CMS</code> group to the CVMFS Configuration. A minimal <code>CernVM</code> working context setup can be found in the CernVM Marketplace under <code>Experimental/HiggsCombine</code> or at https://cernvm-online.cern.ch/context/view/9ee5960ce4b143f5829e72bbbb26d382. At least 2GB of disk space should be reserved on the virtual machine for Combine to work properly.</p>"},{"location":"CernVM/#available-machines-for-standalone-combine","title":"Available machines for standalone combine","text":"<p>The standalone version can be easily compiled via CVMFS as it relies on dependencies which are already installed at /cvmfs/cms.cern.ch/. Access to /cvmfs/cms.cern.ch/ can be obtained from lxplus machines or via <code>CernVM</code>. The only requirement will be to add the CMS group to the CVMFS configuration as shown in the picture</p> <p></p> <p>At least 2GB of disk space should be reserved on the virtual machine for combine to work properly. A minimal CernVM working context setup can be found in the CernVM Marketplace under <code>Experimental/HiggsCombine</code>. </p> <p>To use this predefined context, first locally launch the CernVM (eg you can use the .ova with VirtualBox, by downloading it from here and launching the downloaded file). You can click on \"pair an instance of CernVM\" from the cernvm-online dashboard, which displays a PIN. In the VirtualBox terminal, pair the virtual machine with this PIN code (enter it in the terminal using #PIN, eg <code>#123456</code>). After this, you will be asked again for a username (use <code>user</code>) and then a password (use <code>hcomb</code>).</p> <p>In case you do not want to use the cvmfs area, you will need to adapt the location of the dependencies listed in both the Makefile and env_standalone.sh files.</p>"},{"location":"releaseNotes/","title":"Release notes","text":""},{"location":"releaseNotes/#cmssw-10_2_x-v800","title":"CMSSW 10_2_X - v8.0.0","text":"<p>This release contains all of the changes listed for v7.0.13 below. In addition:</p> <ul> <li>New documentation pages, using the mkdocs framework. The documentation source is included in the repository as simple markdown files. Users are welcome to make additions and corrections as pull requests to this repo.</li> <li>It is now possible to include additional constraint terms for regularisation when unfolding using combine. 
Detailed documentation for this is given here.</li> <li>The option <code>-S 0</code> to remove all systematic uncertainties has been removed. Instead, to freeze all constrained nuisance parameters the option <code>--freezeParameters allConstrainedNuisances</code> should be used, which replaces the previous shortcut of <code>--freezeParameters all</code>.</li> <li>The possibility to use some old method names has now been fully removed. When setting the <code>-M</code> option, <code>FitDiagnostics</code>, <code>AsymptoticLimits</code> and <code>Significance</code> must be used instead of, respectively, <code>MaxLikelihoodFit</code>, <code>Asymptotic</code> and <code>ProfileLikelihood</code>.</li> </ul>"},{"location":"releaseNotes/#cmssw-8_1_x-v7013","title":"CMSSW 8_1_X - v7.0.13","text":"<ul> <li>Nuisance <code>edit</code> selections for bins, processes or systematic names now require a complete string match. For example, <code>nuisance edit add procA binA [...]</code> will no longer match <code>procAB</code> and <code>binAB</code>. Note that regex selections can still be used to match multiple labels, but again are now required to match the full strings.</li> <li>Nuisance parameters can now be frozen using attributes that have been assigned to the corresponding RooRealVars. Syntax is <code>--freezeWithAttributes attr1,attr2,...,attrN</code>.</li> <li>For Higgs analyses: added YR4 cross sections, branching ratios and partial width uncertainties in <code>data/lhc-hxswg/sm/</code>, as used in HIG-17-031</li> <li>[EXPERIMENTAL] For binned analyses using autoMCStats a faster implementation of the vertical template morphing for shape uncertainties can be enabled at runtime with the option <code>--X-rtd FAST_VERTICAL_MORPH</code>. Any results using this flag should be validated carefully against the default.</li> </ul>"},{"location":"model_building_tutorial2024/model_building_exercise/","title":"Building statistical models with Combine","text":""},{"location":"model_building_tutorial2024/model_building_exercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of Combine; please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>After setting up Combine, you can access the working directory for this tutorial, which contains all of the inputs and scripts needed in this exercise:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/\ngit checkout main \nscram b -j 8\ncd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#exercise-outline","title":"Exercise outline","text":"<p>This tutorial focuses on and extends the model building topic; it is not going to give a full picture of the statistical methods, which are extensively covered in the long exercise and statistical methods exercise. 
</p> <p>1) Building analysis with templates </p> <p>2) Using keywords </p> <p>3) Control regions </p> <p>4) Rate parameters </p> <p>5) Extra arguments  </p> <p>6) Physics Models</p>"},{"location":"model_building_tutorial2024/model_building_exercise/#introduction","title":"Introduction","text":"<p>The most general definition for the binned model likelihood can be given as </p> \\[ \\mathcal{L} =  \\mathcal{L}_\\mathrm{primary} \\cdot \\mathcal{L}_\\mathrm{auxiliary} = \\prod_{c=1}^{N_c} \\prod_{b=1}^{N_b^c} \\mathrm{Poiss}(n_{cb}; n^\\mathrm{exp}_{cb}(\\vec{\\mu},\\vec{\\nu})) \\cdot \\prod_{e=1}^{N_E}  p_e(y_e ; \\nu_e) \\] <p>Where \\(c\\) indexes the channel, \\(b\\) indexes the histogram bin, and \\(e\\) indexes the nuisance parameter.</p> <p>The generic model of the expected event count in a given bin, \\(n^\\mathrm{exp}_{cb}\\), implemented in combine for template based analyses is given by:</p> \\[n^\\mathrm{exp}_{cb} = \\mathrm{max}(0, \\sum_{p} M_{cp}(\\vec{\\mu})N_{cp}(\\nu_G, \\vec{\\nu}_L,\\vec{\\nu}_S,\\vec{\\nu}_{\\rho})\\omega_{cbp}(\\vec{\\nu}_S) + E_{cb}(\\vec{\\nu}_B) ) \\] <p>In terms of datacard structure there are several differences with respect to the counting datacard:</p> <ul> <li>A new block of lines at the top defining how channels and processes are mapped to the histograms (more than one line can be used)</li> <li>In the list of systematic uncertainties we now have entries marked as <code>shape</code></li> </ul> <p>The \"shapes\" line has to follow the following syntax: </p> <pre><code>shapes   &lt;process_name&gt;   &lt;channel_name&gt;   &lt;path/to/input_shape.root&gt;   &lt;histograms_nominal&gt;  &lt;histograms_with_variations&gt;\n</code></pre> <p>To start the hands-on for this section:  <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/simple_shape\n</code></pre></p> <p>The input shapes for the first example (<code>datacard-2-template-analysis.txt</code>) are located in the <code>template-analysis-datacard-input.root</code>, it contains the observed distribution <code>data_obs</code>, the nominal histograms for each process and systematic uncertainties templates:  </p> <pre><code>root [0] \nAttaching file template-analysis-datacard-input.root as _file0...\n(TFile *) 0x556fd5b0fea0\nroot [1] .ls\nTFile**     template-analysis-datacard-input.root   \n TFile*     template-analysis-datacard-input.root   \n  KEY: TH1F background;1    Histogram of background__x\n  KEY: TH1F background_alphaUp;1    Histogram of background__x\n  KEY: TH1F background_alphaDown;1  Histogram of background__x\n  KEY: TH1F data_obs;1  Histogram of data_obs__x\n  KEY: TH1F signal;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaUp;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaDown;1  Histogram of signal__x\n</code></pre> <p>To define the mapping to the systematic uncertainties templates the <code>$SYSTEMATIC</code> keyword should be used, which connects the systematic uncertainties marked as <code>shape</code> type with the input shapes. 
</p> <pre><code>imax 1\njmax 1\nkmax 4\n# ---------------\nshapes  signal  ch1 template-analysis-datacard-input.root signal signal_$SYSTEMATIC\nshapes  background  ch1 template-analysis-datacard-input.root background background_$SYSTEMATIC\n# ---------------\nbin         ch1\nobservation 85\n# ------------------------------\nbin             ch1        ch1\nprocess         signal     background\nprocess         0          1\nrate            24         100\n# --------------------------------\nlumi     lnN    1.1       1.0\nbgnorm   lnN    -         1.3\nalpha  shape    -          1   # uncertainty in the background template.\nsigma  shape    0.5        -   # uncertainty in the signal template.\n</code></pre> <p>To simplify the shape mapping line the keywords <code>$PROCESS</code>, <code>$CHANNEL</code> can be used. The <code>$PROCESS</code> keyword is associated with the processes listed in the datacard: <code>[signal, background]</code>, it is also possible to use the <code>*</code> wildcard to map multiple processes and/or channels with one line as shown below. </p> <pre><code>imax 1\njmax 1\nkmax 4\n# ---------------\nshapes * * template-analysis-datacard-input.root $PROCESS $PROCESS_$SYSTEMATIC\n# ---------------\nbin         ch1\nobservation 85\n# ------------------------------\nbin             ch1        ch1\nprocess         signal     background\nprocess         0          1\nrate            24         100\n# --------------------------------\nlumi     lnN    1.1       1.0\nbgnorm   lnN    -         1.3\nalpha  shape    -          1   # uncertainty in the background template.\nsigma  shape    0.5        -   # uncertainty in the signal template.\n</code></pre> <p>If there are more than one category it can be useful to store the input shapes corresponding to different regions in separate <code>TDirectory</code>s and use $CHANNEL keyword as shown below: </p> <pre><code>shapes * * &lt;input-file.root&gt; $CHANNEL/$PROCESS $CHANNEL/$PROCESS_$SYSTEMATIC\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#keywords","title":"Keywords","text":"<p>Go to the datacards location corresponding to this section of the tutorial: <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/keywords\n</code></pre></p> <p>The datacard can also contain <code>$MASS</code> keyword, it allows to setup a single datacard for various mass points. It will be replaced with the value passed to <code>-m</code> option when running the tool. In addition, user-defined keywords can be used. Any word in the datacard <code>$WORD</code> will be replaced by <code>VALUE</code> when including the option <code>--keyword-value WORD=VALUE</code>. This option can be repeated multiple times for multiple keywords.</p> <p><pre><code> KEY: TH1D  ggH110;1    \n KEY: TH1D  bbH110;1    \n KEY: TH1D  ggH110_CMS_eff_t_mssmHigh_tt_13TeVDown;1    \n.... \n KEY: TH1D  ggH120;1    \n KEY: TH1D  bbH120;1    \n KEY: TH1D  ggH120_CMS_eff_t_mssmHigh_tt_13TeVDown;1    \n.....\n</code></pre> In the <code>htt_tt_9_13TeV.txt</code> datacard you can find the following lines: </p> <p><pre><code>shapes * htt_tt_9_13TeV htt_input.root htt_tt_9_13TeV/$PROCESS htt_tt_9_13TeV/$PROCESS_$SYSTEMATIC\nshapes bbH htt_tt_9_13TeV htt_input.root htt_tt_9_13TeV/bbH$MASS htt_tt_9_13TeV/bbH$MASS_$SYSTEMATIC\nshapes ggH htt_tt_9_13TeV htt_input.root htt_tt_9_13TeV/ggH$MASS htt_tt_9_13TeV/ggH$MASS_$SYSTEMATIC\n</code></pre> defining the mapping for all mass points at the same time. 
One can use this datacard to set 95% CL limits for different mass points by assigning the mass value in the command below.   <pre><code>combine -M AsymptoticLimits  -d htt_tt_9_13TeV.txt  -m &lt;mass_value&gt;\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#simultaneous-fit-in-multiple-categories","title":"Simultaneous fit in multiple categories","text":"<pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/control_regions\n</code></pre> <p>To combine the datacards corresponding to various (independent) regions into a single card one can use <code>combineCards.py</code>. </p> <p><pre><code>combineCards.py htt_tt_9_13TeV=htt_tt_9_13TeV.txt htt_tt_8_13TeV=htt_tt_8_13TeV.txt &gt;htt_tt_SRs.txt\n</code></pre> The combined card <code>htt_tt_SRs.txt</code> now has two categories:  <pre><code>----------------------------------------------------------------------------------------------------------------------------------\nbin          htt_tt_9_13TeV  htt_tt_8_13TeV\nobservation  3416            105545\n----------------------------------------------------------------------------------------------------------------------------------\n</code></pre></p>"},{"location":"model_building_tutorial2024/model_building_exercise/#rate-parameters","title":"Rate parameters","text":"<pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/control_regions\n</code></pre> <p>It is quite common to use data-driven background estimation methods. In Combine one can perform a simultaneous fit of signal and control regions. This automatically handles the statistical uncertainty due to the number of data events in the control region, correctly handles signal contamination in the control region, and properly takes into account systematic uncertainties affecting the backgrounds in the control regions. </p> <p>In the working directory for this section you can find the <code>htt_zmm_8_13TeV.txt, htt_zmm_9_13TeV.txt and htt_ttbar_1_13TeV.txt</code> cards, corresponding to the control regions enriched in ZLL and ttbar processes, in addition to the signal regions from the previous step. Let's combine all of the regions into one datacard. </p> <p><pre><code>combineCards.py htt_zmm_9_13TeV=htt_zmm_9_13TeV.txt htt_zmm_8_13TeV=htt_zmm_8_13TeV.txt htt_ttbar_1_13TeV=htt_ttbar_1_13TeV.txt htt_tt_9_13TeV=htt_tt_9_13TeV.txt htt_tt_8_13TeV=htt_tt_8_13TeV.txt &gt;htt_tt_combined.txt\n</code></pre> Now the <code>htt_tt_combined.txt</code> contains signal and control regions. 
To allow the rate of the background processes to be corrected from the control regions we can define common rate parameters, which linearly scale the predicted rates specified in the datacard, using the syntax </p> <pre><code>&lt;rate_param_name&gt; rateParam &lt;category&gt; &lt;process&gt; &lt;initial_value&gt; [min_value,max_value]\n</code></pre> <p>The following lines define <code>rate_TT</code> and <code>rate_ZMM</code> rate parameters scaling TT and ZLL processes in all regions simultaneously:  <pre><code>rate_TT                 rateParam  *          TT         1 [0,5]\nrate_TT                 rateParam  *          TTT        1 [0,5]\nrate_ZMM                rateParam  *          ZLL        1 [0,2]\n</code></pre></p> <p>Note that by default rate parameters are freely floating (unconstrained) parameters in Combine; however, it is possible to add a constraint term to the likelihood by adding a <code>param</code> modifier with the same name as the rate parameter: </p> <p><pre><code>rate_TT param &lt;mean&gt; &lt;sigma&gt;  \n</code></pre> Task: Add a <code>param</code> nuisance named <code>rate_TT</code> with <code>mean = 1.</code> and <code>sigma = 1</code>, and check how the uncertainty on the <code>rate_TT</code> parameter changes. What happens if you change the width of the constraint term?</p> <p>In addition, modifiers that are functions of other parameters can be included using the following syntax:</p> <p><pre><code>name rateParam bin process formula args\n</code></pre> This can be useful to constrain the ratio of two processes as shown below:</p> <pre><code>rate_A rateParam *  process_A  1\nratio_BtoA param 1 1\nrate_B rateParam *  process_B  @0*@1 rate_A,ratio_BtoA\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#extra-arguments","title":"Extra arguments","text":"<pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/model_building_2024/PhysicsModels\n</code></pre> <p>If one wants to connect different models with common parameters, or just use external functions, it is possible to import parameters defined within external workspaces with <code>extArg</code>: </p> <p><pre><code>name extArg rootfile:workspacename\n</code></pre> The <code>extArg</code> syntax allows one to import a <code>RooAbsReal</code> object from an external workspace. This object can be another free floating parameter, or a function of other parameters. In this section we are going to import <code>RooSpline1D</code> objects which define how various Higgs production cross sections depend on the Higgs mass (<code>MH</code> parameter). </p> <p>The datacard we are going to use in this section, <code>htt_tt_125_8TeV.txt</code>, corresponds to the 8 TeV analysis and a Higgs mass of 125 GeV. To exercise the <code>extArg</code> features, let's rescale the signal templates to 13 TeV cross section values.  
The 13 and 8 TeV cross sections predictions from YR4 are stored in the <code>$CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/</code> in <code>sm_yr4_13TeV.root</code> and <code>sm_yr4_8TeV.root</code> files respectively, let's inspect the contents of <code>sm_yr4_13TeV.root</code>: </p> <p><pre><code> TFile*     /afs/cern.ch/work/a/anigamov/rootv630/CMSSW_14_1_0_pre4/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root \n  KEY: RooWorkspace xs_13TeV;1  xs_13TeV\n  KEY: TProcessID   ProcessID0;1    2fd49e90-f1a0-11e8-9717-b052b8bcbeef\nroot [2] xs_13TeV-&gt;Print()\n\nRooWorkspace(xs_13TeV) xs_13TeV contents\n\nvariables\n---------\n(MH)\n\nfunctions\n--------\nRooSpline1D::WH_13TeV[ xvar=MH ] = 1.369\nRooSpline1D::WminusH_13TeV[ xvar=MH ] = 0.5313\nRooSpline1D::WplusH_13TeV[ xvar=MH ] = 0.838\nRooSpline1D::ZH_13TeV[ xvar=MH ] = 0.8824\nRooSpline1D::bbH_13TeV[ xvar=MH ] = 0.4863\nRooSpline1D::ggH_13TeV[ xvar=MH ] = 48.52\nRooSpline1D::ggZH_13TeV[ xvar=MH ] = 0.1227\nRooFormulaVar::qqZH_13TeV[ actualVars=(ZH_13TeV,ggZH_13TeV) formula=\"@0-@1\" ] = 0.7597\nRooSpline1D::tHW_13TeV[ xvar=MH ] = 0.01517\nRooSpline1D::tHq_13TeV[ xvar=MH ] = 0.07714\nRooSpline1D::ttH_13TeV[ xvar=MH ] = 0.5065\nRooSpline1D::vbfH_13TeV[ xvar=MH ] = 3.779\n</code></pre> The <code>RooSpline1D::WH_13TeV[ xvar=MH ] = 1.369</code> contains cross sections values interpolated between various Higgs mass points.  We can import them into our model as shown below</p> <p><pre><code>vbfH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\nggH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\nZH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\nWH_13TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root:xs_13TeV\n\nvbfH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\nggH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\nZH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\nWH_8TeV     extArg     $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_8TeV.root:xs_8TeV\n</code></pre> Then we can define <code>rateParam</code> as functions of imported <code>extArg</code>s to rescale signal processes WH, ZH, ggH, qqH:  <pre><code>WH_8to13TeV    rateParam  *   WH    @0/@1         WH_13TeV,WH_8TeV\nZH_8to13TeV    rateParam  *   ZH    @0/@1         ZH_13TeV,ZH_8TeV\nggH_8to13TeV    rateParam  *   ggH    @0/@1         ggH_13TeV,ggH_8TeV\nvbfH_8to13TeV    rateParam  *   qqH    @0/@1         vbfH_13TeV,vbfH_8TeV\n</code></pre></p> <p>When running Combine methods (e.g. <code>combine -M MultiDimFit --algo singles</code>) with these parameters you have to freeze <code>MH</code>, i.e. add <code>--freezeParameters MH</code> option.  </p> <p>Advanced task: rescale the signal templates to the cross-section corresponding to a different MH value (e.g. 120 GeV). 
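<p>As a starting point for the advanced task above, the following minimal PyROOT sketch (one possible approach, not necessarily the intended tutorial solution) evaluates the cross-section splines from <code>sm_yr4_13TeV.root</code> at a different value of <code>MH</code>, using the workspace and function names listed above.</p> <pre><code># Minimal PyROOT sketch: evaluate the YR4 13 TeV cross-section splines at another MH value.
import os
import ROOT

fname = os.path.expandvars(
    "$CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/lhc-hxswg/sm/sm_yr4_13TeV.root"
)
f = ROOT.TFile.Open(fname)
w = f.Get("xs_13TeV")          # RooWorkspace holding the RooSpline1D objects

for mh in (120.0, 125.0):
    w.var("MH").setVal(mh)
    xs_ggh = w.function("ggH_13TeV").getVal()
    xs_vbf = w.function("vbfH_13TeV").getVal()
    print(f"MH = {mh:.0f} GeV: ggH_13TeV = {xs_ggh:.2f}, vbfH_13TeV = {xs_vbf:.2f}")
</code></pre>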
</p>"},{"location":"model_building_tutorial2024/model_building_exercise/#physics-models","title":"Physics Models","text":"<p>With physics model one can instruct Combine how to scale the signal (background) yields with parameters of the model: \\(M_{cp}(\\vec{\\mu})\\) from </p> \\[n^\\mathrm{exp}_{cb} = \\mathrm{max}(0, \\sum_{p} M_{cp}(\\vec{\\mu})N_{cp}(\\nu_G, \\vec{\\nu}_L,\\vec{\\nu}_S,\\vec{\\nu}_{\\rho})\\omega_{cbp}(\\vec{\\nu}_S) + E_{cb}(\\vec{\\nu}_B) ) \\] <p>In Combine we can use PhysicsModel as a base class.</p> <p>This class has several useful methods, but the most important ones are <code>doParametersOfInterest()</code> which defines parameters and functions of the model, and <code>getYieldScale(self, bin, process)</code> defines how expected events are scaled with the model parameters.</p> <pre><code>class PhysicsModelBase(six.with_metaclass(ABCMeta, object)):\n...\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n\n    def getYieldScale(self, bin, process):\n        \"Return the name of a RooAbsReal to scale this yield by or the two special values 1 and 0 (don't scale, and set to zero)\"\n        return \"r\" if self.DC.isSignal[process] else 1\n...\n</code></pre> <p>There are many models available to use for different physics cases, follow the link for more information. In the following sections we will discuss how one can construct custom models. </p> <p>To use a different physics model instead of the default one, we are going to use the option -P as in</p> <pre><code>text2workspace.py datacard.txt -P HiggsAnalysis.CombinedLimit.PythonFile:modelName\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#default-physics-model","title":"Default physics model","text":"<p>The default physics model implemented in Combine defines a single POI that linearly scales all signal processes. We use this model by default when running <code>text2workspace.py -m &lt;mass_value&gt; &lt;datacard.txt&gt;</code>. </p> \\[ M_{cp}(\\mu) = \\begin{cases}     \\mu  &amp;\\mathrm{if\\ } p \\in \\mathrm{signal} \\\\     1    &amp;\\mathrm{otherwise} \\end{cases} \\] <pre><code>class PhysicsModel(PhysicsModelBase):\n    \"\"\"Example class with signal strength as only POI\"\"\"\n\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"r[1,0,20]\")\n        self.modelBuilder.doSet(\"POI\", \"r\")\n        # --- Higgs Mass as other parameter ----\n        if self.options.mass != 0:\n            if self.modelBuilder.out.var(\"MH\"):\n                self.modelBuilder.out.var(\"MH\").removeRange()\n                self.modelBuilder.out.var(\"MH\").setVal(self.options.mass)\n            else:\n                self.modelBuilder.doVar(\"MH[%g]\" % self.options.mass)\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#multi-signal-model","title":"Multi Signal model","text":"<p>Combine already contains a model <code>HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel</code> that can be used to assign different signal strengths to multiple processes in a datacard, configurable from the command line using the mapping <code>--PO 'map=&lt;bin&gt;/&lt;process&gt;:&lt;parameter_name&gt;'</code>. The wildcard <code>*</code> are allowed for <code>&lt;bin&gt;</code> and <code>&lt;process&gt;</code> entries. 
The following command assigns the <code>r_ggH</code> signal strength to scale <code>ggH</code> processes in all regions (<code>bin</code>s). </p> <pre><code>text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose  --PO  'map=.*/ggH:r_ggH[1,-5,5]' --PO 'map=.*/qqH:r_qqH[1,-5,5]' PhysicsModels/htt_tt_125_8TeV.txt  -o ws_multiSignal.root -m 125\ncombine -M MultiDimFit --algo singles -d  ws_multiSignal.root -n .multiSignal.\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#custom-models","title":"Custom models","text":"<p>Now let's look at an example of how one can construct a model where the two leading signal processes (qqH and ggH) are scaled with a relative fraction parameter <code>f</code> and an overall rate modifier <code>r</code>. </p> \\[ N_{qqH}(r, f) = r (1 - f) N_{qqH};\\,N_{ggH}(r, f) = r f N_{ggH} \\] <p>As discussed above, first we have to define the parameters of the model <pre><code>    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"f[0,0,4]\") \n        self.modelBuilder.doVar(\"r[1,0,10]\")\n</code></pre> then we can use them in the scaling functions for the ggH and qqH processes <pre><code>        self.modelBuilder.factory_( \"expr::scale_qqH(\\\"(1-@0)*@1\\\", f,r)\")\n        self.modelBuilder.factory_( \"expr::scale_ggH(\\\"@0*@1\\\", f,r)\")\n</code></pre> add the parameters to the set of POIs <pre><code>        self.modelBuilder.doSet(\"POI\", \",\".join([\"f\", \"r\"]))\n</code></pre> The <code>getYieldScale(self, bin, process)</code> method will scale the <code>qqH</code> process with the <code>scale_qqH</code> object and <code>ggH</code> with <code>scale_ggH</code>.  
<pre><code>    def getYieldScale(self, bin, process):\n        if process == \"qqH\": return \"scale_qqH\"\n        elif process == \"ggH\": return \"scale_ggH\"\n        else: return 1\n</code></pre></p> <p>To use this model we should add the directory where the corresponding file is located to the <code>PYTHON3PATH</code>: <pre><code>export PYTHON3PATH=${PYTHON3PATH}:${PWD}/models\n</code></pre> And now we can finally create the workspace using this model:  <pre><code>text2workspace.py PhysicsModels/htt_tt_125_8TeV.txt -P FractionModel:Fraction_2signals  -m 125  -o ws_fraction.root\n</code></pre></p> <p>One can inspect the created workspace to ensure that the model is correctly implemented:</p> <pre><code>root -l ws_fraction.root\nroot [1] w-&gt;Print()\n</code></pre> <p>The created workspace is quite large, but it should have two new <code>RooFormulaVar</code> objects </p> <p><pre><code>RooFormulaVar::scale_ggH[ actualVars=(f,r) formula=\"x[0]*x[1]\" ] = 0\nRooFormulaVar::scale_qqH[ actualVars=(f,r) formula=\"(1-x[0])*x[1]\" ] = 1\n</code></pre> which modify the normalisation of the ggH and qqH processes:</p> <pre><code>ProcessNormalization::n_exp_binhtt_tt_1_8TeV_proc_ggH[ thetaList=(CMS_eff_t_tt_8TeV,CMS_htt_scale_met_8TeV,QCDscale_ggH1in,UEPS,lumi_8TeV,pdf_gg) asymmThetaList=() otherFactorList=(scale_ggH) ] = 0\nProcessNormalization::n_exp_binhtt_tt_1_8TeV_proc_qqH[ thetaList=(CMS_eff_t_tt_8TeV,CMS_htt_scale_met_8TeV,CMS_scale_j_8TeV,QCDscale_qqH,UEPS,lumi_8TeV,pdf_qqbar) asymmThetaList=() otherFactorList=(scale_qqH) ] = 1.4954\n</code></pre>"},{"location":"model_building_tutorial2024/model_building_exercise/#eft-model","title":"EFT model","text":"<p>One can also define analytical BSM models with Combine. In this section we will extract the confidence interval for one of the SMEFT Wilson coefficients. Without going into details, it can be shown that cross sections scale as quadratic functions of the dimension-6 SMEFT Wilson coefficients. In this example we will consider the \\(c_{Hg}\\) operator, which among all signal processes in the datacard affects only ggH at LO in SMEFT. 
</p> <p>\\(\\sigma(c_{g}) = \\sigma_{SM} (1 + A c_{g} + B c^{2}_{g})\\), where the A and B coefficients are numbers that can be estimated from simulation.</p> <pre><code>class SMEFT_chg(PhysicsModel):\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"A[39.54]\")\n        self.modelBuilder.doVar(\"B[245.32]\")\n        self.modelBuilder.out.var(\"A\").setConstant(True)\n        self.modelBuilder.out.var(\"B\").setConstant(True)\n        self.modelBuilder.doVar(\"chg[0,-1,1]\")\n        self.modelBuilder.factory_( \"expr::ggH_scaling_chg(\\\"1+@1*@0+@2*@0*@0\\\", chg, A, B)\")\n        self.modelBuilder.doSet(\"POI\", \",\".join([\"chg\"]))\n\n    def getYieldScale(self, bin, process):\n        if process == \"ggH\": return \"ggH_scaling_chg\"\n        else: return 1\n\nsmeft_chg_tutorial = SMEFT_chg()\n</code></pre> <p>To create the workspace using this model one can run: </p> <pre><code>export PYTHON3PATH=${PYTHON3PATH}:${PWD}/models\ntext2workspace.py PhysicsModels/htt_tt_125_8TeV.txt -P EFT_simple:smeft_chg_tutorial  -m 125  -o ws_chg.root\n</code></pre> <p>Run the likelihood scan for the \\(c_{Hg}\\) parameter and make the plot: <pre><code>combine -M MultiDimFit --algo grid -d  ws_chg.root --setParameterRanges chg=-0.2,0.2\nplot1DScan.py higgsCombineTest.MultiDimFit.mH120.root --POI chg\n</code></pre></p>"},{"location":"part2/bin-wise-stats/","title":"Automatic statistical uncertainties","text":""},{"location":"part2/bin-wise-stats/#introduction","title":"Introduction","text":"<p>The <code>text2workspace.py</code> script is able to produce a type of workspace, using a set of new histogram classes, in which bin-wise statistical uncertainties are added automatically. This can be built for shape-based datacards where the inputs are in TH1 format. Datacards that use RooDataHists are not supported. The bin errors (i.e. values returned by <code>TH1::GetBinError</code>) are used to model the uncertainties.</p> <p>By default the script will attempt to assign a single nuisance parameter to scale the sum of the process yields in each bin, constrained by the total uncertainty, instead of requiring separate parameters, one per process. This is sometimes referred to as the Barlow-Beeston-lite approach, and is useful as it minimises the number of parameters required in the maximum likelihood fit. A useful description of this approach may be found in section 5 of this report.</p>"},{"location":"part2/bin-wise-stats/#usage-instructions","title":"Usage instructions","text":"<p>The following line should be added at the bottom of the datacard, underneath the systematics, to produce a new-style workspace and optionally enable the automatic bin-wise uncertainties:</p> <pre><code>[channel] autoMCStats [threshold] [include-signal = 0] [hist-mode = 1]\n</code></pre> <p>The first string <code>channel</code> should give the name of the channels (bins) in the datacard for which the new histogram classes should be used. The wildcard <code>*</code> is supported for selecting multiple channels in one go. The value of <code>threshold</code> should be set to a value greater than or equal to zero to enable the creation of automatic bin-wise uncertainties, or <code>-1</code> to use the new histogram classes without these uncertainties. A positive value sets the threshold on the effective number of unweighted events above which the uncertainty will be modeled with the Barlow-Beeston-lite approach described above. 
Below the threshold an individual uncertainty per-process will be created. The algorithm is described in more detail below.</p> <p>The last two settings are optional. The first of these, <code>include-signal</code> has a default value of <code>0</code> but can be set to <code>1</code> as an alternative. By default, the total nominal yield and uncertainty used to test the threshold excludes signal processes. The reason for this is that typically the initial signal normalization is arbitrary, and could unduly lead to a bin being considered well-populated despite poorly populated background templates. Setting this flag will include the signal processes in the uncertainty analysis. Note that this option only affects the logic for creating a single Barlow-Beeston-lite parameter vs. separate per-process parameters - the uncertainties on all signal processes are always included in the actual model! The second flag changes the way the normalization effect of shape-altering uncertainties is handled. In the default mode (<code>1</code>) the normalization is handled separately from the shape morphing via a an asymmetric log-normal term. This is identical to how Combine has always handled shape morphing. When set to <code>2</code>, the normalization will be adjusted in the shape morphing directly. Unless there is a strong motivation we encourage users to leave this on the default setting.</p>"},{"location":"part2/bin-wise-stats/#description-of-the-algorithm","title":"Description of the algorithm","text":"<p>When <code>threshold</code> is set to a number of effective unweighted events greater than or equal to zero, denoted \\(n^{\\text{threshold}}\\), the following algorithm is applied to each bin:</p> <ol> <li>Sum the yields \\(n_{i}\\) and uncertainties \\(e_{i}\\) of each background process \\(i\\) in the bin. Note that the \\(n_i\\) and \\(e_i\\) include the nominal effect of any scaling parameters that have been set in the datacard, for example <code>rateParams</code>. \\(n_{\\text{tot}} = \\sum_{i\\,\\in\\,\\text{bkg}}n_i\\), \\(e_{\\text{tot}} = \\sqrt{\\sum_{i\\,\\in\\,\\text{bkg}}e_i^{2}}\\)</li> <li>If \\(e_{\\text{tot}} = 0\\), the bin is skipped and no parameters are created. If this is the case, it is a good idea to check why there is no uncertainty in the background prediction in this bin!</li> <li>The effective number of unweighted events is defined as \\(n_{\\text{tot}}^{\\text{eff}} = n_{\\text{tot}}^{2} / e_{\\text{tot}}^{2}\\), rounded to the nearest integer.</li> <li>If \\(n_{\\text{tot}}^{\\text{eff}} \\leq n^{\\text{threshold}}\\): separate uncertainties will be created for each process. Processes where \\(e_{i} = 0\\) are skipped. If the number of effective events for a given process is lower than \\(n^{\\text{threshold}}\\) a Poisson-constrained parameter will be created. Otherwise a Gaussian-constrained parameter is used.</li> <li>If \\(n_{\\text{tot}}^{\\text{eff}} \\gt n^{\\text{threshold}}\\): A single Gaussian-constrained Barlow-Beeston-lite parameter is created that will scale the total yield in the bin.</li> <li>Note that the values of \\(e_{i}\\), and therefore \\(e_{tot}\\), will be updated automatically in the model whenever the process normalizations change.</li> <li>A Gaussian-constrained parameter \\(\\nu\\) has a nominal value of zero and scales the yield as \\(n_{\\text{tot}} + \\nu \\cdot e_{\\text{tot}}\\). 
The Poisson-constrained parameters are expressed as a yield multiplier with nominal value one: \\(n_{\\text{tot}} \\cdot \\nu\\).</li> </ol> <p>The output from <code>text2workspace.py</code> will give details on how each bin has been treated by this algorithm, for example:</p> Show example output <pre><code>============================================================\nAnalysing bin errors for: prop_binhtt_et_6_7TeV\nPoisson cut-off: 10\nProcesses excluded for sums: ZH qqH WH ggH\n============================================================\nBin        Contents        Error           Notes\n0          0.000000        0.000000        total sum\n0          0.000000        0.000000        excluding marked processes\n  =&gt; Error is zero, ignore\n------------------------------------------------------------\n1          0.120983        0.035333        total sum\n1          0.120983        0.035333        excluding marked processes\n1          12.000000       3.464102        Unweighted events, alpha=0.010082\n  =&gt; Total parameter prop_binhtt_et_6_7TeV_bin1[0.00,-7.00,7.00] to be gaussian constrained\n------------------------------------------------------------\n2          0.472198        0.232096        total sum\n2          0.472198        0.232096        excluding marked processes\n2          4.000000        2.000000        Unweighted events, alpha=0.118049\n  =&gt; Number of weighted events is below poisson threshold\n    ZH                   0.000000        0.000000\n      =&gt; Error is zero, ignore\n  ----------------------------------------------------------\n    W                    0.050606        0.029220\n                         3.000000        1.732051        Unweighted events, alpha=0.016869\n      =&gt; Product of prop_binhtt_et_6_7TeV_bin2_W[1.00,0.00,12.15] and const [3] to be poisson constrained\n  ----------------------------------------------------------\n    ZJ                   0.142444        0.140865\n                         1.000000        1.000000        Unweighted events, alpha=0.142444\n      =&gt; Product of prop_binhtt_et_6_7TeV_bin2_ZJ[1.00,0.00,30.85] and const [1] to be poisson constrained\n  ----------------------------------------------------------\n</code>"},{"location":"part2/bin-wise-stats/#analytic-minimisation","title":"Analytic minimisation","text":"<p>One significant advantage of the Barlow-Beeston-lite approach is that the maximum likelihood estimate of each nuisance parameter has a simple analytic form that depends only on \\(n_{\\text{tot}}\\), \\(e_{\\text{tot}}\\) and the observed number of data events in the relevant bin. Therefore when minimising the negative log-likelihood of the whole model it is possible to remove these parameters from the fit and set them to their best-fit values automatically. For models with large numbers of bins this can reduce the fit time and increase the fit stability. The analytic minimisation is enabled by default starting in combine v8.2.0, you can disable it by adding the option <code>--X-rtd MINIMIZER_no_analytic</code> when running Combine.</p>\n<p>The figure below shows a performance comparison of the analytical minimisation versus the number of bins in the likelihood function. 
 <p>The figure below shows a performance comparison of the analytical minimisation as a function of the number of bins in the likelihood function. The real time (in seconds) for a typical minimisation of a binned likelihood is shown as a function of the number of bins when invoking the analytic minimisation of the nuisance parameters versus the default numerical approach.</p>\n\nShow Comparison\n<p></p>"},{"location":"part2/bin-wise-stats/#technical-details","title":"Technical details","text":"<p>Up until recently <code>text2workspace.py</code> would only construct the PDF for each channel using a <code>RooAddPdf</code>, i.e. each component process is represented by a separate PDF and normalization coefficient. However, in order to model bin-wise statistical uncertainties, the alternative <code>RooRealSumPdf</code> can be more useful, as each process is represented by a RooFit function object instead of a PDF, and we can vary the bin yields directly. As such, a new RooFit histogram class <code>CMSHistFunc</code> is introduced, which offers the same vertical template morphing algorithms as the current default histogram PDF, <code>FastVerticalInterpHistPdf2</code>. Accompanying this is the <code>CMSHistErrorPropagator</code> class. This evaluates a sum of <code>CMSHistFunc</code> objects, each multiplied by a coefficient. It is also able to scale the summed yield of each bin to account for bin-wise statistical uncertainty nuisance parameters.</p>\n\n<p>Warning</p>\n<p>One disadvantage of this new approach comes when evaluating the expectation for individual processes, for example when using the <code>--saveShapes</code> option in the <code>FitDiagnostics</code> mode of Combine. The Barlow-Beeston-lite parameters scale the sum of the process yields directly, so extra work is needed to distribute this total scaling back to each individual process. To achieve this, an additional class <code>CMSHistFuncWrapper</code> has been created; given a particular <code>CMSHistFunc</code>, it uses the <code>CMSHistErrorPropagator</code> to distribute an appropriate fraction of the total yield shift to each bin. As a consequence of the extra computation needed to distribute the yield shifts in this way, the evaluation of individual process shapes in <code>--saveShapes</code> can take longer than previously.</p>"},{"location":"part2/bsm-higgs-models/","title":"Physics Models for Extended Higgs Sector Searches","text":"<p>This page lists the physics models that can be used to perform searches for additional Higgs bosons at the LHC, in cases where the SM-like Higgs boson should be accounted for in the data.</p>"},{"location":"part2/bsm-higgs-models/#two-higgs-models","title":"Two Higgs Models","text":"<p>These models are for the case where there are just two Higgs bosons, one of which is the SM-like Higgs boson that was discovered at the LHC. The two Higgs models are implemented in the python file <code>TwoHiggsModels.py</code>. 
For each of these models, we assume that the SM-like Higgs boson has mass specified by <code>MH_SM</code>, while the additional boson has mass <code>MH</code>.</p> <p>You can produce the model by including the following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.TwoHiggsModels:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Two Higgs bosons <code>twoHiggsUnconstrained</code> N/A <code>r</code>, <code>r_SM</code> The SM-like Higgs boson signal strength will be <code>r_SM</code> and its mass will be assumed to be <code>MH_SM</code> (default value 125.8), while the additional Higgs boson signal strength will be scaled by <code>r</code> and assumed to have mass <code>MH</code>. Singlet Mixing Model <code>singletMixing</code> <code>--PO BSMDecays</code>,<code>--PO UseVisibleMu</code> <code>r</code>,<code>BR_BSM</code> Without any options, the SM-like Higgs boson will have signal strength <code>r</code>, while the additional Higgs boson is scaled by <code>1-r</code> and <code>BR_BSM</code> will not be a POI. If the option <code>BSMDecays</code> is included, the additional boson's signal strength will be <code>r(1-BR_BSM)</code> and the SM-like Higgs boson will have signal strength of <code>1-r</code>. If the option <code>UseVisibleMu</code> is included too, then instead, the additional Higgs boson will get a signal strength <code>r</code> while the SM one will have <code>1-r(1-BR_BSM)</code>. Singlet Mixing Model for Exclusions <code>singletMixingInvisible</code> <code>--PO BSMDecays</code> <code>r</code>,<code>x</code>,<code>BR_BSM</code> The SM-like Higgs boson will be scaled by <code>r*x</code> while the additional boson is scaled by <code>r*(1-x)</code>. If the option <code>BSMDecays</code> is included, then <code>BR_BSM</code> is also a POI and the additional Higgs boson signal strength is scaled accounting for this BSM branching fraction <code>r*(1-x)*(1-BR_BSM)</code>. Two Higgs bosons with \\(c_{V}\\), \\(c_{F}\\) couplings <code>twoHiggsCvCf</code> N/A <code>CV</code>, <code>CF</code> Both Higgs bosons' signal strengths scale according to the coupling to vector bosons <code>CV</code> and fermions <code>CF</code>, where the scaling is determined for each contributing production/decay vertex. In this case, the additional boson is assumed to follow the same coupling structure (i.e. the SM-like Higgs boson couplings).
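 <p>For example, to build the unconstrained two-Higgs-boson model from a datacard (the datacard name, output file and mass value below are only placeholders):</p> <pre><code>text2workspace.py datacard.txt -m 300 -P HiggsAnalysis.CombinedLimit.TwoHiggsModels:twoHiggsUnconstrained -o two_higgs.root\n</code></pre>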
"},{"location":"part2/bsm-higgs-models/#two-higgs-doublet-models","title":"Two Higgs Doublet Models","text":"<p>In these models, the couplings of the SM-like Higgs boson are modified according to the type of 2HDM with parameters \\(\\cos(\\beta-\\alpha)\\) and \\(\\tan\\beta\\). The two Higgs doublet models are implemented in the python file <code>AdditionalModels.py</code>. In these models, the Higgs boson mass is <code>MH</code>, and it can be promoted to a POI by including the option <code>--PO higgsMassRange=[low,high]</code>.</p> <p>You can produce the model by including the following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.AdditionalModels:model_name\n</code></pre> <code>model</code> <code>--PO</code> POIs Description 2HDM Type-1 <code>twohdm</code> <code>--PO thdmtype=1</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to fermions and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The coupling dependencies are \\(\\kappa_V = \\sqrt{1-\\cos^{2}(\\beta-\\alpha)}\\), \\(\\kappa_{u}=\\kappa_{d}=\\cos(\\alpha)/\\sin(\\beta)\\). 2HDM Type-2 <code>twohdm</code> <code>--PO thdmtype=2</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to fermions and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The coupling dependencies are \\(\\kappa_V = \\sqrt{1-\\cos^{2}(\\beta-\\alpha)}\\), \\(\\kappa_{u}=\\cos(\\alpha)/\\sin(\\beta)\\), \\(\\kappa_{d}=-\\sin(\\alpha)/\\cos(\\beta)\\). 2HDM Type-3 <code>twohdm</code> <code>--PO thdmtype=3</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to quarks, leptons and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The coupling dependencies are \\(\\kappa_V = \\sqrt{1-\\cos^{2}(\\beta-\\alpha)}\\), \\(\\kappa_{u}=\\kappa_{d}=\\cos(\\alpha)/\\sin(\\beta)\\), \\(\\kappa_{l}=-\\sin(\\alpha)/\\cos(\\beta)\\). 2HDM Type-4 <code>twohdm</code> <code>--PO thdmtype=4</code> <code>cosbma</code>,<code>tanbeta</code> The couplings of the Higgs boson to quarks, leptons and vector bosons are modified by the parameters <code>cosbma</code> (\\(\\cos(\\beta-\\alpha)\\)) and <code>tanbeta</code> (\\(\\tan\\beta\\)). The coupling dependencies are \\(\\kappa_V = \\sqrt{1-\\cos^{2}(\\beta-\\alpha)}\\), \\(\\kappa_{u}=\\kappa_{l}=\\cos(\\alpha)/\\sin(\\beta)\\), \\(\\kappa_{d}=-\\sin(\\alpha)/\\cos(\\beta)\\)."},{"location":"part2/bsm-higgs-models/#fermiophobic-higgs-model","title":"Fermiophobic Higgs Model","text":"<p>This model is for the case where the additional Higgs boson does not couple to fermions. The fermiophobic Higgs model is implemented in the python file <code>HiggsFermiophobic.py</code>. In this model, the Higgs boson mass is <code>MH</code>, and it can be promoted to a POI by including the option <code>--PO higgsMassRange=[low,high]</code>.</p> <p>You can produce the model by including the following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.HiggsFermiophobic:model_name\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Fermiophobic Higgs <code>fp</code> N/A <code>r</code> The Higgs boson signal strength will be <code>r</code> for any production/decay that involves vector boson couplings only. The branching ratios are recalculated assuming no Higgs boson couplings to fermions."},{"location":"part2/higgscouplings/","title":"Physics Models for SM Higgs Boson Couplings","text":"<p>This page lists the physics models that can be used to perform measurements of the Higgs boson couplings in the Standard Model (SM). These models follow the recommendations of the LHC Higgs Cross Section Working Group LHCHXSWG YR3 and have been used in the combination of the ATLAS and CMS measurements of the Higgs boson properties.</p>"},{"location":"part2/higgscouplings/#lhc-hcg-models","title":"LHC HCG Models","text":"<p>The following models are used in the LHC Higgs Combination Group (LHC HCG) to perform measurements of the Higgs boson couplings. They are well defined only for an SM Higgs boson mass within a few GeV of the measured value \\(m_H \\approx 125\\) GeV. 
The models are implemented in the python file <code>LHCHCGModels.py</code>.</p> <p>You can produce the model by including the following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.LHCHCGModels:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Couplings with resolved loops <code>K1</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_W</code>,<code>kappa_Z</code>,<code>kappa_b</code>, <code>kappa_t</code>, <code>kappa_tau</code>,<code>kappa_mu</code> Higgs boson couplings to fermions and bosons in loops such as the \\(gg\\to H\\) and \\(H\\to\\gamma\\gamma\\) loops are scaled using the appropriate SM Higgs couplings. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), \\(H\\to Z\\gamma\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings with effective loops <code>K2</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kappa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>kappa_b</code>, <code>kappa_t</code>, <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\). By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2Width</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kappa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>,<code>c7_Gscal_tot</code>, <code>kappa_t</code>, <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\). In this model, the total Higgs width is allowed to float by effectively replacing the coupling \\(\\kappa_{b}\\) as a parameter of interest. 
By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2Inv</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kappa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>,<code>kappa_b</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this). The total width is modified accordingly. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2InvC</code> <code>--PO dohmm</code>, <code>--PO dohzg</code>, <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kappa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>kappa_b</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code>,<code>kappa_c</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this), and the coupling to charm quarks is included as a parameter of interest.  The total width is modified accordingly. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. 
<code>K2Undet</code> <code>--PO dohmm</code>, <code>--PO dohzg</code>, <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kappa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>kappa_b</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code>,<code>BRundet</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this), and the undetected decay modes are scaled by <code>BRundet</code>.  The total width is modified accordingly. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K2UndetWidth</code> <code>--PO dohmm</code>, <code>--PO dohzg</code>, <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_g</code>,<code>kappa_gam</code>,<code>kappa_Zgam</code>,<code>kappa_W</code>,<code>kappa_Z</code>, <code>c7_Gscal_tot</code>, <code>kappa_t</code>,  <code>kappa_tau</code>,<code>kappa_mu</code>,<code>kappa_Zg</code>,<code>BRinv</code>,<code>BRundet</code> Higgs boson couplings to fermions and bosons in which the \\(gg\\to H\\), \\(H\\to\\gamma\\gamma\\) and \\(H\\to Z\\gamma\\) loops are scaled by their own effective couplings \\(\\kappa_{g}^{2}\\), \\(\\kappa_{\\gamma}^{2}\\) and \\(\\kappa_{Z\\gamma}^{2}\\) and the branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this), and the undetected decay modes are scaled by <code>BRundet</code>.  The total Higgs width is allowed to float by effectively replacing the coupling \\(\\kappa_{b}\\) as a parameter of interest. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings to vector bosons and fermions <code>K3</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_V</code>,<code>kappa_F</code> Higgs boson couplings to bosons and fermions. By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. 
By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), \\(H\\to Z\\gamma\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <code>K3Inv</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>kappa_V</code>,<code>kappa_F</code>, <code>BRinv</code> Higgs boson couplings to bosons and fermions and free floating branching ratio to invisible particles (any process with decay string <code>hinv</code> is scaled by this). By setting the options <code>doX=1</code>, the process specified will be included as its own process and scaled by the appropriate coupling modifiers. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\), \\(H\\to Z\\gamma\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Ratios of coupling modifiers <code>L1</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_WZ</code>,<code>lambda_Zg</code>, <code>lambda_bZ</code>,<code>lambda_gamZ</code>,<code>lambda_tauZ</code>,<code>lambda_muZ</code>,<code>lambda_tg</code>,<code>kappa_gZ</code> Ratios of coupling modifiers \\(\\lambda_{WZ}=\\kappa_W/\\kappa_Z\\), \\(\\lambda_{Zg}=\\kappa_Z/\\kappa_{g}\\), \\(\\lambda_{bZ}=\\kappa_b/\\kappa_{Z}\\), \\(\\lambda_{\\gamma Z}=\\kappa_{\\gamma}/\\kappa_{Z}\\), \\(\\lambda_{\\tau Z}=\\kappa_{\\tau}/\\kappa_{Z}\\), \\(\\lambda_{\\mu Z}=\\kappa_{\\mu}/\\kappa_{Z}\\), \\(\\lambda_{tg}=\\kappa_{t}/\\kappa_{g}\\), \\(\\kappa_{gZ}=\\kappa_{g} \\kappa_{Z}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\)  are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. <p>The next models are constrained models to study a limited set of ratios of couplings. These are particularly useful to study models such as 2HDM that modify the SM Higgs couplings in a specific way. 
The models are implemented in the python file <code>HiggsCouplings.py</code>.</p> <p>You can produce the model by including the following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.HiggsCouplings:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Ratios of up/down fermion couplings <code>lambdadu</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_du</code>,<code>lambda_Vu</code>,<code>kappa_uu</code> Ratios of coupling modifiers \\(\\lambda_{du}=\\kappa_d/\\kappa_u\\), \\(\\lambda_{Vu}=\\kappa_{V}/\\kappa_{u}\\), \\(\\kappa_{uu}=\\kappa_{u}^{2}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Ratios of lepton to quark couplings <code>lambdalq</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_lq</code>,<code>lambda_Vq</code>,<code>kappa_qq</code> Ratios of coupling modifiers \\(\\lambda_{lq}=\\kappa_l/\\kappa_q\\), \\(\\lambda_{Vq}=\\kappa_{V}/\\kappa_{q}\\), \\(\\kappa_{qq}=\\kappa_{q}^{2}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Ratios of fermion to vector boson couplings <code>lambdafv</code> <code>--PO dohmm</code>, <code>--PO dohzg</code> <code>--PO dohcchgluglu</code> <code>--PO BRU</code> <code>--PO higgsMassRange=x,y</code> <code>lambda_FV</code>,<code>kappa_VV</code> Ratios of coupling modifiers \\(\\lambda_{fV}=\\kappa_f/\\kappa_V\\), \\(\\kappa_{VV}=\\kappa_{V}^{2}/\\kappa_H\\), where \\(\\kappa_{H}\\) is the total width modifier. By default, the \\(H\\to\\mu\\mu\\), \\(H\\to cc\\) are tied to other processes or fixed to their SM values. Set <code>BRU=1</code> to include the Higgs branching ratio uncertainties directly into the calculation of the partial widths, instead of just using the uncertainties in the datacard. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range.
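 <p>For example, to build the up/down-type coupling ratio model from a combined datacard (the input and output file names and the mass value below are only placeholders):</p> <pre><code>text2workspace.py comb.txt -m 125 -P HiggsAnalysis.CombinedLimit.HiggsCouplings:lambdadu -o lambdadu.root\n</code></pre>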
"},{"location":"part2/higgscouplings/#outdated-couplings-modifer-models","title":"Outdated coupling modifier models","text":"<p>These models were used early in the Higgs discovery, but are now considered outdated. They are still available for backward compatibility and for simple studies, but are generally not recommended. The models are implemented in the python file <code>HiggsCouplings.py</code>.</p> <p>You can produce the model by including the following option in the <code>text2workspace.py</code> command:</p> <pre><code>-P HiggsAnalysis.CombinedLimit.HiggsCouplings:model\n</code></pre> <code>model</code> <code>--PO</code> POIs Description Custodial Symmetry Model <code>cWZ</code> <code>--PO higgsMassRange=x,y</code> <code>Cz</code>,<code>Cwz</code> Ratio of couplings to W bosons and Z bosons. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings with universal up or down-type fermion couplings <code>c5udHiggs</code> <code>--PO universalCF</code>, <code>--PO higgsMassRange=x,y</code> <code>Cg</code>,<code>Cv</code>,<code>Cglu</code>,<code>Cu</code>,<code>Cd</code> Treat photon, vector-boson and gluon coupling as independent couplings. Up-type and down-type fermions have independent couplings. Include <code>universalCF</code> to replace <code>Cu</code> and <code>Cd</code> with universal fermion coupling <code>Cf</code>. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range. Couplings with universal lepton or quark couplings <code>c5qlHiggs</code> <code>--PO universalCF</code>, <code>--PO higgsMassRange=x,y</code> <code>Cg</code>,<code>Cv</code>,<code>Cglu</code>,<code>Cq</code>,<code>Cl</code> Treat photon, vector-boson and gluon coupling as independent couplings. Quark and lepton fermions have independent couplings. Include <code>universalCF</code> to replace <code>Cq</code> and <code>Cl</code> with universal fermion coupling <code>Cf</code>. Include the <code>higgsMassRange</code> option with range \\(x&lt;m_{H}&lt;y\\) to allow the Higgs boson mass to float in this range."},{"location":"part2/physicsmodels/","title":"Introduction to Physics Models","text":"<p>Combine can be run directly on the text-based datacard. However, for more advanced physics models, the internal step to convert the datacard to a binary workspace should be performed by the user. To create a binary workspace starting from a <code>datacard.txt</code>, you can run</p> <pre><code>text2workspace.py datacard.txt -o workspace.root\n</code></pre> <p>By default (without the <code>-o</code> option), the binary workspace will be named <code>datacard.root</code> - i.e. the .txt suffix will be replaced by .root.</p> <p>A full set of options for <code>text2workspace</code> can be found by running <code>text2workspace.py --help</code>.</p> <p>The default model that will be produced when running <code>text2workspace</code> is one in which all processes identified as signal are multiplied by a common multiplier <code>r</code>. This is all that is needed for simply setting limits or calculating significances.</p> <p><code>text2workspace</code> will convert the datacard into a PDF that summarizes the analysis. 
For example, let's take a look at the data/tutorials/counting/simple-counting-experiment.txt datacard.</p> <pre><code># Simple counting experiment, with one signal and one background process\n# Extremely simplified version of the 35/pb H-&gt;WW analysis for mH = 200 GeV,\n# for 4th generation exclusion (EWK-10-009, arxiv:1102.5429v1)\nimax 1  number of channels\njmax 1  number of backgrounds\nkmax 2  number of nuisance parameters (sources of systematical uncertainties)\n------------\n# we have just one channel, in which we observe 0 events\nbin         1\nobservation 0\n------------\n# now we list the expected events for signal and all backgrounds in that bin\n# the second 'process' line must have a positive number for backgrounds, and 0 for signal\n# then we list the independent sources of uncertainties, and give their effect (syst. error)\n# on each process and bin\nbin             1      1\nprocess       ggh4G  Bckg\nprocess         0      1\nrate           4.76  1.47\n------------\ndeltaS  lnN    1.20    -    20% uncertainty on signal\ndeltaB  lnN      -   1.50   50% uncertainty on background\n</code></pre> <p>If we run <code>text2workspace.py</code> on this datacard and take a look at the workspace (<code>w</code>) inside the <code>.root</code> file produced, we will find a number of different objects representing the signal, background, and observed event rates, as well as the nuisance parameters and signal strength \\(r\\). Note that often in the statistics literature, this parameter is referred to as \\(\\mu\\).</p> <p>From these objects, the necessary PDF has been constructed (named <code>model_s</code>). For this counting experiment we will expect a simple PDF of the form</p> \\[ p(n_{\\mathrm{obs}}| r,\\nu_{S},\\nu_{B})\\propto \\dfrac{[r\\cdot n_{S}(\\nu_{S})+n_{B}(\\nu_{B})]^{n_{\\mathrm{obs}}} } {n_{\\mathrm{obs}}!}e^{-[r\\cdot n_{S}(\\nu_{S})+n_{B}(\\nu_{B})]} \\cdot e^{-\\frac{1}{2}(\\nu_{S}- y_{S})^{2}} \\cdot e^{-\\frac{1}{2}(\\nu_{B}- y_{B})^{2}} \\] <p>where the expected signal and background rates are expressed as functions of the nuisance parameters, \\(n_{S}(\\nu_{S}) = 4.76(1+0.2)^{\\nu_{S}}~\\) and \\(~n_{B}(\\nu_{B}) = 1.47(1+0.5)^{\\nu_{B}}\\). The \\(y_{S},~y_{B}\\) are the auxiliary observables. In the code, these will have the same name as the corresponding nuisance parameter, with the extension <code>_In</code>.</p> <p>The first term represents the usual Poisson expression for observing \\(n_{\\mathrm{obs}}\\) events, while the second two are the Gaussian constraint terms for the nuisance parameters. In this case \\({y_S}={y_B}=0\\), and the widths of both Gaussians are 1.</p> <p>A combination of counting experiments (or a binned shape datacard) will look like a product of PDFs of this kind. For parametric/unbinned analyses, the PDF for each process in each channel is provided instead of using the Poisson terms, and the product runs over the bin counts/events.</p>
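 <p>As a rough numerical illustration of the expression above (a standalone sketch, not something produced by Combine; the function below simply re-evaluates the formula for the datacard values quoted above):</p> <pre><code>import math\n\ndef likelihood(r, nu_s, nu_b, n_obs=0):\n    # Counting-experiment likelihood for the datacard above, up to a constant.\n    n_s = 4.76 * 1.2 ** nu_s  # signal rate with its 20% log-normal uncertainty\n    n_b = 1.47 * 1.5 ** nu_b  # background rate with its 50% log-normal uncertainty\n    lam = r * n_s + n_b\n    poisson = lam ** n_obs * math.exp(-lam) / math.factorial(n_obs)\n    constraints = math.exp(-0.5 * nu_s ** 2) * math.exp(-0.5 * nu_b ** 2)\n    return poisson * constraints\n\nprint(likelihood(r=1.0, nu_s=0.0, nu_b=0.0))  # exp(-6.23), roughly 0.002\n</code></pre>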
"},{"location":"part2/physicsmodels/#model-building","title":"Model building","text":"<p>For more complex models, <code>PhysicsModels</code> can be produced. To use a different physics model instead of the default one, use the option <code>-P</code> as in</p> <pre><code>text2workspace.py datacard -P HiggsAnalysis.CombinedLimit.PythonFile:modelName\n</code></pre> <p>Generic models can be implemented by writing a python class that:</p> <ul> <li>defines the model parameters (by default it is just the signal strength modifier <code>r</code>)</li> <li>defines how signal and background yields depend on the parameters (by default, the signal scales linearly with <code>r</code>, backgrounds are constant)</li> <li>potentially also modifies the systematic uncertainties (e.g. switch off theory uncertainties on cross section when measuring the cross section itself)</li> </ul> <p>In the case of SM-like Higgs boson measurements, the class should inherit from <code>SMLikeHiggsModel</code> (redefining <code>getHiggsSignalYieldScale</code>), while beyond that one can inherit from <code>PhysicsModel</code>. You can find some examples in PhysicsModel.py.</p> <p>In the 4-process model (<code>PhysicsModel:floatingXSHiggs</code>), you will see that each of the 4 dominant Higgs boson production modes gets a separate scaling parameter, <code>r_ggH</code>, <code>r_qqH</code>, <code>r_ttH</code> and <code>r_VH</code> (or <code>r_ZH</code> and <code>r_WH</code>) as defined in,</p> <pre><code>def doParametersOfInterest(self):\n  \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n  # --- Signal Strength as only POI ---\n  if \"ggH\" in self.modes: self.modelBuilder.doVar(\"r_ggH[1,%s,%s]\" % (self.ggHRange[0], self.ggHRange[1]))\n  if \"qqH\" in self.modes: self.modelBuilder.doVar(\"r_qqH[1,%s,%s]\" % (self.qqHRange[0], self.qqHRange[1]))\n  if \"VH\"  in self.modes: self.modelBuilder.doVar(\"r_VH[1,%s,%s]\"  % (self.VHRange [0], self.VHRange [1]))\n  if \"WH\"  in self.modes: self.modelBuilder.doVar(\"r_WH[1,%s,%s]\"  % (self.WHRange [0], self.WHRange [1]))\n  if \"ZH\"  in self.modes: self.modelBuilder.doVar(\"r_ZH[1,%s,%s]\"  % (self.ZHRange [0], self.ZHRange [1]))\n  if \"ttH\" in self.modes: self.modelBuilder.doVar(\"r_ttH[1,%s,%s]\" % (self.ttHRange[0], self.ttHRange[1]))\n  poi = \",\".join([\"r_\"+m for m in self.modes])\n  if self.pois: poi = self.pois\n  ...\n</code></pre> <p>The mapping of which POI scales which process is handled via the following function,</p> <pre><code>def getHiggsSignalYieldScale(self,production,decay, energy):\n  if production == \"ggH\": return (\"r_ggH\" if \"ggH\" in self.modes else 1)\n  if production == \"qqH\": return (\"r_qqH\" if \"qqH\" in self.modes else 1)\n  if production == \"ttH\": return (\"r_ttH\" if \"ttH\" in self.modes else (\"r_ggH\" if self.ttHasggH else 1))\n  if production in [ \"WH\", \"ZH\", \"VH\" ]: return (\"r_VH\" if \"VH\" in self.modes else 1)\n  raise RuntimeError(\"Unknown production mode '%s'\" % production)\n</code></pre> <p>You should note that <code>text2workspace</code> will look for the python module in <code>PYTHONPATH</code>. 
If you want to keep your model local, you'll need to add the location of the python file to <code>PYTHONPATH</code>.</p> <p>A number of models used in the LHC Higgs combination paper can be found in LHCHCGModels.py.</p> <p>The models can be applied to the datacard by using the <code>-P</code> option, for example <code>-P HiggsAnalysis.CombinedLimit.HiggsCouplings:c7</code>, and others that are defined in HiggsCouplings.py.</p> <p>Below are some (more generic) example models that also exist in GitHub.</p>"},{"location":"part2/physicsmodels/#multisignalmodel-ready-made-model-for-multiple-signal-processes","title":"MultiSignalModel: ready-made model for multiple signal processes","text":"<p>Combine already contains a model <code>HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel</code> that can be used to assign different signal strengths to multiple processes in a datacard, configurable from the command line.</p> <p>The model is configured by passing one or more mappings in the form <code>--PO 'map=bin/process:parameter'</code> to text2workspace:</p> <ul> <li><code>bin</code> and <code>process</code> can be arbitrary regular expressions matching the bin names and process names in the datacard.     Note that mappings are applied both to signals and to background processes; if a line matches multiple mappings, precedence is given to the last one in the order they are in the command line.     It is suggested to put quotes around the argument of <code>--PO</code> so that the shell does not try to expand any <code>*</code> signs in the patterns.</li> <li><code>parameter</code> is the POI to use to scale that process (<code>name[starting_value,min,max]</code> the first time a parameter is defined, then just <code>name</code> if used more than once).     Special values are <code>1</code> and <code>0</code>; <code>0</code> means \"drop the process completely from the model\", while <code>1</code> means to \"keep the yield as is in the card with no scaling\" (as normally done for backgrounds); <code>1</code> is the default that is applied to processes that have no mappings. Therefore it is normally not needed, but it may be used to override a previous more generic match in the same command line (e.g. 
<code>--PO 'map=.*/ggH:r[1,0,5]' --PO 'map=bin37/ggH:1'</code> would treat ggH as signal in general, but count it as background in the channel <code>bin37</code>).</li> </ul> <p>Passing the additional option <code>--PO verbose</code> will set the code to verbose mode, printing out the scaling factors for each process; we encourage the use of this option to make sure that the processes are being scaled correctly.</p> <p>The MultiSignalModel will define all parameters as parameters of interest, but that can then be changed from the command line, as described in the following subsection.</p> <p>Some examples, taking as reference the toy datacard test/multiDim/toy-hgg-125.txt:</p> <ul> <li>Scale both <code>ggH</code> and <code>qqH</code> with the same signal strength <code>r</code> (that is what the default physics model of Combine does for all signals; if they all have the same systematic uncertainties, it is also equivalent to adding up their yields and writing them as a single column in the card)</li> </ul> <pre><code>  $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:r[1,0,10]' --PO 'map=.*/qqH:r' toy-hgg-125.txt -o toy-1d.root\n  [...]\n  Will create a POI  r  with factory  r[1,0,10]\n  Mapping  r  to  ['.*/ggH']  patterns\n  Mapping  r  to  ['.*/qqH']  patterns\n  [...]\n  Will scale  incl/bkg  by  1\n  Will scale  incl/ggH  by  r\n  Will scale  incl/qqH  by  r\n  Will scale  dijet/bkg  by  1\n  Will scale  dijet/ggH  by  r\n  Will scale  dijet/qqH  by  r\n</code></pre> <ul> <li>Define two independent parameters of interest <code>r_ggH</code> and <code>r_qqH</code></li> </ul> <pre><code>  $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:r_ggH[1,0,10]' --PO 'map=.*/qqH:r_qqH[1,0,20]' toy-hgg-125.txt -o toy-2d.root\n  [...]\n  Will create a POI  r_ggH  with factory  r_ggH[1,0,10]\n  Mapping  r_ggH  to  ['.*/ggH']  patterns\n  Will create a POI  r_qqH  with factory  r_qqH[1,0,20]\n  Mapping  r_qqH  to  ['.*/qqH']  patterns\n  [...]\n  Will scale  incl/bkg  by  1\n  Will scale  incl/ggH  by  r_ggH\n  Will scale  incl/qqH  by  r_qqH\n  Will scale  dijet/bkg  by  1\n  Will scale  dijet/ggH  by  r_ggH\n  Will scale  dijet/qqH  by  r_qqH\n</code></pre> <ul> <li>Fix <code>ggH</code> to SM, define only <code>qqH</code> as parameter</li> </ul> <pre><code>  $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:1' --PO 'map=.*/qqH:r_qqH[1,0,20]' toy-hgg-125.txt -o toy-1d-qqH.root\n  [...]\n  Mapping  1  to  ['.*/ggH']  patterns\n  Will create a POI  r_qqH  with factory  r_qqH[1,0,20]\n  Mapping  r_qqH  to  ['.*/qqH']  patterns\n  [...]\n  Will scale  incl/bkg  by  1\n  Will scale  incl/ggH  by  1\n  Will scale  incl/qqH  by  r_qqH\n  Will scale  dijet/bkg  by  1\n  Will scale  dijet/ggH  by  1\n  Will scale  dijet/qqH  by  r_qqH\n</code></pre> <ul> <li>Drop <code>ggH</code>, and define only <code>qqH</code> as parameter</li> </ul> <pre><code> $ text2workspace.py -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/ggH:0' --PO 'map=.*/qqH:r_qqH[1,0,20]' toy-hgg-125.txt -o toy-1d-qqH0-only.root\n [...]\n Mapping  0  to  ['.*/ggH']  patterns\n Will create a POI  r_qqH  with factory  r_qqH[1,0,20]\n Mapping  r_qqH  to  ['.*/qqH']  patterns\n [...]\n Will scale  incl/bkg  by  1\n Will scale  incl/ggH  by  0\n Will scale  incl/qqH  by  r_qqH\n Will scale  dijet/bkg  by  1\n Will scale  
dijet/ggH  by  0\n Will scale  dijet/qqH  by  r_qqH\n</code></pre>"},{"location":"part2/physicsmodels/#two-hypothesis-testing","title":"Two Hypothesis testing","text":"<p>The <code>PhysicsModel</code> that encodes the signal model above is the twoHypothesisHiggs, which assumes signal processes with suffix _ALT will exist in the datacard. An example of such a datacard can be found under data/benchmarks/simple-counting/twoSignals-3bin-bigBSyst.txt</p> <pre><code> $ text2workspace.py twoSignals-3bin-bigBSyst.txt -P HiggsAnalysis.CombinedLimit.HiggsJPC:twoHypothesisHiggs -m 125.7 --PO verbose -o jcp_hww.root\n\n MH (not there before) will be assumed to be 125.7\n Process  S  will get norm  not_x\n Process  S_ALT  will get norm  x\n Process  S  will get norm  not_x\n Process  S_ALT  will get norm  x\n Process  S  will get norm  not_x\n Process  S_ALT  will get norm  x\n</code></pre> <p>The two processes (S and S_ALT) will get different scaling parameters. The LEP-style likelihood for hypothesis testing can now be used by setting x or not_x to 1 and 0 and comparing the two likelihood evaluations.</p>"},{"location":"part2/physicsmodels/#signal-background-interference","title":"Signal-background interference","text":"<p>Since negative probability distribution functions do not exist, the recommended way to implement this is to start from the expression for the individual amplitudes \\(A\\) and the parameter of interest \\(k\\),</p> \\[ \\mathrm{Yield} = |k * A_{s} + A_{b}|^2 = k^2 * |A_{s}|^2 + k * 2 \\Re(A_{s}^* A_{b}) + |A_{b}|^2 = \\mu * S + \\sqrt{\\mu} * I + B \\] <p>where</p> <p>\\(\\mu = k^2, ~S = |A_{s}|^2,~B = |A_b|^2\\) and \\(S+B+I = |A_s + A_b|^2\\).</p> <p>With some algebra you can work out that,</p> <p>\\(\\mathrm{Yield} = \\sqrt{\\mu} * \\left[S+B+I\\right] + (\\mu-\\sqrt{\\mu}) * \\left[S\\right] + (1-\\sqrt{\\mu}) * \\left[B\\right]\\)</p> <p>where square brackets represent the input (histograms as <code>TH1</code> or <code>RooDataHists</code>) that one needs to provide.</p> <p>An example of this scheme is implemented in a HiggsWidth and is completely general, since all of the three components above are strictly positive. In this example, the POI is <code>CMS_zz4l_mu</code> and the equations for the three components are scaled (separately for the qqH and ggH processes) as,</p> <pre><code> self.modelBuilder.factory_( \"expr::ggH_s_func(\\\"@0-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::ggH_b_func(\\\"1-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::ggH_sbi_func(\\\"sqrt(@0)\\\", CMS_zz4l_mu)\")\n\n self.modelBuilder.factory_( \"expr::qqH_s_func(\\\"@0-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::qqH_b_func(\\\"1-sqrt(@0)\\\", CMS_zz4l_mu)\")\n self.modelBuilder.factory_(  \"expr::qqH_sbi_func(\\\"sqrt(@0)\\\", CMS_zz4l_mu)\")\n</code></pre>"},{"location":"part2/physicsmodels/#multi-process-interference","title":"Multi-process interference","text":"<p>The above formulation can be extended to multiple parameters of interest (POIs). See AnalyticAnomalousCoupling for an example. However, the computational performance scales quadratically with the number of POIs, and can get extremely expensive for 10 or more, as may be encountered often with EFT analyses. To alleviate this issue, an accelerated interference modeling technique is implemented for template-based analyses via the <code>interferenceModel</code> physics model. 
In this model, each bin yield \\(y\\) is parameterized</p> \\[ y(\\vec{\\mu}) = y_0 (\\vec{\\mu}^\\top M \\vec{\\mu}) \\] <p>as a function of the POI vector \\(\\vec{\\mu}\\), a nominal template \\(y_0\\), and a scaling matrix \\(M\\). To see how this parameterization relates to that of the previous section, we can define:</p> \\[ y_0 = A_b^2, \\qquad M = \\frac{1}{A_b^2} \\begin{bmatrix}  |A_s|^2 &amp; \\Re(A_s^* A_b) \\\\  \\Re(A_s A_b^*) &amp; |A_b|^2  \\end{bmatrix}, \\qquad \\vec{\\mu} = \\begin{bmatrix}  \\sqrt{\\mu} \\\\  1  \\end{bmatrix} \\] <p>which leads to the same parameterization. At present, this technique only works with <code>CMSHistFunc</code>-based workspaces, as these are the most common workspace types encountered and the default when using autoMCStats. To use this model, for each bin find \\(y_0\\) and put it into the datacard as a signal process, then find \\(M\\) and save the lower triangular component as an array in a <code>scaling.json</code> file with a syntax as follows:</p> <pre><code>[\n  {\n    \"channel\": \"my_channel\",\n    \"process\": \"my_nominal_process\",\n    \"parameters\": [\"sqrt_mu[1,0,2]\", \"Bscaling[1]\"],\n    \"scaling\": [\n      [0.5, 0.1, 1.0],\n      [0.6, 0.2, 1.0],\n      [0.7, 0.3, 1.0]\n    ]\n  }\n]\n</code></pre> <p>where the parameters are declared using RooFit's factory syntax and each row of the <code>scaling</code> field represents the scaling information of a bin, e.g. if \\(y_0 = |A_b|^2\\) then each row would contain three entries:</p> \\[ |A_s|^2 / |A_b|^2,\\quad \\Re(A_s^* A_b)/|A_b|^2,\\quad 1 \\] <p>For several coefficients, one would enumerate as follows: <pre><code>scaling = []\nfor ibin in range(nbins):\n    binscaling = []\n    for icoef in range(ncoef):\n        for jcoef in range(icoef + 1):\n            binscaling.append(amplitude_squared_for(ibin, icoef, jcoef))\n    scaling.append(binscaling)\n</code></pre></p> <p>Then, to construct the workspace, run</p> <p><pre><code>text2workspace.py card.txt -P HiggsAnalysis.CombinedLimit.InterferenceModels:interferenceModel \\\n    --PO verbose --PO scalingData=scaling.json\n</code></pre> For large amounts of scaling data, you can optionally use gzipped json (<code>.json.gz</code>) or pickle (<code>.pkl.gz</code>) files with 2D numpy arrays for the scaling coefficients instead of lists. The function <code>numpy.tril_indices(ncoef)</code> is helpful for extracting the lower triangle of a square matrix.</p> <p>You could pick any nominal template, and adjust the scaling as appropriate. Generally it is advisable to use a nominal template corresponding to near where you expect the best-fit values of the POIs to be so that the shape systematic effects are well-modeled in that region.</p> <p>It may be the case that the relative contributions of the terms are themselves a function of the POIs. For example, in VBF di-Higgs production, BSM modifications to the production rate can be parameterized in the \"kappa\" framework via three diagrams, with scaling coefficients \\(\\kappa_V \\kappa_\\lambda\\), \\(\\kappa_V^2\\), and \\(\\kappa_{2V}\\), respectively, that interfere.  
In that case, you can declare formulas with the factory syntax to represent each amplitude as follows:</p> <pre><code>[\n  {\n    \"channel\": \"a_vbf_channel\",\n    \"process\": \"VBFHH\",\n    \"parameters\": [\"expr::a0('@0*@1', kv[1,0,2], kl[1,0,2])\", \"expr::a1('@0*@0', kv[1,0,2])\", \"k2v[1,0,2]\"],\n    \"scaling\": [\n      [3.30353674666415, -8.54170982038222, 22.96464188467882, 4.2353483207128, -11.07996258835088, 5.504469544697623],\n      [2.20644332142891, -7.076836641962523, 23.50989689214267, 4.053185685866683, -13.08569222837996, 7.502346155380032]\n    ]\n  }\n]\n</code></pre> <p>However, you will need to manually specify what the POIs should be when creating the workspace using the <code>POIs=</code> physics option, e.g.</p> <pre><code>text2workspace.py card.txt -P HiggsAnalysis.CombinedLimit.InterferenceModels:interferenceModel \\\n  --PO scalingData=scaling.json --PO 'POIs=kl[1,0,2]:kv[1,0,2]:k2v[1,0,2]'\n</code></pre>"},{"location":"part2/settinguptheanalysis/","title":"Preparing the datacard","text":"<p>The input to Combine, which defines the details of the analysis, is a plain ASCII file we will refer to as datacard. This is true whether the analysis is a simple counting experiment or a shape analysis.</p>"},{"location":"part2/settinguptheanalysis/#a-simple-counting-experiment","title":"A simple counting experiment","text":"<p>The file data/tutorials/counting/realistic-counting-experiment.txt shows an example of a counting experiment.</p> <p>The first lines can be used to add some descriptive information. Those lines must start with a \"#\", and they are not parsed by Combine:</p> <pre><code># Simple counting experiment, with one signal and a few background processes\n# Simplified version of the 35/pb H-&gt;WW analysis for mH = 160 GeV\n</code></pre> <p>Following this, one declares the number of observables, <code>imax</code>, that are present in the model used to set limits / extract confidence intervals. The number of observables will typically be the number of channels in a counting experiment. The value <code>*</code> can be specified for <code>imax</code>, which tells Combine to determine the number of observables from the rest of the datacard. In order to better catch mistakes, it is recommended to explicitly specify the value. </p> <pre><code>imax 1  number of channels\n</code></pre> <p>This declaration is followed by a specification of the number of background sources to be considered, <code>jmax</code>, and the number of independent sources of systematic uncertainty, <code>kmax</code>:</p> <pre><code>jmax 3  number of backgrounds\nkmax 5  number of nuisance parameters (sources of systematic uncertainty)\n</code></pre> <p>In the example there is 1 channel, there are 3 background sources, and there are 5 independent sources of systematic uncertainty.</p> <p>After providing this information, the following lines describe what is observed in data: the number of events observed in each channel. The first line, starting with <code>bin</code>, defines the label used for each channel. 
In the example we have 1 channel, labelled <code>bin1</code>, and in the following line, <code>observation</code>, the number of observed events is given: <code>0</code> in this example.</p> <p><pre><code># we have just one channel, in which we observe 0 events\nbin bin1\nobservation 0\n</code></pre> This is followed by information related to the expected number of events, for each bin and process, arranged in (#channels)*(#processes) columns.</p> <pre><code>bin          bin1     bin1     bin1     bin1\nprocess         ggH  qqWW  ggWW  others\nprocess          0     1     2     3\nrate           1.47  0.63  0.06  0.22\n</code></pre> <ul> <li>The <code>bin</code> line identifies the channel that the column refers to. It ranges from <code>1</code> to the value of <code>imax</code> declared above.</li> <li>The first <code>process</code> line contains the names of the various process sources</li> <li>The second <code>process</code> line is a numerical process identifier. Backgrounds are given a positive number, while <code>0</code> and negative numbers are used for signal processes. Different process identifiers must be used for different processes.</li> <li>The last line, <code>rate</code>, gives the expected number of events for the given process in the specified bin</li> </ul> <p>If a process does not contribute in a given bin, it can be removed from the datacard, or the rate can be set to 0.</p> <p>The final section of the datacard describes the systematic uncertainties:</p> <pre><code>lumi    lnN    1.11    -   1.11    -    lumi affects both signal and gg-&gt;WW (mc-driven). lnN = lognormal\nxs_ggH  lnN    1.16    -     -     -    gg-&gt;H cross section + signal efficiency + other minor ones.\nWW_norm gmN 4    -   0.16    -     -    WW estimate of 0.64 comes from sidebands: 4 events in sideband times 0.16 (=&gt; ~50% statistical uncertainty)\nxs_ggWW lnN      -     -   1.50    -    50% uncertainty on gg-&gt;WW cross section\nbg_others lnN    -     -     -   1.30   30% uncertainty on the rest of the backgrounds\n</code></pre> <ul> <li>The first column is the name of the nuisance parameter, a label that is used to identify the uncertainty</li> <li>The second column identifies the type of distribution used to describe the nuisance parameter<ul> <li><code>lnN</code> stands for Log-normal, which is the recommended choice for multiplicative corrections (efficiencies, cross sections, ...).     If \u0394x/x is the relative uncertainty in the multiplicative correction, one should put 1+\u0394x/x in the column corresponding to the process and channel. Asymmetric log-normals are instead supported by providing \u03ba<sub>down</sub>/\u03ba<sub>up</sub> where \u03ba<sub>down</sub> is the ratio of the yield to the nominal value for a -1\u03c3 deviation of the nuisance parameter and \u03ba<sub>up</sub> is the ratio of the yield to the nominal value for a \\(+1\\sigma\\) deviation. Note that for a single-value log-normal with value \\(\\kappa=1+\\Delta x/x\\), the yield of the process it is associated with is multiplied by \\(\\kappa^{\\theta}\\). At \\(\\theta=0\\) the nominal yield is retained, at \\(\\theta=1\\sigma\\) the yield is multiplied by \\(\\kappa\\) and at \\(\\theta=-1\\sigma\\) the yield is multiplied by \\(1/\\kappa\\). This means that an uncertainty represented as <code>1.2</code> does not multiply the nominal yield by 0.8 for \\(\\theta=-1\\sigma\\), but by 0.8333. 
It may therefore be desirable to encode large uncertainties that have a symmetric effect on the yield as asymmetric log-normals instead (a short numerical illustration is given below). </li> <li><code>gmN</code> stands for Gamma, and is the recommended choice for the statistical uncertainty in a background determined from the number of events in a control region (or in an MC sample with limited sample size).     If the control region or simulated sample contains N events, and the extrapolation factor from the control region to the signal region is \u03b1, one should put N just after the <code>gmN</code> keyword, and then the value of \u03b1 in the relevant (bin,process) column. The yield specified in the <code>rate</code> line for this (bin,process) combination should equal N\u03b1.</li> <li><code>lnU</code> stands for log-uniform distribution. A value of 1+\u03b5 in the column will imply that the yield of this background is allowed to float freely between x(1+\u03b5) and x/(1+\u03b5). In particular, if \u03b5 is small, this is approximately (x-\u0394x,x+\u0394x) with \u03b5=\u0394x/x.     This distribution is typically useful when you want to set a large a-priori uncertainty on a given background process, and then rely on the correlation between channels to constrain it. Note that for this use case, we usually recommend using a <code>rateParam</code> instead. If you do use <code>lnU</code>, please be aware that while Gaussian-like uncertainties behave in a similar way under profiling and marginalization, uniform uncertainties do not. This means the impact of the uncertainty on the result will depend on how the nuisance parameters are treated. </li> </ul> </li> <li>The next (#channels)*(#processes) columns indicate the relative effect of the systematic uncertainty on the rate of each process in each channel. The columns are aligned with those in the previous lines declaring bins, processes, and rates.</li> </ul> <p>In the example, there are 5 uncertainties:</p> <ul> <li>The first uncertainty has an 11% effect on the signal and on the <code>ggWW</code> process.</li> <li>The second uncertainty affects the signal by 16%, but leaves the background processes unaffected</li> <li>The third line specifies that the <code>qqWW</code> background comes from a sideband with 4 observed events and an extrapolation factor of 0.16; the resulting uncertainty in the expected yield is \\(1/\\sqrt{4+1}\\) = 45%</li> <li>The fourth uncertainty does not affect the signal, has a 50% effect on the <code>ggWW</code> background, and leaves the other backgrounds unaffected</li> <li>The fifth uncertainty does not affect the signal, has a 30% effect on the <code>others</code> background process, and does not affect the remaining backgrounds.</li> </ul>
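 <p>As a short numerical illustration of the log-normal scaling described above (a standalone sketch, not code from Combine):</p> <pre><code>def lnN_scale(nominal, kappa, theta):\n    # Yield after applying a log-normal uncertainty kappa at nuisance value theta.\n    return nominal * kappa ** theta\n\nprint(lnN_scale(100.0, 1.2, +1.0))  # 120.0\nprint(lnN_scale(100.0, 1.2, -1.0))  # 83.33..., not 80.0\n</code></pre>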
The data have to be provided as input as a histogram to perform a binned shape analysis, and as a RooDataSet to perform an unbinned shape analysis.</p> <p>Warning</p> <p>If using RooFit-based inputs (RooDataHists/RooDataSets/RooAbsPdfs) then you need to ensure you are using different RooRealVars as the observable in each category entering the statistical analysis. It is possible to use the same RooRealVar if the observable has the same range (and binning if using binned data) in each category, although in most cases it is simpler to avoid doing this.</p>"},{"location":"part2/settinguptheanalysis/#rates-for-shape-analyses","title":"Rates for shape analyses","text":"<p>As with the counting experiment, the total nominal rate of a given process must be identified in the rate line of the datacard. However, there are special options for shape-based analyses, as follows:</p> <ul> <li>A value of -1 in the rate line means Combine will calculate the rate from the input TH1 (via TH1::Integral) or RooDataSet/RooDataHist (via RooAbsData::sumEntries).</li> <li>For parametric shapes (RooAbsPdf), if a parameter with the name pdfname_norm is found in the input workspace, the rate will be multiplied by the value of that parameter. Note that since this parameter can be freely floating, the normalization of a process can be left freely floating in this way. This can also be achieved through the use of <code>rateParams</code>.</li> </ul>"},{"location":"part2/settinguptheanalysis/#binned-shape-analyses","title":"Binned shape analyses","text":"<p>For each channel, histograms have to be provided for the observed shape and for the expected shape of each process.</p> <ul> <li>Within each channel, all histograms must have the same binning.</li> <li>The normalization of the data histogram must correspond to the number of observed events.</li> <li>The normalization of the expected histograms must match the expected event yields.</li> </ul> <p>The Combine tool can take as input histograms saved as TH1, as RooDataHist in a RooFit workspace (an example of how to create a RooFit workspace and save histograms is available in github), or from a pandas dataframe (example).</p> <p>The block of lines defining the mapping (first block in the datacard) contains one or more rows of the form</p> <pre><code>shapes process channel file histogram [histogram_with_systematics]\n</code></pre> <p>In this line,</p> <ul> <li><code>process</code> is any one of the process names, or <code>*</code> for all processes, or <code>data_obs</code> for the observed data;</li> <li><code>channel</code> is any one of the channel names, or <code>*</code> for all channels;</li> <li><code>file</code>, <code>histogram</code> and <code>histogram_with_systematics</code> identify the names of the files and of the histograms within the file, after making some replacements (if any are found):<ul> <li><code>$PROCESS</code> is replaced with the process name (or \"<code>data_obs</code>\" for the observed data);</li> <li><code>$CHANNEL</code> is replaced with the channel name;</li> <li><code>$SYSTEMATIC</code> is replaced with the name of the systematic, with <code>Up</code> or <code>Down</code> appended;</li> <li><code>$MASS</code> is replaced with the chosen (Higgs boson) mass value that is passed as a command-line option when running the tool</li> </ul> </li> </ul> <p>In addition, user-defined keywords can be used. Any occurrence of <code>$WORD</code> in the datacard will be replaced by <code>VALUE</code> when including the option <code>--keyword-value WORD=VALUE</code>. 
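For instance, a mapping line could use a custom keyword in the input file name (the keyword <code>ERA</code> below is purely illustrative and not part of any tutorial datacard):</p> <pre><code>shapes * * shapes_$ERA.root $CHANNEL/$PROCESS $CHANNEL/$PROCESS_$SYSTEMATIC\n</code></pre> <p>so that passing <code>--keyword-value ERA=2018</code> would make the tool read the shapes from <code>shapes_2018.root</code>. 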
This option can be repeated multiple times for multiple keywords.</p>"},{"location":"part2/settinguptheanalysis/#template-shape-uncertainties","title":"Template shape uncertainties","text":"<p>Shape uncertainties can be taken into account by vertical interpolation of the histograms. The shapes (fraction of events \\(f\\) in each bin) are interpolated using a spline for shifts within +/- 1\u03c3 and linearly outside of that range. Specifically, for nuisance parameter values \\(|\\nu|\\leq 1\\) </p> \\[ f(\\nu) = \\frac{1}{2} \\left( (\\delta^{+}-\\delta^{-})\\nu + \\frac{1}{8}(\\delta^{+}+\\delta^{-})(3\\nu^6 - 10\\nu^4 + 15\\nu^2) \\right) \\] <p>and for \\(\\nu&gt; 1\\) (\\(\\nu&lt;-1\\)), \\(f(\\nu)\\) is a straight line with gradient \\(\\delta^{+}\\) (\\(\\delta^{-}\\)), where \\(\\delta^{+}=f(\\nu=1)-f(\\nu=0)\\), and \\(\\delta^{-}=f(\\nu=-1)-f(\\nu=0)\\), derived using the nominal and up/down histograms. This interpolation is designed so that the values of \\(f(\\nu)\\) and its derivatives are continuous for all values of \\(\\nu\\). </p> <p>The normalizations are interpolated linearly in log scale, just like we do for log-normal uncertainties. If the value in a given bin is negative for some value of \\(\\nu\\), the value will be truncated at 0.</p> <p>For each shape uncertainty and process/channel affected by it, two additional input shapes have to be provided. These are obtained by shifting the parameter up and down by one standard deviation. When building the likelihood, each shape uncertainty is associated to a nuisance parameter taken from a unit gaussian distribution, which is used to interpolate or extrapolate using the specified histograms.</p> <p>For each given shape uncertainty, the part of the datacard describing shape uncertainties must contain a row</p> <pre><code>name shape effect_for_each_process_and_channel\n</code></pre> <p>The effect can be \"-\" or 0 for no effect, 1 for the normal effect, and something different from 1 to test larger or smaller effects (in that case, the unit gaussian is scaled by that factor before using it as the parameter for the interpolation).</p> <p>The datacard in data/tutorials/shapes/simple-shapes-TH1.txt provides an example of how to include shapes in the datacard. In the first block the following line specifies the shape mapping:</p> <pre><code>shapes * * simple-shapes-TH1.root $PROCESS $PROCESS_$SYSTEMATIC\n</code></pre> <p>The last block concerns the treatment of the systematic uncertainties that affect shapes. In this case there are two uncertainties with a shape-altering effect.</p> <pre><code>alpha  shape    -           1   uncertainty on background shape and normalization\nsigma  shape    0.5         -   uncertainty on signal resolution. Assume the histogram is a 2 sigma shift,\n#                                so divide the unit gaussian by 2 before doing the interpolation\n</code></pre> <p>There are two options for the interpolation algorithm in the \"shape\" uncertainty. Putting <code>shape</code> will result in an interpolation of the fraction of events in each bin. That is, the histograms are first normalized before interpolation. Putting <code>shapeN</code> will instead base the interpolation on the logs of the fraction in each bin. For both <code>shape</code> and <code>shapeN</code>, the total normalization is interpolated using an asymmetric log-normal, so that the effect of the systematic on both the shape and normalization is accounted for. 
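For instance, to use the logarithmic interpolation for the background shape uncertainty in the example above, the first line could be declared with <code>shapeN</code> instead (an illustrative variant, not part of the tutorial datacard):</p> <pre><code>alpha  shapeN   -           1   uncertainty on background shape and normalization\n</code></pre> <p>The rest of the datacard would stay the same. 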
The following image shows a comparison of the two algorithms for the example datacard.</p> <p>In this case there are two processes, signal and background, and two uncertainties affecting the background (alpha) and signal shapes (sigma). In the ROOT file, two histograms per systematic have to be provided: they are the shapes obtained, for the specific process, by shifting the parameter associated with the uncertainty up and down by a standard deviation: <code>background_alphaUp</code> and <code>background_alphaDown</code>, <code>signal_sigmaUp</code> and <code>signal_sigmaDown</code>.</p> <p>The content of the ROOT file simple-shapes-TH1.root associated with the datacard data/tutorials/shapes/simple-shapes-TH1.txt is:</p> <pre><code>root [0]\nAttaching file simple-shapes-TH1.root as _file0...\nroot [1] _file0-&gt;ls()\nTFile**     simple-shapes-TH1.root\n TFile*     simple-shapes-TH1.root\n  KEY: TH1F signal;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaUp;1    Histogram of signal__x\n  KEY: TH1F signal_sigmaDown;1  Histogram of signal__x\n  KEY: TH1F background;1    Histogram of background__x\n  KEY: TH1F background_alphaUp;1    Histogram of background__x\n  KEY: TH1F background_alphaDown;1  Histogram of background__x\n  KEY: TH1F data_obs;1  Histogram of data_obs__x\n  KEY: TH1F data_sig;1  Histogram of data_sig__x\n</code></pre> <p>For example, without shape uncertainties there would only be one row with <code>shapes * * shapes.root $CHANNEL/$PROCESS</code>. Then, to give a simple example for two channels (\"e\", \"mu\") with three processes (\"higgs\", \"zz\", \"top\"), the ROOT file contents should look like:</p> <table> <thead> <tr> <th>histogram</th> <th>meaning</th> </tr> </thead> <tbody> <tr> <td><code>e/data_obs</code></td> <td>observed data in electron channel</td> </tr> <tr> <td><code>e/higgs</code></td> <td>expected shape for higgs in electron channel</td> </tr> <tr> <td><code>e/zz</code></td> <td>expected shape for ZZ in electron channel</td> </tr> <tr> <td><code>e/top</code></td> <td>expected shape for top in electron channel</td> </tr> <tr> <td><code>mu/data_obs</code></td> <td>observed data in muon channel</td> </tr> <tr> <td><code>mu/higgs</code></td> <td>expected shape for higgs in muon channel</td> </tr> <tr> <td><code>mu/zz</code></td> <td>expected shape for ZZ in muon channel</td> </tr> <tr> <td><code>mu/top</code></td> <td>expected shape for top in muon channel</td> </tr> </tbody> </table> <p>If there is also an uncertainty that affects the shape, e.g. the jet energy scale, shape histograms for the jet energy scale shifted up and down by one sigma need to be included. This could be done by creating a folder for each process and writing a line like</p> <p><code>shapes * * shapes.root $CHANNEL/$PROCESS/nominal  $CHANNEL/$PROCESS/$SYSTEMATIC</code></p> <p>or a postfix can be added to the histogram name:</p> <p><code>shapes * * shapes.root $CHANNEL/$PROCESS  $CHANNEL/$PROCESS_$SYSTEMATIC</code></p> <p>Warning</p> <p>If you have a nuisance parameter that has shape effects on some processes (using <code>shape</code>) and rate effects on other processes (using <code>lnN</code>) you should use a single line for the systematic uncertainty with <code>shape?</code>. This will tell Combine to first look for Up/Down systematic templates for that process and if it doesn't find them, it will interpret the number that you put for the process as a <code>lnN</code> instead. </p> <p>For a detailed example of a template-based binned analysis, see the H\u2192\u03c4\u03c4 2014 DAS tutorial or our Tutorial pages. 
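</p> <p>As an illustration of the <code>shape?</code> behaviour described in the warning above, a line such as the following (with hypothetical process columns and values) could be used:</p> <pre><code>jes   shape?    1     1.05   JES: morphs templates where they exist, otherwise acts as a 5% lnN\n</code></pre> <p>Combine would use template morphing for a process that has <code>_jesUp</code>/<code>_jesDown</code> histograms in the input file, and apply a 5% log-normal to a process that does not. 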
</p>"},{"location":"part2/settinguptheanalysis/#unbinned-or-parametric-shape-analyses","title":"Unbinned or parametric shape analyses","text":"<p>In some cases, it can be convenient to describe the expected signal and background shapes in terms of analytical functions, rather than templates. Typical examples are searches/measurements where the signal is apparent as a narrow peak over a smooth continuum background. In this context, uncertainties affecting the shapes of the signal and backgrounds can be implemented naturally as uncertainties in the parameters of those analytical functions. It is also possible to adopt an agnostic approach in which the parameters of the background model are left freely floating in the fit to the data, i.e. only requiring the background to be well described by a smooth function.</p> <p>Technically, this is implemented by means of the RooFit package, which allows writing generic probability density functions, and saving them into ROOT files. The PDFs can be either taken from RooFit's standard library of functions (e.g. Gaussians, polynomials, ...) or hand-coded in C++, and combined together to form even more complex shapes.</p> <p>In the datacard using templates, the column after the file name would have been the name of the histogram. For parametric analysis we need two names to identify the mapping, separated by a colon (<code>:</code>).</p> <p>shapes process channel shapes.root workspace_name:pdf_name</p> <p>The first part identifies the name of the input RooWorkspace containing the PDF, and the second part the name of the RooAbsPdf inside it (or, for the observed data, the RooAbsData). It is possible to have multiple input workspaces, just as there can be multiple input ROOT files. You can use any of the usual RooFit pre-defined PDFs for your signal and background models.</p> <p>Warning</p> <p>If in your model you are using RooAddPdfs, in which the coefficients are not defined recursively, Combine will not interpret them correctly. You can add the option <code>--X-rtd ADDNLL_RECURSIVE=0</code> to any Combine command in order to recover the correct interpretation, however we recommend that you instead re-define your PDF so that the coefficients are recursive (as described in the RooAddPdf documentation) and keep the total normalization (i.e the extended term) as a separate object, as in the case of the tutorial datacard.</p> <p>For example, take a look at the data/tutorials/shapes/simple-shapes-parametric.txt. We see the following line:</p> <pre><code>shapes * * simple-shapes-parametric_input.root w:$PROCESS\n[...]\nbin          1          1\nprocess      sig    bkg\n</code></pre> <p>which indicates that the input file <code>simple-shapes-parametric_input.root</code> should contain an input workspace (<code>w</code>) with PDFs named <code>sig</code> and <code>bkg</code>, since these are the names of the two processes in the datacard. Additionally, we expect there to be a data set named <code>data_obs</code>. 
If we look at the contents of the workspace in <code>data/tutorials/shapes/simple-shapes-parametric_input.root</code>, this is indeed what we see:</p> <pre><code>root [1] w-&gt;Print()\n\nRooWorkspace(w) w contents\n\nvariables\n---------\n(MH,bkg_norm,cc_a0,cc_a1,cc_a2,j,vogian_sigma,vogian_width)\n\np.d.f.s\n-------\nRooChebychev::bkg[ x=j coefList=(cc_a0,cc_a1,cc_a2) ] = 2.6243\nRooVoigtian::sig[ x=j mean=MH width=vogian_width sigma=vogian_sigma ] = 0.000639771\n\ndatasets\n--------\nRooDataSet::data_obs(j)\n</code></pre> <p>In this datacard, the signal is parameterized in terms of the hypothesized mass (<code>MH</code>). Combine will use this variable, which will be interpreted as the value passed with <code>-m</code>, instead of creating its own. For this reason, we should add the option <code>-m 30</code> (or something else within the observable range) when running Combine. You will also see there is a variable named <code>bkg_norm</code>. This is used to normalize the background rate (see the section on Rate parameters below for details).</p> <p>Warning</p> <p>Combine will not accept RooExtendedPdfs as input. This is to alleviate a bug that led to improper treatment of the normalization when using multiple RooExtendedPdfs to describe a single process. You should instead use RooAbsPdfs and provide the rate as a separate object (see the Rate parameters section).</p> <p>The part of the datacard related to the systematics can include lines with the syntax</p> <ul> <li>name param  X Y</li> </ul> <p>These lines encode uncertainties in the parameters of the signal and background PDFs. The parameter is to be assigned a Gaussian uncertainty of Y around its mean value of X. One can change the mean value from 0 to 1 (or any value, if one so chooses) if the parameter in question is multiplicative instead of additive.</p> <p>In the data/tutorials/shapes/simple-shapes-parametric.txt datacard, there are lines for one such parametric uncertainty,</p> <pre><code>sigma   param 1.0      0.1\n</code></pre> <p>meaning there is a parameter in the input workspace called <code>sigma</code>, which should be constrained with a Gaussian centered at 1.0 with a width of 0.1. Note that the exact interpretation of these parameters is left to the user since the signal PDF is constructed externally by you. All Combine knows is that 1.0 should be the most likely value and 0.1 is its 1\u03c3 uncertainty. Asymmetric uncertainties are written using the syntax -1\u03c3/+1\u03c3 in the datacard, as is the case for <code>lnN</code> uncertainties. </p> <p>If one wants to specify a parameter that is freely floating across its given range, and not Gaussian constrained, the following syntax is used:</p> <pre><code>name flatParam\n</code></pre> <p>This is not strictly necessary in frequentist methods using profiled likelihoods, as Combine will still profile these nuisances when performing fits (as is the case for the <code>simple-shapes-parametric.txt</code> datacard).</p> <p>Warning</p> <p>All parameters that are floating or constant in the user's input workspaces will remain floating or constant. 
Combine will not modify those for you!</p> <p>A full example of a parametric analysis can be found in this H\u2192\u03b3\u03b3 2014 DAS tutorial or in our Tutorial pages.</p>"},{"location":"part2/settinguptheanalysis/#caveat-on-using-parametric-pdfs-with-binned-datasets","title":"Caveat on using parametric PDFs with binned datasets","text":"<p>Users should be aware of a feature that affects the use of parametric PDFs together with binned datasets.</p> <p>RooFit uses the integral of the PDF, computed analytically (or numerically, but disregarding the binning), to normalize it, but computes the expected event yield in each bin by evaluating the PDF at the bin center. This means that if the variation of the pdf is sizeable within the bin, there is a mismatch between the sum of the event yields per bin and the PDF normalization, which can cause a bias in the fits. More specifically, the bias is present if the contribution of the second derivative integrated in the bin size is not negligible. For linear functions, an evaluation at the bin center is correct. There are two recommended ways to work around this issue:</p> <p>1. Use narrow bins</p> <p>It is recommended to use bins that are significantly finer than the characteristic scale of the PDFs. Even in the absence of this feature, this would be advisable. Note that this caveat does not apply to analyses using templates (they are constant across each bin, so there is no bias), or using unbinned datasets.</p> <p>2. Use a RooParametricShapeBinPdf</p> <p>Another solution (currently only implemented for 1-dimensional histograms) is to use a custom PDF that performs the correct integrals internally, as in RooParametricShapeBinPdf.</p> <p>Note that this PDF class now allows parameters that are themselves RooAbsReal objects (i.e. functions of other variables). The integrals are handled internally by calling the underlying PDF's <code>createIntegral()</code> method with named ranges created for each of the bins. This means that if the analytical integrals for the underlying PDF are available, they will be used.</p> <p>The constructor for this class requires a RooAbsReal (eg any RooAbsPdf) along with a list of RooRealVars (the parameters, excluding the observable \\(x\\)),</p> <pre><code>RooParametricShapeBinPdf(const char *name, const char *title,  RooAbsReal&amp; _pdf, RooAbsReal&amp; _x, RooArgList&amp; _pars, const TH1 &amp;_shape )\n</code></pre> <p>Below is a comparison of a fit to a binned dataset containing 1000 events with one observable \\(0 \\leq x \\leq 100\\). The fit function is a RooExponential of the form \\(e^{xp}\\).</p> <p> </p> <p>In the upper plot, the data are binned in 100 evenly-spaced bins, while in the lower plot, there are three irregular bins. The blue lines show the result of the fit when using the RooExponential directly, while the red lines show the result when wrapping the PDF inside a RooParametricShapeBinPdf. In the narrow binned case, the two agree well, while for wide bins, accounting for the integral over the bin yields a better fit.</p> <p>You should note that using this class will result in slower fits, so you should first decide whether the added accuracy is enough to justify the reduced efficiency.</p>"},{"location":"part2/settinguptheanalysis/#beyond-simple-datacards","title":"Beyond simple datacards","text":"<p>Datacards can be extended in order to provide additional functionality and flexibility during runtime. 
These extensions also allow for the production of more complicated models and more advanced results.</p>"},{"location":"part2/settinguptheanalysis/#rate-parameters","title":"Rate parameters","text":"<p>The overall expected rate of a particular process in a particular bin does not necessarily need to be a fixed quantity. Scale factors can be introduced to modify the rate directly in the datacards for ANY type of analysis. This can be achieved using the directive <code>rateParam</code> in the datacard with the following syntax,</p> <pre><code>name rateParam bin process initial_value [min,max]\n</code></pre> <p>The <code>[min,max]</code> argument is optional. If it is not included, Combine will remove the range of this parameter. This will produce a new parameter in the model (unless one with that name already exists), which multiplies the rate of that particular process in the given bin by its value.</p> <p>You can attach the same <code>rateParam</code> to multiple processes/bins by either using a wildcard (eg <code>*</code> will match everything, <code>QCD_*</code> will match everything starting with <code>QCD_</code>, etc.) in the name of the bin and/or process, or by repeating the <code>rateParam</code> line in the datacard for different bins/processes with the same name.</p> <p>Warning</p> <p><code>rateParam</code> is not a shortcut to evaluate the post-fit yield of a process since other nuisance parameters can also change the normalization. E.g., finding that the <code>rateParam</code> best-fit value is 0.9 does not necessarily imply that the process yield is 0.9 times the initial yield. The best approach is to evaluate the yield taking into account the values of all nuisance parameters using <code>--saveNormalizations</code>.</p> <p>This parameter is, by default, freely floating. It is possible to include a Gaussian constraint on any <code>rateParam</code> that is floating (i.e not a <code>formula</code> or spline) by adding a <code>param</code> nuisance line in the datacard with the same name.</p> <p>In addition to rate modifiers that are freely floating, modifiers that are functions of other parameters can be included using the following syntax,</p> <pre><code>name rateParam bin process formula args\n</code></pre> <p>where <code>args</code> is a comma-separated list of the arguments for the string <code>formula</code>. You can include other nuisance parameters in the <code>formula</code>, including ones that are Gaussian constrained (i.e. via the <code>param</code> directive).</p> <p>Below is an example datacard that uses the <code>rateParam</code> directive to implement an ABCD-like method in Combine. 
For a more realistic description of its use for ABCD, see the single-lepton SUSY search implementation described here.</p> <pre><code>imax 4  number of channels\njmax 0  number of processes -1\nkmax *  number of nuisance parameters (sources of systematical uncertainties)\n-------\nbin                   B      C       D        A\nobservation           50    100      500      10\n-------\nbin                   B      C       D        A\nprocess               bkg    bkg     bkg      bkg\nprocess               1      1       1         1\nrate                  1      1       1         1\n-------\n\nalpha rateParam A bkg (@0*@1/@2) beta,gamma,delta\nbeta  rateParam B bkg 50\ngamma rateParam C bkg 100\ndelta rateParam D bkg 500\n</code></pre> <p>For more examples of using <code>rateParam</code> (eg for fitting process normalizations in control regions and signal regions simultaneously) see this 2016 CMS tutorial.</p> <p>Finally, any pre-existing RooAbsReal inside some ROOT file with a workspace can be imported using the following:</p> <pre><code>name rateParam bin process rootfile:workspacename\n</code></pre> <p>The name should correspond to the name of the object that is being picked up inside the RooWorkspace. A simple example using the SM XS and BR splines available in HiggsAnalysis/CombinedLimit can be found under data/tutorials/rate_params/simple_sm_datacard.txt.</p>"},{"location":"part2/settinguptheanalysis/#extra-arguments","title":"Extra arguments","text":"<p>If a parameter is intended to be used (for example in a <code>rateParam</code> formula), and it is not a user-defined <code>param</code> or <code>rateParam</code>, it can be picked up by first issuing an <code>extArg</code> directive before the line in which it is used in the datacard. The syntax for <code>extArg</code> is:</p> <pre><code>name extArg rootfile:workspacename\n</code></pre> <p>The string \":RecycleConflictNodes\" can be added at the end of the final argument (i.e. rootfile:workspacename:RecycleConflictNodes) to apply the corresponding RooFit option when the object is imported into the workspace. It is also possible to simply add a RooRealVar using <code>extArg</code>, for use in <code>rateParam</code> formulas, with the following syntax:</p> <pre><code>name extArg init [min,max]\n</code></pre> <p>Note that the <code>[min,max]</code> argument is optional and if not included, the code will remove the range of this parameter.</p>"},{"location":"part2/settinguptheanalysis/#manipulation-of-nuisance-parameters","title":"Manipulation of Nuisance parameters","text":"<p>It can often be useful to modify datacards, or the runtime behavior, without having to modify individual systematic lines. This can be achieved through nuisance parameter modifiers.</p>"},{"location":"part2/settinguptheanalysis/#nuisance-modifiers","title":"Nuisance modifiers","text":"<p>If a nuisance parameter needs to be renamed for certain processes/channels, it can be done using a single <code>nuisance edit</code> directive at the end of a datacard.</p> <p><pre><code>nuisance edit rename process channel oldname newname [options]\n</code></pre> Note that the wildcard (*) can be used for either a process, a channel, or both. This will have the effect that nuisance parameters affecting a given process/channel will be renamed, thereby de-correlating them between processes/channels. Use the option <code>ifexists</code> to skip/avoid an error if the nuisance parameter is not found. This kind of command will only affect nuisances of the type <code>shape[N]</code>, <code>lnN</code>. 
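For illustration, a line such as the following (all names are hypothetical) would rename the <code>JES</code> nuisance parameter to <code>JES_ch1</code> for the <code>ttbar</code> process in channel <code>ch1</code> only, decorrelating it from <code>JES</code> elsewhere:</p> <pre><code>nuisance edit rename ttbar ch1 JES JES_ch1 ifexists\n</code></pre> <p>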
Instead, if you also want to change the names of <code>param</code> type nuisances, you can use a global version:</p> <p><pre><code>nuisance edit rename oldname newname\n</code></pre> which will rename all <code>shape[N]</code>, <code>lnN</code> and <code>param</code> nuisances found in one go. You should make sure these commands come after any process/channel specific ones in the datacard. This version does not accept options. </p> <p>Other edits are also supported, as follows:</p> <ul> <li><code>nuisance edit add process channel name pdf value [options]</code> -&gt; add a new nuisance parameter to a process</li> <li><code>nuisance edit drop process channel name [options]</code> -&gt; remove this nuisance from the process/channel. Use the option <code>ifexists</code> to skip/avoid errors if the nuisance parameter is not found.</li> <li><code>nuisance edit changepdf name newpdf</code> -&gt; change the PDF type of a given nuisance parameter to <code>newpdf</code>.</li> <li><code>nuisance edit split process channel oldname newname1 newname2 value1 value2</code> -&gt; split a nuisance parameter line into two separate nuisance parameters called <code>newname1</code> and <code>newname2</code> with values <code>value1</code> and <code>value2</code>. This will produce two separate lines so that the original nuisance parameter <code>oldname</code> is split into two uncorrelated nuisances.</li> <li><code>nuisance edit freeze name [options]</code> -&gt; set the nuisance parameter to be frozen by default. This can be overridden on the command line using the <code>--floatNuisances</code> option. Use the option <code>ifexists</code> to skip/avoid errors if the nuisance parameter is not found.</li> <li><code>nuisance edit merge process channel name1 name2</code> -&gt; merge systematic <code>name2</code> into <code>name1</code> by adding their values in quadrature and removing <code>name2</code>. This only works if, for each process and channel included, the uncertainties both increase or both reduce the process yield. For example, you can add 1.1 to 1.2, but not to 0.9.</li> </ul> <p>The above edits (excluding the renaming) support nuisance parameters of the types <code>shape[N]</code>, <code>lnN</code>, <code>lnU</code>, <code>gmN</code>, <code>param</code>, <code>flatParam</code>, <code>rateParam</code>, or <code>discrete</code>.</p>"},{"location":"part2/settinguptheanalysis/#groups-of-nuisances","title":"Groups of nuisances","text":"<p>Often it is desirable to freeze one or more nuisance parameters to check the impact they have on limits, likelihood scans, significances, etc.</p> <p>However, for large groups of nuisance parameters (eg everything associated to theory) it is easier to define nuisance groups in the datacard. The following line in a datacard will, for example, produce a group of nuisance parameters with the group name <code>theory</code> that contains two parameters, <code>QCDscale</code> and <code>pdf</code>.</p> <pre><code>theory group = QCDscale pdf\n</code></pre> <p>Multiple groups can be defined in this way. It is also possible to extend nuisance parameter groups in datacards using += in place of =.</p> <p>These groups can be manipulated at runtime (eg for freezing all nuisance parameters associated to a group at runtime, see Running the tool). 
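For example, with the <code>theory</code> group defined above, all of its members could be frozen in a single option (<code>datacard.txt</code> here is a placeholder name):</p> <pre><code>combine -M AsymptoticLimits datacard.txt --freezeNuisanceGroups theory\n</code></pre> <p>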
You can find more info on groups of nuisances here.</p> <p>Note that when using the automatic addition of statistical uncertainties (autoMCStats), the corresponding nuisance parameters are created by <code>text2workspace.py</code> and so do not exist in the datacards. It is therefore not possible to add autoMCStats parameters to groups of nuisances in the way described above. However, <code>text2workspace.py</code> will automatically create a group labelled <code>autoMCStats</code>, which contains all autoMCStats parameters.</p> <p>This group is useful for freezing all parameters created by autoMCStats. To freeze only a subset of the parameters, the regular expression features can be used. For example, if the datacard contains two categories, cat_label_1 and cat_label_2, the autoMCStats parameters created for category cat_label_1 alone can be frozen using <code>--freezeParameters 'rgx{prop_bincat_label_1_bin.*}'</code>.</p>"},{"location":"part2/settinguptheanalysis/#combination-of-multiple-datacards","title":"Combination of multiple datacards","text":"<p>If you have separate channels, each with its own datacard, it is possible to produce a combined datacard using the script <code>combineCards.py</code>.</p> <p>The syntax is simple: <code>combineCards.py Name1=card1.txt Name2=card2.txt .... &gt; card.txt</code>. If the input datacards had just one bin each, the output channels will be called <code>Name1</code>, <code>Name2</code>, and so on. Otherwise, a prefix <code>Name1_</code> ... <code>Name2_</code> will be added to the bin labels in each datacard. The supplied bin names <code>Name1</code>, <code>Name2</code>, etc. must themselves conform to valid C++/python identifier syntax.</p> <p>Warning</p> <p>When combining datacards, you should keep in mind that systematic uncertainties that have different names will be assumed to be uncorrelated, and those with the same name will be assumed 100% correlated. An uncertainty correlated across channels must have the same PDF in all cards (i.e. always <code>lnN</code>, or always <code>gmN</code> with the same <code>N</code>; note that <code>shape</code> and <code>lnN</code> can be interchanged via the <code>shape?</code> directive). Furthermore, when using parametric models, \"parameter\" objects such as <code>RooRealVar</code>, <code>RooAbsReal</code>, and <code>RooAbsCategory</code> (parameters, PDF indices etc) with the same name will be assumed to be the same object. If this is not intended, you may encounter unexpected behaviour, such as the order of combining cards having an impact on the results. Make sure that such objects are named differently in your inputs if they represent different things! In contrast, Combine will try to rename other \"shape\" objects (such as PDFs) automatically. </p> <p>The <code>combineCards.py</code> script will fail if you are trying to combine a shape datacard with a counting datacard. You can however convert a counting datacard into an equivalent shape-based one by adding a line <code>shapes * * FAKE</code> in the datacard after the <code>imax</code>, <code>jmax</code>, and <code>kmax</code> section. Alternatively, you can add the option <code>-S</code> to <code>combineCards.py</code>, which will do this for you while creating the combined datacard.</p>"},{"location":"part2/settinguptheanalysis/#automatic-production-of-datacards-and-workspaces","title":"Automatic production of datacards and workspaces","text":"<p>For complicated analyses or cases in which multiple datacards are needed (e.g. 
optimization studies), you can avoid writing these by hand. The <code>Datacard</code> object defines the analysis and can be created directly in python. The template python script below will produce the same workspace as running <code>text2workspace.py</code> (see the section on Physics Models) on the realistic-counting-experiment.txt datacard.</p> <pre><code>from HiggsAnalysis.CombinedLimit.DatacardParser import *\nfrom HiggsAnalysis.CombinedLimit.ModelTools import *\nfrom HiggsAnalysis.CombinedLimit.ShapeTools import *\nfrom HiggsAnalysis.CombinedLimit.PhysicsModel import *\n\nfrom sys import exit\nfrom optparse import OptionParser\nparser = OptionParser()\naddDatacardParserOptions(parser)\noptions,args = parser.parse_args()\noptions.bin = True # make a binary workspace\n\nDC = Datacard()\nMB = None\n\n############## Setup the datacard (must be filled in) ###########################\n\nDC.bins =   ['bin1'] # &lt;type 'list'&gt;\nDC.obs =    {'bin1': 0.0} # &lt;type 'dict'&gt;\nDC.processes =  ['ggH', 'qqWW', 'ggWW', 'others'] # &lt;type 'list'&gt;\nDC.signals =    ['ggH'] # &lt;type 'list'&gt;\nDC.isSignal =   {'qqWW': False, 'ggWW': False, 'ggH': True, 'others': False} # &lt;type 'dict'&gt;\nDC.keyline =    [('bin1', 'ggH', True), ('bin1', 'qqWW', False), ('bin1', 'ggWW', False), ('bin1', 'others', False)] # &lt;type 'list'&gt;\nDC.exp =    {'bin1': {'qqWW': 0.63, 'ggWW': 0.06, 'ggH': 1.47, 'others': 0.22}} # &lt;type 'dict'&gt;\nDC.systs =  [('lumi', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 1.11, 'ggH': 1.11, 'others': 0.0}}), ('xs_ggH', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 0.0, 'ggH': 1.16, 'others': 0.0}}), ('WW_norm', False, 'gmN', [4], {'bin1': {'qqWW': 0.16, 'ggWW': 0.0, 'ggH': 0.0, 'others': 0.0}}), ('xs_ggWW', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 1.5, 'ggH': 0.0, 'others': 0.0}}), ('bg_others', False, 'lnN', [], {'bin1': {'qqWW': 0.0, 'ggWW': 0.0, 'ggH': 0.0, 'others': 1.3}})] # &lt;type 'list'&gt;\nDC.shapeMap =   {} # &lt;type 'dict'&gt;\nDC.hasShapes =  False # &lt;type 'bool'&gt;\nDC.flatParamNuisances =  {} # &lt;type 'dict'&gt;\nDC.rateParams =  {} # &lt;type 'dict'&gt;\nDC.extArgs =    {} # &lt;type 'dict'&gt;\nDC.rateParamsOrder  =  set([]) # &lt;type 'set'&gt;\nDC.frozenNuisances  =  set([]) # &lt;type 'set'&gt;\nDC.systematicsShapeMap =  {} # &lt;type 'dict'&gt;\nDC.nuisanceEditLines    =  [] # &lt;type 'list'&gt;\nDC.groups   =  {} # &lt;type 'dict'&gt;\nDC.discretes    =  [] # &lt;type 'list'&gt;\n\n\n###### User defined options #############################################\n\noptions.out      = \"combine_workspace.root\"     # Output workspace name\noptions.fileName = \"./\"             # Path to input ROOT files\noptions.verbose  = \"1\"              # Verbosity\n\n##########################################################################\n\nif DC.hasShapes:\n    MB = ShapeBuilder(DC, options)\nelse:\n    MB = CountingModelBuilder(DC, options)\n\n# Set physics models\nMB.setPhysics(defaultModel)\nMB.doModel()\n</code></pre> <p>Any existing datacard can be converted into such a template python script by using the <code>--dump-datacard</code> option in <code>text2workspace.py</code>, in case a more complicated template is needed.</p> <p>Warning</p> <p>The above is not advised for final results, as this script is not easily combined with other analyses, so it should only be used for internal studies.</p> <p>For the automatic generation of datacards that are combinable, you should instead use the CombineHarvester package, which includes many features 
for producing complex datacards in a reliable, automated way.</p>"},{"location":"part2/settinguptheanalysis/#sanity-checking-the-datacard","title":"Sanity checking the datacard","text":"<p>For large combinations with multiple channels/processes etc, the <code>.txt</code> file can get unwieldy to read through. There are some simple tools to help check and digest the contents of the cards. </p> <p>In order to get a quick view of the systematic uncertainties included in the datacard, you can use the <code>test/systematicsAnalyzer.py</code> tool. This will produce a list of the systematic uncertainties (normalization and shape), indicating what type they are, which channels/processes they affect and the size of the effect on the normalization (for shape uncertainties, this will just be the overall uncertainty on the normalization).</p> <p>The default output is a <code>.html</code> file that can be expanded to give more details about the effect of the systematic uncertainty for each channel/process. Add the option <code>--format brief</code> to obtain a simpler summary report directly to the terminal. An example output for the tutorial card <code>data/tutorials/shapes/simple-shapes-TH1.txt</code> is shown below.</p> <pre><code>python test/systematicsAnalyzer.py data/tutorials/shapes/simple-shapes-TH1.txt --all -f html &gt; out.html\n</code></pre> <p>This will produce the following Nuisance Report in html format:</p> <table> <thead> <tr> <th>Nuisance (types)</th> <th>Range</th> <th>Processes</th> <th>Channels</th> </tr> </thead> <tbody> <tr> <td>lumi (lnN)</td> <td>1.000 - 1.100</td> <td>background, signal</td> <td>bin1(1): signal(1.1), background(1.0)</td> </tr> <tr> <td>alpha (shape)</td> <td>1.111 - 1.150</td> <td>background</td> <td>bin1(1): background(0.900/1.150 (shape))</td> </tr> <tr> <td>bgnorm (lnN)</td> <td>1.000 - 1.300</td> <td>background, signal</td> <td>bin1(1): signal(1.0), background(1.3)</td> </tr> <tr> <td>sigma (shape)</td> <td>1.000 - 1.000</td> <td>signal</td> <td>bin1(1): signal(1.000/1.000 (shape))</td> </tr> </tbody> </table> <p>In case you only have a counting experiment datacard, include the option <code>--noshape</code>.</p> <p>If you have a datacard that uses several <code>rateParams</code> or a Physics model that includes a complicated product of normalization terms in each process, you can check the values of the normalization (and which objects in the workspace comprise them) using the <code>test/printWorkspaceNormalisations.py</code> tool. 
As an example, the first few blocks of output for the tutorial card <code>data/tutorials/counting/realistic-multi-channel.txt</code> are given below:</p> <pre><code>text2workspace.py data/tutorials/counting/realistic-multi-channel.txt \npython test/printWorkspaceNormalisations.py data/tutorials/counting/realistic-multi-channel.root     \n</code></pre> Show example output <pre><code>---------------------------------------------------------------------------\n---------------------------------------------------------------------------\nChannel - mu_tau\n---------------------------------------------------------------------------\n  Top-level normalisation for process ZTT -&gt; n_exp_binmu_tau_proc_ZTT\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_binmu_tau_proc_ZTT @ 0x6bbb610\n    nominal value: 329\n    log-normals (3):\n         kappa = 1.23, logKappa = 0.207014, theta = tauid = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = ZtoLL = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = effic = 0\n    asymm log-normals (0):\n    other terms (0):\n\n  -------------------------------------------------------------------------\n  default value =  329.0\n---------------------------------------------------------------------------\n  Top-level normalisation for process QCD -&gt; n_exp_binmu_tau_proc_QCD\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_binmu_tau_proc_QCD @ 0x6bbcaa0\n    nominal value: 259\n    log-normals (1):\n         kappa = 1.1, logKappa = 0.0953102, theta = QCDmu = 0\n    asymm log-normals (0):\n    other terms (0):\n\n  -------------------------------------------------------------------------\n  default value =  259.0\n---------------------------------------------------------------------------\n  Top-level normalisation for process higgs -&gt; n_exp_binmu_tau_proc_higgs\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_binmu_tau_proc_higgs @ 0x6bc6390\n    nominal value: 0.57\n    log-normals (3):\n         kappa = 1.11, logKappa = 0.10436, theta = lumi = 0\n         kappa = 1.23, logKappa = 0.207014, theta = tauid = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = effic = 0\n    asymm log-normals (0):\n    other terms (1):\n         term r (class RooRealVar), value = 1\n\n  -------------------------------------------------------------------------\n  default value =  0.57\n---------------------------------------------------------------------------\n---------------------------------------------------------------------------\nChannel - e_mu\n---------------------------------------------------------------------------\n  Top-level normalisation for process ZTT -&gt; n_exp_bine_mu_proc_ZTT\n  -------------------------------------------------------------------------\nDumping ProcessNormalization n_exp_bine_mu_proc_ZTT @ 0x6bc8910\n    nominal value: 88\n    log-normals (2):\n         kappa = 1.04, logKappa = 0.0392207, theta = ZtoLL = 0\n         kappa = 1.04, logKappa = 0.0392207, theta = effic = 0\n    asymm log-normals (0):\n    other terms (0):\n\n  -------------------------------------------------------------------------\n  default value =  88.0\n---------------------------------------------------------------------------\n</code></pre> <p>As you can see, for each channel, a report is given for the top-level rate object in the workspace, for each process contributing to 
that channel. You can also see the various terms that make up that rate. The default value is for the default parameters in the workspace (i.e when running <code>text2workspace</code>, these are the values created as default).</p> <p>Another example is shown below for the workspace produced from the data/tutorials/shapes/simple-shapes-parametric.txt datacard.</p> <pre><code>text2workspace.py data/tutorials/shapes/simple-shapes-parametric.txt -m 30\npython test/printWorkspaceNormalisations.py data/tutorials/shapes/simple-shapes-parametric.root -m 30\n</code></pre> Show example output <pre><code>  ---------------------------------------------------------------------------\n  ---------------------------------------------------------------------------\n  Channel - bin1\n  ---------------------------------------------------------------------------\n    Top-level normalisation for process bkg -&gt; n_exp_final_binbin1_proc_bkg\n    -------------------------------------------------------------------------\n  RooProduct::n_exp_final_binbin1_proc_bkg[ n_exp_binbin1_proc_bkg * shapeBkg_bkg_bin1__norm ] = 521.163\n   ... is a product, which contains  n_exp_binbin1_proc_bkg\n  RooRealVar::n_exp_binbin1_proc_bkg = 1 C  L(-INF - +INF)\n    -------------------------------------------------------------------------\n    default value =  521.163204829\n  ---------------------------------------------------------------------------\n    Top-level normalisation for process sig -&gt; n_exp_binbin1_proc_sig\n    -------------------------------------------------------------------------\n  Dumping ProcessNormalization n_exp_binbin1_proc_sig @ 0x464f700\n      nominal value: 1\n      log-normals (1):\n           kappa = 1.1, logKappa = 0.0953102, theta = lumi = 0\n      asymm log-normals (0):\n      other terms (1):\n           term r (class RooRealVar), value = 1\n\n    -------------------------------------------------------------------------\n    default value =  1.0\n</code></pre> <p>This tells us that the normalization for the background process, named <code>n_exp_final_binbin1_proc_bkg</code> is a product of two objects <code>n_exp_binbin1_proc_bkg * shapeBkg_bkg_bin1__norm</code>. The first object is just from the rate line in the datacard (equal to 1) and the second is a floating parameter. For the signal, the normalisation is called <code>n_exp_binbin1_proc_sig</code> and is a <code>ProcessNormalization</code> object that contains the rate modifications due to the systematic uncertainties. You can see that it also has a \"nominal value\", which again is just from the value given in the rate line of the datacard (again=1).</p>"},{"location":"part3/commonstatsmethods/","title":"Common Statistical Methods","text":"<p>In this section, the most commonly used statistical methods from Combine will be covered, including specific instructions on how to obtain limits, significances, and likelihood scans. For all of these methods, the assumed parameter of interest (POI) is the overall signal strength \\(r\\) (i.e the default PhysicsModel). In general however, the first POI in the list of POIs (as defined by the PhysicsModel) will be taken instead of r. 
This may or may not make sense for any particular method, so care must be taken.</p> <p>This section will assume that you are using the default physics model, unless otherwise specified.</p>"},{"location":"part3/commonstatsmethods/#asymptotic-frequentist-limits","title":"Asymptotic Frequentist Limits","text":"<p>The <code>AsymptoticLimits</code> method can be used to quickly compute an estimate of the observed and expected limits, which is accurate when the event yields are not too small and the systematic uncertainties do not play a major role in the result. The limit calculation relies on an asymptotic approximation of the distributions of the LHC test statistic, which is based on a profile likelihood ratio, under the signal and background hypotheses to compute two p-values \\(p_{\\mu}, p_{b}\\) and therefore \\(CL_s=p_{\\mu}/(1-p_{b})\\) (see the FAQ section for a description). This means it is the asymptotic approximation of evaluating limits with frequentist toys using the LHC test statistic. In the definition below, the parameter \\(\\mu=r\\).</p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mu} = -2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu\\). The value of \\(q_{\\mu}\\) is set to 0 when \\(\\hat{\\mu}&gt;\\mu\\), giving a one-sided limit. Furthermore, the constraint \\(\\mu&gt;0\\) is enforced in the fit. This means that if the unconstrained value of \\(\\hat{\\mu}\\) would be negative, the test statistic \\(q_{\\mu}\\) is evaluated as \\(-2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(0,\\hat{\\hat{\\nu}}(0))]\\)</li> </ul> <p>This method is the default Combine method: if you call Combine without specifying <code>-M</code>, the <code>AsymptoticLimits</code> method will be run.</p> <p>A realistic example of a datacard for a counting experiment can be found in the HiggsCombination package: data/tutorials/counting/realistic-counting-experiment.txt</p> <p>The <code>AsymptoticLimits</code> method can be run using</p> <pre><code>combine -M AsymptoticLimits realistic-counting-experiment.txt\n</code></pre> <p>The program will print the limit on the signal strength r (number of signal events / number of expected signal events), e.g. <code>Observed Limit: r &lt; 1.6297 @ 95% CL</code>, the median expected limit <code>Expected 50.0%: r &lt; 2.3111</code>, and the edges of the 68% and 95% ranges for the expected limits.</p> <pre><code> &lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; method used to compute upper limit is AsymptoticLimits\n[...]\n -- AsymptoticLimits ( CLs ) --\nObserved Limit: r &lt; 1.6281\nExpected  2.5%: r &lt; 0.9640\nExpected 16.0%: r &lt; 1.4329\nExpected 50.0%: r &lt; 2.3281\nExpected 84.0%: r &lt; 3.9800\nExpected 97.5%: r &lt; 6.6194\n\nDone in 0.01 min (cpu), 0.01 min (real)\n</code></pre> <p>By default, the limits are calculated using the CL<sub>s</sub> prescription, as noted in the output, which takes the ratio of p-values under the signal plus background and background-only hypotheses. This can be changed to the strict p-value by using the option <code>--rule CLsplusb</code> (note that <code>CLsplusb</code> is the jargon for calculating the p-value \\(p_{\\mu}\\)). You can also change the confidence level from the default of 95%, e.g. to 90%, using the option <code>--cl 0.9</code>. 
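For instance, the two options can be combined to compute a 90% CL limit using the \\(p_{\\mu}\\) rule on the same datacard:</p> <pre><code>combine -M AsymptoticLimits realistic-counting-experiment.txt --rule CLsplusb --cl 0.90\n</code></pre> <p>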
You can find the full list of options for <code>AsymptoticLimits</code> using <code>--help -M AsymptoticLimits</code>.</p> <p>Warning</p> <p>You may find that Combine issues a warning that the best fit for the background-only Asimov dataset returns a nonzero value for the signal strength:</p> <p><code>WARNING: Best fit of asimov dataset is at r = 0.220944 (0.011047 times</code> <code>rMax), while it should be at zero</code></p> <p>If this happens, you should check to make sure that there are no issues with the datacard or the Asimov generation used for your setup. For details on debugging, it is recommended that you follow the simple checks used by the HIG PAG here.</p> <p>The program will also create a ROOT file <code>higgsCombineTest.AsymptoticLimits.mH120.root</code> containing a ROOT tree <code>limit</code> that contains the limit values and other bookkeeping information. The important columns are <code>limit</code> (the limit value) and <code>quantileExpected</code> (-1 for the observed limit, 0.5 for the median expected limit, 0.16/0.84 for the edges of the 68% interval band of expected limits, 0.025/0.975 for 95%).</p> <pre><code>$ root -l higgsCombineTest.AsymptoticLimits.mH120.root\nroot [0] limit-&gt;Scan(\"*\")\n************************************************************************************************************************************\n*    Row   *     limit *  limitErr *        mh *      syst *      iToy *     iSeed *  iChannel *     t_cpu *    t_real * quantileE *\n************************************************************************************************************************************\n*        0 * 0.9639892 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.0250000 *\n*        1 * 1.4329109 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.1599999 *\n*        2 *  2.328125 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 *       0.5 *\n*        3 * 3.9799661 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.8399999 *\n*        4 * 6.6194028 *         0 *       120 *         1 *         0 *    123456 *         0 *         0 *         0 * 0.9750000 *\n*        5 * 1.6281188 * 0.0050568 *       120 *         1 *         0 *    123456 *         0 * 0.0035000 * 0.0055123 *        -1 *\n************************************************************************************************************************************\n</code></pre>"},{"location":"part3/commonstatsmethods/#blind-limits","title":"Blind limits","text":"<p>The <code>AsymptoticLimits</code> calculation follows the frequentist paradigm for calculating expected limits. This means that the routine will first fit the observed data, conditionally for a fixed value of r, and set the nuisance parameters to the values obtained in the fit for generating the Asimov data set. This means it calculates the post-fit or a-posteriori expected limit. 
In order to use the pre-fit nuisance parameters (to calculate an a-priori limit), you must add the option <code>--noFitAsimov</code> or <code>--bypassFrequentistFit</code>.</p> <p>For blinding the results completely (i.e not using the data) you can include the option <code>--run blind</code>.</p> <p>Warning</p> <p>While you can use <code>-t -1</code> to get blind limits, if the correct options are passed, we strongly recommend to use <code>--run blind</code>.</p>"},{"location":"part3/commonstatsmethods/#splitting-points","title":"Splitting points","text":"<p>In case your model is particularly complex, you can perform the asymptotic calculation by determining the value of CL<sub>s</sub> for a set grid of points (in <code>r</code>) and merging the results. This is done by using the option <code>--singlePoint X</code> for multiple values of X, hadd'ing the output files and reading them back in,</p> <pre><code>combine -M AsymptoticLimits realistic-counting-experiment.txt --singlePoint 0.1 -n 0.1\ncombine -M AsymptoticLimits realistic-counting-experiment.txt --singlePoint 0.2 -n 0.2\ncombine -M AsymptoticLimits realistic-counting-experiment.txt --singlePoint 0.3 -n 0.3\n...\n\nhadd limits.root higgsCombine*.AsymptoticLimits.*\n\ncombine -M AsymptoticLimits realistic-counting-experiment.txt --getLimitFromGrid limits.root\n</code></pre>"},{"location":"part3/commonstatsmethods/#asymptotic-significances","title":"Asymptotic Significances","text":"<p>The significance of a result is calculated using a ratio of profiled likelihoods, one in which the signal strength is set to 0 and the other in which it is free to float. The evaluated quantity is \\(-2\\ln[\\mathcal{L}(\\mu=0,\\hat{\\hat{\\nu}}(0))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu=0\\).</p> <p>The distribution of this test statistic can be determined using Wilks' theorem provided the number of events is large enough (i.e in the Asymptotic limit). The significance (or p-value) can therefore be calculated very quickly. The <code>Significance</code> method can be used for this.</p> <p>It is also possible to calculate the ratio of likelihoods between the freely floating signal strength to that of a fixed signal strength other than 0, by specifying it with the option <code>--signalForSignificance=X</code>.</p> <p>Info</p> <p>This calculation assumes that the signal strength can only be positive (i.e we are not interested in negative signal strengths). This behaviour can be altered by including the option <code>--uncapped</code>.</p>"},{"location":"part3/commonstatsmethods/#compute-the-observed-significance","title":"Compute the observed significance","text":"<p>The observed significance is calculated using the <code>Significance</code> method, as</p> <p><code>combine -M Significance datacard.txt</code></p> <p>The printed output will report the significance and the p-value, for example, when using the realistic-counting-experiment.txt datacard, you will see</p> <pre><code> &lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; method used is Significance\n[...]\n -- Significance --\nSignificance: 0\n       (p-value = 0.5)\nDone in 0.00 min (cpu), 0.01 min (real)\n</code></pre> <p>which is not surprising since 0 events were observed in that datacard.</p> <p>The output ROOT file will contain the significance value in the branch limit. To store the p-value instead, include the option <code>--pval</code>. 
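For example, using the same datacard as above:</p> <pre><code>combine -M Significance realistic-counting-experiment.txt --pval\n</code></pre> <p>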
The significance and p-value can be converted between one another using the RooFit functions <code>RooFit::PValueToSignificance</code> and <code>RooFit::SignificanceToPValue</code>.</p> <p>When calculating the significance, you may find it useful to resort to a brute-force fitting algorithm that scans the nll (repeating fits until a certain tolerance is reached), bypassing MINOS, which can be activated with the option <code>bruteForce</code>. This can be tuned using the options <code>setBruteForceAlgo</code>, <code>setBruteForceTypeAndAlgo</code> and <code>setBruteForceTolerance</code>.</p>"},{"location":"part3/commonstatsmethods/#computing-the-expected-significance","title":"Computing the expected significance","text":"<p>The expected significance can be computed from an Asimov data set of signal+background. There are two options for this:</p> <ul> <li>a-posteriori expected: will depend on the observed dataset.</li> <li>a-priori expected (the default behavior): does not depend on the observed dataset, and so is a good metric for optimizing an analysis when still blinded.</li> </ul> <p>The a-priori expected significance from the Asimov dataset is calculated as</p> <pre><code>combine -M Significance datacard.txt -t -1 --expectSignal=1\n</code></pre> <p>In order to produce the a-posteriori expected significance, just generate a post-fit Asimov data set by adding the option <code>--toysFreq</code> in the command above.</p> <p>The output format is the same as for observed significances: the variable limit in the tree will be filled with the significance (or with the p-value if you also include the option <code>--pvalue</code>).</p>"},{"location":"part3/commonstatsmethods/#bayesian-limits-and-credible-regions","title":"Bayesian Limits and Credible regions","text":"<p>Bayesian calculation of limits requires the user to assume a particular prior distribution for the parameter of interest (default r). You can specify the prior using the <code>--prior</code> option; the default is a flat prior in r.</p>"},{"location":"part3/commonstatsmethods/#computing-the-observed-bayesian-limit-for-simple-models","title":"Computing the observed bayesian limit (for simple models)","text":"<p>The <code>BayesianSimple</code> method computes a Bayesian limit by performing classical numerical integration. This is very fast and accurate, but only works for simple models (a few channels and nuisance parameters).</p> <pre><code>combine -M BayesianSimple simple-counting-experiment.txt\n[...]\n\n -- BayesianSimple --\nLimit: r &lt; 0.672292 @ 95% CL\nDone in 0.04 min (cpu), 0.05 min (real)\n</code></pre> <p>The output tree will contain a single entry corresponding to the observed 95% confidence level upper limit. The confidence level can be modified to 100*X% using <code>--cl X</code>.</p>"},{"location":"part3/commonstatsmethods/#computing-the-observed-bayesian-limit-for-arbitrary-models","title":"Computing the observed bayesian limit (for arbitrary models)","text":"<p>The <code>MarkovChainMC</code> method computes a Bayesian limit by performing a Monte Carlo integration. From the statistical point of view it is identical to the <code>BayesianSimple</code> method, only the technical implementation is different. The method is slower, but can also handle complex models. For this method you can increase the accuracy of the result by increasing the number of Markov Chains, at the expense of a longer running time (option <code>--tries</code>, default is 10). 
Let's use the realistic counting experiment datacard to test the method.</p> <p>To use the MarkovChainMC method, users need to specify this method in the command line, together with the options they want to use. For instance, to set the number of times the algorithm will run with different random seeds, use the option <code>--tries</code>:</p> <pre><code>combine -M MarkovChainMC realistic-counting-experiment.txt --tries 100\n[...]\n\n -- MarkovChainMC --\nLimit: r &lt; 2.20438 +/- 0.0144695 @ 95% CL (100 tries)\nAverage chain acceptance: 0.078118\nDone in 0.14 min (cpu), 0.15 min (real)\n</code></pre> <p>Again, the resulting limit tree will contain the result. You can also save the chains using the option <code>--saveChain</code>, which will then also be included in the output file.</p> <p>Exclusion regions can be made from the posterior once an ordering principle is defined to decide how to grow the contour (there is an infinite number of possible regions that contain 68% of the posterior pdf). Below is a simple example script that can be used to plot the posterior distribution from these chains and calculate the smallest such region. Note that in this example we are ignoring the burn-in. This can be added by e.g. changing <code>for i in range(mychain.numEntries()):</code> to <code>for i in range(200,mychain.numEntries()):</code> for a burn-in of 200.</p> Show example script <pre><code>\nimport ROOT\n\nrmin = 0\nrmax = 30\nnbins = 100\nCL = 0.95\nchains = \"higgsCombineTest.MarkovChainMC.blahblahblah.root\"\n\ndef findSmallestInterval(hist,CL):\n bins = hist.GetNbinsX()\n best_i = 1\n best_j = 1\n bd = bins+1\n val = 0\n for i in range(1,bins+1):\n   integral = hist.GetBinContent(i)\n   for j in range(i+1,bins+2):\n    integral += hist.GetBinContent(j)\n    if integral &gt; CL :\n      val = integral\n      break\n   if integral &gt; CL and  j-i &lt; bd :\n     bd = j-i\n     best_j = j+1\n     best_i = i\n     val = integral\n return hist.GetBinLowEdge(best_i), hist.GetBinLowEdge(best_j), val\n\nfi_MCMC = ROOT.TFile.Open(chains)\n# Sum up all of the chains (or we could take the average limit)\nmychain = None\nfor k in fi_MCMC.Get(\"toys\").GetListOfKeys():\n    obj = k.ReadObj()\n    if mychain is None:\n        mychain = obj.GetAsDataSet()\n    else:\n        mychain.append(obj.GetAsDataSet())\nhist = ROOT.TH1F(\"h_post\",\";r;posterior probability\",nbins,rmin,rmax)\nfor i in range(mychain.numEntries()):\n#for i in range(200,mychain.numEntries()): burn-in of 200\n  mychain.get(i)\n  hist.Fill(mychain.get(i).getRealValue(\"r\"), mychain.weight())\nhist.Scale(1./hist.Integral())\nhist.SetLineColor(1)\nvl,vu,trueCL = findSmallestInterval(hist,CL)\nhistCL = hist.Clone()\nfor b in range(nbins):\n  if histCL.GetBinLowEdge(b+1) &lt; vl or histCL.GetBinLowEdge(b+2)&gt;vu: histCL.SetBinContent(b+1,0)\nc6a = ROOT.TCanvas()\nhistCL.SetFillColor(ROOT.kAzure-3)\nhistCL.SetFillStyle(1001)\nhist.Draw()\nhistCL.Draw(\"histFsame\")\nhist.Draw(\"histsame\")\nll = ROOT.TLine(vl,0,vl,2*hist.GetBinContent(hist.FindBin(vl))); ll.SetLineColor(2); ll.SetLineWidth(2)\nlu = ROOT.TLine(vu,0,vu,2*hist.GetBinContent(hist.FindBin(vu))); lu.SetLineColor(2); lu.SetLineWidth(2)\nll.Draw()\nlu.Draw()\n\nprint(\" %g %% (%g %%) interval (target)  = %g &lt; r &lt; %g \"%(trueCL,CL,vl,vu))\n</code></pre> <p>Running the script on the output file produced for the same datacard (including the <code>--saveChain</code> option) will produce the following output</p> <pre><code>0.950975 % (0.95 %) interval (target)  = 0 &lt; r 
&lt; 2.2\n</code></pre> <p>along with a plot of the posterior distribution shown below. This is the same as the output from Combine, but the script can also be used to find lower limits (for example) or credible intervals.</p> <p></p> <p>An example to make contours when ordering by probability density can be found in bayesContours.cxx. Note that the implementation is simplistic, with no clever handling of bin sizes nor smoothing of statistical fluctuations.</p> <p>The <code>MarkovChainMC</code> algorithm has many configurable parameters, and you are encouraged to experiment with those. The default configuration might not be the best for your analysis.</p>"},{"location":"part3/commonstatsmethods/#iterations-burn-in-tries","title":"Iterations, burn-in, tries","text":"<p>Three parameters control how the MCMC integration is performed:</p> <ul> <li>the number of tries (option <code>--tries</code>): the algorithm will run multiple times with different random seeds. The truncated mean and RMS of the different results are reported. The default value is 10, which should be sufficient for a quick computation. For a more accurate result you might want to increase this number up to even ~200.</li> <li>the number of iterations (option <code>-i</code>) determines how many points are proposed to fill a single Markov Chain. The default value is 10k, and a plausible range is between 5k (for quick checks) and 20-30k for lengthy calculations. Beyond 30k, the time vs accuracy can be balanced better by increasing the number of chains (option <code>--tries</code>).</li> <li>the number of burn-in steps (option <code>-b</code>) is the number of points that are removed from the beginning of the chain before using it to compute the limit. The default is 200. If the chain is very long, we recommend to increase this value a bit (e.g. to several hundreds). Using a number of burn-in steps below 50 is likely to result in a bias towards earlier stages of the chain before a reasonable convergence.</li> </ul>"},{"location":"part3/commonstatsmethods/#proposals","title":"Proposals","text":"<p>The option <code>--proposal</code> controls the way new points are proposed to fill in the MC chain.</p> <ul> <li>uniform: pick points at random. This works well if you have very few nuisance parameters (or none at all), but normally fails if you have many.</li> <li>gaus: Use a product of independent gaussians, one for each nuisance parameter. The sigma of the gaussian for each variable is 1/5 of the range of the variable. This behaviour can be controlled using the parameter <code>--propHelperWidthRangeDivisor</code>. This proposal appears to work well for up to around 15 nuisance parameters, provided that the range of the nuisance parameters is in the range \u00b15\u03c3. This method does not work when there are no nuisance parameters.</li> <li>ortho (default): This proposal is similar to the multi-gaussian proposal. However, at every step only a single coordinate of the point is varied, so that the acceptance of the chain is high even for a large number of nuisance parameters (i.e. more than 20).</li> <li>fit: Run a fit and use the uncertainty matrix from HESSE to construct a proposal (or the one from MINOS if the option <code>--runMinos</code> is specified). This can give biased results, so this method is not recommended in general.</li> </ul> <p>If you believe there is something going wrong, e.g. 
if your chain remains stuck after accepting only a few events, the option <code>--debugProposal</code> can be used to obtain a printout of the first N proposed points. This can help you understand what is happening; for example if you have a region of the phase space with probability zero, the gaus and fit proposal can get stuck there forever.</p>"},{"location":"part3/commonstatsmethods/#computing-the-expected-bayesian-limit","title":"Computing the expected bayesian limit","text":"<p>The expected limit is computed by generating many toy MC data sets and computing the limit for each of them. This can be done passing the option <code>-t</code> . E.g. to run 100 toys with the <code>BayesianSimple</code> method, you can run</p> <pre><code>combine -M BayesianSimple datacard.txt -t 100\n</code></pre> <p>The program will print out the mean and median limit, as well as the 68% and 95% quantiles of the distributions of the limits. This time, the output ROOT tree will contain one entry per toy.</p> <p>For more heavy methods (eg the <code>MarkovChainMC</code>) you will probably want to split this calculation into multiple jobs. To do this, just run Combine multiple times specifying a smaller number of toys (as low as <code>1</code>), using a different seed to initialize the random number generator each time. The option <code>-s</code> can be used for this; if you set it to -1, the starting seed will be initialized randomly at the beginning of the job. Finally, you can merge the resulting trees with <code>hadd</code> and look at the distribution in the merged file.</p>"},{"location":"part3/commonstatsmethods/#multidimensional-bayesian-credible-regions","title":"Multidimensional bayesian credible regions","text":"<p>The <code>MarkovChainMC</code> method allows the user to produce the posterior PDF as a function of (in principle) any number of POIs. In order to do so, you first need to create a workspace with more than one parameter, as explained in the physics models section.</p> <p>For example, let us use the toy datacard data/tutorials/multiDim/toy-hgg-125.txt (counting experiment that vaguely resembles an early H\u2192\u03b3\u03b3 analysis at 125 GeV) and convert the datacard into a workspace with 2 parameters, the ggH and qqH cross sections, using <code>text2workspace</code>.</p> <pre><code>text2workspace.py data/tutorials/multiDim/toy-hgg-125.txt -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH -o workspace.root\n</code></pre> <p>Now we just run one (or more) MCMC chain(s) and save them in the output tree. By default, the nuisance parameters will be marginalized (integrated) over their PDFs. You can ignore the complaints about not being able to compute an upper limit (since for more than 1D, this is not well-defined),</p> <pre><code>combine -M MarkovChainMC workspace.root --tries 1 --saveChain -i 1000000 -m 125 -s 12345\n</code></pre> <p>The output of the Markov Chain is again a RooDataSet of weighted events distributed according to the posterior PDF (after you cut out the burn in part), so it can be used to make histograms or other distributions of the posterior PDF. 
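For instance, a minimal PyROOT sketch that fills a 2D posterior histogram from the saved chain is given below (the file name matches the command above but should be adapted to your options, the POI names are those of the <code>floatingXSHiggs</code> model, the axis ranges are arbitrary, and no burn-in is removed):</p> <pre><code>import ROOT\n\n# output of the MarkovChainMC command above; adapt the name to your options\nf = ROOT.TFile.Open('higgsCombineTest.MarkovChainMC.mH125.12345.root')\nmychain = None\nfor k in f.Get('toys').GetListOfKeys():\n    ds = k.ReadObj().GetAsDataSet()\n    if mychain is None:\n        mychain = ds\n    else:\n        mychain.append(ds)\n\n# posterior as a weighted 2D histogram of the two POIs (ranges chosen arbitrarily)\nhist = ROOT.TH2F('h_post2d', ';r_ggH;r_qqH;posterior probability', 50, 0, 5, 50, 0, 5)\nfor i in range(mychain.numEntries()):\n    point = mychain.get(i)\n    hist.Fill(point.getRealValue('r_ggH'), point.getRealValue('r_qqH'), mychain.weight())\nhist.Scale(1. / hist.Integral())\n\nc = ROOT.TCanvas()\nhist.Draw('COLZ')\nc.SaveAs('posterior_2d.png')\n</code></pre> <p>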
See as an example bayesPosterior2D.cxx.</p> <p>Below is an example of the output of the macro,</p> <pre><code>$ root -l higgsCombineTest.MarkovChainMC....\n.L bayesPosterior2D.cxx\nbayesPosterior2D(\"bayes2D\",\"Posterior PDF\")\n</code></pre> <p></p>"},{"location":"part3/commonstatsmethods/#computing-limits-with-toys","title":"Computing Limits with toys","text":"<p>The <code>HybridNew</code> method is used to compute either the hybrid bayesian-frequentist limits, popularly known as \"CL<sub>s</sub> of LEP or Tevatron type\", or the fully frequentist limits, which are currently the method recommended by the LHC Higgs Combination Group. Note that these methods can be resource intensive for complex models.</p> <p>It is possible to define the criterion used for setting limits using <code>--rule CLs</code> (to use the CL<sub>s</sub> criterion) or <code>--rule CLsplusb</code> (to calculate the limit using \\(p_{\\mu}\\)) and, as always, the confidence level desired using <code>--cl=X</code>.</p> <p>The choice of test statistic can be made via the option <code>--testStat</code>. Different methodologies for the treatment of the nuisance parameters are available. While it is possible to mix different test statistics with different nuisance parameter treatments, we strongly recommend against this. Instead one should follow one of the following three procedures. Note that the signal strength \\(r\\) here is given the more common notation \\(\\mu\\).</p> <ul> <li> <p>LEP-style: <code>--testStat LEP --generateNuisances=1 --fitNuisances=0</code></p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mathrm{LEP}}=-2\\ln[\\mathcal{L}(\\mu=0)/\\mathcal{L}(\\mu)]\\).</li> <li>The nuisance parameters are fixed to their nominal values for the purpose of evaluating the likelihood, while for generating toys, the nuisance parameters are first randomized within their PDFs before generation of the toy.</li> </ul> </li> <li> <p>TEV-style: <code>--testStat TEV --generateNuisances=0 --generateExternalMeasurements=1 --fitNuisances=1</code></p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mathrm{TEV}}=-2\\ln[\\mathcal{L}(\\mu=0,\\hat{\\hat{\\nu}}(0))/\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))]\\), in which the nuisance parameters are profiled separately for \\(\\mu=0\\) and \\(\\mu\\).</li> <li>For the purposes of toy generation, the nuisance parameters are fixed to their post-fit values from the data (conditional on \\(\\mu\\)), while the constraint terms are randomized for the evaluation of the likelihood.</li> </ul> </li> <li> <p>LHC-style: <code>--LHCmode LHC-limits</code>, which is the shortcut for <code>--testStat LHC --generateNuisances=0 --generateExternalMeasurements=1 --fitNuisances=1</code></p> <ul> <li>The test statistic is defined using the ratio of likelihoods \\(q_{\\mu} = -2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu\\).</li> <li>The value of \\(q_{\\mu}\\) is set to 0 when \\(\\hat{\\mu}&gt;\\mu\\), giving a one-sided limit. Furthermore, the constraint \\(\\mu&gt;0\\) is enforced in the fit. 
This means that if the unconstrained value of \\(\\hat{\\mu}\\) would be negative, the test statistic \\(q_{\\mu}\\) is evaluated as \\(-2\\ln[\\mathcal{L}(\\mu,\\hat{\\hat{\\nu}}(\\mu))/\\mathcal{L}(0,\\hat{\\hat{\\nu}}(0))]\\).</li> <li>For the purposes of toy generation, the nuisance parameters are fixed to their post-fit values from the data (conditionally on the value of \\(\\mu\\)), while the constraint terms are randomized in the evaluation of the likelihood.</li> </ul> </li> </ul> <p>Warning</p> <p>The recommended style is the LHC-style. Please note that this method is sensitive to the observation in data since the post-fit (after a fit to the data) values of the nuisance parameters (assuming different values of r) are used when generating the toys. For completely blind limits you can first generate a pre-fit asimov toy data set (described in the toy data generation section) and use that in place of the data.  You can use this toy by passing the argument <code>-D toysFileName.root:toys/toy_asimov</code></p> <p>While the above shortcuts are the commonly used versions, variations can be tested. The treatment of the nuisances can be changed to the so-called \"Hybrid-Bayesian\" method, which effectively integrates over the nuisance parameters. This is especially relevant when you have very few expected events in your data, and you are using those events to constrain background processes. This can be achieved by setting <code>--generateNuisances=1 --generateExternalMeasurements=0</code>. In case you want to avoid first fitting to the data to choose the nominal values you can additionally pass <code>--fitNuisances=0</code>.</p> <p>Warning</p> <p>If you have unconstrained parameters in your model (<code>rateParam</code>, or if you are using a <code>_norm</code> variable for a PDF) and you want to use the \"Hybrid-Bayesian\" method, you must declare these as <code>flatParam</code> in your datacard. When running text2workspace you must add the option <code>--X-assign-flatParam-prior</code> in the command line. This will create uniform priors for these parameters. These are needed for this method and they would otherwise not get created.</p> <p>Info</p> <p>Note that (observed and expected) values of the test statistic stored in the instances of <code>RooStats::HypoTestResult</code> when the option <code>--saveHybridResult</code> is passed are defined without the factor 2. They are therefore twice as small as the values given by the formulas above. This factor is however included automatically by all plotting scripts supplied within the Combine package. If you use your own plotting scripts, you need to make sure to incorporate the factor 2.</p>"},{"location":"part3/commonstatsmethods/#simple-models","title":"Simple models","text":"<p>For relatively simple models, the observed and expected limits can be calculated interactively. Since the LHC-style is the recommended set of options for calculating limits using toys, we will use that in this section. 
However, the same procedure can be followed with the other sets of options.</p> <pre><code>combine realistic-counting-experiment.txt -M HybridNew --LHCmode LHC-limits\n</code></pre> Show output <pre><code>\n&lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; using the Profile Likelihood test statistics modified for upper limits (Q_LHC)\n&gt;&gt;&gt; method used is HybridNew\n&gt;&gt;&gt; random number generator seed is 123456\nComputing results starting from observation (a-posteriori)\nSearch for upper limit to the limit\n  r = 20 +/- 0\n    CLs = 0 +/- 0\n    CLs      = 0 +/- 0\n    CLb      = 0.264 +/- 0.0394263\n    CLsplusb = 0 +/- 0\n\nSearch for lower limit to the limit\nNow doing proper bracketing &amp; bisection\n  r = 10 +/- 10\n    CLs = 0 +/- 0\n    CLs      = 0 +/- 0\n    CLb      = 0.288 +/- 0.0405024\n    CLsplusb = 0 +/- 0\n\n  r = 5 +/- 5\n    CLs = 0 +/- 0\n    CLs      = 0 +/- 0\n    CLb      = 0.152 +/- 0.0321118\n    CLsplusb = 0 +/- 0\n\n  r = 2.5 +/- 2.5\n    CLs = 0.0192308 +/- 0.0139799\n    CLs = 0.02008 +/- 0.0103371\n    CLs = 0.0271712 +/- 0.00999051\n    CLs = 0.0239524 +/- 0.00783634\n    CLs      = 0.0239524 +/- 0.00783634\n    CLb      = 0.208748 +/- 0.0181211\n    CLsplusb = 0.005 +/- 0.00157718\n\n  r = 2.00696 +/- 1.25\n    CLs = 0.0740741 +/- 0.0288829\n    CLs = 0.0730182 +/- 0.0200897\n    CLs = 0.0694474 +/- 0.0166468\n    CLs = 0.0640182 +/- 0.0131693\n    CLs = 0.0595 +/- 0.010864\n    CLs = 0.0650862 +/- 0.0105575\n    CLs = 0.0629286 +/- 0.00966301\n    CLs = 0.0634945 +/- 0.00914091\n    CLs = 0.060914 +/- 0.00852667\n    CLs = 0.06295 +/- 0.00830083\n    CLs = 0.0612758 +/- 0.00778181\n    CLs = 0.0608142 +/- 0.00747001\n    CLs = 0.0587169 +/- 0.00697039\n    CLs = 0.0591432 +/- 0.00678587\n    CLs = 0.0599683 +/- 0.00666966\n    CLs = 0.0574868 +/- 0.00630809\n    CLs = 0.0571451 +/- 0.00608177\n    CLs = 0.0553836 +/- 0.00585531\n    CLs = 0.0531612 +/- 0.0055234\n    CLs = 0.0516837 +/- 0.0052607\n    CLs = 0.0496776 +/- 0.00499783\n    CLs      = 0.0496776 +/- 0.00499783\n    CLb      = 0.216635 +/- 0.00801002\n    CLsplusb = 0.0107619 +/- 0.00100693\n\nTrying to move the interval edges closer\n  r = 1.00348 +/- 0\n    CLs = 0.191176 +/- 0.0459911\n    CLs      = 0.191176 +/- 0.0459911\n    CLb      = 0.272 +/- 0.0398011\n    CLsplusb = 0.052 +/- 0.00992935\n\n  r = 1.50522 +/- 0\n    CLs = 0.125 +/- 0.0444346\n    CLs = 0.09538 +/- 0.0248075\n    CLs = 0.107714 +/- 0.0226712\n    CLs = 0.103711 +/- 0.018789\n    CLs = 0.0845069 +/- 0.0142341\n    CLs = 0.0828468 +/- 0.0126789\n    CLs = 0.0879647 +/- 0.0122332\n    CLs      = 0.0879647 +/- 0.0122332\n    CLb      = 0.211124 +/- 0.0137494\n    CLsplusb = 0.0185714 +/- 0.00228201\n\n  r = 1.75609 +/- 0\n    CLs = 0.0703125 +/- 0.0255807\n    CLs = 0.0595593 +/- 0.0171995\n    CLs = 0.0555271 +/- 0.0137075\n    CLs = 0.0548727 +/- 0.0120557\n    CLs = 0.0527832 +/- 0.0103348\n    CLs = 0.0555828 +/- 0.00998248\n    CLs = 0.0567971 +/- 0.00923449\n    CLs = 0.0581822 +/- 0.00871417\n    CLs = 0.0588835 +/- 0.00836245\n    CLs = 0.0594035 +/- 0.00784761\n    CLs = 0.0590583 +/- 0.00752672\n    CLs = 0.0552067 +/- 0.00695542\n    CLs = 0.0560446 +/- 0.00679746\n    CLs = 0.0548083 +/- 0.0064351\n    CLs = 0.0566998 +/- 0.00627124\n    CLs = 0.0561576 +/- 0.00601888\n    CLs = 0.0551643 +/- 0.00576338\n    CLs = 0.0583584 +/- 0.00582854\n    CLs = 0.0585691 +/- 0.0057078\n    CLs = 0.0599114 +/- 0.00564585\n    CLs = 0.061987 +/- 0.00566905\n    CLs = 0.061836 +/- 
0.00549856\n    CLs = 0.0616849 +/- 0.0053773\n    CLs = 0.0605352 +/- 0.00516844\n    CLs = 0.0602028 +/- 0.00502875\n    CLs = 0.058667 +/- 0.00486263\n    CLs      = 0.058667 +/- 0.00486263\n    CLb      = 0.222901 +/- 0.00727258\n    CLsplusb = 0.0130769 +/- 0.000996375\n\n  r = 2.25348 +/- 0\n    CLs = 0.0192308 +/- 0.0139799\n    CLs = 0.0173103 +/- 0.00886481\n    CLs      = 0.0173103 +/- 0.00886481\n    CLb      = 0.231076 +/- 0.0266062\n    CLsplusb = 0.004 +/- 0.001996\n\n  r = 2.13022 +/- 0\n    CLs = 0.0441176 +/- 0.0190309\n    CLs = 0.0557778 +/- 0.01736\n    CLs = 0.0496461 +/- 0.0132776\n    CLs = 0.0479048 +/- 0.0114407\n    CLs = 0.0419333 +/- 0.00925719\n    CLs = 0.0367934 +/- 0.0077345\n    CLs = 0.0339814 +/- 0.00684844\n    CLs = 0.03438 +/- 0.0064704\n    CLs = 0.0337633 +/- 0.00597315\n    CLs = 0.0321262 +/- 0.00551608\n    CLs      = 0.0321262 +/- 0.00551608\n    CLb      = 0.230342 +/- 0.0118665\n    CLsplusb = 0.0074 +/- 0.00121204\n\n  r = 2.06859 +/- 0\n    CLs = 0.0357143 +/- 0.0217521\n    CLs = 0.0381957 +/- 0.0152597\n    CLs = 0.0368622 +/- 0.0117105\n    CLs = 0.0415097 +/- 0.0106676\n    CLs = 0.0442816 +/- 0.0100457\n    CLs = 0.0376644 +/- 0.00847235\n    CLs = 0.0395133 +/- 0.0080427\n    CLs = 0.0377625 +/- 0.00727262\n    CLs = 0.0364415 +/- 0.00667827\n    CLs = 0.0368015 +/- 0.00628517\n    CLs = 0.0357251 +/- 0.00586442\n    CLs = 0.0341604 +/- 0.00546373\n    CLs = 0.0361935 +/- 0.00549648\n    CLs = 0.0403254 +/- 0.00565172\n    CLs = 0.0408613 +/- 0.00554124\n    CLs = 0.0416682 +/- 0.00539651\n    CLs = 0.0432645 +/- 0.00538062\n    CLs = 0.0435229 +/- 0.00516945\n    CLs = 0.0427647 +/- 0.00501322\n    CLs = 0.0414894 +/- 0.00479711\n    CLs      = 0.0414894 +/- 0.00479711\n    CLb      = 0.202461 +/- 0.00800632\n    CLsplusb = 0.0084 +/- 0.000912658\n\n\n -- HybridNew, before fit --\nLimit: r &lt; 2.00696 +/- 1.25 [1.50522, 2.13022]\nWarning in : Could not create the Migrad minimizer. Try using the minimizer Minuit\nFit to 5 points: 1.91034 +/- 0.0388334\n\n -- Hybrid New --\nLimit: r &lt; 1.91034 +/- 0.0388334 @ 95% CL\nDone in 0.01 min (cpu), 4.09 min (real)\nFailed to delete temporary file roostats-Sprxsw.root: No such file or directory\n\n<p></p>\n\n<p>The result stored in the limit branch of the output tree will be the upper limit (and its error, stored in limitErr). The default behaviour will be, as above, to search for the upper limit on r. However, the values of \\(p_{\\mu}, p_{b}\\) and CL<sub>s</sub> can be calculated for a particular value r=X by specifying the option <code>--singlePoint=X</code>. In this case, the value stored in the branch limit will be the value of CL<sub>s</sub> (or \\(p_{\\mu}\\)) (see the FAQ section).</p>"},{"location":"part3/commonstatsmethods/#expected-limits","title":"Expected Limits","text":"<p>For simple models, we can run interactively 5 times to compute the median expected and the 68% and 95% central interval boundaries. For this, we can use the <code>HybridNew</code> method with the same options as for the observed limit, but adding a <code>--expectedFromGrid=&lt;quantile&gt;</code>. Here, the quantile should be set to 0.5 for the median, 0.84 for the +ve side of the 68% band, 0.16 for the -ve side of the 68% band, 0.975 for the +ve side of the 95% band, and 0.025 for the -ve side of the 95% band.</p>\n<p>The output file will contain the value of the quantile in the branch quantileExpected. 
This branch can therefore be used to separate the points.</p>"},{"location":"part3/commonstatsmethods/#accuracy","title":"Accuracy","text":"<p>The search for the limit is performed using an adaptive algorithm, terminating when the estimate of the limit value is below some limit or when the precision cannot be improved further with the specified options. The options controlling this behaviour are:</p>\n<ul>\n<li><code>rAbsAcc</code>, <code>rRelAcc</code>: define the accuracy on the limit at which the search stops. The default values are 0.1 and 0.05 respectively, meaning that the search is stopped when \u0394r &lt; 0.1 or \u0394r/r &lt; 0.05.</li>\n<li><code>clsAcc</code>: this determines the absolute accuracy up to which the CLs values are computed when searching for the limit. The default is 0.5%. Raising the accuracy above this value will significantly increase the time needed to run the algorithm, as you need N<sup>2</sup> more toys to improve the accuracy by a factor N. You can consider increasing this value if you are computing limits with a larger CL (e.g. 90% or 68%). Note that if you are using the <code>CLsplusb</code> rule, this parameter will control the uncertainty on \\(p_{\\mu}\\) rather than CL<sub>s</sub>.</li>\n<li><code>T</code> or <code>toysH</code>: controls the minimum number of toys that are generated for each point. The default value of 500 should be sufficient when computing the limit at 90-95% CL. You can decrease this number if you are computing limits at 68% CL, or increase it if you are using 99% CL.</li>\n</ul>\n<p>Note, to further improve the accuracy when searching for the upper limit, Combine will also fit an exponential function to several of the points and interpolate to find the crossing.</p>"},{"location":"part3/commonstatsmethods/#complex-models","title":"Complex models","text":"<p>For complicated models, it is best to produce a grid of test statistic distributions at various values of the signal strength, and use it to compute the observed and expected limit and central intervals. This approach is convenient for complex models, since the grid of points can be distributed across any number of jobs. In this approach we will store the distributions of the test statistic at different values of the signal strength using the option <code>--saveHybridResult</code>. The distribution at a single value of r=X can be determined by</p>\n<pre><code>combine datacard.txt -M HybridNew --LHCmode LHC-limits --singlePoint X --saveToys --saveHybridResult -T 500 --clsAcc 0\n</code></pre>\n\n<p>Warning</p>\n<p>We have specified the accuracy here by including <code>--clsAcc=0</code>, which turns off adaptive sampling, and specifying the number of toys to be 500 with the <code>-T N</code> option. For complex models, it may be necessary to internally split the toys over a number of instances of <code>HybridNew</code> using the option <code>--iterations I</code>. The total number of toys will be the product I*N.</p>\n\n<p>The above can be repeated several times, in parallel, to build the distribution of the test statistic (passing the random seed option <code>-s -1</code>). 
Once all of the distributions have been calculated, the resulting output files can be merged into one using hadd, and read back to calculate the limit, specifying the merged file with <code>--grid=merged.root</code>.</p>\n<p>The observed limit can be obtained with</p>\n<pre><code>combine datacard.txt -M HybridNew --LHCmode LHC-limits --readHybridResults --grid=merged.root\n</code></pre>\n<p>and similarly, the median expected and quantiles can be determined using</p>\n<pre><code>combine datacard.txt -M HybridNew --LHCmode LHC-limits --readHybridResults --grid=merged.root --expectedFromGrid &lt;quantile&gt;\n</code></pre>\n<p>substituting <code>&lt;quantile&gt;</code> with 0.5 for the median, 0.84 for the +ve side of the 68% band, 0.16 for the -ve side of the 68% band, 0.975 for the +ve side of the 95% band, and 0.025 for the -ve side of the 95% band. </p>\n\n<p>Warning</p>\n<p>Make sure that if you specified a particular mass value (<code>-m</code> or <code>--mass</code>) in the commands for calculating the toys, you also specify the same mass when reading in the grid of distributions.</p>\n\n<p>You should note that  Combine will update the grid to improve the accuracy on the extracted limit by default. If you want to avoid this, you can use the option <code>--noUpdateGrid</code>. This will mean only the toys/points you produced in the grid will be used to compute the limit.</p>\n\n<p>Warning</p>\n<p>This option should not be used with <code>--expectedFromGrid</code> if you did not create the grid with the same option. The reason is that the value of the test-statistic that is used to calculate the limit will not be properly calcualted if <code>--noUpdateGrid</code> is included. In future versions of the tool, this option will be ignored if using <code>--expectedFromGrid</code>. </p>\n\n<p>The splitting of the jobs can be left to the user's preference. However, users may wish to use <code>combineTool.py</code> for automating this, as described in the section on combineTool for job submission</p>"},{"location":"part3/commonstatsmethods/#plotting","title":"Plotting","text":"<p>A plot of the CL<sub>s</sub> (or \\(p_{\\mu}\\)) as a function of r, which is used to find the crossing, can be produced using the option <code>--plot=limit_scan.png</code>. This can be useful for judging if the chosen grid was sufficient for determining the upper limit.</p>\n<p>If we use our realistic-counting-experiment.txt datacard and generate a grid of points \\(r\\varepsilon[1.4,2.2]\\) in steps of 0.1, with 5000 toys for each point, the plot of the observed CL<sub>s</sub> vs r should look like the following,</p>\n<p></p>\n<p>You should judge in each case whether the limit is accurate given the spacing of the points and the precision of CL<sub>s</sub> at each point. If it is not sufficient, simply generate more points closer to the limit and/or more toys at each point.</p>\n<p>The distributions of the test statistic can also be plotted, at each value in the grid, using</p>\n<pre><code>python test/plotTestStatCLs.py --input mygrid.root --poi r --val all --mass MASS\n</code></pre>\n<p>The resulting output file will contain a canvas showing the distribution of the test statistics for the background only and signal+background hypotheses at each value of r. 
Use <code>--help</code> to see more options for this script.</p>\n\n<p>Info</p>\n<p>If you used the TEV or LEP style test statistic (using the commands as described above), then you should include the option <code>--doublesided</code>, which will also take care of defining the correct integrals for \\(p_{\\mu}\\) and \\(p_{b}\\). Click on the examples below to see what a typical output of this plotting tool will look like when using the LHC test statistic, or the TEV test statistic.</p>\n\n\nqLHC test stat example\n<p></p>\n\n\nqTEV test stat example\n<p></p>"},{"location":"part3/commonstatsmethods/#computing-significances-with-toys","title":"Computing Significances with toys","text":"<p>Computation of the expected significance with toys is a two-step procedure: first you need to run one or more jobs to construct the expected distribution of the test statistic. As for setting limits, there are a number of different possible configurations for generating toys.  However, we will use the most commonly used option,</p>\n<ul>\n<li>LHC-style: <code>--LHCmode LHC-significance</code>\n, which is the shortcut for <code>--testStat LHC --generateNuisances=0 --generateExternalMeasurements=1 --fitNuisances=1 --significance</code><ul>\n<li>The test statistic is defined using the ratio of likelihoods \\(q_{0} = -2\\ln[\\mathcal{L}(\\mu=0,\\hat{\\hat{\\nu}}(0))/\\mathcal{L}(\\hat{\\mu},\\hat{\\nu})]\\), in which the nuisance parameters are profiled separately for \\(\\mu=\\hat{\\mu}\\) and \\(\\mu=0\\).</li>\n<li>The value of the test statistic is set to 0 when \\(\\hat{\\mu}&lt;0\\)</li>\n<li>For the purposes of toy generation, the nuisance parameters are fixed to their post-fit values from the data assuming no signal, while the constraint terms are randomized for the evaluation of the likelihood.</li>\n</ul>\n</li>\n</ul>"},{"location":"part3/commonstatsmethods/#observed-significance","title":"Observed significance","text":"<p>To construct the distribution of the test statistic, the following command should be run as many times as necessary</p>\n<pre><code>combine -M HybridNew datacard.txt --LHCmode LHC-significance  --saveToys --fullBToys --saveHybridResult -T toys -i iterations -s seed\n</code></pre>\n<p>with different seeds, or using <code>-s -1</code> for random seeds, then merge all those results into a single ROOT file with <code>hadd</code>. The toys can then be read back into combine using the option <code>--toysFile=input.root --readHybridResult</code>.</p>\n<p>The observed significance can be calculated as</p>\n<pre><code>combine -M HybridNew datacard.txt --LHCmode LHC-significance --readHybridResult --toysFile=input.root [--pvalue ]\n</code></pre>\n<p>where the option <code>--pvalue</code> will replace the result stored in the limit branch output tree to be the p-value instead of the signficance.</p>"},{"location":"part3/commonstatsmethods/#expected-significance-assuming-some-signal","title":"Expected significance, assuming some signal","text":"<p>The expected significance, assuming a signal with r=X can be calculated, by including the option <code>--expectSignal X</code> when generating the distribution of the test statistic and using the option <code>--expectedFromGrid=0.5</code> when calculating the significance for the median. 
To get the \u00b11\u03c3 bands, use 0.16 and 0.84 instead of 0.5, and so on.</p>\n<p>The total number of background toys needs to be large enough to compute the value of the significance, but you need fewer signal toys (especially when you are only computing the median expected significance). For large significances, you can run most of the toys without the <code>--fullBToys</code> option, which will be about a factor 2 faster. Only a small part of the toys needs to be run with that option turned on.</p>\n<p>As with calculating limits with toys, these jobs can be submitted to the grid or batch systems with the help of the <code>combineTool.py</code> script, as described in the section on combineTool for job submission.</p>"},{"location":"part3/commonstatsmethods/#goodness-of-fit-tests","title":"Goodness of fit tests","text":"<p>The <code>GoodnessOfFit</code> method can be used to evaluate how compatible the observed data are with the model PDF.</p>\n<p>This method implements several algorithms, and will compute a goodness of fit indicator for the chosen algorithm and the data. The procedure is therefore to first run on the real data</p>\n<pre><code>combine -M GoodnessOfFit datacard.txt --algo=&lt;some-algo&gt;\n</code></pre>\n<p>and then to run on many toy MC data sets to determine the distribution of the goodness-of-fit indicator</p>\n<pre><code>combine -M GoodnessOfFit datacard.txt --algo=&lt;some-algo&gt; -t &lt;number-of-toys&gt; -s &lt;seed&gt;\n</code></pre>\n<p>When computing the goodness-of-fit, by default the signal strength is left floating in the fit, so that the measure is independent of the presence or absence of a signal. It is possible to fix the signal strength to some value by passing the option <code>--fixedSignalStrength=&lt;value&gt;</code>.</p>\n<p>The following algorithms are implemented:</p>\n<ul>\n<li>\n<p><code>saturated</code>: Compute a goodness-of-fit measure for binned fits based on the saturated model, as prescribed by the Statistics Committee (note). This quantity is similar to a chi-square, but can be computed for an arbitrary combination of binned channels with arbitrary constraints.</p>\n</li>\n<li>\n<p><code>KS</code>: Compute a goodness-of-fit measure for binned fits using the Kolmogorov-Smirnov test. It is based on the largest difference between the cumulative distribution function and the empirical distribution function of any bin.</p>\n</li>\n<li>\n<p><code>AD</code>: Compute a goodness-of-fit measure for binned fits using the Anderson-Darling test. It is based on the integral of the difference between the cumulative distribution function and the empirical distribution function over all bins. It also gives the tail ends of the distribution a higher weighting.</p>\n</li>\n</ul>\n<p>The output tree will contain a branch called <code>limit</code>, which contains the value of the test statistic in each toy. You can make a histogram of this test statistic \\(t\\). From the distribution that is obtained in this way (\\(f(t)\\)) and the single value obtained by running on the observed data (\\(t_{0}\\)) you can calculate the p-value \\(p = \\int_{t=t_{0}}^{+\\infty} f(t) dt\\). Note: in rare cases the test statistic value for the toys can be undefined (for AD and KS). In this case we set the test statistic value to -1. When plotting the test statistic distribution, those toys should be excluded. 
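As a minimal PyROOT sketch of this p-value calculation (the two file names are hypothetical placeholders for the run on data and the merged toy runs, and the -1 sentinel values are skipped):</p>\n<pre><code>import ROOT\n\n# hypothetical file names: the run on data and the hadd'ed toy runs\nf_data = ROOT.TFile.Open('gof_data.root')\nt_data = f_data.Get('limit')\nt_data.GetEntry(0)\nt_obs = t_data.limit\n\nf_toys = ROOT.TFile.Open('gof_toys.root')\ntoys = [entry.limit for entry in f_toys.Get('limit') if entry.limit != -1]\n\n# p-value: fraction of toys with a test statistic at least as large as in data\np_value = sum(1.0 for t in toys if t &gt;= t_obs) / len(toys)\nprint('t0 = %g, p-value = %g' % (t_obs, p_value))\n</code></pre>\n<p>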
This is automatically taken care of if you use the GoF collection script, which is described below.</p>\n<p>When generating toys, the default behavior will be used. See the section on toy generation for options that control how nuisance parameters are generated and fitted in these tests. It is recommended to use frequentist toys (<code>--toysFreq</code>) when running the <code>saturated</code> model, and the default toys for the other two tests.</p>\n<p>Further goodness-of-fit methods could be added on request, especially if volunteers are available to code them.\nThe output limit tree will contain the value of the test statistic in each toy (or the data).</p>\n\n<p>Warning</p>\n<p>The above algorithms are all concerned with one-sample tests. For two-sample tests, you can follow an example CMS HIN analysis described in this Twiki.</p>"},{"location":"part3/commonstatsmethods/#masking-analysis-regions-in-the-saturated-model","title":"Masking analysis regions in the saturated model","text":"<p>For analyses that employ a simultaneous fit across signal and control regions, it may be useful to mask one or more analysis regions, either when the likelihood is maximized (fit) or when the test statistic is computed. This can be done by using the options <code>--setParametersForFit</code> and <code>--setParametersForEval</code>, respectively. The former will set parameters before each fit, while the latter is used to set parameters after each fit, but before the NLL is evaluated. Note, of course, that if the parameter in the list is floating, it will still be floating in each fit. Therefore, it will not affect the results when using <code>--setParametersForFit</code>.</p>\n<p>A realistic example for a binned shape analysis performed in one signal region and two control samples can be found in this directory of the Combine package Datacards-shape-analysis-multiple-regions.</p>\n<p>First of all, one needs to combine the individual datacards to build a single model, and to introduce the channel masking variables as follows:</p>\n<pre><code>combineCards.py signal_region.txt dimuon_control_region.txt singlemuon_control_region.txt &gt; combined_card.txt\ntext2workspace.py combined_card.txt --channel-masks\n</code></pre>\n<p>More information about the channel masking can be found in this\nsection Channel Masking. The saturated test statistic value for a simultaneous fit across all the analysis regions can be calculated as:</p>\n<pre><code>combine -M GoodnessOfFit -d combined_card.root --algo=saturated -n _result_sb\n</code></pre>\n<p>In this case, signal and control regions are included in both the fit and the evaluation of the test statistic, and the signal strength is freely floating. This measures the compatibility between the signal+background fit and the observed data. Moreover, it can be interesting to assess the level of compatibility between the observed data in all the regions and the background prediction obtained by only fitting the control regions (CR-only fit). This can be evaluated as follows:</p>\n<pre><code>combine -M GoodnessOfFit -d combined_card.root --algo=saturated -n _result_bonly_CRonly --setParametersForFit mask_ch1=1 --setParametersForEval mask_ch1=0 --freezeParameters r --setParameters r=0\n</code></pre>\n<p>where the signal strength is frozen and the signal region is not considered in the fit (<code>--setParametersForFit mask_ch1=1</code>), but it is included in the test statistic computation (<code>--setParametersForEval mask_ch1=0</code>). 
To show the differences between the two models being tested, one can perform a fit to the data using the FitDiagnostics method as:</p>\n<pre><code>combine -M FitDiagnostics -d combined_card.root -n _fit_result --saveShapes --saveWithUncertainties\ncombine -M FitDiagnostics -d combined_card.root -n _fit_CRonly_result --saveShapes --saveWithUncertainties --setParameters mask_ch1=1\n</code></pre>\n<p>By taking the total background, the total signal, and the data shapes from the FitDiagnostics output, we can compare the post-fit predictions from the S+B fit (first case) and the CR-only fit (second case) with the observation as reported below:</p>\n\nFitDiagnostics S+B fit\n<p></p>\n\n\nFitDiagnostics CR-only fit\n<p></p>\n\n<p>To compute a p-value for the two results, one needs to compare the observed goodness-of-fit value previously computed with the expected distribution of the test statistic obtained in toys:</p>\n<pre><code>    combine -M GoodnessOfFit combined_card.root --algo=saturated -n result_toy_sb --toysFrequentist -t 500\n    combine -M GoodnessOfFit -d combined_card.root --algo=saturated -n _result_bonly_CRonly_toy --setParametersForFit mask_ch1=1 --setParametersForEval mask_ch1=0 --freezeParameters r --setParameters r=0,mask_ch1=1 -t 500 --toysFrequentist\n</code></pre>\n<p>where the former gives the result for the S+B model, while the latter gives the test statistic for the CR-only fit. The option <code>--setParameters r=0,mask_ch1=1</code> is needed to ensure that toys are thrown using the nuisance parameters estimated from the CR-only fit to the data. The comparison between the observation and the expected distribution should look like the following two plots:</p>\n\nGoodness-of-fit for S+B model\n<p></p>\n\n\nGoodness-of-fit for CR-only model\n<p></p>"},{"location":"part3/commonstatsmethods/#making-a-plot-of-the-gof-test-statistic-distribution","title":"Making a plot of the GoF test statistic distribution","text":"<p>You can use the <code>combineTool.py</code> script to run jobs in batch or on the grid (see here) and produce a plot of the results. Once the jobs have completed, you can hadd them together and run (e.g. for the saturated model),</p>\n<pre><code>combineTool.py -M CollectGoodnessOfFit --input data_run.root toys_run.root -m 125.0 -o gof.json\nplotGof.py gof.json --statistic saturated --mass 125.0 -o gof_plot --title-right=\"my label\"\n</code></pre>"},{"location":"part3/commonstatsmethods/#channel-compatibility","title":"Channel Compatibility","text":"<p>The <code>ChannelCompatibilityCheck</code> method can be used to evaluate how compatible the measurements of the signal strength from the separate channels of a combination are with each other.</p>\n<p>The method performs two fits of the data, first with the nominal model in which all channels are assumed to have the same signal strength modifier \\(r\\), and then another allowing separate signal strengths \\(r_{i}\\) in each channel. A chisquare-like quantity is computed as \\(-2 \\ln \\mathcal{L}(\\mathrm{data}|r)/\\mathcal{L}(\\mathrm{data}|\\{r_{i}\\}_{i=1}^{N_{\\mathrm{chan}}})\\). Just like for the goodness-of-fit indicators, the expected distribution of this quantity under the nominal model can be computed from toy MC data sets.</p>\n<p>By default, the signal strength is kept floating in the fit with the nominal model. 
It can however be fixed to a given value by passing the option <code>--fixedSignalStrength=&lt;value&gt;</code>.</p>\n<p>In the default model built from the datacards, the signal strengths in all channels are constrained to be non-negative. One can allow negative signal strengths in the fits by changing the bound on the variable (option <code>--rMin=&lt;value&gt;</code>), which should make the quantity more chisquare-like under the hypothesis of zero signal; this however can create issues in channels with small backgrounds, since total expected yields and PDFs in each channel must be positive.</p>\n<p>Optionally, channels can be grouped together by using the option <code>-g &lt;name_fragment&gt;</code>, where <code>&lt;name_fragment&gt;</code> is a string which is common to all channels to be grouped together. The <code>-g</code> option can also be used to set the range for the each POI separately via <code>-g &lt;name&gt;=&lt;min&gt;,&lt;max&gt;</code>.</p>\n<p>When run with a verbosity of 1, as is the default, the program also prints out the best fit signal strengths in all channels. As the fit to all channels is done simultaneously, the correlation between the other systematic uncertainties is taken into account. Therefore, these results can differ from the ones obtained when fitting each channel separately.</p>\n<p>Below is an example output from Combine,</p>\n<pre><code>$ combine -M ChannelCompatibilityCheck comb_hww.txt -m 160 -n HWW\n &lt;&lt;&lt; Combine &gt;&gt;&gt;\n&gt;&gt;&gt; including systematics\n&gt;&gt;&gt; method used to compute upper limit is ChannelCompatibilityCheck\n&gt;&gt;&gt; random number generator seed is 123456\n\nSanity checks on the model: OK\nComputing limit starting from observation\n\n--- ChannelCompatibilityCheck ---\nNominal fit : r = 0.3431 -0.1408/+0.1636\nAlternate fit: r = 0.4010 -0.2173/+0.2724 in channel hww_0jsf_shape\nAlternate fit: r = 0.2359 -0.1854/+0.2297 in channel hww_0jof_shape\nAlternate fit: r = 0.7669 -0.4105/+0.5380 in channel hww_1jsf_shape\nAlternate fit: r = 0.3170 -0.3121/+0.3837 in channel hww_1jof_shape\nAlternate fit: r = 0.0000 -0.0000/+0.5129 in channel hww_2j_cut\nChi2-like compatibility variable: 2.16098\nDone in 0.08 min (cpu), 0.08 min (real)\n</code></pre>\n<p>The output tree will contain the value of the compatibility (chi-square variable) in the limit branch. 
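As a rough cross-check only, and under the assumption that this quantity approximately follows a \\(\\chi^{2}\\) distribution with \\(N_{\\mathrm{chan}}-1\\) degrees of freedom (an approximation that the toy-based procedure mentioned above avoids, and that the non-negativity constraints on the signal strengths can spoil), it can be converted into a compatibility p-value, e.g. for the example output above:</p>\n<pre><code>import ROOT\n\n# numbers taken from the example ChannelCompatibilityCheck output above\nchi2_like = 2.16098   # value stored in the 'limit' branch\nn_channels = 5\n\n# upper-tail probability of a chi-square with N_chan - 1 degrees of freedom\np_value = ROOT.TMath.Prob(chi2_like, n_channels - 1)\nprint('approximate compatibility p-value = %g' % p_value)\n</code></pre>\n<p>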
If the option <code>--saveFitResult</code> is specified, the output ROOT file also contains two RooFitResult objects fit_nominal and fit_alternate with the results of the two fits.</p>\n<p>This can be read and used to extract the best fit value for each channel, and the overall best fit value, using</p>\n<pre><code>$ root -l\nTFile* _file0 = TFile::Open(\"higgsCombineTest.ChannelCompatibilityCheck.mH120.root\");\nfit_alternate-&gt;floatParsFinal().selectByName(\"*ChannelCompatibilityCheck*\")-&gt;Print(\"v\");\nfit_nominal-&gt;floatParsFinal().selectByName(\"r\")-&gt;Print(\"v\");\n</code></pre>\n<p>The macro cccPlot.cxx can be used to produce a comparison plot of the best fit signal strengths from all channels.</p>"},{"location":"part3/commonstatsmethods/#likelihood-fits-and-scans","title":"Likelihood Fits and Scans","text":"<p>The <code>MultiDimFit</code> method can be used to perform multi-dimensional fits and likelihood-based scans/contours using models with several parameters of interest.</p>\n<p>Taking a toy datacard data/tutorials/multiDim/toy-hgg-125.txt (counting experiment which vaguely resembles an early H\u2192\u03b3\u03b3 analysis at 125 GeV), we need to convert the datacard into a workspace with 2 parameters, the ggH and qqH cross sections:</p>\n<pre><code>text2workspace.py toy-hgg-125.txt -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH\n</code></pre>\n<p>A number of different algorithms can be used with the option <code>--algo &lt;algo&gt;</code>,</p>\n<ul>\n<li>\n<p><code>none</code> (default):  Perform a maximum likelihood fit <code>combine -M MultiDimFit toy-hgg-125.root</code>; The output ROOT tree will contain two columns, one for each parameter, with the fitted values.</p>\n</li>\n<li>\n<p><code>singles</code>: Perform a fit of each parameter separately, treating the other parameters of interest as unconstrained nuisance parameters: <code>combine -M MultiDimFit toy-hgg-125.root --algo singles --cl=0.68</code> . The output ROOT tree will contain two columns, one for each parameter, with the fitted values; there will be one row with the best fit point (and <code>quantileExpected</code> set to -1) and two rows for each fitted parameter, where the corresponding column will contain the maximum and minimum of that parameter in the 68% CL interval, according to a one-dimensional chi-square (i.e. uncertainties on each fitted parameter do not increase when adding other parameters if they are uncorrelated). Note that if you run, for example, with <code>--cminDefaultMinimizerStrategy=0</code>, these uncertainties will be derived from the Hessian, while <code>--cminDefaultMinimizerStrategy=1</code> will invoke Minos to derive them.</p>\n</li>\n<li>\n<p><code>cross</code>:  Perform a joint fit of all parameters: <code>combine -M MultiDimFit toy-hgg-125.root --algo=cross --cl=0.68</code>. The output ROOT tree will have one row with the best fit point, and two rows for each parameter, corresponding to the minimum and maximum of that parameter on the likelihood contour corresponding to the specified CL, according to an N-dimensional chi-square (i.e. the uncertainties on each fitted parameter do increase when adding other parameters, even if they are uncorrelated). Note that this method does not produce 1D uncertainties on each parameter, and should not be taken as such.</p>\n</li>\n<li>\n<p><code>contour2d</code>: Make a 68% CL contour \u00e0 la minos <code>combine -M MultiDimFit toy-hgg-125.root --algo contour2d --points=20 --cl=0.68</code>. 
The output will contain values corresponding to the best fit point (with <code>quantileExpected</code> set to -1) and for a set of points on the contour (with <code>quantileExpected</code> set to 1-CL, or something larger than that if the contour hits the boundary of the parameters). Probabilities are computed from the the n-dimensional \\(\\chi^{2}\\) distribution. For slow models, this method can be split by running several times with a different number of points, and merging the outputs. The contourPlot.cxx macro can be used to make plots out of this algorithm.</p>\n</li>\n<li>\n<p><code>random</code>: Scan N random points and compute the probability out of the profile likelihood ratio <code>combine -M MultiDimFit toy-hgg-125.root --algo random --points=20 --cl=0.68</code>. Again, the best fit will have <code>quantileExpected</code> set to -1, while each random point will have <code>quantileExpected</code> set to the probability given by the profile likelihood ratio at that point.</p>\n</li>\n<li>\n<p><code>fixed</code>: Compare the log-likelihood at a fixed point compared to the best fit. <code>combine -M MultiDimFit toy-hgg-125.root --algo fixed --fixedPointPOIs r=r_fixed,MH=MH_fixed</code>. The output tree will contain the difference in the negative log-likelihood between the points (\\(\\hat{r},\\hat{m}_{H}\\)) and (\\(\\hat{r}_{fixed},\\hat{m}_{H,fixed}\\)) in the branch <code>deltaNLL</code>.</p>\n<p>You can use the <code>combineTool.py</code> script to run multiple fixed points from a <code>.csv</code> file. For example, data/tutorials/multiDim/fixed.csv contains the points</p>\n<pre><code>r_ggH,r_qqH\n1.0,1.0\n1.0,2.0\n2.0,1.0\n2.0,2.0\n</code></pre>\n<p>and <code>combineTool.py -M MultiDimFit toy-hgg-125.root --fromfile fixed.csv</code> will run <code>--algo fixed</code> at each of these points.</p>\n</li>\n<li>\n<p><code>grid</code>:  Scan a fixed grid of points with approximately N points in total. <code>combine -M MultiDimFit toy-hgg-125.root --algo grid --points=10000</code>.</p>\n<ul>\n<li>You can partition the job in multiple tasks by using the options <code>--firstPoint</code> and <code>--lastPoint</code>. For complicated scans, the points can be split as described in the combineTool for job submission section. The output file will contain a column <code>deltaNLL</code> with the difference in negative log-likelihood with respect to the best fit point. Ranges/contours can be evaluated by filling TGraphs or TH2 histograms with these points.</li>\n<li>By default the \"min\" and \"max\" of the POI ranges are not included and the points that are in the scan are centred , eg <code>combine -M MultiDimFit --algo grid --rMin 0 --rMax 5 --points 5</code> will scan at the points \\(r=0.5, 1.5, 2.5, 3.5, 4.5\\). You can include the option <code>--alignEdges 1</code>, which causes the points to be aligned with the end-points of the parameter ranges - e.g. <code>combine -M MultiDimFit --algo grid --rMin 0 --rMax 5 --points 6 --alignEdges 1</code> will scan at the points \\(r=0, 1, 2, 3, 4, 5\\). Note - the number of points must be increased by 1 to ensure both end points are included.</li>\n</ul>\n</li>\n</ul>\n<p>With the algorithms <code>none</code> and <code>singles</code> you can save the RooFitResult from the initial fit using the option <code>--saveFitResult</code>. The fit result is saved into a new file called <code>multidimfit.root</code>.</p>\n<p>As usual, any floating nuisance parameters will be profiled. 
This behaviour can be modified by using the <code>--freezeParameters</code> option.</p>\n<p>For most of the methods, for lower-precision results you can turn off the profiling of the nuisance parameters by using the option <code>--fastScan</code>, which for complex models speeds up the process by several orders of magnitude. All nuisance parameters will be kept fixed at the value corresponding to the best fit point.</p>\n<p>As an example, let's produce the \\(-2\\Delta\\ln{\\mathcal{L}}\\) scan as a function of <code>r_ggH</code> and <code>r_qqH</code> from the toy \\(H\\rightarrow\\gamma\\gamma\\) datacard. The command below should be pretty fast, as the statistical model is quite simple,</p>\n<pre><code>combine toy-hgg-125.root -M MultiDimFit --algo grid --points 2500 --setParameterRanges r_qqH=0,12:r_ggH=-1,4 -m 125\n</code></pre>\n<p>The scan, along with the best fit point and \\(1\\sigma\\) CL contour can be drawn using ROOT using something like the script below,</p>\n\nShow script\n<pre><code>\nvoid plot2D_LHScan(){\n\n  TFile *_file0 = TFile::Open(\"higgsCombineTest.MultiDimFit.mH125.root\");\n  TTree *limit = (TTree*) _file0-&gt;Get(\"limit\");\n\n  // create histogram representing -2Delta Log(L)\n  TCanvas *can = new TCanvas(\"c\",\"c\",600,540);\n  limit-&gt;Draw(\"2*deltaNLL:r_qqH:r_ggH&gt;&gt;h(50,-1,4,50,0,12)\",\"2*deltaNLL&lt;50\",\"prof colz\");\n  TH2F *g2NLL = (TH2F*)gROOT-&gt;FindObject(\"h\");\n\n  g2NLL-&gt;SetName(\"g2NLL\");\n  g2NLL-&gt;SetTitle(\"\");\n  g2NLL-&gt;GetXaxis()-&gt;SetTitle(\"r_{ggH}\");\n  g2NLL-&gt;GetYaxis()-&gt;SetTitle(\"r_{qqH}\");\n\n  // Get best fit point\n  limit-&gt;Draw(\"r_qqH:r_ggH\",\"quantileExpected == -1\",\"P same\");\n  TGraph *best_fit = (TGraph*)gROOT-&gt;FindObject(\"Graph\");\n\n  best_fit-&gt;SetMarkerSize(3);\n  best_fit-&gt;SetMarkerStyle(34);\n  best_fit-&gt;Draw(\"p same\");\n\n  // get 1-sigma contour\n  TH2F *h68 = (TH2F*)g2NLL-&gt;Clone();\n  h68-&gt;SetContour(2);\n  h68-&gt;SetContourLevel(1,2.3);\n  h68-&gt;SetLineWidth(3);\n  h68-&gt;SetLineColor(1);\n  h68-&gt;Draw(\"CONT3same\");\n\n  gStyle-&gt;SetOptStat(0);\n  can-&gt;SaveAs(\"2D_LHScan.png\");\n\n }\n\n<p></p>\n\n<p>This will produce a plot like the one below,</p>\n<p></p>\n<p>However for 1D likelihood scans, a python script to make plots and extract crossing of the <code>2*deltaNLL</code>, e.g the 1\u03c3/2\u03c3 boundaries, is already available <code>plot1DScan.py</code></p>\n<pre><code>plot1DScan.py higgsCombineTest.MultiDimFit.mH125.root -o output\n</code></pre>"},{"location":"part3/commonstatsmethods/#useful-options-for-likelihood-scans","title":"Useful options for likelihood scans","text":"<p>A number of common, useful options (especially for computing likelihood scans with the grid algo) are,</p>\n<ul>\n<li><code>--autoBoundsPOIs arg</code>: Adjust bounds for the POIs if they end up close to the boundary. This can be a comma-separated list of POIs, or \"*\" to get all of them.</li>\n<li><code>--autoMaxPOIs arg</code>: Adjust maxima for the POIs if they end up close to the boundary. Can be a list of POIs, or \"*\" to get all.</li>\n<li><code>--autoRange X</code>: Set to any X &gt;= 0 to do the scan in the \\(\\hat{p}\\) \\(\\pm\\) X\u03c3 range, where \\(\\hat{p}\\) and \u03c3 are the best fit parameter value and uncertainty from the initial fit (so it may be fairly approximate). 
In case you do not trust the estimate of the error from the initial fit, you can just centre the range on the best fit value by using the option <code>--centeredRange X</code> to do the scan in the \\(\\hat{p}\\) \\(\\pm\\) X range centered on the best fit value.</li>\n<li><code>--squareDistPoiStep</code>:  POI step size based on distance from the midpoint ( either (max-min)/2 or the best fit if used with <code>--autoRange</code> or <code>--centeredRange</code> ) rather than linear separation.</li>\n<li><code>--skipInitialFit</code>: Skip the initial fit (saves time if, for example, a snapshot is loaded from a previous fit)</li>\n</ul>\n<p>Below is a comparison in a likelihood scan, with 20 points, as a function of <code>r_qqH</code> with our <code>toy-hgg-125.root</code> workspace with and without some of these options. The options added tell Combine to scan more points closer to the minimum (best-fit) than with the default.</p>\n<p></p>\n<p>You may find it useful to use the <code>--robustFit=1</code> option to turn on robust (brute-force) for likelihood scans (and other algorithms). You can set the strategy and tolerance when using the <code>--robustFit</code> option using the options <code>--setRobustFitAlgo</code> (default is <code>Minuit2,migrad</code>), <code>setRobustFitStrategy</code> (default is 0) and <code>--setRobustFitTolerance</code> (default is 0.1). If these options are not set, the defaults (set using <code>cminDefaultMinimizerX</code> options) will be used.</p>\n<p>If running <code>--robustFit=1</code> with the algo singles, you can tune the accuracy of the routine used to find the crossing points of the likelihood using the option <code>--setCrossingTolerance</code> (the default is set to 0.0001)</p>\n<p>If you suspect your fits/uncertainties are not stable, you may also try to run custom HESSE-style calculation of the covariance matrix. This is enabled by running <code>MultiDimFit</code> with the <code>--robustHesse=1</code> option. A simple example of how the default behaviour in a simple datacard is given here.</p>\n<p>For a full list of options use <code>combine -M MultiDimFit --help</code></p>"},{"location":"part3/commonstatsmethods/#fitting-only-some-parameters","title":"Fitting only some parameters","text":"<p>If your model contains more than one parameter of interest, you can still decide to fit a smaller number of them, using the option <code>--parameters</code> (or <code>-P</code>), with a syntax like this:</p>\n<pre><code>combine -M MultiDimFit [...] -P poi1 -P poi2 ... --floatOtherPOIs=(0|1)\n</code></pre>\n<p>If <code>--floatOtherPOIs</code> is set to 0, the other parameters of interest (POIs), which are not included as a <code>-P</code> option, are kept fixed to their nominal values. 
"},{"location":"part3/commonstatsmethods/#fitting-only-some-parameters","title":"Fitting only some parameters","text":"<p>If your model contains more than one parameter of interest, you can still decide to fit a smaller number of them, using the option <code>--parameters</code> (or <code>-P</code>), with a syntax like this:</p>\n<pre><code>combine -M MultiDimFit [...] -P poi1 -P poi2 ... --floatOtherPOIs=(0|1)\n</code></pre>\n<p>If <code>--floatOtherPOIs</code> is set to 0, the other parameters of interest (POIs), which are not included as a <code>-P</code> option, are kept fixed to their nominal values. If it's set to 1, they are kept floating, which has different consequences depending on <code>algo</code>:</p>\n<ul>\n<li>When running with <code>--algo=singles</code>, the other floating POIs are treated as unconstrained nuisance parameters.</li>\n<li>When running with <code>--algo=cross</code> or <code>--algo=contour2d</code>, the other floating POIs are treated as other POIs, and so they increase the number of dimensions of the chi-square.</li>\n</ul>\n<p>As a result, when running with <code>--floatOtherPOIs</code> set to 1, the uncertainties on each fitted parameter do not depend on the selection of POIs passed to MultiDimFit, but only on the number of parameters of the model.</p>\n\n<p>Info</p>\n<p>Note that <code>poi</code> given to the option <code>-P</code> can also be any nuisance parameter. However, by default, the other nuisance parameters are left floating, so in general this does not need to be specified.</p>\n\n<p>You can save the values of the other parameters of interest in the output tree by passing the option <code>--saveInactivePOI=1</code>. You can additionally save the post-fit values of any nuisance parameter, function, or discrete index (RooCategory) defined in the workspace using the following options (a combined example is shown after this list):</p>\n<ul>\n<li><code>--saveSpecifiedNuis=arg1,arg2,...</code> will store the fitted value of any specified constrained nuisance parameter. Use <code>all</code> to save every constrained nuisance parameter. Note that if you want to store the values of <code>flatParams</code> (or floating parameters that are not defined in the datacard) or <code>rateParams</code>,  which are unconstrained, you should instead use the generic option <code>--trackParameters</code> as described here.</li>\n<li><code>--saveSpecifiedFunc=arg1,arg2,...</code> will store the value of any function (eg <code>RooFormulaVar</code>) in the model.</li>\n<li><code>--saveSpecifiedIndex=arg1,arg2,...</code> will store the index of any <code>RooCategory</code> object - eg a <code>discrete</code> nuisance.</li>\n</ul>
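\n<p>As a combined, purely illustrative example, the following command would fit <code>r_ggH</code> and <code>r_qqH</code> while letting any other POIs float, and would additionally store the inactive POIs, all constrained nuisance parameters and the index of a <code>RooCategory</code> called <code>pdf_index</code> (the parameter names here are placeholders for those in your own workspace),</p>\n<pre><code>combine workspace.root -M MultiDimFit --algo singles -P r_ggH -P r_qqH --floatOtherPOIs=1 --saveInactivePOI=1 --saveSpecifiedNuis=all --saveSpecifiedIndex=pdf_index\n</code></pre>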
"},{"location":"part3/commonstatsmethods/#using-best-fit-snapshots","title":"Using best fit snapshots","text":"<p>This can be used to save time when performing scans so that the best fit does not need to be repeated. It can also be used to perform scans with some nuisance parameters frozen to their best-fit values. This can be done as follows,</p>\n<ul>\n<li>Create a workspace for a floating \\(r,m_{H}\\) fit</li>\n</ul>\n<pre><code>text2workspace.py hgg_datacard_mva_8TeV_bernsteins.txt -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingHiggsMass --PO higgsMassRange=120,130 -o testmass.root\n</code></pre>\n<ul>\n<li>Perform the fit, saving the workspace</li>\n</ul>\n<pre><code>combine -m 123 -M MultiDimFit --saveWorkspace -n teststep1 testmass.root  --verbose 9\n</code></pre>\n<p>Now we can load the best fit \\(\\hat{r},\\hat{m}_{H}\\) and fit for \\(r\\) freezing \\(m_{H}\\) and lumi_8TeV to their best-fit values,</p>\n<pre><code>combine -m 123 -M MultiDimFit -d higgsCombineteststep1.MultiDimFit.mH123.root -w w --snapshotName \"MultiDimFit\" -n teststep2  --verbose 9 --freezeParameters MH,lumi_8TeV\n</code></pre>"},{"location":"part3/commonstatsmethods/#feldman-cousins","title":"Feldman-Cousins","text":"<p>The Feldman-Cousins (FC) procedure for computing confidence intervals for a generic model is,</p>\n<ul>\n<li>use the profile likelihood ratio as the test statistic, \\(q(\\vec{\\mu}) = - 2 \\ln \\mathcal{L}(\\vec{\\mu},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}))/\\mathcal{L}(\\hat{\\vec{\\mu}},\\hat{\\vec{\\nu}})\\) where \\(\\vec{\\mu}\\) is a point in the (N-dimensional) parameter space, and \\(\\hat{\\vec{\\mu}}\\) is the point corresponding to the best fit. In this test statistic, the nuisance parameters are profiled, both in the numerator and denominator.</li>\n<li>for each point \\(\\vec{\\mu}\\):<ul>\n<li>compute the observed test statistic \\(q_{\\mathrm{obs}}(\\vec{\\mu})\\)</li>\n<li>compute the expected distribution of \\(q(\\vec{\\mu})\\) under the hypothesis of \\(\\vec{\\mu}\\) as the true value.</li>\n<li>accept the point in the region if \\(p_{\\vec{\\mu}}=P\\left[q(\\vec{\\mu}) &gt; q_{\\mathrm{obs}}(\\vec{\\mu})| \\vec{\\mu}\\right] &gt; \\alpha\\)</li>\n</ul>\n</li>\n</ul>\n<p>Here \\(\\alpha\\) is the critical value of the test.</p>\n<p>In Combine, you can perform this test on each individual point (param1, param2,...) = (value1,value2,...) by doing,</p>\n<pre><code>combine workspace.root -M HybridNew --LHCmode LHC-feldman-cousins --clsAcc 0 --singlePoint  param1=value1,param2=value2,param3=value3,... --saveHybridResult [Other options for toys, iterations etc as with limits]\n</code></pre>\n<p>Note that you can also split this calculation into several separate runs (remembering to set a random seed <code>-s -1</code> each time the above command is run) and <code>hadd</code> the resulting <code>.root</code> output files into a single file <code>toys.root</code>. This can then be read in and used to calculate \\(p_{\\vec{\\mu}}\\) by using the same command as above but replacing the option <code>--saveHybridResult</code> with <code>--readHybridResult --toysFile toys.root</code>.  </p>\n<p>The point belongs to your confidence region if \\(p_{\\vec{\\mu}}\\) is larger than \\(\\alpha\\) (e.g. 0.3173 for a 1\u03c3 region, \\(1-\\alpha=0.6827\\)).</p>
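\n<p>Putting the steps above together, a minimal sketch of this split-and-merge workflow for a single parameter point could look like the following (the number of jobs, the parameter point and the output names are purely illustrative),</p>\n<pre><code># run several batches of toys for the same point, each with a random seed\nfor j in 1 2 3 4 5; do\n    combine workspace.root -M HybridNew --LHCmode LHC-feldman-cousins --clsAcc 0 --singlePoint param1=value1 --saveToys --saveHybridResult -s -1 -n point1.${j}\ndone\n\n# merge the outputs and compute p_mu for this point\nhadd -f toys.root higgsCombinepoint1.*.HybridNew.*.root\ncombine workspace.root -M HybridNew --LHCmode LHC-feldman-cousins --clsAcc 0 --singlePoint param1=value1 --readHybridResult --toysFile toys.root\n</code></pre>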
\n\n<p>Warning</p>\n<p>You should not use this method without the option <code>--singlePoint</code>. Although Combine will not complain, the algorithm to find the crossing will only find a single crossing and therefore not find the correct interval. Instead you should calculate the Feldman-Cousins intervals as described above.</p>"},{"location":"part3/commonstatsmethods/#physical-boundaries","title":"Physical boundaries","text":"<p>Imposing physical boundaries (such as requiring \\(r&gt;0\\) for a signal strength \\(r\\) ) is achieved by setting the ranges of the physics model parameters using</p>\n<pre><code>--setParameterRanges param1=param1_min,param1_max:param2=param2_min,param2_max ....\n</code></pre>\n<p>The boundary is imposed by restricting the parameter range(s) to those set by the user, in the fits. Note that this is a trick! The actual fitted value, as one of an ensemble of outcomes, can fall outside of the allowed region, while the boundary should be imposed on the physical parameter. The effect of restricting the parameter value in the fit is such that the test statistic is modified as follows:</p>\n\\[q(\\vec{\\mu}) = - 2 \\ln \\mathcal{L}(\\vec{\\mu},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}))/\\mathcal{L}(\\hat{\\vec{\\mu}},\\hat{\\vec{\\nu}}),\\]\n<p>if \\(\\hat{\\vec{\\mu}}\\) is contained in the bounded range</p>\n<p>and,</p>\n\\[q(\\vec{\\mu}) = - 2 \\ln \\mathcal{L}(\\vec{\\mu},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}))/\\mathcal{L}(\\vec{\\mu}_{B},\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}_{B})),\\]\n<p>if \\(\\hat{\\vec{\\mu}}\\) is outside of the bounded range. Here \\(\\vec{\\mu}_{B}\\) and \\(\\hat{\\hat{\\vec{\\nu}}}(\\vec{\\mu}_{B})\\) are the values of \\(\\vec{\\mu}\\) and \\(\\vec{\\nu}\\) which maximise the likelihood excluding values outside of the bounded region for \\(\\vec{\\mu}\\) - typically, \\(\\vec{\\mu}_{B}\\) will be found at one of the boundaries which is imposed. For example, if there is one parameter of interest \\(\\mu\\) and the boundary \\(\\mu&gt;0\\) is imposed, you will typically expect \\(\\mu_{B}=0\\) when \\(\\hat{\\mu}\\leq 0\\), and \\(\\mu_{B}=\\hat{\\mu}\\) otherwise.</p>\n<p>This can sometimes be an issue as Minuit may not know if it has successfully converged when the minimum lies outside of that range. If there is no upper/lower boundary, just set that value to something far from the region of interest.</p>\n\n<p>Info</p>\n<p>One can also imagine imposing the boundaries by first allowing Minuit to find the minimum in the unrestricted region and then setting the test statistic to that in the case that minimum lies outside the physical boundary. This would avoid potential issues of convergence. If you are interested in implementing this version in Combine, please contact the development team.</p>"},{"location":"part3/commonstatsmethods/#extracting-contours-from-results-files","title":"Extracting contours from results files","text":"<p>As in general for <code>HybridNew</code>, you can split the task into multiple tasks (grid and/or batch) and then merge the outputs with <code>hadd</code>. You can also refer to the combineTool for job submission section for submitting the jobs to the grid/batch. If you have more than one parameter of interest, see the instructions for running <code>HybridNew</code> on a grid of parameter points on the CombineHarvester - HybridNewGrid documentation.</p>"},{"location":"part3/commonstatsmethods/#extracting-1d-intervals","title":"Extracting 1D intervals","text":"<p>For one-dimensional models only, and if the parameter behaves like a cross section, the code is able to interpolate and determine the values of your parameter on the contour (just like it does for the limits). As with limits, read in the grid of points and extract 1D intervals using,</p>
\n<pre><code>combine workspace.root -M HybridNew --LHCmode LHC-feldman-cousins --readHybridResults --grid=mergedfile.root --cl &lt;1-alpha&gt;\n</code></pre>\n<p>The output tree will contain the values of the POI that cross the critical value (\\(\\alpha\\)) - i.e. the boundaries of the confidence intervals.</p>\n<p>You can produce a plot of the value of \\(p_{\\vec{\\mu}}\\) vs the parameter of interest \\(\\vec{\\mu}\\) by adding the option <code>--plot &lt;plotname&gt;</code>.</p>\n<p>As an example, we will use the <code>data/tutorials/multiDim/toy-hgg-125.txt</code> datacard and find the 1D FC 68% interval for the \\(r_{qqH}\\) parameter. First, we construct the model as, </p>\n<pre><code>text2workspace.py -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH toy-hgg-125.txt -o toy-hgg-125.root\n</code></pre>\n<p>Now we generate the grid of test statistics in a suitable range. You could use <code>combineTool.py</code> as below, but for 1D we can just generate the points in a for loop. </p>\n<pre><code>for i in 0.1 1.1 2.1 3.1 4.1 5.1 6.1 7.1 8.1 9.1 10.1 ; do combine toy-hgg-125.root --redefineSignalPOI r_qqH   -M HybridNew --LHCmode LHC-feldman-cousins --clsAcc 0 --singlePoint r_qqH=${i} --saveToys --saveHybridResult -n ${i} ; done\n\nhadd -f FeldmanCousins1D.root higgsCombine*.1.HybridNew.mH120.123456.root\n</code></pre>\n<p>Next, we get Combine to calculate the interval from this grid. \n<pre><code>combine toy-hgg-125.root -M HybridNew --LHCmode LHC-feldman-cousins --readHybridResults --grid=FeldmanCousins1D.root --cl 0.68 --redefineSignalPOI r_qqH\n</code></pre>\nand we should see the following output, </p>\n<pre><code> -- HybridNew --\nfound 68 % confidence regions\n  2.19388 (+/- 0.295316) &lt; r_qqH &lt; 8.01798 (+/- 0.0778685)\nDone in 0.00 min (cpu), 0.00 min (real)\n</code></pre>\n<p>If we also include the <code>--plot</code> option, we will additionally get a plot like the one below, </p>\n<p></p>"},{"location":"part3/commonstatsmethods/#extracting-2d-contours-general-intervals","title":"Extracting 2D contours / general intervals","text":"<p>For two-dimensional models, or if the parameter does not behave like a cross section, you will need to extract the contours from the output of <code>HybridNew</code> and plot them yourself. We will use the <code>data/tutorials/multiDim/toy-hgg-125.txt</code> datacard in the example below to demonstrate how this can be done. Let's build the model again as we did in the MultiDimFit section.</p>\n<pre><code>text2workspace.py -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:floatingXSHiggs --PO modes=ggH,qqH toy-hgg-125.txt -o toy-hgg-125.root\n</code></pre>\n<p>First, we use <code>combineTool.py</code> to create jobs for each point in our parameter scan. We want to impose the boundaries that \\(r_{ggH}&gt;0\\), \\(r_{qqH}&gt;0\\).\nIn the example below, we will run in interactive mode so this can take a little while. You can instead run using a batch cluster (eg <code>condor</code>) or the grid (<code>grid</code>) to submit separate jobs for each point / set of points. We configure the tool by specifying the grid of points in <code>poi_grid_configuration.json</code> as below. 
Here we want 5000 toys for each point, and we choose a grid of \\(r_{ggH}\\in [0,4]\\) in steps of 0.2, and \\(r_{qqH}\\in[0,10]\\) in steps of 0.5.</p>\n<pre><code>{\n  \"verbose\" : true,\n  \"opts\" : \" --LHCmode LHC-feldman-cousins --saveHybridResult --clsAcc 0 \",\n  \"POIs\" : [\"r_ggH\", \"r_qqH\"],\n  \"grids\" : [\n    [\"0:4|.2\",\"0:10|.5\",\"\"]\n  ],\n  \"toys_per_cycle\"  : 5000,\n  \"FC\" : true,\n  \"min_toys\": 5000,\n  \"max_toys\": 50000,\n  \"output_incomplete\" : true,\n  \"make_plots\": false,\n  \"contours\":[\"obs\"],\n  \"CL\": 0.68,\n  \"output\": \"FeldmanCousins.root\",\n  \"zipfile\"         : \"collected.zip\",\n  \"statusfile\"      : \"status.json\"\n }\n</code></pre>\n<p>The command will look like,</p>\n<pre><code>combineTool.py -M HybridNewGrid  ./poi_grid_configuration.json -d toy-hgg-125.root --task-name fc2d --job-mode 'interactive' --cycles 1\n</code></pre>\n<p>As mentioned, this will take a while to run so you should consider going to make a cup of coffee at this point and reading through the HybridNewGrid documentation to learn more about this tool.\nOnce this is done, we extract the values of \\(p_{\\vec{\\mu}}\\) for each point in our parameter space using the same command, but this time setting <code>--cycles 0</code> and adding the option <code>--output</code>,</p>\n<pre><code>combineTool.py -M HybridNewGrid  ./poi_grid_configuration.json -d toy-hgg-125.root --task-name fc2d --job-mode 'interactive' --cycles 0 --output\n</code></pre>\n<p>which will produce a file <code>FeldmanCousins.root</code> (as defined in the <code>\"output\"</code> field of <code>poi_grid_configuration.json</code>) that contains a <code>TGraph2D</code> which stores the calculated value of \\(p_{\\vec{\\mu}}\\) for each point in the grid. Using something like the macro below, these values can be plotted along with a contour corresponding to 68% CL (\\(\\alpha=0.32\\)).</p>\n\nShow script\n<pre><code>\nvoid plot_2DFC(){\n\n  TFile *_file0 = TFile::Open(\"FeldmanCousins.root\");\n\n  TCanvas *can = new TCanvas(\"c\",\"c\",600,540);\n\n  // Draw p_x\n  TGraph2D *gpX = (TGraph2D*)_file0-&gt;Get(\"obs\");\n  gpX-&gt;Draw(\"colz\");\n\n  // Draw 68% contour\n  TH2F *h68 = (TH2F*)gpX-&gt;GetHistogram()-&gt;Clone(\"h68\");\n  h68-&gt;SetContour(2);\n  h68-&gt;SetContourLevel(1,0.32);\n  h68-&gt;SetLineWidth(3);\n  h68-&gt;SetLineColor(1);\n  h68-&gt;Draw(\"CONT3same\");\n\n  gpX-&gt;SetTitle(\"\");\n  gpX-&gt;GetXaxis()-&gt;SetTitle(\"r_{ggH}\");\n  gpX-&gt;GetYaxis()-&gt;SetTitle(\"r_{qqH}\");\n\n\n  gStyle-&gt;SetOptStat(0);\n  can-&gt;SaveAs(\"2D_FC.png\");\n }\n \n<p></p>\n\n<p>It will produce the plot below.</p>\n<p></p>\n<p>There are several options for reducing the running time, such as setting limits on the region of interest or the minimum number of toys required for a point to be included.</p>"},{"location":"part3/debugging/","title":"Debugging fits","text":"<p>When a fit fails there are several things you can do to investigate. CMS users can have a look at these slides from a previous Combine tutorial. This section contains a few pointers for some of the methods mentioned in the slides.</p>"},{"location":"part3/debugging/#analyzing-the-nll-shape-in-each-parameter","title":"Analyzing the NLL shape in each parameter","text":"<p>The <code>FastScan</code> mode of <code>combineTool.py</code> can be used to analyze the shape of the NLL as a function of each parameter in the fit model. 
The NLL is evaluated varying a single parameter at a time, the other parameters stay at the default values they have in the workspace. This produces a file with the NLL, plus its first and second derivatives, as a function of each parameter. Discontinuities in the derivatives, particularly if they are close to the minimum of the parameter, can be the source of issues with the fit. </p> <p>The usage is as follows:</p> <p><code>combineTool.py -M FastScan -w workspace.root:w</code></p> <p>Note that this will make use of the data in the workspace for evaluating the NLL. To run this on an asimov data set, with r=1 injected, you can do the following:</p> <pre><code>combine -M GenerateOnly workspace.root -t -1 --saveToys --setParameters r=1\n\ncombineTool.py -M FastScan -w workspace.root:w -d higgsCombineTest.GenerateOnly.mH120.123456.root:toys/toy_asimov\n</code></pre> <p><code>higgsCombineTest.GenerateOnly.mH120.123456.root</code> is generated by the first command; if you pass a value for <code>-m</code> or change the default output file name with <code>-n</code> the file name will be different and you should change the <code>combineTool</code> call accordingly.</p>"},{"location":"part3/nonstandard/","title":"Advanced Use Cases","text":"<p>This section will cover some of the more specific use cases for Combine that are not necessarily related to the main results of the analysis.</p>"},{"location":"part3/nonstandard/#fit-diagnostics","title":"Fit Diagnostics","text":"<p>If you want to diagnose your limits/fit results, you may first want to look at the HIG PAG standard checks, which are applied to all datacards and can be found here.</p> <p>If you have already found the Higgs boson but it's an exotic one, instead of computing a limit or significance you might want to extract its cross section by performing a maximum-likelihood fit. Alternatively, you might want to know how compatible your data and your model are, e.g. how strongly your nuisance parameters are constrained, to what extent they are correlated, etc. These general diagnostic tools are contained in the method <code>FitDiagnostics</code>.</p> <pre><code>    combine -M FitDiagnostics datacard.txt\n</code></pre> <p>The program will print out the result of two fits. The first one is performed with the signal strength r (or the first POI in the list, in models with multiple POIs) set to zero and a second with floating r. The output ROOT tree will contain the best fit value for r and its uncertainty. 
You will also get a <code>fitDiagnostics.root</code> file containing the following objects:</p> Object Description <code>nuisances_prefit</code> <code>RooArgSet</code> containing the pre-fit values of the nuisance parameters, and their uncertainties from the external constraint terms only <code>fit_b</code> <code>RooFitResult</code> object containing the outcome of the fit of the data with signal strength set to zero <code>fit_s</code> <code>RooFitResult</code> object containing the outcome of the fit of the data with floating signal strength <code>tree_prefit</code> <code>TTree</code> of pre-fit nuisance parameter values and constraint terms (_In) <code>tree_fit_sb</code> <code>TTree</code> of fitted nuisance parameter values and constraint terms (_In) with floating signal strength <code>tree_fit_b</code> <code>TTree</code> of fitted nuisance parameter values and constraint terms (_In) with signal strength set to 0 <p>by including the option <code>--plots</code>, you will additionally find the following contained in the ROOT file:</p> Object Description <code>covariance_fit_s</code> <code>TH2D</code> Covariance matrix of the parameters in the fit with floating signal strength <code>covariance_fit_b</code> <code>TH2D</code> Covariance matrix of the parameters in the fit with signal strength set to zero <code>category_variable_prefit</code> <code>RooPlot</code> plot of the pre-fit PDFs/templates with the data (or toy if running with <code>-t</code>) overlaid <code>category_variable_fit_b</code> <code>RooPlot</code> plot of the PDFs/templates from the background only fit with the data (or toy if running with <code>-t</code>) overlaid <code>category_variable_fit_s</code> <code>RooPlot</code> plot of the PDFs/templates from the signal+background fit with the data (or toy if running with <code>-t</code>) overlaid <p>There will be one <code>RooPlot</code> object per category in the likelihood, and one per variable if using a multi-dimensional dataset. For each of these additional objects a png file will also be produced.</p> <p>Info</p> <p>If you use the option <code>--name</code>, this additional name will be inserted into the file name for this output file.</p> <p>As well as the values of the constrained nuisance parameters (and their constraints), you will also find branches for the number of \"bad\" nll calls (which you should check is not too large) and the status of the fit <code>fit_status</code>. The fit status is computed as follows</p> <pre><code>fit_status = 100 * hesse_status + 10 * minos_status +  minuit_summary_status\n</code></pre> <p>The <code>minuit_summary_status</code> is the usual status from Minuit, details of which can be found here. For the other status values, check these documentation links for the <code>hesse_status</code> and the <code>minos_status</code>.</p> <p>A fit status of -1 indicates that the fit failed (Minuit summary was not 0 or 1) and hence the fit result is not valid.</p>"},{"location":"part3/nonstandard/#fit-options","title":"Fit options","text":"<ul> <li>If you only want to run the signal+background fit, and do not need the output file, you can run with <code>--justFit</code>. 
In case you would like to run only the signal+background fit but would like to produce the output file, you should use the option <code>--skipBOnlyFit</code> instead.</li> <li>You can use <code>--rMin</code> and <code>--rMax</code> to set the range of the first POI; a range that is not too large compared with the uncertainties you expect from the fit usually gives more stable and accurate results.</li> <li>By default, the uncertainties are computed using MINOS for the first POI and HESSE for all other parameters. For the nuisance parameters the uncertainties will therefore be symmetric. You can run MINOS for all parameters using the option <code>--minos all</code>, or for none of the parameters using <code>--minos none</code>. Note that running MINOS is slower so you should only consider using it if you think the HESSE uncertainties are not accurate.</li> <li>If MINOS or HESSE fails to converge, you can try running with <code>--robustFit=1</code>. This will do a slower, but more robust, likelihood scan, which can be further controlled with the parameter <code>--stepSize</code> (the default value is 0.1, and is relative to the range of the parameter).</li> <li>The strategy and tolerance when using the <code>--robustFit</code> option can be set using the options <code>--setRobustFitAlgo</code> (default is <code>Minuit2,migrad</code>), <code>--setRobustFitStrategy</code> (default is 0) and <code>--setRobustFitTolerance</code> (default is 0.1). If these options are not set, the defaults (set using <code>cminDefaultMinimizerX</code> options) will be used. You can also tune the accuracy of the routine used to find the crossing points of the likelihood using the option <code>--setCrossingTolerance</code> (the default is set to 0.0001).</li> <li>If you find the covariance matrix provided by HESSE is not accurate (i.e. <code>fit_s-&gt;Print()</code> reports this was forced positive-definite) then a custom HESSE-style calculation of the covariance matrix can be used instead. This is enabled by running <code>FitDiagnostics</code> with the <code>--robustHesse 1</code> option. Please note that the status reported by <code>RooFitResult::Print()</code> will contain <code>covariance matrix quality: Unknown, matrix was externally provided</code> when robustHesse is used; this is normal and does not indicate a problem. NB: one feature of the robustHesse algorithm is that if it still cannot calculate a positive-definite covariance matrix it will try to do so by dropping parameters from the hessian matrix before inverting. If this happens it will be reported in the output to the screen.</li> <li>For other fitting options see the generic minimizer options section.</li> </ul>
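 <p>As a purely illustrative example, several of the options above could be combined into a single call (the range chosen for the POI here is arbitrary and should be adapted to your analysis),</p> <pre><code>combine -M FitDiagnostics datacard.txt --rMin -5 --rMax 5 --robustFit 1 --minos none --cminDefaultMinimizerStrategy 0\n</code></pre>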
"},{"location":"part3/nonstandard/#fit-parameter-uncertainties","title":"Fit parameter uncertainties","text":"<p>If you get a warning message when running <code>FitDiagnostics</code> that says <code>Unable to determine uncertainties on all fit parameters</code>, this means the covariance matrix calculated in <code>FitDiagnostics</code> was not correct.</p> <p>The most common problem is that the covariance matrix is forced positive-definite. In this case the constraints on fit parameters as taken from the covariance matrix are incorrect and should not be used. In particular, if you want to make post-fit plots of the distribution used in the signal extraction fit and are extracting the uncertainties on the signal and background expectations from the covariance matrix, the resulting values will not reflect the truth if the covariance matrix was incorrect. By default if this happens and you passed the <code>--saveWithUncertainties</code> flag when calling <code>FitDiagnostics</code>, this option will be ignored as calculating the uncertainties would lead to incorrect results. This behaviour can be overridden by passing <code>--ignoreCovWarning</code>.</p> <p>Such problems with the covariance matrix can be caused by a number of things, for example:</p> <ul> <li> <p>Parameters being close to their boundaries after the fit.</p> </li> <li> <p>Strong (anti-) correlations between some parameters.</p> </li> <li> <p>A discontinuity in the NLL function or its derivatives at or near the minimum.</p> </li> </ul> <p>If you are aware that your analysis has any of these features, you could try to resolve them. Setting <code>--cminDefaultMinimizerStrategy 0</code> can also help with this problem.</p>"},{"location":"part3/nonstandard/#pre-and-post-fit-nuisance-parameters","title":"Pre- and post-fit nuisance parameters","text":"<p>It is possible to compare pre-fit and post-fit nuisance parameter values with the script diffNuisances.py. Taking as input a <code>fitDiagnosticsTest.root</code> file, the script will by default print out the parameters that have changed significantly with respect to their initial estimate. </p> <p>For each of those parameters, it will print out </p> <ul> <li>The shift in value and the post-fit uncertainty, both normalized to the initial (pre-fit) value from the s+b fit and the b-only fit. </li> <li>The linear correlation between the parameter and the signal strength <code>r</code> - \\(\\rho(r,\\nu)\\).</li> <li>The approximate impact of the nuisance parameter determined as \\(I(r,\\nu) = \\sigma_{r}\\sigma_{\\nu}\\rho(r,\\nu)\\), where \\(\\sigma_{r}\\) and \\(\\sigma_{\\nu}\\) are the symmetrized total uncertainties on the signal strength and nuisance parameter, respectively (see the section on Nuisance parameter impacts for our recommended calculation of impacts).</li> </ul> <p>The script has several options to toggle the thresholds used to decide whether a parameter has changed significantly, to get the printout of the absolute value of the nuisance parameters, and to get the output in another format for use on a webpage or in a note (the supported formats are <code>html</code>, <code>latex</code>, <code>twiki</code>). To print all of the parameters, use the option <code>--all</code>. </p> <p>An example of using this script is shown below, </p> <pre><code>combine data/tutorials/counting/realistic-counting-experiment.txt -M FitDiagnostics --forceRecreateNLL --rMin -1 --rMax 1\npython diffNuisances.py fitDiagnosticsTest.root\n</code></pre> Show output <code>--format text</code> (default)<code>--format html</code> <pre><code>diffNuisances run on fitDiagnosticsTest.root, at 2024-07-01 18:05:37.585109 with the following options ... {'vtol': 0.3, 'stol': 0.1, 'vtol2': 2.0, 'stol2': 0.5, 'show_all_parameters': False, 'absolute_values': False, 'poi': 'r', 'format': 'text', 'plotfile': None, 'pullDef': '', 'skipFitS': False, 'skipFitB': False, 'sortBy': 'correlation', 'regex': '.*'}\n\nname                                              b-only fit            s+b fit         rho  approx impact\nCMS_scale_t_tautau_8TeV                          -0.60, 0.40     ! 
-0.73, 0.39!     !-0.19!      -0.051\nCMS_eff_t_tt_8TeV                                +0.57, 0.32     ! +0.50, 0.32!     !-0.17!      -0.037\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_12         +0.21, 0.89        -0.20, 0.96       -0.15      -0.103\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_11         +0.71, 0.87        +0.42, 0.90       -0.15      -0.095\nCMS_htt_QCDSyst_tauTau_vbf_8TeV                  +0.16, 0.82        -0.15, 0.84       -0.12      -0.071\nCMS_htt_tt_tauTau_vbf_8TeV_ZTT_bin_6             +0.32, 0.97        +0.09, 0.99       -0.08      -0.054\nCMS_htt_QCDSyst_tauTau_1jet_high_mediumhiggs_8TeV         +0.52, 0.20     ! +0.48, 0.20!     !-0.08!      -0.011\nCMS_htt_QCDSyst_tauTau_1jet_high_highhiggs_8TeV         -0.15, 0.84        -0.33, 0.84       -0.08      -0.044\nCMS_htt_extrap_ztt_tauTau_1jet_high_mediumhiggs_8TeV         +0.34, 0.95        +0.45, 0.95       +0.07      +0.049\nCMS_htt_extrap_ztt_tauTau_vbf_8TeV               +0.31, 0.96        +0.14, 0.96       -0.06      -0.040\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_14         +0.29, 0.88        +0.24, 0.89       -0.03      -0.018\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_6         -0.67, 0.94        -0.63, 0.93       +0.02      +0.014\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_23         +0.44, 0.92        +0.46, 0.92       +0.01      +0.004\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_18         +0.44, 0.92        +0.43, 0.93       -0.01      -0.005\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_17         -0.62, 1.00        -0.61, 1.00       +0.01      +0.004\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_15         -0.55, 0.98        -0.54, 0.98       +0.01      +0.005\nCMS_htt_extrap_ztt_tauTau_1jet_high_highhiggs_8TeV         -0.34, 0.95        -0.41, 0.95       -0.01      -0.006\nCMS_htt_tt_tauTau_1jet_high_mediumhiggs_8TeV_ZTT_bin_20         -0.34, 0.99        -0.33, 0.99       +0.00      +0.003\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_5         -0.49, 1.00        -0.48, 1.00       +0.00      +0.003\nCMS_htt_tt_tauTau_1jet_high_highhiggs_8TeV_ZTT_bin_26         -0.47, 0.96        -0.46, 0.97       +0.00      +0.003\n</code></pre> <p></p> <p>By default, the changes in the nuisance parameter values and uncertainties are given relative to their initial (pre-fit) values (usually relative to initial values of 0 and 1 for most nuisance types).</p> <p>The values in the output will be \\((\\nu-\\nu_{I})/\\sigma_{I}\\) if the nuisance has a pre-fit uncertainty, otherwise they will be \\(\\nu-\\nu_{I}\\) (for example, a <code>flatParam</code> has no pre-fit uncertainty).</p> <p>The reported uncertainty will be the ratio \\(\\sigma/\\sigma_{I}\\) - i.e the ratio of the post-fit to the pre-fit uncertainty. If there is no pre-fit uncertainty (as for <code>flatParam</code> nuisances), the post-fit uncertainty is shown.</p> <p>To print the pre-fit and post-fit values and (asymmetric) uncertainties, rather than the ratios, the option <code>--abs</code> can be used.</p> <p>Info</p> <p>We recommend that you include the options <code>--abs</code> and <code>--all</code> to get the full information on all of the parameters (including unconstrained nuisance parameters) at least once when checking your datacards. 
</p>"},{"location":"part3/nonstandard/#pulls","title":"Pulls","text":"<p>If instead of the nuisance parameter values, you wish to report the pulls, you can do so using the option <code>--pullDef X</code>, with <code>X</code> being one of the options listed below. You should note that since the pulls below are only defined when the pre-fit uncertainty exists, nothing will be reported for parameters that have no prior constraint (except in the case of the <code>unconstPullAsym</code> choice as described below). You may want to run without this option and <code>--all</code> to get information about those parameters.</p> <ul> <li> <p><code>relDiffAsymErrs</code>: This is the same as the default output of the tool, except that only constrained parameters (i.e. where the pre-fit uncertainty is defined) are reported. The uncertainty is also reported and calculated as \\(\\sigma/\\sigma_{I}\\).</p> </li> <li> <p><code>unconstPullAsym</code>: Report the pull as \\(\\frac{\\nu-\\nu_{I}}{\\sigma}\\), where \\(\\nu_{I}\\) and \\(\\sigma\\) are the initial value and post-fit uncertainty of that nuisance parameter. The pull defined in this way will have no error bar, but all nuisance parameters will have a result in this case.</p> </li> <li> <p><code>compatAsym</code>: The pull is defined as \\(\\frac{\\nu-\\nu_{D}}{\\sqrt{\\sigma^{2}+\\sigma_{D}^{2}}}\\), where \\(\\nu_{D}\\) and \\(\\sigma_{D}\\) are calculated as \\(\\sigma_{D} = (\\frac{1}{\\sigma^{2}} - \\frac{1}{\\sigma_{I}^{2}})^{-1}\\) and \\(\\nu_{D} = \\sigma_{D}(\\nu - \\frac{\\nu_{I}}{\\sigma_{I}^{2}})\\). In this expression \\(\\nu_{I}\\) and \\(\\sigma_{I}\\) are the initial value and uncertainty of that nuisance parameter. This can be thought of as a compatibility between the initial measurement (prior) and an imagined measurement where only the data (with no constraint on the nuisance parameter) is used to measure the nuisance parameter. There is no error bar associated with this value.</p> </li> <li> <p><code>diffPullAsym</code>: The pull is defined as \\(\\frac{\\nu-\\nu_{I}}{\\sqrt{\\sigma_{I}^{2}-\\sigma^{2}}}\\), where \\(\\nu_{I}\\) and \\(\\sigma_{I}\\) are the pre-fit value and uncertainty (from L. Demortier and L. Lyons). If the denominator is close to 0 or the post-fit uncertainty is larger than the pre-fit (usually due to some failure in the calculation), the pull is not defined and the result will be reported as <code>0 +/- 999</code>.</p> </li> </ul> <p>If using <code>--pullDef</code>, the results for all parameters for which the pull can be calculated will be shown (i.e <code>--all</code> will be set to <code>true</code>), not just those that have moved by some metric.</p> <p>This script has the option (<code>-g outputfile.root</code>) to produce plots of the fitted values of the nuisance parameters and their post-fit, asymmetric uncertainties. Instead, the pulls defined using one of the options above, can be plotted using the option <code>--pullDef X</code>. 
In addition this will produce a plot showing a comparison between the post-fit and pre-fit (symmetrized) uncertainties on the nuisance parameters.</p> <p>Info</p> <p>In the above options, if an asymmetric uncertainty is associated with the nuisance parameter, then the choice of which uncertainty is used in the definition of the pull will depend on the sign of \\(\\nu-\\nu_{I}\\).</p>"},{"location":"part3/nonstandard/#normalizations","title":"Normalizations","text":"<p>For a certain class of models, like those made from datacards for shape-based analysis, the tool can also compute and save the best fit yields of all processes to the output ROOT file. If this feature is turned on with the option <code>--saveNormalizations</code>, the file will also contain three <code>RooArgSet</code> objects <code>norm_prefit</code>, <code>norm_fit_s</code>, and <code>norm_fit_b</code>. These each contain one <code>RooConstVar</code> for each channel <code>xxx</code> and process <code>yyy</code> with name <code>xxx/yyy</code> and value equal to the best fit yield. You can use <code>RooRealVar::getVal</code> and <code>RooRealVar::getError</code> to estimate both the post-fit (or pre-fit) values and uncertainties of these normalizations.</p> <p>The sample <code>pyROOT</code> macro mlfitNormsToText.py can be used to convert the ROOT file into a text table with four columns: channel, process, yield from the signal+background fit, and yield from the background-only fit. To include the uncertainties in the table, add the option <code>--uncertainties</code>.</p> <p>Warning</p> <p>Note that when running with multiple toys, the <code>norm_fit_s</code>, <code>norm_fit_b</code>, and <code>norm_prefit</code> objects will be stored for the last toy dataset generated and so may not be useful to you.</p> <p>Note that this procedure works only for \"extended likelihoods\" like the ones used in shape-based analysis, not for counting experiment datacards. You can however convert a counting experiment datacard to an equivalent shape-based one by adding a line <code>shapes * * FAKE</code> in the datacard after the <code>imax</code>, <code>jmax</code>, <code>kmax</code> lines. Alternatively, you can use <code>combineCards.py countingcard.txt -S &gt; shapecard.txt</code> to do this conversion.</p>"},{"location":"part3/nonstandard/#per-bin-norms-for-shape-analyses","title":"Per-bin norms for shape analyses","text":"<p>If you have a shape-based analysis, you can include the option <code>--savePredictionsPerToy</code>. With this option, additional branches will be filled in the three output trees contained in <code>fitDiagnostics.root</code>.</p> <p>The normalization values for each toy will be stored in the branches inside the <code>TTrees</code> named n_exp[_final]_binxxx_proc_yyy. The _final will only be there if there are systematic uncertainties affecting this process.</p> <p>Additionally, there will be branches that provide the value of the expected bin content for each process, in each channel. These are named n_exp[_final]_binxxx_proc_yyy_i (where _final will only be in the name if there are systematic uncertainties affecting this process) for channel <code>xxx</code>, process <code>yyy</code>, bin number <code>i</code>. 
In the case of the post-fit trees (<code>tree_fit_s/b</code>), these will be the expectations from the fitted models, while for the pre-fit tree, they will be the expectation from the generated model (i.e if running toys with <code>-t N</code> and using <code>--genNuisances</code>, they will be randomized for each toy). These can be useful, for example, for calculating correlations/covariances between different bins, in different channels or processes, within the model from toys.</p> <p>Info</p> <p>Be aware that for unbinned models, a binning scheme is adopted based on the <code>RooRealVar::getBinning</code> for the observable defining the shape, if it exists, or Combine will adopt some appropriate binning for each observable.</p>"},{"location":"part3/nonstandard/#plotting","title":"Plotting","text":"<p><code>FitDiagnostics</code> can also produce pre- and post-fit plots of the model along with the data. They will be stored in the same directory as <code>fitDiagnostics.root</code>. To obtain these, you have to specify the option <code>--plots</code>, and then optionally specify the names of the signal and background PDFs/templates, e.g. <code>--signalPdfNames='ggH*,vbfH*'</code> and <code>--backgroundPdfNames='*DY*,*WW*,*Top*'</code> (by default, the definitions of signal and background are taken from the datacard). For models with more than 1 observable, a separate projection onto each observable will be produced.</p> <p>An alternative is to use the option <code>--saveShapes</code>. This will add additional folders in <code>fitDiagnostics.root</code> for each category, with pre- and post-fit distributions of the signals and backgrounds as TH1s, and the data as <code>TGraphAsymmErrors</code> (with Poisson intervals as error bars).</p> <p>Info</p> <p>If you want to save post-fit shapes at a specific r value, add the options <code>--customStartingPoint</code> and <code>--skipSBFit</code>, and set the r value. The result will appear in shapes_fit_b, as described below.</p> <p>Three additional folders (shapes_prefit, shapes_fit_sb and shapes_fit_b ) will contain the following distributions:</p> Object Description <code>data</code> <code>TGraphAsymmErrors</code> containing the observed data (or toy data if using <code>-t</code>). The vertical error bars correspond to the 68% interval for a Poisson distribution centered on the observed count (Garwood intervals), following the recipe provided by the CMS Statistics Committee. 
<code>$PROCESS</code> (id &lt;= 0) <code>TH1F</code> for each signal process in each channel, named as in the datacard <code>$PROCESS</code> (id &gt; 0) <code>TH1F</code> for each background process in each channel, named as in the datacard <code>total_signal</code> <code>TH1F</code> Sum over the signal components <code>total_background</code> <code>TH1F</code> Sum over the background components <code>total</code> <code>TH1F</code> Sum over all of the signal and background components <p>The above distributions are provided for each channel included in the datacard, in separate subfolders, named as in the datacard: there will be one subfolder per channel.</p> <p>Warning</p> <p>The pre-fit signal is evaluated for <code>r=1</code> by default, but this can be modified using the option <code>--preFitValue</code>.</p> <p>The distributions and normalizations are guaranteed to give the correct interpretation:</p> <ul> <li> <p>For shape datacards whose inputs are <code>TH1</code>, the histograms/data points will have the bin number as the x-axis and the content of each bin will be a number of events.</p> </li> <li> <p>For datacards whose inputs are <code>RooAbsPdf</code>/<code>RooDataHist</code>s, the x-axis will correspond to the observable and the bin content will be the PDF density / events divided by the bin width. This means the absolute number of events in a given bin, i, can be obtained from <code>h.GetBinContent(i)*h.GetBinWidth(i)</code> or similar for the data graphs. Note that for unbinned analyses Combine will make a reasonable guess as to an appropriate binning.</p> </li> </ul> <p>Uncertainties on the shapes will be added with the option <code>--saveWithUncertainties</code>. These uncertainties are generated by re-sampling of the fit covariance matrix, thereby accounting for the full correlation between the parameters of the fit.</p> <p>Warning</p> <p>It may be tempting to sum up the uncertainties in each bin (in quadrature) to get the total uncertainty on a process. However, this is (usually) incorrect, as doing so would not account for correlations between the bins. Instead you can refer to the uncertainties which will be added to the post-fit normalizations described above.</p> <p>Additionally, the covariance matrix between bin yields (or yields/bin-widths) in each channel will also be saved as a <code>TH2F</code> named total_covar. If the covariance between all bins across all channels is desired, this can be added using the option <code>--saveOverallShapes</code>. Each folder will now contain additional distributions (and covariance matrices) corresponding to the concatenation of the bins in each channel (and therefore the covariance between every bin in the analysis). The bin labels should make it clear as to which bin corresponds to which channel.</p>
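 <p>As a purely illustrative example, the shape-saving options described in this section can be combined in a single <code>FitDiagnostics</code> call (the datacard name is a placeholder),</p> <pre><code>combine -M FitDiagnostics datacard.root --plots --saveShapes --saveWithUncertainties --saveNormalizations --saveOverallShapes\n</code></pre>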
"},{"location":"part3/nonstandard/#toy-by-toy-diagnostics","title":"Toy-by-toy diagnostics","text":"<p><code>FitDiagnostics</code> can also be used to diagnose the fitting procedure in toy experiments to identify potentially problematic nuisance parameters when running the full limits/p-values. This can be done by adding the option <code>-t &lt;num toys&gt;</code>. In the output file <code>fitDiagnostics.root</code>, the three <code>TTrees</code> will contain the fitted values of the constrained parameters in each toy, as a separate entry. It is recommended to use the following options when investigating toys to reduce the running time: <code>--toysFrequentist</code> <code>--noErrors</code> <code>--minos none</code></p> <p>The results can be plotted using the macro test/plotParametersFromToys.C</p> <pre><code>$ root -l\n.L plotParametersFromToys.C+\nplotParametersFromToys(\"fitDiagnosticsToys.root\",\"fitDiagnosticsData.root\",\"workspace.root\",\"r&lt;0\")\n</code></pre> <p>The first argument is the name of the output file from running with toys, and the second and third (optional) arguments are the name of the file containing the result from a fit to the data and the workspace (created from <code>text2workspace.py</code>). The fourth argument can be used to specify a cut string applied to one of the branches in the tree, which can be used to correlate strange behaviour with specific conditions. The output will be 2 pdf files (<code>tree_fit_(s)b.pdf</code>) and 2 ROOT files (<code>tree_fit_(s)b.root</code>) containing canvases of the fit results of the tool. For details on the output plots, consult AN-2012/317.</p>"},{"location":"part3/nonstandard/#scaling-constraints","title":"Scaling constraints","text":"<p>It is possible to scale the constraints on the nuisance parameters when converting the datacard to a workspace (see the section on physics models) with <code>text2workspace.py</code>. This can be useful for projection studies of the analysis to higher luminosities or with different assumptions about the sizes of certain systematics without changing the datacard by hand.</p> <p>We consider two kinds of scaling:</p> <ul> <li>A constant scaling factor to scale the constraints</li> <li>A functional scale factor that depends on some other parameters in the workspace, eg a luminosity scaling parameter (as a <code>rateParam</code> affecting all processes).</li> </ul> <p>In both cases these scalings can be introduced by adding some extra options at the <code>text2workspace.py</code> step.</p> <p>To add a constant scaling factor we use the option <code>--X-rescale-nuisance</code>, eg</p> <pre><code>text2workspace.py datacard.txt --X-rescale-nuisance '[some regular expression]' 0.5\n</code></pre> <p>will create the workspace in which every nuisance parameter whose name matches the specified regular expression will have the width of the Gaussian constraint scaled by a factor 0.5.</p> <p>Multiple <code>--X-rescale-nuisance</code> options can be specified to set different scalings for different nuisances (note that you actually have to write <code>--X-rescale-nuisance</code> each time as in <code>--X-rescale-nuisance 'theory.*' 0.5  --X-rescale-nuisance 'exp.*' 0.1</code>).</p> <p>To add a functional scaling factor we use the option <code>--X-nuisance-function</code>, which works in a similar way. Instead of a constant value you should specify a <code>RooFit</code> factory expression.</p> <p>A typical case would be scaling by \\(1/\\sqrt{L}\\), where \\(L\\) is a luminosity scale factor. For example, assuming there is some parameter in the datacard/workspace called <code>lumiscale</code>,</p> <pre><code>text2workspace.py datacard.txt --X-nuisance-function '[some regular expression]' 'expr::lumisyst(\"1/sqrt(@0)\",lumiscale[1])'\n</code></pre> <p>This factory syntax is flexible, but for our use case the typical format will be: <code>expr::[function name](\"[formula]\", [arg0], [arg1], ...)</code>. The <code>arg0</code>, <code>arg1</code> ... are represented in the formula by the <code>@0</code>, <code>@1</code>, ... placeholders.</p>
<p>Warning</p> <p>We are playing a slight trick here with the <code>lumiscale</code> parameter. At the point at which <code>text2workspace.py</code> is building these scaling terms the <code>lumiscale</code> for the <code>rateParam</code> has not yet been created. By writing <code>lumiscale[1]</code> we are telling RooFit to create this variable with an initial value of 1, and then later this will be re-used by the <code>rateParam</code> creation.</p> <p>A similar option, <code>--X-nuisance-group-function</code>, can be used to scale whole groups of nuisances (see groups of nuisances). Instead of a regular expression, just give the group name,</p> <pre><code>text2workspace.py datacard.txt --X-nuisance-group-function [group name] 'expr::lumisyst(\"1/sqrt(@0)\",lumiscale[1])'\n</code></pre>"},{"location":"part3/nonstandard/#nuisance-parameter-impacts","title":"Nuisance parameter impacts","text":"<p>The impact of a nuisance parameter (NP) \u03b8 on a parameter of interest (POI) \u03bc is defined as the shift \u0394\u03bc that is induced as \u03b8 is fixed and brought to its +1\u03c3 or \u22121\u03c3 post-fit values, with all other parameters profiled as normal (see JHEP 01 (2015) 069 for a description of this method).</p> <p>This is effectively a measure of the correlation between the NP and the POI, and is useful for determining which NPs have the largest effect on the POI uncertainty.</p> <p>It is possible to use the <code>MultiDimFit</code> method of Combine with the option <code>--algo impact -P parameter</code> to calculate the impact of a particular nuisance parameter on the parameter(s) of interest. We will use the <code>combineTool.py</code> script to automate the fits.</p> <p>We will use an example workspace from the \\(H\\rightarrow\\tau\\tau\\) datacard,</p> <pre><code>$ cp HiggsAnalysis/CombinedLimit/data/tutorials/htt/125/htt_tt.txt .\n$ text2workspace.py htt_tt.txt -m 125\n</code></pre> <p>Calculating the impacts is done in a few stages. First we just fit for each POI, using the <code>--doInitialFit</code> option with <code>combineTool.py</code>, and adding the <code>--robustFit 1</code> option that will be passed through to Combine,</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 --doInitialFit --robustFit 1\n</code></pre> <p>Have a look at the options for likelihood scans when using <code>--robustFit 1</code>.</p> <p>Next we perform a similar scan for each nuisance parameter with the <code>--doFits</code> option,</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 --robustFit 1 --doFits\n</code></pre> <p>Note that this will run approximately 60 scans, and to speed things up the option <code>--parallel X</code> can be given to run X Combine jobs simultaneously. The batch and grid submission methods described in the combineTool for job submission section can also be used.</p> <p>Once all jobs are completed, the output can be collected and written into a json file:</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 -o impacts.json\n</code></pre> <p>A plot summarizing the nuisance parameter values and impacts can be made with <code>plotImpacts.py</code>,</p> <pre><code>plotImpacts.py -i impacts.json -o impacts\n</code></pre> <p>The first page of the output is shown below. Note that in these figures, the nuisance parameters are labelled as \\(\\theta\\) instead of \\(\\nu\\).</p> <p></p>
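 <p>For reference, the full chain of commands for this example, with the individual fits parallelised over four local jobs (a choice that is purely illustrative), is</p> <pre><code>combineTool.py -M Impacts -d htt_tt.root -m 125 --doInitialFit --robustFit 1\ncombineTool.py -M Impacts -d htt_tt.root -m 125 --robustFit 1 --doFits --parallel 4\ncombineTool.py -M Impacts -d htt_tt.root -m 125 -o impacts.json\nplotImpacts.py -i impacts.json -o impacts\n</code></pre>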
 <p>The direction of the +1\u03c3 and -1\u03c3 impacts (i.e. when the NP is moved to its +1\u03c3 or -1\u03c3 values) on the POI indicates whether the parameter is correlated or anti-correlated with it.</p> <p>For models with multiple POIs, the Combine option <code>--redefineSignalPOIs X,Y,Z...</code> should be specified in all three of the <code>combineTool.py -M Impacts [...]</code> steps above. The final step will produce the <code>impacts.json</code> file which will contain the impacts for all the specified POIs. In the <code>plotImpacts.py</code> script, a particular POI can be specified with <code>--POI X</code>.</p> <p>Warning</p> <p>The plot also shows the best fit value of the POI at the top and its uncertainty. You may wish to allow the range to go negative (i.e. using <code>--setParameterRanges</code> or <code>--rMin</code>) to avoid getting one-sided impacts!</p> <p>This script also accepts an optional json-file argument with <code>-t</code>, which can be used to provide a dictionary for renaming parameters. A simple example would be to create a file <code>rename.json</code>,</p> <pre><code>{\n  \"r\" : \"#mu\"\n}\n</code></pre> <p>that will rename the POI label on the plot.</p> <p>Info</p> <p>Since <code>combineTool</code> accepts the usual options for Combine, you can also generate the impacts on an Asimov or toy dataset.</p> <p>The left panel in the summary plot shows the value of \\((\\nu-\\nu_{0})/\\Delta_{\\nu}\\) where \\(\\nu\\) and \\(\\nu_{0}\\) are the post and pre-fit values of the nuisance parameter and \\(\\Delta_{\\nu}\\) is the pre-fit uncertainty. The asymmetric error bars show the post-fit uncertainty divided by the pre-fit uncertainty, meaning that parameters with error bars smaller than \\(\\pm 1\\) are constrained in the fit. The pull will additionally be shown. As with the <code>diffNuisances.py</code> script, the option <code>--pullDef</code> can be used (to modify the definition of the pull that is shown).</p>"},{"location":"part3/nonstandard/#breakdown-of-uncertainties","title":"Breakdown of uncertainties","text":"<p>Often you will want to report the breakdown of your total (systematic) uncertainty on a measured parameter due to one or more groups of nuisance parameters. For example, these groups could be theory uncertainties, trigger uncertainties, ... The procedure to do this in Combine is to sequentially freeze groups of nuisance parameters and subtract (in quadrature) from the total uncertainty. Below are the steps to do so. We will use the <code>data/tutorials/htt/125/htt_tt.txt</code> datacard for this.</p> <ol> <li>Add groups to the datacard to group nuisance parameters. Nuisance parameters not in groups will be considered as \"rest\" in the later steps. The lines should look like the following and you should add them to the end of the datacard</li> </ol>
<pre><code>theory      group = QCDscale_VH QCDscale_ggH1in QCDscale_ggH2in QCDscale_qqH UEPS pdf_gg pdf_qqbar\ncalibration group = CMS_scale_j_8TeV CMS_scale_t_tautau_8TeV CMS_htt_scale_met_8TeV\nefficiency  group = CMS_eff_b_8TeV   CMS_eff_t_tt_8TeV CMS_fake_b_8TeV\n</code></pre> <ol> <li> <p>Create the workspace with <code>text2workspace.py data/tutorials/htt/125/htt_tt.txt -m 125</code>.</p> </li> <li> <p>Run a fit with all nuisance parameters floating and store the workspace in an output file - <code>combine data/tutorials/htt/125/htt_tt.root -M MultiDimFit --saveWorkspace -n htt.postfit</code></p> </li> <li> <p>Run a scan from the post-fit workspace</p> </li> </ol> <pre><code>combine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit -n htt.total --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4\n</code></pre> <ol> <li>Run additional scans using the post-fit workspace, sequentially adding another group to the list of groups to freeze</li> </ol> <pre><code>combine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeNuisanceGroups theory -n htt.freeze_theory\n\ncombine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeNuisanceGroups theory,calibration -n htt.freeze_theory_calibration\n\ncombine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeNuisanceGroups theory,calibration,efficiency -n htt.freeze_theory_calibration_efficiency\n</code></pre> <ol> <li>Run one last scan freezing all of the constrained nuisance parameters (this represents the statistical uncertainty only).</li> </ol> <pre><code>combine higgsCombinehtt.postfit.MultiDimFit.mH120.root -M MultiDimFit --algo grid --snapshotName MultiDimFit --setParameterRanges r=0,4  --freezeParameters allConstrainedNuisances -n htt.freeze_all\n</code></pre> <ol> <li>Use the <code>combineTool</code> script <code>plot1DScan.py</code> to report the breakdown of uncertainties.</li> </ol> <pre><code>plot1DScan.py higgsCombinehtt.total.MultiDimFit.mH120.root --main-label \"Total Uncert.\"  --others higgsCombinehtt.freeze_theory.MultiDimFit.mH120.root:\"freeze theory\":4 higgsCombinehtt.freeze_theory_calibration.MultiDimFit.mH120.root:\"freeze theory+calibration\":7 higgsCombinehtt.freeze_theory_calibration_efficiency.MultiDimFit.mH120.root:\"freeze theory+calibration+efficiency\":2 higgsCombinehtt.freeze_all.MultiDimFit.mH120.root:\"stat only\":6  --output breakdown --y-max 10 --y-cut 40 --breakdown \"theory,calibration,efficiency,rest,stat\"\n</code></pre> <p>The final step calculates the contribution of each group of nuisance parameters as the subtraction in quadrature of each scan from the previous one. This procedure guarantees that the sum in quadrature of the individual components is the same as the total uncertainty.</p> <p>The plot below is produced,</p> <p></p> <p>Warning</p> <p>While the above procedure is guaranteed to have the effect that the sum in quadrature of the breakdown will equal the total uncertainty, the order in which you freeze the groups can make a difference due to correlations induced by the fit. 
You should check if the answers change significantly if changing the order and we recommend you start with the largest group (in terms of overall contribution to the uncertainty) first, working down the list in order of the size of the contribution.</p>"},{"location":"part3/nonstandard/#channel-masking","title":"Channel Masking","text":"<p>The Combine tool has a number of features for diagnostics and plotting results of fits. It can often be useful to turn off particular channels in a combined analysis to see how constraints/shifts in parameter values can vary. It can also be helpful to plot the post-fit shapes and uncertainties of a particular channel (for example a signal region) without including the constraints from the data in that region.</p> <p>This can in some cases be achieved by removing a specific datacard when running <code>combineCards.py</code>. However, when doing so, the information of particular nuisance parameters and PDFs in that region will be lost. Instead, it is possible to mask that channel from the likelihood. This is achieved at the <code>text2Workspace</code> step using the option <code>--channel-masks</code>.</p>"},{"location":"part3/nonstandard/#example-removing-constraints-from-the-signal-region","title":"Example: removing constraints from the signal region","text":"<p>We will take the control region example from the rate parameters tutorial from data/tutorials/rate_params/.</p> <p>The first step is to combine the cards combineCards.py signal=signal_region.txt dimuon=dimuon_control_region.txt singlemuon=singlemuon_control_region.txt &gt; datacard.txt</p> <p>Note that we use the directive <code>CHANNELNAME=CHANNEL_DATACARD.txt</code> so that the names of the channels are under our control and easier to interpret. Next, we make a workspace and tell Combine to create the parameters used to mask channels</p> <pre><code>text2workspace.py datacard.txt --channel-masks\n</code></pre> <p>Now we will try to do a fit ignoring the signal region. We can turn off the signal region by setting the corresponding channel mask parameter to 1: <code>--setParameters mask_signal=1</code>. Note that <code>text2workspace</code> has created a masking parameter for every channel with the naming scheme mask_CHANNELNAME. By default, every parameter is set to 0 so that the channel is unmasked by default.</p> <pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUncertainties --setParameters mask_signal=1\n</code></pre> <p>Warning</p> <p>There will be a lot of warnings from Combine. These are safe to ignore as they are due to the s+b fit not converging. This is expected as the free signal parameter cannot be constrained because the data in the signal region is being ignored.</p> <p>We can compare the post-fit background and uncertainties with and without the signal region included by re-running with <code>--setParameters mask_signal=0</code> (or just removing that option completely). Below is a comparison of the background in the signal region with and without masking the data in the signal region. We take these from the shapes folder shapes_fit_b/signal/total_background in the <code>fitDiagnostics.root</code> output.</p> <p></p> <p>Clearly the background shape is different and much less constrained without including the signal region, as expected. 
Channel masking can be used with any method in Combine.</p>"},{"location":"part3/nonstandard/#roomultipdf-conventional-bias-studies","title":"RooMultiPdf conventional bias studies","text":"<p>Several analyses in CMS use a functional form to describe the background. This functional form is fit to the data. Often however, there is some uncertainty associated with the choice of which background function to use, and this choice will impact the fit results. It is therefore often the case that in these analyses, a bias study is performed. This study will give an indication of the size of the potential bias in the result, given a certain choice of functional form. These studies can be conducted using Combine.</p> <p>Below is an example script that will produce a workspace based on a simplified Higgs to diphoton (Hgg) analysis with a single category. It will produce the data and PDFs necessary for this example, and you can use it as a basis to construct your own studies.</p> <pre><code>void makeRooMultiPdfWorkspace(){\n\n   // Load the combine Library\n   gSystem-&gt;Load(\"libHiggsAnalysisCombinedLimit.so\");\n\n   // mass variable\n   RooRealVar mass(\"CMS_hgg_mass\",\"m_{#gamma#gamma}\",120,100,180);\n\n\n   // create 3 background pdfs\n   // 1. exponential\n   RooRealVar expo_1(\"expo_1\",\"slope of exponential\",-0.02,-0.1,-0.0001);\n   RooExponential exponential(\"exponential\",\"exponential pdf\",mass,expo_1);\n\n   // 2. polynomial with 2 parameters\n   RooRealVar poly_1(\"poly_1\",\"T1 of chebychev polynomial\",0,-3,3);\n   RooRealVar poly_2(\"poly_2\",\"T2 of chebychev polynomial\",0,-3,3);\n   RooChebychev polynomial(\"polynomial\",\"polynomial pdf\",mass,RooArgList(poly_1,poly_2));\n\n   // 3. A power law function\n   RooRealVar pow_1(\"pow_1\",\"exponent of power law\",-3,-6,-0.0001);\n   RooGenericPdf powerlaw(\"powerlaw\",\"TMath::Power(@0,@1)\",RooArgList(mass,pow_1));\n\n   // Generate some data (lets use the power lay function for it)\n   // Here we are using unbinned data, but binning the data is also fine\n   RooDataSet *data = powerlaw.generate(mass,RooFit::NumEvents(1000));\n\n   // First we fit the pdfs to the data (gives us a sensible starting value of parameters for, e.g - blind limits)\n   exponential.fitTo(*data);   // index 0\n   polynomial.fitTo(*data);   // index 1\n   powerlaw.fitTo(*data);     // index 2\n\n   // Make a plot (data is a toy dataset)\n   RooPlot *plot = mass.frame();   data-&gt;plotOn(plot);\n   exponential.plotOn(plot,RooFit::LineColor(kGreen));\n   polynomial.plotOn(plot,RooFit::LineColor(kBlue));\n   powerlaw.plotOn(plot,RooFit::LineColor(kRed));\n   plot-&gt;SetTitle(\"PDF fits to toy data\");\n   plot-&gt;Draw();\n\n   // Make a RooCategory object. This will control which of the pdfs is \"active\"\n   RooCategory cat(\"pdf_index\",\"Index of Pdf which is active\");\n\n   // Make a RooMultiPdf object. 
The order of the pdfs will be the order of their index, ie for below\n   // 0 == exponential\n   // 1 == polynomial\n   // 2 == powerlaw\n   RooArgList mypdfs;\n   mypdfs.add(exponential);\n   mypdfs.add(polynomial);\n   mypdfs.add(powerlaw);\n\n   RooMultiPdf multipdf(\"roomultipdf\",\"All Pdfs\",cat,mypdfs);\n   // By default the multipdf will tell combine to add 0.5 to the nll for each parameter (this is the penalty for the discrete profiling method)\n   // It can be changed with\n   //   multipdf.setCorrectionFactor(penalty)\n   // For bias-studies, this isn;t relevant however, so lets just leave the default\n\n   // As usual make an extended term for the background with _norm for freely floating yield\n   RooRealVar norm(\"roomultipdf_norm\",\"Number of background events\",1000,0,10000);\n\n   // We will also produce a signal model for the bias studies\n   RooRealVar sigma(\"sigma\",\"sigma\",1.2); sigma.setConstant(true);\n   RooRealVar MH(\"MH\",\"MH\",125); MH.setConstant(true);\n   RooGaussian signal(\"signal\",\"signal\",mass,MH,sigma);\n\n\n   // Save to a new workspace\n   TFile *fout = new TFile(\"workspace.root\",\"RECREATE\");\n   RooWorkspace wout(\"workspace\",\"workspace\");\n\n   data-&gt;SetName(\"data\");\n   wout.import(*data);\n   wout.import(cat);\n   wout.import(norm);\n   wout.import(multipdf);\n   wout.import(signal);\n   wout.Print();\n   wout.Write();\n}\n</code></pre> <p>The signal is modelled as a simple Gaussian with a width approximately that of the diphoton resolution. For the background there is a choice of 3 functions: an exponential, a power-law, and a 2nd order polynomial. This choice is accessible within Combine through the use of the RooMultiPdf object, which can switch between the functions by setting their associated indices (herein called pdf_index). This (as with all parameters in Combine) can be set via the <code>--setParameters</code> option.</p> <p>To assess the bias, one can throw toys using one function and fit with another. To do this, only a single datacard is needed: hgg_toy_datacard.txt.</p> <p>The bias studies are performed in two stages. The first is to generate toys using one of the functions, under some value of the signal strength r (or \\(\\mu\\)). This can be repeated for several values of r and also at different masses, but in this example the Higgs boson mass is fixed to 125 GeV.</p> <pre><code>    combine hgg_toy_datacard.txt -M GenerateOnly --setParameters pdf_index=0 --toysFrequentist -t 100 --expectSignal 1 --saveToys -m 125 --freezeParameters pdf_index\n</code></pre> <p>Warning</p> <p>It is important to freeze <code>pdf_index</code>, otherwise Combine will try to iterate over the index in the frequentist fit.</p> <p>Now we have 100 toys which, by setting <code>pdf_index=0</code>, sets the background PDF to the exponential function. This means we assume that the exponential is the true function. Note that the option <code>--toysFrequentist</code> is added; this first performs a fit of the PDF, assuming a signal strength of 1, to the data before generating the toys. This is the most obvious choice as to where to throw the toys from.</p> <p>The next step is to fit the toys under a different background PDF hypothesis. 
This time we set the <code>pdf_index</code> to 1, which selects the polynomial, and run fits with the <code>FitDiagnostics</code> method, again freezing <code>pdf_index</code>.</p> <pre><code>    combine hgg_toy_datacard.txt -M FitDiagnostics  --setParameters pdf_index=1 --toysFile higgsCombineTest.GenerateOnly.mH125.123456.root  -t 100 --rMin -10 --rMax 10 --freezeParameters pdf_index --cminDefaultMinimizerStrategy=0\n</code></pre> <p>Note how we add the option <code>--cminDefaultMinimizerStrategy=0</code>. This is because we do not need the Hessian, as <code>FitDiagnostics</code> will run MINOS to get the uncertainty on <code>r</code>. If we do not do this, Minuit will think the fit failed as we have parameters (those not attached to the current PDF) for which the likelihood is flat.</p> <p>Warning</p> <p>You may get warnings about non-accurate errors such as <code>[WARNING]: Unable to determine uncertainties on all fit parameters in b-only fit</code> - These can be ignored since they are related to the free parameters of the background PDFs which are not active.</p> <p>In the output file <code>fitDiagnostics.root</code> there is a tree that contains the best fit results under the signal+background hypothesis. One measure of the bias is the pull, defined as the difference between the measured value of \\(\\mu\\) and the generated value (here we used 1), relative to the uncertainty on \\(\\mu\\). The pull distribution can be drawn and its mean provides an estimate of the bias. In this example, we are averaging the positive and negative uncertainties, but we could do something smarter if the uncertainties are very asymmetric.</p> <pre><code>root -l fitDiagnostics.root\ntree_fit_sb-&gt;Draw(\"(r-1)/(0.5*(rHiErr+rLoErr))&gt;&gt;h(20,-5,5)\")\nh-&gt;Fit(\"gaus\")\n</code></pre> <p></p> <p>From the fitted Gaussian, we see the mean is at -1.29, which would indicate a bias of 129% of the uncertainty on \\(\\mu\\) from choosing the polynomial when the true function is an exponential.</p>"},{"location":"part3/nonstandard/#discrete-profiling","title":"Discrete profiling","text":"<p>If the <code>discrete</code> nuisance is left floating, it will be profiled by looping through the possible index values and finding the PDF that gives the best fit. This allows for the discrete profiling method to be applied for any method which involves a profiled likelihood (frequentist methods).</p> <p>Warning</p> <p>You should be careful since MINOS knows nothing about the discrete nuisances and hence estimations of uncertainties will be incorrect via MINOS. Instead, uncertainties from scans and limits will correctly account for these nuisance parameters. Currently the Bayesian methods will not properly treat the nuisance parameters, so some care should be taken when interpreting Bayesian results.</p> <p>As an example, we can perform a likelihood scan as a function of the Higgs boson signal strength in the toy Hgg datacard. By leaving the object <code>pdf_index</code> non-constant, at each point in the likelihood scan, the PDFs will be iterated over and the one that gives the lowest -2 times log-likelihood, including the correction factor \\(c\\) (as defined in the paper linked above) will be stored in the output tree. We can also check the scan when we fix the index to each PDF individually, to check that the envelope is achieved. For this, you will need to include the option <code>--X-rtd REMOVE_CONSTANT_ZERO_POINT=1</code>. 
In this way, we can take a look at the absolute value to compare the curves, if we also include <code>--saveNLL</code>.</p> <p>For example for a full scan, you can run</p> <pre><code>    combine -M MultiDimFit -d hgg_toy_datacard.txt --algo grid --setParameterRanges r=-1,3 --cminDefaultMinimizerStrategy 0 --saveNLL -n Envelope -m 125 --setParameters myIndex=-1 --X-rtd REMOVE_CONSTANT_ZERO_POINT=1\n</code></pre> <p>and for the individual <code>pdf_index</code> set to <code>X</code>,</p> <pre><code>    combine -M MultiDimFit -d hgg_toy_datacard.txt --algo grid --setParameterRanges r=-1,3 --cminDefaultMinimizerStrategy 0 --saveNLL --freezeParameters pdf_index --setParameters pdf_index=X -n fixed_pdf_X -m 125 --X-rtd REMOVE_CONSTANT_ZERO_POINT=1\n</code></pre> <p>for <code>X=0,1,2</code></p> <p>You can then plot the value of <code>2*(deltaNLL+nll+nll0)</code> to plot the absolute value of (twice) the negative log-likelihood, including the correction term for extra parameters in the different PDFs.</p> <p>The above output will produce the following scans. </p> <p>As expected, the curve obtained by allowing the <code>pdf_index</code> to float (labelled \"Envelope\") picks out the best function (maximum corrected likelihood) for each value of the signal strength.</p> <p>In general, the performance of Combine can be improved when using the discrete profiling method by including the option <code>--X-rtd MINIMIZER_freezeDisassociatedParams</code>. This will stop parameters not associated to the current PDF from floating in the fits. Additionally, you can include the following options:</p> <ul> <li><code>--X-rtd MINIMIZER_multiMin_hideConstants</code>: hide the constant terms in the likelihood when recreating the minimizer</li> <li><code>--X-rtd MINIMIZER_multiMin_maskConstraints</code>: hide the constraint terms during the discrete minimization process</li> <li><code>--X-rtd MINIMIZER_multiMin_maskChannels=&lt;choice&gt;</code> mask the channels that are not needed from the NLL:</li> <li><code>&lt;choice&gt; 1</code>: keeps unmasked all channels that are participating in the discrete minimization.</li> <li><code>&lt;choice&gt; 2</code>: keeps unmasked only the channel whose index is being scanned at the moment.</li> </ul> <p>You may want to check with the Combine development team if you are using these options, as they are somewhat for expert use.</p>"},{"location":"part3/nonstandard/#roosplinend-multidimensional-splines","title":"RooSplineND multidimensional splines","text":"<p>RooSplineND can be used to interpolate from a tree of points to produce a continuous function in N-dimensions. This function can then be used as input to workspaces allowing for parametric rates/cross-sections/efficiencies. It can also be used to up-scale the resolution of likelihood scans (i.e like those produced from Combine) to produce smooth contours.</p> <p>The spline makes use of a radial basis decomposition to produce a continous \\(N \\to 1\\) map (function) from \\(M\\) provided sample points. The function of the \\(N\\) variables \\(\\vec{x}\\) is assumed to be of the form,</p> \\[ f(\\vec{x}) = \\sum_{i=1}^{M}w_{i}\\phi(||\\vec{x}-\\vec{x}_{i}||), \\] <p>where \\(\\phi(||\\vec{z}||) = e^{-\\frac{||\\vec{z}||}{\\epsilon^{2}}}\\). 
The distance \\(||.||\\) between two points is given by,</p> \\[ ||\\vec{x}-\\vec{y}||  = \\sum_{j=1}^{N}(x_{j}-y_{j})^{2}, \\] <p>if the option <code>rescale=false</code> and,</p> \\[ ||\\vec{x}-\\vec{y}||  = \\sum_{j=1}^{N} M^{1/N} \\cdot \\left( \\frac{ x_{j}-y_{j} }{ \\mathrm{max_{i=1,M}}(x_{i,j})-\\mathrm{min_{i=1,M}}(x_{i,j}) }\\right)^{2}, \\] <p>if the option <code>rescale=true</code>. Given the sample points, it is possible to determine the weights \\(w_{i}\\) as the solution of the set of equations,</p> \\[ \\sum_{i=1}^{M}w_{i}\\phi(||\\vec{x}_{j}-\\vec{x}_{i}||) = f(\\vec{x}_{j}). \\] <p>The solution is obtained using the <code>eigen</code> c++ package.</p> <p>The typical constructor of the object is as follows;</p> <pre><code>RooSplineND(const char *name, const char *title, RooArgList &amp;vars, TTree *tree, const char* fName=\"f\", double eps=3., bool rescale=false, std::string cutstring=\"\" ) ;\n</code></pre> <p>where the arguments are:</p> <ul> <li><code>vars</code>: A <code>RooArgList</code> of <code>RooRealVars</code> representing the \\(N\\) dimensions of the spline. The length of this list determines the dimension \\(N\\) of the spline.</li> <li><code>tree</code>: a TTree pointer where each entry represents a sample point used to construct the spline. The branch names must correspond to the names of the variables in <code>vars</code>.</li> <li><code>fName</code>: is a string representing the name of the branch to interpret as the target function \\(f\\).</li> <li><code>eps</code> : is the value of \\(\\epsilon\\) and represents the width of the basis functions \\(\\phi\\).</li> <li><code>rescale</code> : is an option to rescale the input sample points so that each variable has roughly the same range (see above in the definition of \\(||.||\\)).</li> <li><code>cutstring</code> : a string to remove sample points from the tree. Can be any typical cut string (eg \"var1&gt;10 &amp;&amp; var2&lt;3\").</li> </ul> <p>The object can be treated as a <code>RooAbsArg</code>; its value for the current values of the parameters is obtained as usual by using the <code>getVal()</code> method.</p> <p>Warning</p> <p>You should not include more variable branches than contained in <code>vars</code> in the tree, as the spline will interpret them as additional sample points. You will get a warning if there are two nearby points in the input samples and this will cause a failure in determining the weights. 
If you cannot create a reduced tree, you can remove entries by using the <code>cutstring</code>.</p> <p>The following script is an example that produces a 2D spline (<code>N=2</code>) from a set of 400 points (<code>M=400</code>) generated from a function.</p> Show script <pre><code>void splinend(){\n   // library containing the RooSplineND\n   gSystem-&gt;Load(\"libHiggsAnalysisCombinedLimit.so\");\n\n   TTree *tree = new TTree(\"tree_vals\",\"tree_vals\");\n   float xb,yb,fb;\n\n   tree-&gt;Branch(\"f\",&amp;fb,\"f/F\");\n   tree-&gt;Branch(\"x\",&amp;xb,\"x/F\");\n   tree-&gt;Branch(\"y\",&amp;yb,\"y/F\");\n\n   TRandom3 *r = new TRandom3();\n   int nentries = 20; // just use a regular grid of 20x20=400 points\n\n   double xmin = -3.2;\n   double xmax = 3.2;\n   double ymin = -3.2;\n   double ymax = 3.2;\n\n   for (int n=0;n&lt;nentries;n++){\n    for (int k=0;k&lt;nentries;k++){\n\n      xb=xmin+n*((xmax-xmin)/nentries);\n      yb=ymin+k*((ymax-ymin)/nentries);\n      // Gaussian * cosine function radial in \"F(x^2+y^2)\"\n      double R = (xb*xb)+(yb*yb);\n      fb = 0.1*TMath::Exp(-1*(R)/9)*TMath::Cos(2.5*TMath::Sqrt(R));\n      tree-&gt;Fill();\n     }\n   }\n\n   // 2D graph of points in tree\n   TGraph2D *p0 = new TGraph2D();\n   p0-&gt;SetMarkerSize(0.8);\n   p0-&gt;SetMarkerStyle(20);\n\n   int c0=0;\n   for (int p=0;p&lt;tree-&gt;GetEntries();p++){\n        tree-&gt;GetEntry(p);\n        p0-&gt;SetPoint(c0,xb,yb,fb);\n        c0++;\n        }\n\n\n   // ------------------------------ THIS IS WHERE WE BUILD THE SPLINE ------------------------ //\n   // Create 2 Real-vars, one for each of the parameters of the spline\n   // The variables MUST be named the same as the corresponding branches in the tree\n   RooRealVar x(\"x\",\"x\",0.1,xmin,xmax);\n   RooRealVar y(\"y\",\"y\",0.1,ymin,ymax);\n\n\n   // And the spline - arguments are\n   // Required -&gt;   name, title, arglist of dependants, input tree,\n   // Optional -&gt;  function branch name, interpolation width (tunable parameter), rescale Axis bool, cutstring\n   // The tunable parameter gives the radial basis a \"width\", over which the interpolation will be effectively taken\n\n   // the reascale Axis bool (if true) will first try to rescale the points so that they are of order 1 in range\n   // This can be helpful if for example one dimension is in much larger units than another.\n\n   // The cutstring is just a ROOT string which can be used to apply cuts to the tree in case only a sub-set of the points should be used\n\n   RooArgList args(x,y);\n   RooSplineND *spline = new RooSplineND(\"spline\",\"spline\",args,tree,\"f\",1,true);\n      // ----------------------------------------------------------------------------------------- //\n\n\n   //TGraph *gr = spline-&gt;getGraph(\"x\",0.1); // Return 1D graph. 
Will be a slice of the spline for fixed y generated at steps of 0.1\n\n   // Plot the 2D spline\n   TGraph2D *gr = new TGraph2D();\n   int pt = 0;\n   for (double xx=xmin;xx&lt;xmax;xx+=0.1){\n     for (double yy=xmin;yy&lt;ymax;yy+=0.1){\n        x.setVal(xx);\n        y.setVal(yy);\n        gr-&gt;SetPoint(pt,xx,yy,spline-&gt;getVal());\n        pt++;\n     }\n   }\n\n   gr-&gt;SetTitle(\"\");\n\n   gr-&gt;SetLineColor(1);\n   //p0-&gt;SetTitle(\"0.1 exp(-(x{^2}+y{^2})/9) #times Cos(2.5#sqrt{x^{2}+y^{2}})\");\n   gr-&gt;Draw(\"surf\");\n   gr-&gt;GetXaxis()-&gt;SetTitle(\"x\");\n   gr-&gt;GetYaxis()-&gt;SetTitle(\"y\");\n   p0-&gt;Draw(\"Pcolsame\");\n\n   //p0-&gt;Draw(\"surfsame\");\n   TLegend *leg = new TLegend(0.2,0.82,0.82,0.98);\n   leg-&gt;SetFillColor(0);\n   leg-&gt;AddEntry(p0,\"0.1 exp(-(x{^2}+y{^2})/9) #times Cos(2.5#sqrt{x^{2}+y^{2}})\",\"p\");\n   leg-&gt;AddEntry(gr,\"RooSplineND (N=2) interpolation\",\"L\");\n   leg-&gt;Draw();\n}\n</code></pre> <p>Running the script will produce the following plot. The plot shows the sampled points and the spline produced from them.</p> <p></p>"},{"location":"part3/nonstandard/#rooparametrichist-gamman-for-shapes","title":"RooParametricHist gammaN for shapes","text":"<p>Currently, there is no straightforward implementation of using per-bin gmN-like uncertainties with shape (histogram) analyses. Instead, it is possible to tie control regions (written as datacards) with the signal region using three methods.</p> <p>For analyses that take the normalization of some process from a control region, it is possible to use either lnU or rateParam directives to float the normalization in a correlated way of some process between two regions. Instead if each bin is intended to be determined via a control region, one can use a number of <code>RooFit</code> histogram PDFs/functions to accomplish this. The example below shows a simple implementation of a RooParametricHist to achieve this.</p> <p>Copy the script below into a file called <code>examplews.C</code> and create the input workspace using <code>root -l examplews.C</code>...</p> Show script <pre><code>void examplews(){\n    // As usual, load the combine library to get access to the RooParametricHist\n    gSystem-&gt;Load(\"libHiggsAnalysisCombinedLimit.so\");\n\n    // Output file and workspace\n    TFile *fOut = new TFile(\"param_ws.root\",\"RECREATE\");\n    RooWorkspace wspace(\"wspace\",\"wspace\");\n\n    // better to create the bins rather than use the \"nbins,min,max\" to avoid spurious warning about adding bins with different\n    // ranges in combine - see https://root-forum.cern.ch/t/attempt-to-divide-histograms-with-different-bin-limits/17624/3 for why!\n    const int nbins = 4;\n    double xmin=200.;\n    double xmax=1000.;\n    double xbins[5] = {200.,400.,600.,800.,1000.};\n\n    // A search in a MET tail, define MET as our variable\n\n    RooRealVar met(\"met\",\"E_{T}^{miss}\",200,xmin,xmax);\n    RooArgList vars(met);\n\n\n    // ---------------------------- SIGNAL REGION -------------------------------------------------------------------//\n    // Make a dataset, this will be just four bins in MET.\n    // its easiest to make this from a histogram. 
Set the contents to \"somehting\"\n    TH1F data_th1(\"data_obs_SR\",\"Data observed in signal region\",nbins,xbins);\n\n    data_th1.SetBinContent(1,100);\n    data_th1.SetBinContent(2,50);\n    data_th1.SetBinContent(3,25);\n    data_th1.SetBinContent(4,10);\n    RooDataHist data_hist(\"data_obs_SR\",\"Data observed\",vars,&amp;data_th1);\n    wspace.import(data_hist);\n\n    // In the signal region, our background process will be freely floating,\n    // Create one parameter per bin representing the yield. (note of course we can have multiple processes like this)\n    RooRealVar bin1(\"bkg_SR_bin1\",\"Background yield in signal region, bin 1\",100,0,500);\n    RooRealVar bin2(\"bkg_SR_bin2\",\"Background yield in signal region, bin 2\",50,0,500);\n    RooRealVar bin3(\"bkg_SR_bin3\",\"Background yield in signal region, bin 3\",25,0,500);\n    RooRealVar bin4(\"bkg_SR_bin4\",\"Background yield in signal region, bin 4\",10,0,500);\n    RooArgList bkg_SR_bins;\n    bkg_SR_bins.add(bin1);\n    bkg_SR_bins.add(bin2);\n    bkg_SR_bins.add(bin3);\n    bkg_SR_bins.add(bin4);\n\n    // Create a RooParametericHist which contains those yields, last argument is just for the binning,\n    // can use the data TH1 for that\n    RooParametricHist p_bkg(\"bkg_SR\", \"Background PDF in signal region\",met,bkg_SR_bins,data_th1);\n    // Always include a _norm term which should be the sum of the yields (thats how combine likes to play with pdfs)\n    RooAddition p_bkg_norm(\"bkg_SR_norm\",\"Total Number of events from background in signal region\",bkg_SR_bins);\n\n    // Every signal region needs a signal\n    TH1F signal_th1(\"signal_SR\",\"Signal expected in signal region\",nbins,xbins);\n\n    signal_th1.SetBinContent(1,1);\n    signal_th1.SetBinContent(2,2);\n    signal_th1.SetBinContent(3,3);\n    signal_th1.SetBinContent(4,8);\n    RooDataHist signal_hist(\"signal\",\"Data observed\",vars,&amp;signal_th1);\n    wspace.import(signal_hist);\n\n    // -------------------------------------------------------------------------------------------------------------//\n    // ---------------------------- CONTROL REGION -----------------------------------------------------------------//\n    TH1F data_CRth1(\"data_obs_CR\",\"Data observed in control region\",nbins,xbins);\n\n    data_CRth1.SetBinContent(1,200);\n    data_CRth1.SetBinContent(2,100);\n    data_CRth1.SetBinContent(3,50);\n    data_CRth1.SetBinContent(4,20);\n\n    RooDataHist data_CRhist(\"data_obs_CR\",\"Data observed\",vars,&amp;data_CRth1);\n    wspace.import(data_CRhist);\n\n    // This time, the background process will be dependent on the yields of the background in the signal region.\n    // The transfer factor TF must account for acceptance/efficiency etc differences in the signal to control\n    // In this example lets assume the control region is populated by the same process decaying to clean daughters with 2xBR\n    // compared to the signal region\n\n    // NB You could have a different transfer factor for each bin represented by a completely different RooRealVar\n\n    // We can imagine that the transfer factor could be associated with some uncertainty - lets say a 1% uncertainty due to efficiency and 2% due to acceptance.\n    // We need to make these nuisance parameters ourselves and give them a nominal value of 0\n\n\n    RooRealVar efficiency(\"efficiency\", \"efficiency nuisance parameter\",0);\n    RooRealVar acceptance(\"acceptance\", \"acceptance nuisance parameter\",0);\n\n    // We would need to make the transfer factor a 
function of those too. Here we've assumed Log-normal effects (i.e the same as putting lnN in the CR datacard)\n    // but note that we could use any function which could be used to parameterise the effect - eg if the systematic is due to some alternate template, we could\n    // use polynomials for example.\n\n\n    RooFormulaVar TF(\"TF\",\"Trasnfer factor\",\"2*TMath::Power(1.01,@0)*TMath::Power(1.02,@1)\",RooArgList(efficiency,acceptance) );\n\n    // Finally, we need to make each bin of the background in the control region a function of the background in the signal and the transfer factor\n    // N_CR = N_SR x TF\n\n    RooFormulaVar CRbin1(\"bkg_CR_bin1\",\"Background yield in control region, bin 1\",\"@0*@1\",RooArgList(TF,bin1));\n    RooFormulaVar CRbin2(\"bkg_CR_bin2\",\"Background yield in control region, bin 2\",\"@0*@1\",RooArgList(TF,bin2));\n    RooFormulaVar CRbin3(\"bkg_CR_bin3\",\"Background yield in control region, bin 3\",\"@0*@1\",RooArgList(TF,bin3));\n    RooFormulaVar CRbin4(\"bkg_CR_bin4\",\"Background yield in control region, bin 4\",\"@0*@1\",RooArgList(TF,bin4));\n\n    RooArgList bkg_CR_bins;\n    bkg_CR_bins.add(CRbin1);\n    bkg_CR_bins.add(CRbin2);\n    bkg_CR_bins.add(CRbin3);\n    bkg_CR_bins.add(CRbin4);\n    RooParametricHist p_CRbkg(\"bkg_CR\", \"Background PDF in control region\",met,bkg_CR_bins,data_th1);\n    RooAddition p_CRbkg_norm(\"bkg_CR_norm\",\"Total Number of events from background in control region\",bkg_CR_bins);\n    // -------------------------------------------------------------------------------------------------------------//\n\n\n    // we can also use the standard interpolation from combine by providing alternative shapes (as RooDataHists)\n    // here we're adding two of them (JES and ISR)\n    TH1F background_up(\"tbkg_CR_JESUp\",\"\",nbins,xbins);\n    background_up.SetBinContent(1,CRbin1.getVal()*1.01);\n    background_up.SetBinContent(2,CRbin2.getVal()*1.02);\n    background_up.SetBinContent(3,CRbin3.getVal()*1.03);\n    background_up.SetBinContent(4,CRbin4.getVal()*1.04);\n    RooDataHist bkg_CRhist_sysUp(\"bkg_CR_JESUp\",\"Bkg sys up\",vars,&amp;background_up);\n    wspace.import(bkg_CRhist_sysUp);\n\n    TH1F background_down(\"bkg_CR_JESDown\",\"\",nbins,xbins);\n    background_down.SetBinContent(1,CRbin1.getVal()*0.90);\n    background_down.SetBinContent(2,CRbin2.getVal()*0.98);\n    background_down.SetBinContent(3,CRbin3.getVal()*0.97);\n    background_down.SetBinContent(4,CRbin4.getVal()*0.96);\n    RooDataHist bkg_CRhist_sysDown(\"bkg_CR_JESDown\",\"Bkg sys down\",vars,&amp;background_down);\n    wspace.import(bkg_CRhist_sysDown);\n\n    TH1F background_2up(\"tbkg_CR_ISRUp\",\"\",nbins,xbins);\n    background_2up.SetBinContent(1,CRbin1.getVal()*0.85);\n    background_2up.SetBinContent(2,CRbin2.getVal()*0.9);\n    background_2up.SetBinContent(3,CRbin3.getVal()*0.95);\n    background_2up.SetBinContent(4,CRbin4.getVal()*0.99);\n    RooDataHist bkg_CRhist_sys2Up(\"bkg_CR_ISRUp\",\"Bkg sys 2up\",vars,&amp;background_2up);\n    wspace.import(bkg_CRhist_sys2Up);\n\n    TH1F background_2down(\"bkg_CR_ISRDown\",\"\",nbins,xbins);\n    background_2down.SetBinContent(1,CRbin1.getVal()*1.15);\n    background_2down.SetBinContent(2,CRbin2.getVal()*1.1);\n    background_2down.SetBinContent(3,CRbin3.getVal()*1.05);\n    background_2down.SetBinContent(4,CRbin4.getVal()*1.01);\n    RooDataHist bkg_CRhist_sys2Down(\"bkg_CR_ISRDown\",\"Bkg sys 2down\",vars,&amp;background_2down);\n    wspace.import(bkg_CRhist_sys2Down);\n\n    // import the 
pdfs\n    wspace.import(p_bkg);\n    wspace.import(p_bkg_norm,RooFit::RecycleConflictNodes());\n    wspace.import(p_CRbkg);\n    wspace.import(p_CRbkg_norm,RooFit::RecycleConflictNodes());\n    fOut-&gt;cd();\n    wspace.Write();\n\n    // Clean up\n    fOut-&gt;Close();\n    fOut-&gt;Delete();\n\n\n}\n</code></pre> <p>We will now discuss what the script is doing. First, the observable for the search is the missing energy, so we create a parameter to represent this observable.</p> <pre><code>   RooRealVar met(\"met\",\"E_{T}^{miss}\",xmin,xmax);\n</code></pre> <p>The following lines create a freely floating parameter for each of our bins (in this example, there are only 4 bins, defined for our observable <code>met</code>).</p> <pre><code>   RooRealVar bin1(\"bkg_SR_bin1\",\"Background yield in signal region, bin 1\",100,0,500);\n   RooRealVar bin2(\"bkg_SR_bin2\",\"Background yield in signal region, bin 2\",50,0,500);\n   RooRealVar bin3(\"bkg_SR_bin3\",\"Background yield in signal region, bin 3\",25,0,500);\n   RooRealVar bin4(\"bkg_SR_bin4\",\"Background yield in signal region, bin 4\",10,0,500);\n\n   RooArgList bkg_SR_bins;\n   bkg_SR_bins.add(bin1);\n   bkg_SR_bins.add(bin2);\n   bkg_SR_bins.add(bin3);\n   bkg_SR_bins.add(bin4);\n</code></pre> <p>They are put into a list so that we can create a <code>RooParametricHist</code> and its normalisation from that list</p> <pre><code>  RooParametricHist p_bkg(\"bkg_SR\", \"Background PDF in signal region\",met,bkg_SR_bins,data_th1);\n\n  RooAddition p_bkg_norm(\"bkg_SR_norm\",\"Total Number of events from background in signal region\",bkg_SR_bins);\n</code></pre> <p>For the control region, the background process will be dependent on the yields of the background in the signal region using a transfer factor. The transfer factor <code>TF</code> must account for acceptance/efficiency/etc differences between the signal region and the control regions.</p> <p>In this example we will assume the control region is populated by the same process decaying to a different final state with twice as large branching fraction as the one in the signal region.</p> <p>We could imagine that the transfer factor could be associated with some uncertainty - for example a 1% uncertainty due to efficiency and a 2% uncertainty due to acceptance differences. We need to make nuisance parameters ourselves to model this, and give them a nominal value of 0.</p> <pre><code>   RooRealVar efficiency(\"efficiency\", \"efficiency nuisance parameter\",0);\n   RooRealVar acceptance(\"acceptance\", \"acceptance nuisance parameter\",0);\n</code></pre> <p>We need to make the transfer factor a function of these parameters, since variations in these uncertainties will lead to variations of the transfer factor. 
Here we have assumed Log-normal effects (i.e. the same as putting lnN in the CR datacard), but we could use any function that parameterizes the effect - for example, if the systematic uncertainty is due to some alternate template, we could use polynomials.</p> <pre><code>   RooFormulaVar TF(\"TF\",\"Trasnfer factor\",\"2*TMath::Power(1.01,@0)*TMath::Power(1.02,@1)\",RooArgList(efficiency,acceptance) );\n</code></pre> <p>Then, we need to make each bin of the background in the control region a function of the background in the signal region and the transfer factor - i.e. \\(N_{CR} = N_{SR} \\times TF\\).</p> <pre><code>   RooFormulaVar CRbin1(\"bkg_CR_bin1\",\"Background yield in control region, bin 1\",\"@0*@1\",RooArgList(TF,bin1));\n   RooFormulaVar CRbin2(\"bkg_CR_bin2\",\"Background yield in control region, bin 2\",\"@0*@1\",RooArgList(TF,bin2));\n   RooFormulaVar CRbin3(\"bkg_CR_bin3\",\"Background yield in control region, bin 3\",\"@0*@1\",RooArgList(TF,bin3));\n   RooFormulaVar CRbin4(\"bkg_CR_bin4\",\"Background yield in control region, bin 4\",\"@0*@1\",RooArgList(TF,bin4));\n</code></pre> <p>As before, we also need to create the <code>RooParametricHist</code> for this process in the control region, but this time the bin yields will be the <code>RooFormulaVars</code> we just created instead of freely floating parameters.</p> <pre><code>   RooArgList bkg_CR_bins;\n   bkg_CR_bins.add(CRbin1);\n   bkg_CR_bins.add(CRbin2);\n   bkg_CR_bins.add(CRbin3);\n   bkg_CR_bins.add(CRbin4);\n\n   RooParametricHist p_CRbkg(\"bkg_CR\", \"Background PDF in control region\",met,bkg_CR_bins,data_th1);\n   RooAddition p_CRbkg_norm(\"bkg_CR_norm\",\"Total Number of events from background in control region\",bkg_CR_bins);\n</code></pre> <p>Finally, we can also create alternative shape variations (Up/Down) that can be fed to Combine as we do with <code>TH1</code> or <code>RooDataHist</code> type workspaces. These need to be of type <code>RooDataHist</code>. The example below is for a Jet Energy Scale type shape uncertainty.</p> <pre><code>   TH1F background_up(\"tbkg_CR_JESUp\",\"\",nbins,xbins);\n   background_up.SetBinContent(1,CRbin1.getVal()*1.01);\n   background_up.SetBinContent(2,CRbin2.getVal()*1.02);\n   background_up.SetBinContent(3,CRbin3.getVal()*1.03);\n   background_up.SetBinContent(4,CRbin4.getVal()*1.04);\n   RooDataHist bkg_CRhist_sysUp(\"bkg_CR_JESUp\",\"Bkg sys up\",vars,&amp;background_up);\n   wspace.import(bkg_CRhist_sysUp);\n\n   TH1F background_down(\"bkg_CR_JESDown\",\"\",nbins,xbins);\n   background_down.SetBinContent(1,CRbin1.getVal()*0.90);\n   background_down.SetBinContent(2,CRbin2.getVal()*0.98);\n   background_down.SetBinContent(3,CRbin3.getVal()*0.97);\n   background_down.SetBinContent(4,CRbin4.getVal()*0.96);\n   RooDataHist bkg_CRhist_sysDown(\"bkg_CR_JESDown\",\"Bkg sys down\",vars,&amp;background_down);\n   wspace.import(bkg_CRhist_sysDown);\n</code></pre> <p>Below are datacards (for signal and control regions) which can be used in conjunction with the workspace built above. 
In order to \"use\" the control region, simply combine the two cards as usual using <code>combineCards.py</code>.</p> Show Signal Region Datacard <pre><code>Signal Region Datacard -- signal category\n\nimax * number of bins\njmax * number of processes minus 1\nkmax * number of nuisance parameters\n\n----------------------------------------------------------------------------\n\nshapes data_obs   signal param_ws.root wspace:data_obs_SR\nshapes background signal param_ws.root wspace:bkg_SR    # the background model pdf which is freely floating, note other backgrounds can be added as usual\nshapes signal     signal param_ws.root wspace:signal\n\n----------------------------------------------------------------------------\n\nbin         signal\nobservation -1\n\n----------------------------------------------------------------------------\n\n# background rate must be taken from _norm param x 1\n\nbin      signal      signal\nprocess  background  signal\nprocess  1           0\nrate     1           -1\n\n----------------------------------------------------------------------------\n\n# Normal uncertainties in the signal region\nlumi_8TeV    lnN    -    1.026\n\n# free floating parameters, we do not need to declare them, but it's a good idea to\nbkg_SR_bin1 flatParam\nbkg_SR_bin2 flatParam\nbkg_SR_bin3 flatParam\nbkg_SR_bin4 flatParam\n</code></pre> Show Control Region Datacard <pre><code>Control Region Datacard -- control category\n\nimax * number of bins\njmax * number of processes minus 1\nkmax * number of nuisance parameters\n\n----------------------------------------------------------------------------\n\nshapes data_obs   control param_ws.root wspace:data_obs_CR\nshapes background control param_ws.root wspace:bkg_CR wspace:bkg_CR_$SYSTEMATIC    # the background model pdf which is dependent on that in the SR, note other backgrounds can be added as usual\n\n----------------------------------------------------------------------------\n\nbin         control\nobservation -1\n\n----------------------------------------------------------------------------\n\n# background rate must be taken from _norm param x 1\n\nbin      control\nprocess  background\nprocess  1\nrate     1\n\n----------------------------------------------------------------------------\n\nJES         shape   1\nISR         shape   1\nefficiency  param   0   1\nacceptance  param   0   1\n</code></pre> <p>Note that for the control region, our nuisance parameters appear as <code>param</code> types, so that Combine will correctly constrain them.</p> <p>If we combine the two cards and fit the result with <code>-M MultiDimFit -v 3</code> (the full set of commands is sketched at the end of this section), we can see that the parameters that give the rate of background in each bin of the signal region, along with the nuisance parameters and signal strength, are determined by the fit - i.e. we have properly included the constraint from the control region, just as with the 1-bin <code>gmN</code>.</p> <pre><code>acceptance = 0.00374312 +/- 0.964632 (limited)\nbkg_SR_bin1 = 99.9922 +/- 5.92062 (limited)\nbkg_SR_bin2 = 49.9951 +/- 4.13535 (limited)\nbkg_SR_bin3 = 24.9915 +/- 2.9267 (limited)\nbkg_SR_bin4 = 9.96478 +/- 2.1348 (limited)\nefficiency = 0.00109195 +/- 0.979334 (limited)\nlumi_8TeV = -0.0025911 +/- 0.994458\nr = 0.00716347 +/- 12.513 (limited)\n</code></pre> <p>The example given here is extremely basic and it should be noted that additional complexity in the transfer factors, as well as additional uncertainties/backgrounds etc. in the cards are, as always, supported.</p> <p>Danger</p> <p>If trying to implement parametric uncertainties in this setup (eg on transfer factors) that are correlated with other channels and implemented separately, you MUST normalize the uncertainty effect so that the datacard line can read <code>param name X 1</code>. That is, the uncertainty on this parameter must be 1. Without this, there will be inconsistency with other nuisances of the same name in other channels implemented as shape or lnN.</p>
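<p>For reference, the full set of commands referred to above might look like the following (a sketch; it assumes the two datacards shown above have been saved as <code>datacard_SR.txt</code> and <code>datacard_CR.txt</code>, which are hypothetical file names):</p> <pre><code>combineCards.py signal=datacard_SR.txt control=datacard_CR.txt &gt; datacard_combined.txt\ntext2workspace.py datacard_combined.txt -o datacard_combined.root\ncombine datacard_combined.root -M MultiDimFit -v 3\n</code></pre>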
"},{"location":"part3/nonstandard/#look-elsewhere-effect-for-one-parameter","title":"Look-elsewhere effect for one parameter","text":"<p>In case you see an excess somewhere in your analysis, you can evaluate the look-elsewhere effect (LEE) of that excess. For an explanation of the LEE, take a look at the CMS Statistics Committee Twiki here.</p> <p>To calculate the look-elsewhere effect for a single parameter (in this case the mass of the resonance), you can follow the instructions below. Note that these instructions assume you have a workspace that is parametric in your resonance mass \\(m\\), otherwise you need to fit each background toy with separate workspaces. We will assume the local significance for your excess is \\(\\sigma\\).</p> <ul> <li> <p>Generate background-only toys <code>combine ws.root -M GenerateOnly --toysFrequentist -m 16.5 -t 100 --saveToys --expectSignal=0</code>. The output will be something like <code>higgsCombineTest.GenerateOnly.mH16.5.123456.root</code>.</p> </li> <li> <p>For each toy, calculate the significance for a predefined range (e.g. \\(m\\in [10,35]\\) GeV) in steps suitable to the resolution (e.g. 1 GeV). For <code>toy_1</code> the procedure would be: <code>for i in $(seq 10 35); do combine ws.root -M Significance --redefineSignalPOI r --freezeParameters MH --setParameters MH=$i -n $i -D higgsCombineTest.GenerateOnly.mH16.5.123456.root:toys/toy_1; done</code>. Calculate the maximum significance over all of these mass points - call this \\(\\sigma_{max}\\).</p> </li> <li> <p>Count how many toys have a maximum significance larger than the local one for your observed excess. This fraction of toys with \\(\\sigma_{max}&gt;\\sigma\\) is the global p-value.</p> </li> </ul> <p>You can find more tutorials on the LEE here</p>"},{"location":"part3/regularisation/","title":"Unfolding &amp; regularization","text":"<p>This section details how to perform an unfolded cross-section measurement, including regularization, within Combine. </p> <p>There are many resources available that describe unfolding, including when to use it (or not), and what the common issues surrounding it are. For CMS users, a useful summary is available in the CMS Statistics Committee pages on unfolding. You can also find an overview of unfolding and its usage in Combine in these slides.</p> <p>The basic idea behind the unfolding technique is to describe smearing introduced through the reconstruction (e.g. of the particle energy) in a given truth level bin \\(x_{i}\\) through a linear relationship with the effects in the nearby truth-bins. We can make statements about the probability \\(p_{j}\\) that the event falling in the truth bin \\(x_{i}\\) is reconstructed in the bin \\(y_{i}\\) via the linear relationship,</p> \\[ y_{obs} = \\tilde{\\boldsymbol{R}}\\cdot x_{true} + b \\] <p>or, if the truth bins are expressed relative to some particular model, we use the usual signal strength terminology, </p> \\[ y_{obs} = \\boldsymbol{R}\\cdot \\mu + b \\] <p>Unfolding aims to find the distribution at truth level \\(x\\), given the observations \\(y\\) at reco-level.</p>"},{"location":"part3/regularisation/#likelihood-based-unfolding","title":"Likelihood-based unfolding","text":"<p>Since Combine has access to the full likelihood for any analysis written in the usual datacard format, we will use likelihood-based unfolding throughout - for other approaches, there are many other tools available (eg <code>RooUnfold</code> or <code>TUnfold</code>), which can be used instead. 
</p> <p>The benefits of the likelihood-based approach are that, </p> <ul> <li>Background subtraction is accounted for directly in the likelihood</li> <li>Systematic uncertainties are accounted for directly during the unfolding as nuisance parameters</li> <li>We can profile the nuisance parameters during the unfolding to make the most of the data available </li> </ul> <p>In practice, one must construct the response matrix and unroll it in the reconstructed bins:</p> <ul> <li>First, one derives the truth distribution, e.g. after the generator-level selection only, \\(x_{i}\\).</li> <li>Each reconstructed bin (e.g. each datacard) should describe the contribution from each truth bin - this is how Combine knows about the response matrix \\(\\boldsymbol{R}\\)  and folds in the acceptance/efficiency effects as usual.</li> <li>The out-of-acceptance contributions can also be included in the above.</li> </ul> <p>The model we use for this is then just the usual <code>PhysicsModel:multiSignalModel</code>, where each signal refers to a particular truth level bin. The results can be extracted through a  simple maximum-likelihood fit with, </p> <pre><code>    text2workspace.py -m 125 --X-allow-no-background -o datacard.root datacard.txt\n       -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO map='.*GenBin0.*:r_Bin0[1,-1,20]' --PO map='.*GenBin1.*:r_Bin1[1,-1,20]' --PO map='.*GenBin2.*:r_Bin2[1,-1,20]' --PO map='.*GenBin3.*:r_Bin3[1,-1,20]' --PO map='.*GenBin4.*:r_Bin4[1,-1,20]'\n\n    combine -M MultiDimFit --setParameters=r_Bin0=1,r_Bin1=1,r_Bin2=1,r_Bin3=1,r_Bin4=1 -t -1 -m 125 datacard.root\n    combine -M MultiDimFit --setParameters=r_Bin0=1,r_Bin1=1,r_Bin2=1,r_Bin3=1,r_Bin4=1 -t -1 -m 125 --algo=grid --points=100 -P r_Bin1 --setParameterRanges r_Bin1=0.5,1.5 --floatOtherPOIs=1 datacard.root\n</code></pre> <p>Notice that one can also perform the so called bin-by-bin unfolding (though it is strongly discouraged, except for testing) with, </p> <pre><code>    text2workspace.py -m 125 --X-allow-no-background -o datacard.root datacard.txt\n      -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO map='.*RecoBin0.*:r_Bin0[1,-1,20]' --PO map='.*RecoBin1.*:r_Bin1[1,-1,20]' --PO map='.*RecoBin2.*:r_Bin2[1,-1,20]' --PO map='.*RecoBin3.*:r_Bin3[1,-1,20]' --PO map='.*RecoBin4.*:r_Bin4[1,-1,20]'\n</code></pre> <p>Nuisance parameters can be added to the likelihood function and profiled in the usual way via the datacards. Theory uncertainties on the inclusive cross section are typically not included in unfolded measurements.</p> <p>The figure below shows a comparison of likelihood-based unfolding and a least-squares based unfolding as implemented in <code>RooUnfold</code>. </p> Show comparison <p></p>"},{"location":"part3/regularisation/#regularization","title":"Regularization","text":"<p>The main difference with respect to other models with multiple signal contributions is the introduction of Regularization, which is used to stabilize the unfolding process. </p> <p>An example of unfolding in Combine with and without regularization, can be found under  data/tutorials/regularization. 
</p> <p>Running <code>python createWs.py [-r]</code> will create a simple datacard and perform a fit both with and without including regularization.</p> <p>The simplest way to introduce regularization in the likelihood based approach, is to apply a penalty term, which  depends on the values of the truth bins, in the likelihood function (so-called Tikhonov regularization):</p> \\[ -2\\ln L = -2\\ln L + P(\\vec{x})  \\] <p>Here, \\(P\\) is a linear operator. There are two different approaches that are supported to construct \\(P\\). If you run <code>python makeModel.py</code>, you will create a more complex datacard with the two regularization schemes implemented. You will need  to uncomment the relevant sections of code to activate <code>SVD</code> or <code>TUnfold</code>-type regularization.</p> <p>Warning</p> <p>When using any unfolding method with regularization, you must perform studies of the potential bias/coverage properties introduced through the </p> <p>inclusion of regularization, and how strong the associated regularization is. Advice on this can be found in the CMS Statistics Committee pages. </p>"},{"location":"part3/regularisation/#singular-value-decomposition-svd","title":"Singular Value Decomposition (SVD)","text":"<p>In the SVD approach - as described in the SVD paper - the penalty term is constructed directly based on the strengths (\\(\\vec{\\mu}=\\{\\mu_{i}\\}_{i=1}^{N}\\)), </p> \\[ P = \\tau\\left| A\\cdot \\vec{\\mu} \\right|^{2}, \\] <p>where \\(A\\) is typically the discrete curvature matrix, with </p> \\[ A =  \\begin{bmatrix}  1 &amp; -1 &amp; ... \\\\ 1 &amp; -2 &amp; 1 &amp;  ... \\\\ ...  \\end{bmatrix} \\] <p>Penalty terms on the derivatives can also be included. Such a penalty term is included by modifying the likelihood to include one constraint for each  row of the product \\(A\\cdot\\vec{\\mu}\\), by including them as lines in the datacard of the form, </p> <p><pre><code>    name constr formula dependents delta\n</code></pre> where the regularization strength is \\(\\delta=\\frac{1}{\\sqrt{\\tau}}\\) and can either be a fixed value (e.g. by directly putting <code>0.01</code>) or as  a modifiable parameter with e.g. <code>delta[0.01]</code>. </p> <p>For example, for 3 bins and a regularization strength of 0.03, the first line would be </p> <pre><code>    name constr @0-2*@2+@1 r_Bin0,r_Bin1,r_Bin2 0.03\n</code></pre> <p>Alternative valid syntaxes are  </p> <pre><code>    constr1 constr r_bin0-r_bin1 0.01\n    constr1 constr r_bin0-r_bin1 delta[0.01]\n    constr1 constr r_bin0+r_bin1 r_bin0,r_bin1 0.01\n    constr1 constr r_bin0+r_bin1 {r_bin0,r_bin1} delta[0.01]\n</code></pre> <p>The figure below shows an example unfolding using the \"SVD regularization\" approach with the least squares method (as implemented by <code>RooUnfold</code>) and implemented as a penalty term added to the likelihood using the maximum likelihood approach in Combine.</p> Show comparison <p></p>"},{"location":"part3/regularisation/#tunfold-method","title":"TUnfold method","text":"<p>The Tikhonov regularization as implemented in <code>TUnfold</code> uses the MC information, or rather the density prediction, as a bias vector.  In order to give this information to Combine, a single datacard for each reconstruction-level bin needs to be produced, so that we have access to the proper normalization terms during the minimization. 
In this case the bias vector is \\(\\vec{x}_{obs}-\\vec{x}_{true}\\) </p> <p>Then one can write a constraint term in the datacard via, for example,</p> <pre><code>    constr1 constr (r_Bin0-1.)*(shapeSig_GenBin0_RecoBin0__norm+shapeSig_GenBin0_RecoBin1__norm+shapeSig_GenBin0_RecoBin2__norm+shapeSig_GenBin0_RecoBin3__norm+shapeSig_GenBin0_RecoBin4__norm)+(r_Bin2-1.)*(shapeSig_GenBin2_RecoBin0__norm+shapeSig_GenBin2_RecoBin1__norm+shapeSig_GenBin2_RecoBin2__norm+shapeSig_GenBin2_RecoBin3__norm+shapeSig_GenBin2_RecoBin4__norm)-2*(r_Bin1-1.)*(shapeSig_GenBin1_RecoBin0__norm+shapeSig_GenBin1_RecoBin1__norm+shapeSig_GenBin1_RecoBin2__norm+shapeSig_GenBin1_RecoBin3__norm+shapeSig_GenBin1_RecoBin4__norm) {r_Bin0,r_Bin1,r_Bin2,shapeSig_GenBin1_RecoBin0__norm,shapeSig_GenBin0_RecoBin0__norm,shapeSig_GenBin2_RecoBin0__norm,shapeSig_GenBin1_RecoBin1__norm,shapeSig_GenBin0_RecoBin1__norm,shapeSig_GenBin2_RecoBin1__norm,shapeSig_GenBin1_RecoBin2__norm,shapeSig_GenBin0_RecoBin2__norm,shapeSig_GenBin2_RecoBin2__norm,shapeSig_GenBin1_RecoBin3__norm,shapeSig_GenBin0_RecoBin3__norm,shapeSig_GenBin2_RecoBin3__norm,shapeSig_GenBin1_RecoBin4__norm,shapeSig_GenBin0_RecoBin4__norm,shapeSig_GenBin2_RecoBin4__norm} delta[0.03]\n</code></pre>"},{"location":"part3/runningthetool/","title":"How to run the tool","text":"<p>The executable Combine provided by the package is used to invoke the tools via the command line. The statistical analysis method, as well as user settings, are also specified on the command line. To see the full list of available options, you can run:</p> <pre><code>combine --help\n</code></pre> <p>The option <code>-M</code> is used to choose the statistical evaluation method. There are several groups of statistical methods:</p> <ul> <li>Asymptotic likelihood methods:<ul> <li><code>AsymptoticLimits</code>: limits calculated according to the asymptotic formulae in arxiv:1007.1727.</li> <li><code>Significance</code>: simple profile likelihood approximation, for calculating significances.</li> </ul> </li> <li>Bayesian methods:<ul> <li><code>BayesianSimple</code>: performing a classical numerical integration (for simple models only).</li> <li><code>MarkovChainMC</code>: performing Markov Chain integration, for arbitrarily complex models.</li> </ul> </li> <li>Frequentist or hybrid bayesian-frequentist methods:<ul> <li><code>HybridNew</code>: compute modified frequentist limits, significance/p-values and confidence intervals according to several possible prescriptions with toys. </li> </ul> </li> <li>Fitting<ul> <li><code>FitDiagnostics</code>: performs maximum likelihood fits to extract the signal rate, and provides diagnostic tools such as pre- and post-fit figures and correlations</li> <li><code>MultiDimFit</code>: performs maximum likelihood fits and likelihood scans with an arbitrary number of parameters of interest.</li> </ul> </li> <li>Miscellaneous other modules that do not compute limits or confidence intervals, but use the same framework:<ul> <li><code>GoodnessOfFit</code>: perform a goodness of fit test for models including shape information. 
Several GoF tests are implemented.</li> <li><code>ChannelConsistencyCheck</code>: study the consistency between individual channels in a combination.</li> <li><code>GenerateOnly</code>: generate random or asimov toy datasets for use as input to other methods</li> </ul> </li> </ul> <p>The command help is organized into five parts:</p> <ul> <li>The Main options section indicates how to pass the datacard as input to the tool (<code>-d datacardName</code>), how to choose the statistical method (<code>-M MethodName</code>), and how to set the verbosity level <code>-v</code></li> <li>Under Common statistics options, options common to different statistical methods are given. Examples are <code>--cl</code>, to specify the confidence level (default is 0.95), or <code>-t</code>, to give the number of toy MC extractions required.</li> <li>The Common input-output options section includes, for example, the options to specify the mass hypothesis under study (<code>-m</code>) or to include a specific string in the output filename (<code>--name</code>). </li> <li>Common miscellaneous options.</li> <li>Further method-specific options are available for each method. By passing the method name via the <code>-M</code> option, along with <code>--help</code>, the options for that specific method are shown in addition to the common options. </li> </ul> <p>Not all the available options are discussed in this online documentation; use <code>--help</code> to get the documentation of all options.</p>"},{"location":"part3/runningthetool/#common-command-line-options","title":"Common command-line options","text":"<p>There are a number of useful command-line options that can be used to alter the model (or parameters of the model) at run time. The most commonly used, generic options, are:</p> <ul> <li> <p><code>-H</code>: first run a different, faster, algorithm (e.g. the <code>ProfileLikelihood</code> described below) to obtain an approximate indication of the limit, which will allow the precise chosen algorithm to converge more quickly. We strongly recommend to use this option when using the <code>MarkovChainMC</code>, <code>HybridNew</code> or <code>FeldmanCousins</code> calculators, unless you know in which range your limit lies and you set this range manually (the default is <code>[0, 20]</code>)</p> </li> <li> <p><code>--rMax</code>, <code>--rMin</code>: manually restrict the range of signal strengths to consider. For Bayesian limits with MCMC, a rule of thumb is that <code>rMax</code> should be 3-5 times the limit (a too small value of <code>rMax</code> will bias your limit towards low values, since you are restricting the integration range, while a too large value will bias you to higher limits)</p> </li> <li> <p><code>--setParameters name=value[,name2=value2,...]</code> sets the starting values of the parameters, useful e.g. when generating toy MC or when setting the parameters as fixed. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code>.</p> </li> <li> <p><code>--setParameterRanges name=min,max[:name2=min2,max2:...]</code> sets the ranges of the parameters (useful e.g. for scans in <code>MultiDimFit</code>, or for Bayesian integration). 
This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code>.</p> </li> <li> <p><code>--redefineSignalPOIs name[,name2,...]</code> redefines the set of parameters of interest.</p> <ul> <li>If the parameters were constant in the input workspace, they are set to be floating.</li> <li>Nuisance parameters promoted to parameters of interest are removed from the list of nuisances, and thus they are not randomized in methods that randomize nuisances (e.g. <code>HybridNew</code> in non-frequentist mode, or <code>BayesianToyMC</code>, or in toy generation with <code>-t</code> but without <code>--toysFreq</code>). This does not have any impact on algorithms that do not randomize nuisance parameters (e.g. fits, <code>AsymptoticLimits</code>, or <code>HybridNew</code> in fequentist mode) or on algorithms that treat all parameters in the same way (e.g. <code>MarkovChainMC</code>).</li> <li>Note that constraint terms for the nuisances are dropped after promotion to a POI using <code>--redefineSignalPOI</code>. To produce a likelihood scan for a nuisance parameter, using <code>MultiDimFit</code> with <code>--algo grid</code>, you should instead use the <code>--parameters (-P)</code> option, which will not cause the loss of the constraint term when scanning.</li> <li>Parameters of interest of the input workspace that are not selected by this command become unconstrained nuisance parameters, but they are not added to the list of nuisances so they will not be randomized (see above).</li> </ul> </li> <li> <p><code>--freezeParameters name1[,name2,...]</code> Will freeze the parameters with the given names to their set values. This option supports the use of regular expression by replacing <code>name</code> with <code>rgx{some regular expression}</code> for matching to constrained nuisance parameters or <code>var{some regular expression}</code> for matching to any parameter. For example <code>--freezeParameters rgx{CMS_scale_j.*}</code> will freeze all constrained nuisance parameters with the prefix <code>CMS_scale_j</code>, while <code>--freezeParameters var{.*rate_scale}</code> will freeze any parameter (constrained nuisance parameter or otherwise) with the suffix <code>rate_scale</code>.</p> <ul> <li>Use the option <code>--freezeParameters allConstrainedNuisances</code> to freeze all nuisance parameters that have a constraint term (i.e not <code>flatParams</code> or <code>rateParams</code> or other freely floating parameters).</li> <li>Similarly, the option <code>--floatParameters name1[,name2,...]</code> sets the parameter(s) floating and also accepts regular expressions.</li> <li>Groups of nuisance parameters (constrained or otherwise), as defined in the datacard, can be frozen using <code>--freezeNuisanceGroups</code>. You can also freeze all nuisances that are not contained in a particular group using a ^ before the group name (<code>--freezeNuisanceGroups=^group_name</code> will freeze everything except nuisance parameters in the group \"group_name\".)</li> <li>All constrained nuisance parameters (not <code>flatParam</code> or <code>rateParam</code>) can be set floating using <code>--floatAllNuisances</code>.</li> </ul> </li> </ul> <p>Warning</p> <p>Note that the floating/freezing options have a priority ordering from lowest to highest as <code>floatParameters &lt; freezeParameters &lt; freezeNuisanceGroups &lt; floatAllNuisances</code>. 
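For instance, in the following sketch (the datacard name is a placeholder, and <code>pdf_gg</code> is assumed to be a member of a group called <code>theory</code>), the group-level freeze sits higher in the ordering than the individual float, so <code>pdf_gg</code> would end up frozen: <pre><code>combine datacard.root -M MultiDimFit --freezeNuisanceGroups theory --floatParameters pdf_gg\n</code></pre>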
Options with higher priority will take precedence over those with lower priority.</p> <ul> <li> <p><code>--trackParameters name1[,name2,...]</code> will add a branch to the output tree for each of the named parameters. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code></p> <ul> <li>The name of the branch will be trackedParam_name.</li> <li>The exact behaviour depends on the method used. For example, when using <code>MultiDimFit</code> with <code>--algo scan</code>, the value of the parameter at each point in the scan will be saved, while for <code>FitDiagnostics</code>, only the value at the end of the fit will be saved.</li> </ul> </li> <li> <p><code>--trackErrors name1[,name2,...]</code> will add a branch to the output tree for the error of each of the named parameters. This option supports the use of regular expressions by replacing <code>name</code> with <code>rgx{some regular expression}</code></p> <ul> <li>The name of the branch will be trackedError_name.</li> <li>The behaviour, in terms of which values are saved, is the same as <code>--trackParameters</code> above.</li> </ul> </li> </ul> <p>By default, the data set used by Combine will be the one listed in the datacard. You can tell Combine to use a different data set (for example a toy data set that you generated) by using the option <code>--dataset</code>. The argument should be <code>rootfile.root:workspace:location</code> or <code>rootfile.root:location</code>. In order to use this option, you must first convert your datacard to a binary workspace and use this binary workspace as the input to Combine. </p>"},{"location":"part3/runningthetool/#generic-minimizer-options","title":"Generic Minimizer Options","text":"<p>Combine uses its own minimizer class, which is used to steer Minuit (via RooMinimizer), named the <code>CascadeMinimizer</code>. This allows for sequential minimization, which can help in case a particular setting or algorithm fails. The <code>CascadeMinimizer</code> also knows about extra features of Combine such as discrete nuisance parameters.</p> <p>All of the fits that are performed in Combine's methods use this minimizer. This means that the fits can be tuned using these common options,</p> <ul> <li><code>--cminPoiOnlyFit</code>: First, perform a fit floating only the parameters of interest. This can be useful to find, roughly, where the global minimum is.</li> <li><code>--cminPreScan</code>: Do a scan before the first minimization.</li> <li><code>--cminPreFit arg</code> If set to a value N &gt; 0, the minimizer will perform a pre-fit with strategy (N-1), with the nuisance parameters frozen.<ul> <li><code>--cminApproxPreFitTolerance arg</code>: If non-zero, first do a pre-fit with this tolerance (or 10 times the final tolerance, whichever is largest)</li> <li><code>--cminApproxPreFitStrategy arg</code>:   Strategy to use in the pre-fit. The default is strategy 0.</li> </ul> </li> <li><code>--cminDefaultMinimizerType arg</code>: Set the default minimizer type. By default this is set to Minuit2.</li> <li><code>--cminDefaultMinimizerAlgo arg</code>: Set the default minimizer algorithm. The default algorithm is Migrad.</li> <li><code>--cminDefaultMinimizerTolerance arg</code>: Set the default minimizer tolerance, the default is 0.1.</li> <li><code>--cminDefaultMinimizerStrategy arg</code>: Set the default minimizer strategy between 0 (speed), 1 (balance - default), 2 (robustness). 
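As a sketch of how several of these settings can be combined (the workspace name is hypothetical; the fallback syntax follows the <code>--cminFallbackAlgo</code> example given below): <pre><code>combine workspace.root -M FitDiagnostics --cminDefaultMinimizerStrategy 0 --cminDefaultMinimizerTolerance 0.01 --cminFallbackAlgo Minuit2,Simplex,0:0.1\n</code></pre> As for which strategy value to choose: 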
The Minuit documentation for this is pretty sparse but in general, 0 means evaluate the function less often, while 2 will waste function calls to get precise answers. An important note is that the <code>Hesse</code> algorithm (for error and correlation estimation) will be run only if the strategy is 1 or 2.</li> <li><code>--cminFallbackAlgo arg</code>: Provides a list of fallback algorithms, to be used in case the default minimizer fails. You can provide multiple options using the syntax <code>Type[,algo],strategy[:tolerance]</code>: eg <code>--cminFallbackAlgo Minuit2,Simplex,0:0.1</code> will fall back to the simplex algorithm of Minuit2 with strategy 0 and a tolerance 0.1, while <code>--cminFallbackAlgo Minuit2,1</code> will use the default algorithm (Migrad) of Minuit2 with strategy 1.</li> <li><code>--cminSetZeroPoint (0/1)</code>: Set the reference of the NLL to 0 when minimizing; this can help the fit converge to the minimum faster if the NLL itself is large. The default is true (1); set it to 0 to turn this off.</li> </ul> <p>The allowed combinations of minimizer types and minimizer algorithms are as follows:</p> Minimizer type Minimizer algorithm <code>Minuit</code> <code>Migrad</code>, <code>Simplex</code>, <code>Combined</code>, <code>Scan</code> <code>Minuit2</code> <code>Migrad</code>, <code>Simplex</code>, <code>Combined</code>, <code>Scan</code> <code>GSLMultiMin</code> <code>ConjugateFR</code>, <code>ConjugatePR</code>, <code>BFGS</code>, <code>BFGS2</code>, <code>SteepestDescent</code> <p>You can find details about these in the Minuit2 documentation here.</p> <p>More of these options can be found in the Cascade Minimizer options section when running <code>--help</code>.</p>"},{"location":"part3/runningthetool/#output-from-combine","title":"Output from combine","text":"<p>Most methods will print the results of the computation to the screen. However, in addition, Combine will also produce a root file containing a tree called limit with these results. The name of this file will be of the format,</p> <pre><code>higgsCombineTest.MethodName.mH$MASS.[word$WORD].root\n</code></pre> <p>where $WORD is any user-defined keyword from the datacard which has been set to a particular value.</p> <p>A few command-line options can be used to control this output:</p> <ul> <li>The option <code>-n</code> allows you to specify part of the name of the root file. e.g. if you pass <code>-n HWW</code> the root file will be called <code>higgsCombineHWW....</code> instead of <code>higgsCombineTest</code></li> <li>The option <code>-m</code> allows you to specify the (Higgs boson) mass hypothesis, which gets written in the filename and in the output tree. This simplifies the bookkeeping, as it becomes possible to merge multiple trees corresponding to different (Higgs boson) masses using <code>hadd</code>. Quantities can then be plotted as a function of the mass. The default value is m=120.</li> <li>The option <code>-s</code> can be used to specify the seed (eg <code>-s 12345</code>) used in toy generation. 
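As an illustration of how the naming options act together, a sketch (the datacard name is hypothetical; the output file name follows the naming scheme described above): <pre><code>combine datacard.txt -M AsymptoticLimits -n HWW -m 125 -s 12345\n# expected output file, following the scheme above: higgsCombineHWW.AsymptoticLimits.mH125.12345.root\n</code></pre> Returning to the <code>-s</code> option: 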
If this option is given, the name of the file will be extended by this seed, eg <code>higgsCombineTest.AsymptoticLimits.mH120.12345.root</code></li> <li>The option <code>--keyword-value</code> allows you to specify the value of a keyword in the datacard such that $WORD (in the datacard) will be given the value of VALUE in the command <code>--keyword-value WORD=VALUE</code>, eg  <code>higgsCombineTest.AsymptoticLimits.mH120.WORDVALUE.12345.root</code></li> </ul> <p>The output file will contain a <code>TDirectory</code> named toys, which will be empty if no toys are generated (see below for details) and a <code>TTree</code> called limit with the following branches;</p> Branch name Type Description <code>limit</code> <code>Double_t</code> Main result of combine run, with method-dependent meaning <code>limitErr</code> <code>Double_t</code> Estimated uncertainty on the result <code>mh</code> <code>Double_t</code> Value of MH, specified with <code>-m</code> option <code>iToy</code> <code>Int_t</code> Toy number identifier if running with <code>-t</code> <code>iSeed</code> <code>Int_t</code> Seed specified with <code>-s</code> <code>t_cpu</code> <code>Float_t</code> Estimated CPU time for algorithm <code>t_real</code> <code>Float_t</code> Estimated real time for algorithm <code>quantileExpected</code> <code>Float_t</code> Quantile identifier for methods that calculated expected (quantiles) and observed results (eg conversions from \\(\\Delta\\ln L\\) values), with method-dependent meaning. Negative values are reserved for entries that do not relate to quantiles of a calculation, with the default being set to -1 (usually meaning the observed result). <p>The value of any user-defined keyword $WORD that is set using <code>keyword-value</code> described above will also be included as a branch with type <code>string</code> named WORD. The option can be repeated multiple times for multiple keywords.</p> <p>In some cases, the precise meanings of the branches will depend on the method being used. In this case, it will be specified in this documentation.</p>"},{"location":"part3/runningthetool/#toy-data-generation","title":"Toy data generation","text":"<p>By default, each of the methods described so far will be run using the observed data as the input. In several cases (as detailed below), it is useful to run the tool using toy datasets, including Asimov data sets.</p> <p>The option <code>-t</code> is used to tell Combine to first generate one or more toy data sets, which will be used instead of the observed data. There are two versions,</p> <ul> <li> <p><code>-t N</code> with N &gt; 0. Combine will generate N toy datasets from the model and re-run the method once per toy. The seed for the toy generation can be modified with the option <code>-s</code> (use <code>-s -1</code> for a random seed). The output file will contain one entry in the tree for each of these toys.</p> </li> <li> <p><code>-t -1</code> will produce an Asimov data set, in which statistical fluctuations are suppressed. The procedure for generating this Asimov data set depends on the type of analysis you are using. More details are given below. </p> </li> </ul> <p>Warning</p> <p>The default values of the nuisance parameters (or any parameter) are used to generate the toy. This means that if, for example, you are using parametric shapes and the parameters inside the workspace are set to arbitrary values, those arbitrary values will be used to generate the toy. 
This behaviour can be modified through the use of the option <code>--setParameters x=value_x,y=value_y...</code>, which will set the values of the parameters (<code>x</code> and <code>y</code>) before toy generation. You can also load a snapshot from a previous fit to set the nuisance parameters to their post-fit values (see below).</p> <p>The output file will contain the toys (as <code>RooDataSets</code> for the observables, including global observables) in the toys directory if the option <code>--saveToys</code> is provided. If you include this option, the <code>limit</code> TTree in the output will have an entry corresponding to the state of the POI used for the generation of the toy, with the value of <code>quantileExpected</code> set to -2. </p> <p>The branches that are created by methods like <code>MultiDimFit</code> will not show the values used to generate the toy. If you also want the TTree to show the values of the POIs used to generate the toy, you should add additional branches using the <code>--trackParameters</code> option as described in the common command-line options section above. These branches will behave as expected when adding the option <code>--saveToys</code>. </p> <p>Warning</p> <p>For statistical methods that make use of toys (including <code>HybridNew</code>, <code>MarkovChainMC</code> and running with <code>-t N</code>), the results of repeated Combine commands will not be identical when using the datacard as the input. This is due to a feature in the tool that allows one to run concurrent commands that do not interfere with one another. In order to produce reproducible results with toy-based methods, you should first convert the datacard to a binary workspace using <code>text2workspace.py</code> and then use the resulting file as input to the Combine commands</p>"},{"location":"part3/runningthetool/#asimov-datasets","title":"Asimov datasets","text":"<p>If you are using either <code>-t -1</code> or  <code>AsymptoticLimits</code>, Combine will calculate results based on an Asimov data set.</p> <ul> <li> <p>For counting experiments, the Asimov data set will just be the total number of expected events (given the values of the nuisance parameters and POIs of the model)</p> </li> <li> <p>For shape analyses with templates, the Asimov data set will be constructed as a histogram using the same binning that is defined for your analysis.</p> </li> <li> <p>If your model uses parametric shapes, there are some options as to what Asimov data set to produce. By default, Combine will produce the Asimov data set as a histogram using the binning that is associated with each observable (ie as set using <code>RooRealVar::setBins</code>). If this binning does not exist, Combine will guess a suitable binning - it is therefore best to use <code>RooRealVar::setBins</code> to associate a binning with each observable, even if your data is unbinned, if you intend to use Asimov data sets.</p> </li> </ul> <p>You can also ask Combine to use a Pseudo-Asimov dataset, which is created from many weighted unbinned events.</p> <p>Setting <code>--X-rtd TMCSO_AdaptivePseudoAsimov=</code>\\(\\beta\\) with \\(\\beta&gt;0\\) will trigger the internal logic of whether to produce a Pseudo-Asimov dataset. 
This logic is as follows;</p> <ol> <li> <p>For each observable in your dataset, the number of bins, \\(n_{b}\\) is determined either from the value of <code>RooRealVar::getBins</code>, if it exists, or assumed to be 100.</p> </li> <li> <p>If \\(N_{b}=\\prod_{b}n_{b}&gt;5000\\), the number of expected events \\(N_{ev}\\) is determined. Note if you are combining multiple channels, \\(N_{ev}\\) refers to the number of expected events in a single channel. The logic is separate for each channel. If  \\(N_{ev}/N_{b}&lt;0.01\\) then a Pseudo-Asimov data set is created with the number of events equal to \\(\\beta \\cdot \\mathrm{max}\\{100*N_{ev},1000\\}\\). If \\(N_{ev}/N_{b}\\geq 0.01\\) , then a normal Asimov data set is produced.</p> </li> <li> <p>If \\(N_{b}\\leq 5000\\) then a normal Asimov data set will be produced</p> </li> </ol> <p>The production of a Pseudo-Asimov data set can be forced by using the option <code>--X-rtd TMCSO_PseudoAsimov=X</code> where <code>X&gt;0</code> will determine the number of weighted events for the Pseudo-Asimov data set. You should try different values of <code>X</code>, since larger values lead to more events in the Pseudo-Asimov data set, resulting in higher precision. However, in general, the fit will be slower. </p> <p>You can turn off the internal logic by setting <code>--X-rtd TMCSO_AdaptivePseudoAsimov=0 --X-rtd TMCSO_PseudoAsimov=0</code>, thereby forcing histograms to be generated.</p> <p>Info</p> <p>If you set <code>--X-rtd TMCSO_PseudoAsimov=X</code> with <code>X&gt;0</code> and also turn on <code>--X-rtd TMCSO_AdaptivePseudoAsimov=</code>\\(\\beta\\), with \\(\\beta&gt;0\\), the internal logic will be used, but this time the default will be to generate Pseudo-Asimov data sets, rather than the standard Asimov ones.</p>"},{"location":"part3/runningthetool/#nuisance-parameter-generation","title":"Nuisance parameter generation","text":"<p>The default method of handling systematics is to generate random values (around their nominal values, see above) for the nuisance parameters, according to their prior PDFs centred around their default values, before generating the data. The unconstrained nuisance parameters (eg <code>flatParam</code> or <code>rateParam</code>), or those with flat priors are not randomized before the data generation. If you wish to also randomize these parameters, you must declare them as <code>flatParam</code> in your datacard and, when running text2workspace, you must add the option <code>--X-assign-flatParam-prior</code> to the command line.</p> <p>The following options define how the toys will be generated,</p> <ul> <li> <p><code>--toysNoSystematics</code> the nuisance parameters in each toy are not randomized when generating the toy data sets - i.e their nominal values are used to generate the data. Note that for methods which profile (fit) the nuisances, the parameters are still floating when evaluating the likelihood.</p> </li> <li> <p><code>--toysFrequentist</code> the nuisance parameters in each toy are set to their nominal values which are obtained after first fitting to the observed data, with the POIs fixed, before generating the toy data sets. 
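As a sketch of how the two generation modes look on the command line (the datacard and the choice of method here are hypothetical; 500 toys are generated with a random seed):</p> <pre><code>combine datacard.root -M FitDiagnostics -t 500 -s -1 --toysNoSystematics\ncombine datacard.root -M FitDiagnostics -t 500 -s -1 --toysFrequentist\n</code></pre> <p>With <code>--toysFrequentist</code>, the treatment of the constraint terms also changes: 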
For evaluating likelihoods, the constraint terms are instead randomized within their PDFs around the post-fit nuisance parameter values.</p> </li> </ul> <p>If you are using <code>toysFrequentist</code>, be aware that the values set by <code>--setParameters</code> will be ignored for the toy generation as the post-fit values will instead be used (except for any parameter that is also a parameter of interest). You can override this behaviour and choose the nominal values for toy generation for any parameter by adding the option <code>--bypassFrequentistFit</code>, which will skip the initial fit to data, or by loading a snapshot (see below).</p> <p>Warning</p> <p>For methods such as <code>AsymptoticLimits</code> and <code>HybridNew --LHCmode LHC-limits</code>, the  \"nominal\" nuisance parameter values are taken from fits to the data and are, therefore, not \"blind\" to the observed data by default (following the fully frequentist paradigm). See the detailed documentation on these methods for how to run in fully \"blinded\" mode.</p>"},{"location":"part3/runningthetool/#generate-only","title":"Generate only","text":"<p>It is also possible to generate the toys first, and then feed them to the methods in Combine. This can be done using <code>-M GenerateOnly --saveToys</code>. The toys can then be read and used with the other methods by specifying <code>--toysFile=higgsCombineTest.GenerateOnly...</code> and using the same options for the toy generation. </p> <p>You can specify to run on a single toy, in place of the observed data, by including the option <code>-D file.root:toys/toy_i</code>. For example adding <code>-D higgsCombineTest.GenerateOnly.mH120.123456.root:toys/toy_10</code> will run on the  data set <code>toy_10</code> (the 10th toy) that was generated and saved in the file <code>higgsCombineTest.GenerateOnly.mH120.123456.root</code>. </p> <p>Warning</p> <p>Some methods also use toys within the method itself (eg <code>AsymptoticLimits</code> and <code>HybridNew</code>). For these, you should not specify the toy generation with <code>-t</code> or the options above. Instead, you should follow the method-specific instructions.</p>"},{"location":"part3/runningthetool/#loading-snapshots","title":"Loading snapshots","text":"<p>Snapshots from workspaces can be loaded and used in order to generate toys using the option <code>--snapshotName &lt;name of snapshot&gt;</code>. 
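For example, a minimal sketch that generates frequentist toys from the <code>MultiDimFit</code> snapshot saved in a post-fit workspace (the input file name is hypothetical):</p> <pre><code>combine higgsCombineFit.MultiDimFit.mH125.root -m 125 -M GenerateOnly --snapshotName MultiDimFit --toysFrequentist --bypassFrequentistFit -t 100 -s 12345 --saveToys -n SnapToys\n</code></pre> <p>The effect of <code>--snapshotName</code> is the following: 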
This will first set the parameters to the values in the snapshot, before any other parameter options are set and toys are generated.</p> <p>See the section on saving post-fit workspaces for creating workspaces with post-fit snapshots from <code>MultiDimFit</code>.</p> <p>Here are a few examples of calculations with toys from post-fit workspaces using a workspace with \(r, m_{H}\) as parameters of interest.</p> <ul> <li> <p>Throw post-fit toy with b from s+b(floating \(r,m_{H}\)) fit, s with r=1.0, m=best fit MH, using nuisance parameter values and constraints re-centered on s+b(floating \(r,m_{H}\)) fit values (aka frequentist post-fit expected) and compute post-fit expected r uncertainty profiling MH <code>combine higgsCombinemumhfit.MultiDimFit.mH125.root --snapshotName MultiDimFit -M MultiDimFit --verbose 9 -n randomtest --toysFrequentist --bypassFrequentistFit -t -1 --expectSignal=1 -P r --floatOtherPOIs=1 --algo singles</code></p> </li> <li> <p>Throw post-fit toy with b from s+b(floating \(r,m_{H}\)) fit, s with r=1.0, m=128.0, using nuisance parameter values and constraints re-centered on s+b(floating \(r,m_{H}\)) fit values (aka frequentist post-fit expected) and compute post-fit expected significance (with MH fixed at 128 implicitly) <code>combine higgsCombinemumhfit.MultiDimFit.mH125.root -m 128 --snapshotName MultiDimFit -M ProfileLikelihood --significance --verbose 9 -n randomtest --toysFrequentist --bypassFrequentistFit --overrideSnapshotMass -t -1 --expectSignal=1 --redefineSignalPOIs r --freezeParameters MH</code></p> </li> <li> <p>Throw post-fit toy with b from s+b(floating \(r,m_{H}\)) fit, s with r=0.0, using nuisance parameter values and constraints re-centered on s+b(floating \(r,m_{H}\)) fit values (aka frequentist post-fit expected) and compute post-fit expected and observed asymptotic limit (with MH fixed at 128 implicitly) <code>combine higgsCombinemumhfit.MultiDimFit.mH125.root -m 128 --snapshotName MultiDimFit -M AsymptoticLimits --verbose 9 -n randomtest --bypassFrequentistFit --overrideSnapshotMass --redefineSignalPOIs r --freezeParameters MH</code></p> </li> </ul>"},{"location":"part3/runningthetool/#combinetool-for-job-submission","title":"combineTool for job submission","text":"<p>For longer tasks that cannot be run locally, several methods in Combine can be split to run on a batch system or on the Grid. The splitting and submission are handled using the <code>combineTool.py</code> script.</p>"},{"location":"part3/runningthetool/#submission-to-condor","title":"Submission to Condor","text":"<p>The syntax for running on condor with the tool is</p> <pre><code>combineTool.py -M ALGO [options] --job-mode condor --sub-opts='CLASSADS' --task-name NAME [--dry-run]\n</code></pre> <p>with <code>options</code> being the usual list of Combine options. The help option <code>-h</code> will give a list of both Combine and <code>combineTool</code> options. It is possible to use this tool with several different methods from Combine.</p> <p>The <code>--sub-opts</code> option takes a string with the different ClassAds that you want to set, separated by <code>\n</code> as argument (e.g. 
<code>'+JobFlavour=\"espresso\"\\nRequestCpus=1'</code>).</p> <p>The <code>--dry-run</code> option will show what will be run without actually doing so / submitting the jobs.</p> <p>For example, to generate toys (eg for use with limit setting) users running on lxplus at CERN can use the condor mode:</p> <p><pre><code>combineTool.py -d workspace.root -M HybridNew --LHCmode LHC-limits --clsAcc 0  -T 2000 -s -1 --singlePoint 0.2:2.0:0.05 --saveHybridResult -m 125 --job-mode condor --task-name condor-test --sub-opts='+JobFlavour=\"tomorrow\"'\n</code></pre> The <code>--singlePoint</code> option is over-ridden, so that this will produce a script for each value of the POI in the range 0.2 to 2.0 in steps of 0.05. You can merge multiple points into a script using <code>--merge</code> - e.g adding <code>--merge 10</code> to the above command will mean that each job contains at most 10 of the values. The scripts are labelled by the <code>--task-name</code> option. They will be submitted directly to condor, adding any options in <code>--sub-opts</code> to the condor submit script. Make sure multiple options are separated by <code>\\n</code>. The jobs will run and produce output in the current directory.</p> <p>Below is an example for splitting points in a multi-dimensional likelihood scan.</p>"},{"location":"part3/runningthetool/#splitting-jobs-for-a-multi-dimensional-likelihood-scan","title":"Splitting jobs for a multi-dimensional likelihood scan","text":"<p>The option <code>--split-points</code> issues the command to split the jobs for <code>MultiDimFit</code> when using <code>--algo grid</code>. The following example will split the jobs such that there are 10 points in each of the jobs, which will be submitted to the workday queue.</p> <pre><code>combineTool.py datacard.txt -M MultiDimFit --algo grid --points 50 --rMin 0 --rMax 1 --job-mode condor --split-points 10 --sub-opts='+JobFlavour=\"workday\"' --task-name mytask -n mytask\n</code></pre> <p>Remember, any usual options (such as redefining POIs or freezing parameters) are passed to Combine and can be added to the command line for <code>combineTool</code>.</p> <p>Info</p> <p>The option <code>-n NAME</code> should be included to avoid overwriting output files, as the jobs will be run inside the directory from which the command is issued.</p>"},{"location":"part3/runningthetool/#grid-submission-with-combinetool","title":"Grid submission with combineTool","text":"<p>For more CPU-intensive tasks, for example determining limits for complex models using toys, it is generally not feasible to compute all the results interactively. Instead, these jobs can be submitted to the Grid.</p> <p>In this example we will use the <code>HybridNew</code> method of Combine to determine an upper limit for a sub-channel of the Run 1 SM \\(H\\rightarrow\\tau\\tau\\) analysis. For full documentation, see the section on computing limits with toys.</p> <p>With this model it would take too long to find the limit in one go, so instead we create a set of jobs in which each one throws toys and builds up the test statistic distributions for a fixed value of the signal strength. These jobs can then be submitted to a batch system or to the Grid using <code>crab3</code>. 
From the set of output distributions it is possible to extract the expected and observed limits.</p> <p>For this we will use <code>combineTool.py</code>.</p> <p>First we need to build a workspace from the \(H\rightarrow\tau\tau\) datacard,</p> <pre><code>$ text2workspace.py data/tutorials/htt/125/htt_mt.txt -m 125\n$ mv data/tutorials/htt/125/htt_mt.root ./\n</code></pre> <p>To get an idea of the range of signal strength values we will need to build test-statistic distributions for, we will first use the <code>AsymptoticLimits</code> method of Combine,</p> <pre><code>$ combine -M Asymptotic htt_mt.root -m 125\n &lt;&lt; Combine &gt;&gt;\n[...]\n -- AsymptoticLimits (CLs) --\nObserved Limit: r &lt; 1.7384\nExpected  2.5%: r &lt; 0.4394\nExpected 16.0%: r &lt; 0.5971\nExpected 50.0%: r &lt; 0.8555\nExpected 84.0%: r &lt; 1.2340\nExpected 97.5%: r &lt; 1.7200\n</code></pre> <p>Based on this, a range of 0.2 to 2.0 should be suitable.</p> <p>We can use the same command for generating the distribution of test statistics with <code>combineTool</code>. The <code>--singlePoint</code> option is now enhanced to support expressions that generate a set of calls to Combine with different values. The accepted syntax is of the form MIN:MAX:STEPSIZE, and multiple comma-separated expressions can be specified.</p> <p>The script also adds an option <code>--dry-run</code>, which will not actually call Combine but just prints out the commands that would be run, e.g.,</p> <pre><code>combineTool.py -M HybridNew -d htt_mt.root --LHCmode LHC-limits --singlePoint 0.2:2.0:0.2 -T 2000 -s -1 --saveToys --saveHybridResult -m 125 --dry-run\n...\n[DRY-RUN]: combine -d htt_mt.root --LHCmode LHC-limits -T 2000 -s -1 --saveToys --saveHybridResult -M HybridNew -m 125 --singlePoint 0.2 -n .Test.POINT.0.2\n[DRY-RUN]: combine -d htt_mt.root --LHCmode LHC-limits -T 2000 -s -1 --saveToys --saveHybridResult -M HybridNew -m 125 --singlePoint 0.4 -n .Test.POINT.0.4\n[...]\n[DRY-RUN]: combine -d htt_mt.root --LHCmode LHC-limits -T 2000 -s -1 --saveToys --saveHybridResult -M HybridNew -m 125 --singlePoint 2.0 -n .Test.POINT.2.0\n</code></pre> <p>When the <code>--dry-run</code> option is removed each command will be run in sequence.</p>"},{"location":"part3/runningthetool/#grid-submission-with-crab3","title":"Grid submission with crab3","text":"<p>Submission to the grid with <code>crab3</code> works in a similar way. Before doing so, ensure that the <code>crab3</code> environment has been sourced in addition to the CMSSW environment. We will use the example of generating a grid of test-statistic distributions for limits.</p> <pre><code>$ cmsenv; source /cvmfs/cms.cern.ch/crab3/crab.sh\n$ combineTool.py -d htt_mt.root -M HybridNew --LHCmode LHC-limits --clsAcc 0 -T 2000 -s -1 --singlePoint 0.2:2.0:0.05 --saveToys --saveHybridResult -m 125 --job-mode crab3 --task-name grid-test --custom-crab custom_crab.py\n</code></pre> <p>The option <code>--custom-crab</code> should point to a python file containing a function of the form <code>custom_crab(config)</code> that will be used to modify the default crab configuration. 
You can use this to set the output site to your local grid site, or modify other options such as the voRole, or the site blacklist/whitelist.</p> <p>For example</p> <pre><code>def custom_crab(config):\n  print '&gt;&gt; Customising the crab config'\n  config.Site.storageSite = 'T2_CH_CERN'\n  config.Site.blacklist = ['SOME_SITE', 'SOME_OTHER_SITE']\n</code></pre> <p>Again it is possible to use the option <code>--dry-run</code> to see what the complete crab config will look like before actually submitting it.</p> <p>Once submitted, the progress can be monitored using the standard <code>crab</code> commands. When all jobs are completed, copy the output from your site's storage element to the local output folder.</p> <pre><code>$ crab getoutput -d crab_grid-test\n# Now we have to un-tar the output files\n$ cd crab_grid-test/results/\n$ for f in *.tar; do tar xf $f; done\n$ mv higgsCombine*.root ../../\n$ cd ../../\n</code></pre> <p>These output files should be combined with <code>hadd</code>, after which we invoke Combine as usual to calculate observed and expected limits from the merged grid, as usual.</p>"},{"location":"part3/simplifiedlikelihood/","title":"Procedure for creating and validating simplified likelihood inputs","text":"<p>This page is to give a brief outline for the creation of (potentially aggregated) predictions and their covariance to facilitate external reinterpretation using the simplified likelihood (SL) approach. Instructions for validating the simplified likelihood method (detailed in the CMS note here and \"The Simplified Likelihood Framework\" paper) are also given.</p>"},{"location":"part3/simplifiedlikelihood/#requirements","title":"Requirements","text":"<p>You need an up to date version of Combine. Note You should use the latest release of Combine for the exact commands on this page. You should be using Combine tag <code>v9.0.0</code> or higher or the latest version of the <code>112x</code> branch to follow these instructions.  </p> <p>You will find the python scripts needed to convert Combine outputs into simplified likelihood inputs under <code>test/simplifiedLikelihood</code></p> <p>If you're using the <code>102x</code> branch (not recommended), then you can obtain these scripts from here by running:  <pre><code>curl -s https://raw.githubusercontent.com/nucleosynthesis/work-tools/master/sparse-checkout-SL-ssh.sh &gt; checkoutSL.sh\nbash checkoutSL.sh\nls work-tools/stats-tools\n</code></pre></p> <p>If you also want to validate your inputs and perform fits/scans using them, you can use the package SLtools from The Simplified Likelihood Framework paper for this. <pre><code>git clone https://gitlab.cern.ch/SimplifiedLikelihood/SLtools.git\n</code></pre></p>"},{"location":"part3/simplifiedlikelihood/#producing-covariance-for-recasting","title":"Producing covariance for recasting","text":"<p>Producing the necessary predictions and covariance for recasting varies depending on whether or not control regions are explicitly included in the datacard when running fits. Instructions for cases where the control regions are and are not included are detailed below.</p> <p>Warning</p> <p>The instructions below will calculate moments based on the assumption that \\(E[x]=\\hat{x}\\), i.e it will use the maximum likelihood estimators for the yields as the expectation values. 
If instead you want to use the full definition of the moments, you can run the <code>FitDiagnostics</code> method with the <code>-t</code> option and include <code>--savePredictionsPerToy</code> and remove the other options, which will produce a tree of the toys in the output from which moments can be calculated. </p>"},{"location":"part3/simplifiedlikelihood/#type-a-control-regions-included-in-datacard","title":"Type A - Control regions included in datacard","text":"<p>For an example datacard 'datacard.txt' including two signal channels 'Signal1' and 'Signal2', make the workspace including the masking flags</p> <pre><code>text2workspace.py --channel-masks --X-allow-no-signal --X-allow-no-background datacard.txt -o datacard.root\n</code></pre> <p>Run the fit making the covariance (output saved as <code>fitDiagnosticsName.root</code>) masking the signal channels. Note that all signal channels must be masked!</p> <p><pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 2000 --setParameters mask_Signal1=1,mask_Signal2=1 --saveOverall -n Name\n</code></pre> Where \"Name\" can be specified by you.</p> <p>Outputs, including predictions and covariance, will be saved in <code>fitDiagnosticsName.root</code> folder <code>shapes_fit_b</code></p>"},{"location":"part3/simplifiedlikelihood/#type-b-control-regions-not-included-in-datacard","title":"Type B - Control regions not included in datacard","text":"<p>For an example datacard 'datacard.txt' including two signal channels 'Signal1' and 'Signal2', make the workspace</p> <pre><code>text2workspace.py --X-allow-no-signal --X-allow-no-background datacard.txt -o datacard.root\n</code></pre> <p>Run the fit making the covariance (output saved as <code>fitDiagnosticsName.root</code>) setting no pre-fit signal contribution. Note we must set <code>--preFitValue 0</code> in this case since we will be using the pre-fit uncertainties for the covariance calculation and we do not want to include the uncertainties on the signal. </p> <p><pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 2000 --saveOverall --preFitValue 0 -n Name\n</code></pre> Where \"Name\" can be specified by you.</p> <p>Outputs, including predictions and covariance, will be saved in <code>fitDiagnosticsName.root</code> folder <code>shapes_prefit</code></p> <p>In order to also extract the signal yields corresponding to <code>r=1</code> (in case you want to run the validation step later), you also need to produce a second file with the pre-fit value set to 1. For this you do not need to run many toys. To save time you can set <code>--numToysForShape</code> to a low value. </p> <pre><code>combine datacard.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 1 --saveOverall --preFitValue 1 -n Name2\n</code></pre> <p>You should check that the order of the bins in the covariance matrix is as expected.</p>"},{"location":"part3/simplifiedlikelihood/#produce-simplified-likelihood-inputs","title":"Produce simplified likelihood inputs","text":"<p>Head over to the <code>test/simplifiedLikelihoods</code> directory inside your Combine area. The following instructions depend on whether you are aggregating or not aggregating your signal regions. Choose the instructions for your case. </p>"},{"location":"part3/simplifiedlikelihood/#not-aggregating","title":"Not Aggregating","text":"<p>Run the <code>makeLHInputs.py</code> script to prepare the inputs for the simplified likelihood. 
The filter flag can be used to select only signal regions based on the channel names. To include all channels do not include the filter flag.</p> <p>The SL input must NOT include any control regions that were not masked in the fit.</p> <p>If your analysis is Type B (i.e everything in the datacard is a signal region), then you can just run </p> <pre><code>python makeLHInputs.py -i fitDiagnosticsName.root -o SLinput.root \n</code></pre> <p>If necessary (i.e as in Type B analyses) you may also need to run the same on the output of the run where the pre-fit value was set to 1. </p> <pre><code>python makeLHInputs.py -i fitDiagnosticsName2.root -o SLinput2.root \n</code></pre> <p>If you instead have a Type A analysis (some of the regions are control regions that were used to fit but not masked) then you should add the option <code>--filter SignalName</code> where <code>SignalName</code> is some string that defines the signal regions in your datacards (for example, \"SR\" is a common name for these).</p> <p>Note: If your signal regions cannot be easily identified by a string, follow the instructions below for aggregating, but define only one channel for each aggregate region. This will maintain the full information and will not actually aggregate any regions.</p>"},{"location":"part3/simplifiedlikelihood/#aggregating","title":"Aggregating","text":"<p>If aggregating based on covariance, edit the config file <code>aggregateCFG.py</code> to define aggregate regions based on channel names. Note that wildcards are supported. You can then make likelihood inputs using</p> <pre><code>python makeLHInputs.py -i fitDiagnosticsName.root -o SLinput.root --config aggregateCFG.py\n</code></pre> <p>At this point you have the inputs as ROOT files necessary to publish and run the simplified likelihood. </p>"},{"location":"part3/simplifiedlikelihood/#validating-the-simplified-likelihood-approach","title":"Validating the simplified likelihood approach","text":"<p>The simplified likelihood relies on several assumptions (detailed in the documentation at the top). To test the validity for your analysis, statistical results between Combine and the simplified likelihood can be compared. </p> <p>We will use the package SLtools from the Simplified Likelihood Paper for this. The first step is to convert the ROOT files into python configs to run in the tool. </p>"},{"location":"part3/simplifiedlikelihood/#convert-root-to-python","title":"Convert ROOT to Python","text":"<p>If you followed the steps above, you have all of the histograms already necessary to generate the python configs. The script <code>test/simplifiedLikelihoods/convertSLRootToPython.py</code>  can be used to do the conversion. 
Just provide the following options when running with python.</p> <ul> <li><code>-O/--outname</code> : The output python file containing the model (default is <code>test.py</code>)</li> <li><code>-s/--signal</code> : The signal histogram, should be of format <code>file.root:location/to/histogram</code></li> <li><code>-b/--background</code> : The background histogram, should be of format <code>file.root:location/to/histogram</code></li> <li><code>-d/--data</code> : The data TGraph, should be of format <code>file.root:location/to/graph</code></li> <li><code>-c/--covariance</code> : The covariance TH2 histogram, should be of format <code>file.root:location/to/histogram</code></li> </ul> <p>For example, to get the correct output from a Type B analysis with no aggregating, you can run </p> <pre><code>python test/simplifiedLikelihoods/convertSLRootToPython.py -O mymodel.py -s SLinput.root:shapes_prefit/total_signal -b SLinput.root:shapes_prefit/total_M1 -d SLinput.root:shapes_prefit/total_data -c SLinput.root:shapes_prefit/total_M2\n</code></pre> <p>The output will be a python file with the right format for the SL tool. You can mix different ROOT files for these inputs. Note that the <code>SLtools</code> package also has some tools to convert <code>.yaml</code>-based inputs into the python config for you.</p>"},{"location":"part3/simplifiedlikelihood/#run-a-likelihood-scan-with-the-sl","title":"Run a likelihood scan with the SL","text":"<p>If you have checked out the SLtools, you can create a simple python script such as the one below to produce a scan of the simplified likelihood from your inputs.</p> <pre><code>#! /usr/bin/env python\nimport simplike as sl\n\nexec(open(\"mymodel.py\").read())\nslp1 = sl.SLParams(background, covariance, obs=data, sig=signal)\n\nimport numpy as np\nnpoints = 50\nmus = np.arange(-0.5, 2, (2+0.5)/npoints)\ntmus1 = [slp1.tmu(mu) for mu in mus]\nfrom matplotlib import pyplot as plt\nplt.plot(mus,tmus1)\nplt.show()\n</code></pre> <p>Where the <code>mymodel.py</code> config is a simple python file defined as;</p> <ul> <li><code>data</code> : A python array of observed data, one entry per bin.</li> <li><code>background</code> : A python array of expected background, one entry per bin.</li> <li><code>covariance</code> : A python array of the covariance between expected backgrounds. The format is a flat array which is converted into a 2D array inside the tool</li> <li><code>signal</code> : A python array of the expected signal, one entry per bin. 
This should be replaced with whichever signal model you are testing.</li> </ul> <p>This <code>model.py</code> can also just be the output of the previous section converted from the ROOT files for you.</p> <p>The example below is from the note CMS-NOTE-2017-001</p> Show example <pre><code>\nimport numpy\nimport array\n\nname = \"CMS-NOTE-2017-001 dummy model\"\nnbins = 8\ndata = array.array('d',[1964,877,354,182,82,36,15,11])\nbackground = array.array('d',[2006.4,836.4,350.,147.1,62.0,26.2,11.1,4.7])\nsignal = array.array('d',[47,29.4,21.1,14.3,9.4,7.1,4.7,4.3])\ncovariance = array.array('d', [ 18774.2, -2866.97, -5807.3, -4460.52, -2777.25, -1572.97, -846.653, -442.531, -2866.97, 496.273, 900.195, 667.591, 403.92, 222.614, 116.779, 59.5958, -5807.3, 900.195, 1799.56, 1376.77, 854.448, 482.435, 258.92, 134.975, -4460.52, 667.591, 1376.77, 1063.03, 664.527, 377.714, 203.967, 106.926, -2777.25, 403.92, 854.448, 664.527, 417.837, 238.76, 129.55, 68.2075, -1572.97, 222.614, 482.435, 377.714, 238.76, 137.151, 74.7665, 39.5247, -846.653, 116.779, 258.92, 203.967, 129.55, 74.7665, 40.9423, 21.7285, -442.531, 59.5958, 134.975, 106.926, 68.2075, 39.5247, 21.7285, 11.5732])\n</code>"},{"location":"part3/simplifiedlikelihood/#example-using-tutorial-datacard","title":"Example using tutorial datacard","text":"<p>For this example, we will use the tutorial datacard <code>data/tutorials/longexercise/datacard_part3.txt</code>. This datacard is of Type B since there are no control regions (all regions are signal regions). </p>\n<p>First, we will create the binary file (run <code>text2workspace</code>)\n<pre><code>text2workspace.py --X-allow-no-signal --X-allow-no-background data/tutorials/longexercise/datacard_part3.txt  -m 200\n</code></pre></p>\n<p>And next, we will generate the covariance between the bins of the background model. \n<pre><code>combine data/tutorials/longexercise/datacard_part3.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 10000 --saveOverall --preFitValue 0   -n SimpleTH1 -m 200\n\ncombine data/tutorials/longexercise/datacard_part3.root -M FitDiagnostics --saveShapes --saveWithUnc --numToysForShape 1 --saveOverall --preFitValue 1   -n SimpleTH1_Signal1 -m 200\n</code></pre>\nWe will also want to compare our scan to that from the full likelihood, which we can get as usual from Combine. </p>\n<pre><code>combine -M MultiDimFit data/tutorials/longexercise/datacard_part3.root --rMin -0.5 --rMax 2 --algo grid -n SimpleTH1 -m 200\n</code></pre>\n<p>Next, since we do not plan to aggregate any of the bins, we will follow the instructions for this and pick out the right covariance matrix.</p>\n<pre><code>python test/simplifiedLikelihoods/makeLHInputs.py -i fitDiagnosticsSimpleTH1.root -o SLinput.root \n\npython test/simplifiedLikelihoods/makeLHInputs.py -i fitDiagnosticsSimpleTH1_Signal1.root -o SLinput_Signal1.root \n</code></pre>\n<p>We now have everything we need to provide the simplified likelihood inputs:</p>\n<pre><code>$ root -l SLinput.root\nroot [0] .ls\n\nAttaching file SLinput.root as _file0...\n(TFile *) 0x3667820\nroot [1] .ls\nTFile**         SLinput.root\n TFile*         SLinput.root\n  KEY: TDirectoryFile   shapes_fit_b;1  shapes_fit_b\n  KEY: TDirectoryFile   shapes_prefit;1 shapes_prefit\n  KEY: TDirectoryFile   shapes_fit_s;1  shapes_fit_s\n</code></pre>\n<p>We can convert this to a python module that we can use to run a scan with the <code>SLtools</code> package. Note, since we have a Type B datacard, we will be using the pre-fit covariance matrix. 
Also, this means we want to take the signal from the file where the prefit value of <code>r</code> was 1. </p>\n<pre><code>python test/simplifiedLikelihoods/convertSLRootToPython.py -O mymodel.py -s SLinput_Signal1.root:shapes_prefit/total_signal -b SLinput.root:shapes_prefit/total_M1 -d SLinput.root:shapes_prefit/total_data -c SLinput.root:shapes_prefit/total_M2\n</code></pre>\n<p>We can compare the profiled likelihood scans from our simplified likelihood (using the python file we just created) and from the full likelihood (that we created with Combine). For the former, we need to first check out the <code>SLtools</code> package</p>\n<pre><code>git clone https://gitlab.cern.ch/SimplifiedLikelihood/SLtools.git\nmv higgsCombineSimpleTH1.MultiDimFit.mH200.root SLtools/ \nmv mymodel.py SLtools/\ncd SLtools\n</code></pre>\n<p>The script below will create a plot of the comparison for us. </p>\n<p><pre><code>#! /usr/bin/env python\nimport simplike as sl\n\nexec(open(\"mymodel.py\").read())\n\nslp1 = sl.SLParams(background, covariance, obs=data, sig=signal)\n\nimport ROOT \nfi = ROOT.TFile.Open(\"higgsCombineSimpleTH1.MultiDimFit.mH200.root\")\ntr = fi.Get(\"limit\")\n\npoints = []\nfor i in range(tr.GetEntries()):\n  tr.GetEntry(i)\n  points.append([tr.r,2*tr.deltaNLL])\npoints.sort()\n\nmus2=[pt[0] for pt in points]\ntmus2=[pt[1] for pt in points]\n\nimport numpy as np\nnpoints = 50\nmus1 = np.arange(-0.5, 2, (2+0.5)/npoints)\ntmus1 = [slp1.tmu(mu) for mu in mus1]\n\nfrom matplotlib import pyplot as plt\nplt.plot(mus1,tmus1,label='simplified likelihood')\nplt.plot(mus2,tmus2,label='full likelihood')\nplt.legend()\nplt.xlabel(\"$\\mu$\")\nplt.ylabel(\"$-2\\Delta \\ln L$\")\n\nplt.savefig(\"compareLH.pdf\")\n</code></pre>\nThis will produce a figure like the one below. </p>\n<p></p>\n<p>It is also possible to include the third moment of each bin to improve the precision of the simplified likelihood [ JHEP 64 2019 ]. The necessary information is stored in the outputs from Combine, therefore you just need to include the option <code>-t SLinput.root:shapes_prefit/total_M3</code> in the options list for <code>convertSLRootToPython.py</code> to include this in the model file. The third moment information can be included in <code>SLtools</code> by using <code>sl.SLParams(background, covariance, third_moment, obs=data, sig=signal)</code></p>"},{"location":"part3/validation/","title":"Validating datacards","text":"<p>This section covers the main features of the datacard validation tool that helps you spot potential problems with your datacards at an early stage. The tool is implemented in the <code>CombineHarvester/CombineTools</code> subpackage. See the <code>combineTool</code> section of the documentation for checkout instructions for the full tool, which is needed for this task.</p> <p>The datacard validation tool contains a number of checks. It is possible to call subsets of these checks when creating datacards within <code>CombineHarvester</code>. However, for now we will only describe the usage of the validation tool on already existing datacards. 
If you create your datacards with <code>CombineHarvester</code> and would like to include the checks at the datacard creation stage, please contact us via https://cms-talk.web.cern.ch/c/physics/cat/cat-stats/279.</p>"},{"location":"part3/validation/#how-to-use-the-tool","title":"How to use the tool","text":"<p>The basic syntax is:</p> <pre><code>ValidateDatacards.py datacard.txt\n</code></pre> <p>This will write the results of the checks to a json file (default: <code>validation.json</code>), and will print a summary to the screen, for example:</p> <pre><code>================================\n=======Validation results=======\n================================\n&gt;&gt;&gt;There were  7800 warnings of type  'up/down templates vary the yield in the same direction'\n&gt;&gt;&gt;There were  5323 warnings of type  'up/down templates are identical'\n&gt;&gt;&gt;There were no warnings of type  'At least one of the up/down systematic uncertainty templates is empty'\n&gt;&gt;&gt;There were  4406 warnings of type  'Uncertainty has normalisation effect of more than 10.0%'\n&gt;&gt;&gt;There were  8371 warnings of type  'Uncertainty probably has no genuine shape effect'\n&gt;&gt;&gt;There were no warnings of type 'Empty process'\n&gt;&gt;&gt;There were no warnings of type 'Bins of the template empty in background'\n&gt;&gt;&gt;INFO: there were  169  alerts of type  'Small signal process'\n</code></pre> <p>The meaning of each of these warnings/alerts is discussed below.</p> <p>The following arguments are possible: <pre><code>usage: ValidateDatacards.py [-h] [--printLevel PRINTLEVEL] [--readOnly]\n                            [--checkUncertOver CHECKUNCERTOVER]\n                            [--reportSigUnder REPORTSIGUNDER]\n                            [--jsonFile JSONFILE] [--mass MASS]\n                            cards\n\npositional arguments:\n  cards                 Specifies the full path to the datacards to check\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --printLevel PRINTLEVEL, -p PRINTLEVEL\n                        Specify the level of info printing (0-3, default:1)\n  --readOnly            If this is enabled, skip validation and only read the\n                        output json\n  --checkUncertOver CHECKUNCERTOVER, -c CHECKUNCERTOVER\n                        Report uncertainties which have a normalization effect\n                        larger than this fraction (default:0.1)\n  --reportSigUnder REPORTSIGUNDER, -s REPORTSIGUNDER\n                        Report signals contributing less than this fraction of\n                        the total in a channel (default:0.001)\n  --jsonFile JSONFILE   Path to the json file to read/write results from\n                        (default:validation.json)\n  --mass MASS           Signal mass to use (default:*)\n</code></pre> <code>printLevel</code> adjusts how much information is printed to the screen. When set to 0, the results are only written to the json file, but not to the screen. When set to 1 (default), the number of warnings/alerts of a given type is printed to the screen. Setting this option to 2 prints the same information as level 1, and additionally prints which uncertainties are affected (if the check is related to uncertainties) or which processes are affected (if the check is related only to processes). 
When <code>printLevel</code> is set to 3, the information from level 2 is printed, and additionally for checks related to uncertainties it prints which processes are affected.</p> <p>To print information to screen, the script parses the json file that contains the results of the validation checks. Therefore, if you have already run the validation tool and produced this json file, you can simply change the <code>printLevel</code> by re-running the tool with <code>printLevel</code> set to a different value, and enabling the <code>--readOnly</code> option.</p> <p>The options <code>--checkUncertOver</code> and <code>--reportSigUnder</code> will be described in more detail in the section that discusses the checks for which they are relevant.</p> <p>Note: the <code>--mass</code> argument should only be set if you normally use it when running Combine; otherwise you can leave it at the default.</p> <p>The datacard validation tool is primarily intended for shape (histogram) based analyses. However, when running on a parametric model or counting experiment the checks for small signal processes, empty processes, and uncertainties with large normalization effects can still be performed. </p>"},{"location":"part3/validation/#details-on-checks","title":"Details on checks","text":""},{"location":"part3/validation/#uncertainties-with-large-normalization-effect","title":"Uncertainties with large normalization effect","text":"<p>This check highlights nuisance parameters that have a normalization effect larger than the fraction set by the option <code>--checkUncertOver</code>. The default value is 0.1, meaning that any uncertainties with a normalization effect larger than 10% are flagged up.</p> <p>The output file contains the following information for this check:</p> <pre><code>largeNormEff: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> <p>Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' normalization effects.</p>"},{"location":"part3/validation/#at-least-one-of-the-updown-systematic-templates-is-empty","title":"At least one of the Up/Down systematic templates is empty","text":"<p>For shape uncertainties, this check reports all cases where the up and/or down template(s) are empty, when the nominal template is not.</p> <p>The output file contains the following information for this check:</p> <p><pre><code>emptySystematicShape: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' normalization effects.</p>"},{"location":"part3/validation/#identical-updown-templates","title":"Identical Up/Down templates","text":"<p>This check applies to shape uncertainties only, and will highlight cases where the shape uncertainties have identical Up and Down templates (identical in shape and in normalization).</p> <p>The information given in the output file for this check is:</p> <p><pre><code>uncertTemplSame: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' 
normalization effects.</p>"},{"location":"part3/validation/#up-and-down-templates-vary-the-yield-in-the-same-direction","title":"Up and Down templates vary the yield in the same direction","text":"<p>Again, this check only applies to shape uncertainties - it highlights cases where the 'Up' template and the 'Down' template both have the effect of increasing or decreasing the normalization of a process.</p> <p>The information given in the output file for this check is:</p> <p><pre><code>uncertVarySameDirect: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"value_d\":&lt;value&gt;\n        \"value_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>value_u</code> and <code>value_d</code> are the values of the 'up' and 'down' normalization effects.</p>"},{"location":"part3/validation/#uncertainty-probably-has-no-genuine-shape-effect","title":"Uncertainty probably has no genuine shape effect","text":"<p>In this check, applying only to shape uncertainties, the normalized nominal templates are compared with the normalized templates for the 'up' and 'down' systematic variations. The script calculates \\(\\Sigma_i \\frac{2|\\text{up}(i) - \\text{nominal}(i)|}{|\\text{up}(i)| + |\\text{nominal}(i)|}\\) and \\(\\Sigma_i \\frac{2|\\text{down}(i) - \\text{nominal}(i)|}{|\\text{down}(i)| + |\\text{nominal}(i)|}\\).</p> <p>where the sums run over all bins in the histograms, and 'nominal', 'up', and 'down' are the central template and up and down varied templates, all normalized.</p> <p>If both sums are smaller than 0.001, the uncertainty is flagged up as probably not having a genuine shape effect. This means a 0.1% variation in one bin is enough to avoid being reported, but many smaller variations can also sum to be large enough to pass the threshold. It should be noted that the chosen threshold is somewhat arbitrary: if an uncertainty is flagged up as probably having no genuine shape effect you should take this as a starting point to investigate. </p> <p>The information given in the output file for this check is:</p> <p><pre><code>smallShapeEff: {\n  &lt;Uncertainty name&gt;: {\n    &lt;analysis category&gt;: {\n      &lt;process&gt;: {\n        \"diff_d\":&lt;value&gt;\n        \"diff_u\":&lt;value&gt;\n      } \n    }\n  }\n}\n</code></pre> Where <code>diff_d</code> and <code>diff_u</code> are the values of the sums described above for the 'down' variation and the 'up' variation.</p>"},{"location":"part3/validation/#empty-process","title":"Empty process","text":"<p>If a process is listed in the datacard, but the yield is 0, it is flagged up by this check. </p> <p>The information given in the output file for this check is:</p> <pre><code>emptyProcessShape: {\n  &lt;analysis category&gt;: {\n    &lt;process1&gt;,\n    &lt;process2&gt;,\n    &lt;process3&gt;\n  }\n}\n</code></pre>"},{"location":"part3/validation/#bins-that-have-signal-but-no-background","title":"Bins that have signal but no background","text":"<p>For shape-based analyses, this checks whether there are any bins in the nominal templates that have signal contributions, but no background contributions. 
</p> <p>The information given in the output file for this check is:</p> <pre><code>emptyBkgBin: {\n  &lt;analysis category&gt;: {\n    &lt;bin_nr1&gt;,\n    &lt;bin_nr2&gt;,\n    &lt;bin_nr3&gt;\n  }\n}\n</code></pre>"},{"location":"part3/validation/#small-signal-process","title":"Small signal process","text":"<p>This reports signal processes that contribute less than the fraction specified by <code>--reportSigUnder</code> (default 0.001 = 0.1%) of the total signal in a given category. This produces an alert, not a warning, as it does not hint at a potential problem. However, in analyses with many signal contributions and with long fitting times, it can be helpful to remove signals from a category in which they do not contribute a significant amount.</p> <p>The information given in the output file for this check is:</p> <pre><code>smallSignalProc: {\n  &lt;analysis category&gt;: {\n    &lt;process&gt;: {\n      \"sigrate_tot\":&lt;value&gt;\n      \"procrate\":&lt;value&gt;\n    } \n  }\n}\n</code></pre> <p>Where <code>sigrate_tot</code> is the total signal yield in the analysis category and <code>procrate</code> is the yield of signal process <code>&lt;process&gt;</code>.</p>"},{"location":"part3/validation/#what-to-do-in-case-of-a-warning","title":"What to do in case of a warning","text":"<p>These checks are mostly a tool to help you investigate your datacards: a warning does not necessarily mean there is a mistake in your datacard, but you should use it as a starting point to investigate. Empty processes and empty shape uncertainties connected to nonempty processes will most likely be unintended. The same holds for cases where the 'up' and 'down' shape templates are identical. If there are bins that contain signal but no background contributions, this should be corrected. See the FAQ for more information on that point.</p> <p>For other checks it depends on the situation whether there is a problem or not. Some examples:</p> <ul> <li>An analysis-specific nonclosure uncertainty could be larger than 10%. A theoretical uncertainty in the ttbar normalization probably should not be.</li> <li>In an analysis with a selection that requires the presence of exactly 1 jet, 'up' and 'down' variations in the jet energy uncertainty could both change the process normalization in the same direction. 
(But they do not have to!)</li> </ul> <p>As always: think about whether you expect a check to yield a warning in case of your analysis, and if not, investigate to make sure there are no issues.</p>"},{"location":"part4/usefullinks/","title":"Useful links and further reading","text":""},{"location":"part4/usefullinks/#tutorials-and-reading-material","title":"Tutorials and reading material","text":"<p>There are several tutorials that have been run over the last few years with instructions and examples for running the Combine tool.</p> <p>Tutorial Sessions:</p> <ul> <li>1st tutorial 17th Nov 2015.</li> <li>2nd tutorial 30th Nov 2016.</li> <li>3rd tutorial 29th Nov 2017</li> <li>4th tutorial 31st Oct 2018 - Latest for <code>81x-root606</code> branch.</li> <li>5th tutorial 2nd-4th Dec 2019</li> <li>6th tutorial 14th-16th Dec 2020 - Latest for <code>102x</code> branch</li> <li>7th tutorial 3rd Feb 2023 - Uses <code>113x</code> branch</li> </ul> <p>Worked examples from Higgs analyses using Combine:</p> <ul> <li>The CMS DAS at CERN 2014</li> <li>The CMS DAS at DESY 2018</li> </ul> <p>Higgs combinations procedures</p> <ul> <li> <p>Conventions to be used when preparing inputs for Higgs combinations</p> </li> <li> <p>CMS AN-2011/298 Procedure for the LHC Higgs boson search combination in summer 2011. This describes in more detail some of the methods used in Combine.</p> </li> </ul>"},{"location":"part4/usefullinks/#citations","title":"Citations","text":"<p>The paper for the Combine tool is available here. In addition, you can cite the following papers for the methods used within the tool; </p> <ul> <li> <p>Summer 2011 public ATLAS-CMS note for any Frequentist limit setting procedures with toys or Bayesian limits, constructing likelihoods, descriptions of nuisance parameter options (like log-normals (<code>lnN</code>) or gamma (<code>gmN</code>), and for definitions of test-statistics.</p> </li> <li> <p>CCGV paper if you use any of the asymptotic (eg with <code>-M AsymptoticLimits</code> or <code>-M Significance</code> approximations for limits/p-values.</p> </li> <li> <p>If you use the Barlow-Beeston approach to MC stat (bin-by-bin) uncertainties, please cite their paper Barlow-Beeston. You should also cite this note if you use the <code>autoMCStats</code> directive to produce a single parameter per bin.</p> </li> <li> <p>If you use <code>shape</code> uncertainties for template (<code>TH1</code> or <code>RooDataHist</code>) based datacards, you can cite this note from J. 
Conway.</p> </li> <li> <p>If you are extracting uncertainties from LH scans - i.e. using \\(-2\\Delta\\log\\mathcal{L}=1\\) etc. for the 1\\(\\sigma\\) intervals, you can cite either the ATLAS+CMS or CMS Higgs paper.</p> </li> <li> <p>There is also a long list of citation recommendations from the CMS Statistics Committee pages.</p> </li> </ul>"},{"location":"part4/usefullinks/#combine-based-packages","title":"Combine based packages","text":"<ul> <li> <p>SWGuideHiggs2TauLimits (Deprecated)</p> </li> <li> <p>ATGCRooStats</p> </li> <li> <p>CombineHarvester</p> </li> </ul>"},{"location":"part4/usefullinks/#contacts","title":"Contacts","text":"<ul> <li>CMStalk forum: https://cms-talk.web.cern.ch/c/physics/cat/cat-stats/279</li> </ul>"},{"location":"part4/usefullinks/#cms-statistics-committee","title":"CMS Statistics Committee","text":"<ul> <li>You can find much more statistics theory and recommendations on various statistical procedures in the CMS Statistics Committee Twiki Pages</li> </ul>"},{"location":"part4/usefullinks/#faq","title":"FAQ","text":"<ul> <li>Why does Combine have trouble with bins that have zero expected contents?<ul> <li>If you are computing only upper limits, and your zero-prediction bins are all empty in data, then you can just set the background to a very small value instead of zero as the computation is regular for background going to zero (e.g. a counting experiment with \\(B\\leq1\\) will have essentially the same expected limit and observed limit as one with \\(B=0\\)). If you are computing anything else, e.g. p-values, or if your zero-prediction bins are not empty in data, you're out of luck, and you should find a way to get a reasonable background prediction there (and set an uncertainty on it, as per the point above)</li> </ul> </li> <li>How can an uncertainty be added to a zero quantity?<ul> <li>You can put an uncertainty even on a zero event yield if you use a gamma distribution. That is in fact the more proper way of doing it if the prediction of zero comes from the limited size of your MC or data sample used to compute it.</li> </ul> </li> <li>Why does changing the observation in data affect my expected limit?<ul> <li>The expected limit (if using either the default behaviour of <code>-M AsymptoticLimits</code> or using the <code>LHC-limits</code> style limit setting with toys) uses the post-fit expectation of the background model to generate toys. This means that first the model is fit to the observed data before toy generation. See the sections on blind limits and toy generation to avoid this behavior. </li> </ul> </li> <li>How can I deal with an interference term which involves a negative contribution?<ul> <li>You will need to set up a specific PhysicsModel to deal with this, however you can see this section, which shows how to implement such a model that can incorporate a negative contribution to the physics process.</li> </ul> </li> <li>How does Combine work?<ul> <li>That is not a question that can be answered without someone's head exploding; please try to formulate something specific.</li> </ul> </li> <li>What does fit status XYZ mean? <ul> <li>Combine reports the fit status in some routines (for example in the <code>FitDiagnostics</code> method). These are typically the status of the last call from Minuit. For details on the meanings of these status codes see the Minuit2Minimizer documentation page.</li> </ul> </li> <li>Why does my fit not converge? <ul> <li>There are several reasons why some fits may not converge. 
Often some indication can be obtained from the <code>RooFitResult</code> or status that you will see information from when using the <code>--verbose X</code> (with \\(X&gt;2\\)) option. Sometimes however, it can be that the likelihood for your data is very unusual. You can get a rough idea about what the likelihood looks like as a function of your parameters (POIs and nuisances) using <code>combineTool.py -M FastScan -w myworkspace.root</code> (use --help for options, see also here.</li> <li>We have often seen that fits in Combine using <code>RooCBShape</code> as a parametric function will fail. This is related to an optimization that fails. You can try to fix the problem as described in this issue: issues#347 (i.e add the option <code>--X-rtd ADDNLL_CBNLL=0</code>).</li> </ul> </li> <li>Why does the fit/fits take so long? <ul> <li>The minimization routines are common to many methods in Combine. You can tune the fits using the generic optimization command line options described here. For example, setting the default minimizer strategy to 0 can greatly improve the speed, since this avoids running HESSE. In calculations such as <code>AsymptoticLimits</code>, HESSE is not needed and hence this can be done, however, for <code>FitDiagnostics</code> the uncertainties and correlations are part of the output, so using strategy 0 may not be particularly accurate. </li> </ul> </li> <li>Why are the results for my counting experiment so slow or unstable? <ul> <li>There is a known issue with counting experiments with large numbers of events that will cause unstable fits or even the fit to fail. You can avoid this by creating a \"fake\" shape datacard (see this section from the setting up the datacards page). The simplest way to do this is to run <code>combineCards.py -S mycountingcard.txt &gt; myshapecard.txt</code>. You may still find that your parameter uncertainties are not correct when you have large numbers of events. This can be often fixed using the <code>--robustHesse</code> option. An example of this issue is detailed here. </li> </ul> </li> <li>Why do some of my nuisance parameters have uncertainties &gt; 1?<ul> <li>When running <code>-M FitDiagnostics</code> you may find that the post-fit uncertainties of the nuisances are \\(&gt; 1\\) (or larger than their pre-fit values). If this is the case, you should first check if the same is true when adding the option <code>--minos all</code>, which will invoke MINOS to scan the likelihood as a function of these parameters to determine the crossing at \\(-2\\times\\Delta\\log\\mathcal{L}=1\\) rather than relying on the estimate from HESSE. However, this is not guaranteed to succeed, in which case you can scan the likelihood yourself using <code>MultiDimFit</code> (see here ) and specifying the option <code>--poi X</code> where <code>X</code> is your nuisance parameter. </li> </ul> </li> <li>How can I avoid using the data? <ul> <li>For almost all methods, you can use toy data (or an Asimov dataset) in place of the real data for your results to be blind. You should be careful however as in some methods, such as <code>-M AsymptoticLimits</code> or <code>-M HybridNew --LHCmode LHC-limits</code> or any other method using the option <code>--toysFrequentist</code>, the data will be used to determine the most likely nuisance parameter values (to determine the so-called a-posteriori expectation). See the section on toy data generation for details on this. 
</li> </ul> </li> <li>What if my nuisance parameters have correlations which are not 0 or 1?<ul> <li>Combine is designed under the assumption that each source of systematic uncertainty is uncorrelated with the other sources. If you have a case where some pair (or set) of nuisances have some known correlation structure, you can compute the eigenvectors of their correlation matrix and provide these diagonalised nuisances to Combine. You can also model partial correlations, between different channels or data taking periods, of a given nuisance parameter using the <code>combineTool</code> as described in this page. </li> </ul> </li> <li>My nuisances are (artificially) constrained and/or the impact plots show some strange behaviour, especially after including MC statistical uncertainties. What can I do?<ul> <li>Depending on the details of the analysis, several solutions can be adopted to mitigate these effects. We advise running the validation tools first, to identify possible redundant shape uncertainties that can be safely eliminated or replaced with lnN ones. Any remaining artificial constraints should be studied. Possible mitigating strategies can be to (a) smooth the templates or (b) adopt some rebinning in order to reduce statistical fluctuations in the templates. A description of possible strategies and effects can be found in this talk by Margaret Eminizer.</li> </ul> </li> <li>What do CLs, CLs+b and CLb in the code mean?<ul> <li>The names CLs+b and CLb that are found within some of the <code>RooStats</code> tools are rather outdated and should instead be referred to as p-values - \\(p_{\\mu}\\) and \\(1-p_{b}\\), respectively. We often use the CLs criterion (which is itself not a p-value) in high energy physics, as it is designed to avoid excluding a signal model when the sensitivity is low (and it protects against exclusions due to underfluctuations in the data). When excluding a signal model, the p-value \\(p_{\\mu}\\) typically refers to the p-value under the signal+background hypothesis, assuming a particular value of the signal strength (\\(\\mu\\)), while \\(p_{b}\\) is the p-value under the background-only hypothesis. You can find more details and definitions of the CLs criterion and \\(p_{\\mu}\\) and \\(p_{b}\\) in section 39.4.2.4 of the 2016 PDG review. </li> </ul> </li> </ul>"},{"location":"part5/longexercise/","title":"Main Features of Combine (Long Exercises)","text":"<p>This exercise is designed to give a broad overview of the tools available for statistical analysis in CMS using the combine tool. Combine is a high-level tool for building <code>RooFit</code>/<code>RooStats</code> models and running common statistical methods. We will cover the typical aspects of setting up an analysis and producing the results, as well as look at ways in which we can diagnose issues and get a deeper understanding of the statistical model. This is a long exercise - expect to spend some time on it, especially if you are new to Combine. If you get stuck while working through this exercise or have questions specifically about the exercise, you can ask them on this mattermost channel. Finally, we also provide solutions to some of the questions that are asked as part of the exercise. These are available here.</p> <p>For the majority of this course we will work with a simplified version of a real analysis, which nonetheless has many of the features of the full analysis. The analysis is a search for an additional heavy neutral Higgs boson decaying to tau lepton pairs. 
Such a signature is predicted in many extensions of the standard model, in particular the minimal supersymmetric standard model (MSSM). You can read about the analysis in the paper here. The statistical inference makes use of a variable called the total transverse mass (\\(M_{\\mathrm{T}}^{\\mathrm{tot}}\\)) that provides good discrimination between the resonant high-mass signal and the main backgrounds, which have a falling distribution in this high-mass region. The events selected in the analysis are split into a several categories which target the main di-tau final states as well as the two main production modes: gluon-fusion (ggH) and b-jet associated production (bbH). One example is given below for the fully-hadronic final state in the b-tag category which targets the bbH signal:</p> <p></p> <p>Initially we will start with the simplest analysis possible: a one-bin counting experiment using just the high \\(M_{\\mathrm{T}}^{\\mathrm{tot}}\\) region of this distribution, and from there each section of this exercise will expand on this, introducing a shape-based analysis and adding control regions to constrain the backgrounds.</p>"},{"location":"part5/longexercise/#background","title":"Background","text":"<p>You can find a presentation with some more background on likelihoods and extracting confidence intervals here. A presentation that discusses limit setting in more detail can be found here. If you are not yet familiar with these concepts, or would like to refresh your memory, we recommend that you have a look at these presentations before you start with the exercise.</p>"},{"location":"part5/longexercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>, please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>Now we will move to the working directory for this tutorial, which contains all the inputs needed to run the exercises below: <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/longexercise/\n</code></pre></p>"},{"location":"part5/longexercise/#part-1-a-one-bin-counting-experiment","title":"Part 1: A one-bin counting experiment","text":"<p>Topics covered in this section:</p> <ul> <li>A: Computing limits using the asymptotic approximation</li> <li>Advanced section: B: Computing limits with toys</li> </ul> <p>We will begin with a simplified version of a datacard from the MSSM \\(\\phi\\rightarrow\\tau\\tau\\) analysis that has been converted to a one-bin counting experiment, as described above. While the full analysis considers a range of signal mass hypotheses, we will start by considering just one: \\(m_{\\phi}\\)=800GeV. 
Click the text below to study the datacard (<code>datacard_part1.txt</code> in the <code>longexercise</code> directory):</p> Show datacard <pre><code>imax    1 number of bins\njmax    4 number of processes minus 1\nkmax    * number of nuisance parameters\n--------------------------------------------------------------------------------\n--------------------------------------------------------------------------------\nbin          signal_region\nobservation  10.0\n--------------------------------------------------------------------------------\nbin                      signal_region   signal_region   signal_region   signal_region   signal_region\nprocess                  ttbar           diboson         Ztautau         jetFakes        bbHtautau\nprocess                  1               2               3               4               0\nrate                     4.43803         3.18309         3.7804          1.63396         0.711064\n--------------------------------------------------------------------------------\nCMS_eff_b          lnN   1.02            1.02            1.02            -               1.02\nCMS_eff_t          lnN   1.12            1.12            1.12            -               1.12\nCMS_eff_t_highpt   lnN   1.1             1.1             1.1             -               1.1\nacceptance_Ztautau lnN   -               -               1.08            -               -\nacceptance_bbH     lnN   -               -               -               -               1.05\nacceptance_ttbar   lnN   1.005           -               -               -               -\nnorm_jetFakes      lnN   -               -               -               1.2             -\nxsec_diboson       lnN   -               1.05            -               -               -\n</code></pre> <p>The layout of the datacard is as follows:</p> <ul> <li>At the top are the numbers <code>imax</code>, <code>jmax</code> and <code>kmax</code> representing the number of bins, processes and nuisance parameters respectively. Here a \"bin\" can refer to a literal single event count as in this example, or a full distribution we are fitting, in general with many histogram bins, as we will see later. We will refer to both as \"channels\" from now on. It is possible to replace these numbers with <code>*</code> and they will be deduced automatically.</li> <li>The first line starting with <code>bin</code> gives a unique label to each channel, and the following line starting with <code>observation</code> gives the number of events observed in data.</li> <li>In the remaining part of the card there are several columns: each one represents one process in one channel. The first four lines labelled <code>bin</code>, <code>process</code>, <code>process</code> and <code>rate</code> give the channel label, the process label, a process identifier (<code>&lt;=0</code> for signal, <code>&gt;0</code> for background) and the number of expected events respectively.</li> <li>The remaining lines describe sources of systematic uncertainty. Each line gives the name of the uncertainty, (which will become the name of the nuisance parameter inside our RooFit model), the type of uncertainty (\"lnN\" = log-normal normalisation uncertainty) and the effect on each process in each channel. E.g. a 20% uncertainty on the yield is written as 1.20.</li> <li>It is also possible to add a hash symbol (<code>#</code>) at the start of a line, which Combine will then ignore when it reads the card.</li> </ul> <p>We can now run Combine directly using this datacard as input. 
The general format for running Combine is:</p> <pre><code>combine -M [method] [datacard] [additional options...]\n</code></pre>"},{"location":"part5/longexercise/#a-computing-limits-using-the-asymptotic-approximation","title":"A: Computing limits using the asymptotic approximation","text":"<p>As we are searching for a signal process that does not exist in the standard model, it's natural to set an upper limit on the cross section times branching fraction of the process (assuming our dataset does not contain a significant discovery of new physics). Combine has dedicated methods for calculating upper limits. The most commonly used one is <code>AsymptoticLimits</code>, which implements the CLs criterion and uses the modified profile likelihood ratio for upper limits as the default test statistic. As the name implies, the test statistic distributions are determined analytically in the asymptotic approximation, so there is no need for more time-intensive toy throwing and fitting. Try running the following command:</p> <pre><code>combine -M AsymptoticLimits datacard_part1.txt -n .part1A\n</code></pre> <p>You should see the results of the observed and expected limit calculations printed to the screen. Here we have added an extra option, <code>-n .part1A</code>, which is short for <code>--name</code>, and is used to label the output file Combine produces, which in this case will be called <code>higgsCombine.part1A.AsymptoticLimits.mH120.root</code>. The file name depends on the options we ran with, and is of the form: <code>higgsCombine[name].[method].mH[mass].root</code>. The file contains a TTree called <code>limit</code> which stores the numerical values returned by the limit computation. Note that in our case we did not set a signal mass when running Combine (i.e. <code>-m 800</code>), so the output file just uses the default value of <code>120</code>. This does not affect our result in any way though, just the label that is used on the output file.</p> <p>The limits are given on a parameter called <code>r</code>. This is the default parameter of interest (POI) that is added to the model automatically. It is a linear scaling of the normalization of all signal processes given in the datacard, i.e. if \\(s_{i,j}\\) is the nominal number of signal events in channel \\(i\\) for signal process \\(j\\), then the normalization of that signal in the model is given as \\(r\\cdot s_{i,j}(\\vec{\\theta})\\), where \\(\\vec{\\theta}\\) represents the set of nuisance parameters which may also affect the signal normalization. We therefore have some choice in the interpretation of r: for the measurement of a process with a well-defined SM prediction we may enter this as the nominal yield in the datacard, such that \\(r=1\\) corresponds to this SM expectation, whereas for setting limits on BSM processes we may choose the nominal yield to correspond to some cross section, e.g. 1 pb, such that we can interpret the limit as a cross section limit directly. In this example the signal has been normalised to a cross section times branching fraction of 1 fb.</p> <p>The expected limit is given under the background-only hypothesis. The median value under this hypothesis as well as the quantiles needed to give the 68% and 95% intervals are also calculated. 
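</p> <p>If you want to inspect these numbers programmatically rather than reading them from the screen, you can open the output file and loop over the entries of the <code>limit</code> tree. The following is a minimal PyROOT sketch, assuming the output file name from the command above and the standard branches (<code>limit</code> and <code>quantileExpected</code>, where a quantile of -1 corresponds to the observed limit):</p> <pre><code>import ROOT\n\n# output of the AsymptoticLimits command run above\nf = ROOT.TFile.Open('higgsCombine.part1A.AsymptoticLimits.mH120.root')\ntree = f.Get('limit')\nfor entry in tree:\n    if entry.quantileExpected &lt; 0:\n        print('Observed limit on r: %.3f' % entry.limit)\n    else:\n        print('Expected limit on r (quantile %.3f): %.3f' % (entry.quantileExpected, entry.limit))\n</code></pre> <p>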
These are all the ingredients needed to produce the standard limit plots you will see in many CMS results, for example the \\(\\sigma \\times \\mathcal{B}\\) limits for the \\(\\text{bb}\\phi\\rightarrow\\tau\\tau\\) process:</p> <p></p> <p>In this case we only computed the values for one signal mass hypothesis, indicated by a red dashed line.</p> <p>Tasks and questions:</p> <ul> <li>There are some important uncertainties missing from the datacard above. Add the uncertainty on the luminosity (name: <code>lumi_13TeV</code>) which has a 2.5% effect on all processes (except the <code>jetFakes</code>, which are taken from data), and uncertainties on the inclusive cross sections of the <code>Ztautau</code> and <code>ttbar</code> processes (with names <code>xsec_Ztautau</code> and <code>xsec_ttbar</code>) which are 4% and 6% respectively.</li> <li>Try changing the values of some uncertainties (up or down, or removing them altogether) - how do the expected and observed limits change?</li> <li>Now try changing the number of observed events. The observed limit will naturally change, but the expected does too - why might this be?</li> </ul> <p>There are other command line options we can supply to Combine which will change its behaviour when run. You can see the full set of supported options by doing <code>combine -h</code>. Many options are specific to a given method, but others are more general and are applicable to all methods. Throughout this tutorial we will highlight some of the most useful options you may need to use, for example:</p> <ul> <li>The range on the signal strength modifier: <code>--rMin=X</code> and <code>--rMax=Y</code>: In <code>RooFit</code> parameters can optionally have a range specified. The implication of this is that their values cannot be adjusted beyond the limits of this range. The min and max values can be adjusted though, and we might need to do this for our POI <code>r</code> if the order of magnitude of our measurement is different from the default range of <code>[0, 20]</code>. This will be discussed again later in the tutorial.</li> <li>Verbosity: <code>-v X</code>: By default combine does not usually produce much output on the screen other the main result at the end. However, much more detailed information can be printed by setting the <code>-v N</code> with N larger than zero. For example at <code>-v 3</code> the logs from the minimizer, Minuit, will also be printed. These are very useful for debugging problems with the fit.</li> </ul>"},{"location":"part5/longexercise/#advanced-section-b-computing-limits-with-toys","title":"Advanced section: B: Computing limits with toys","text":"<p>Now we will look at computing limits without the asymptotic approximation, so instead using toy datasets to determine the test statistic distributions under the signal+background and background-only hypotheses. This can be necessary if we are searching for signal in bins with a small number of events expected. In Combine we will use the <code>HybridNew</code> method to calculate limits using toys. This mode is capable of calculating limits with several different test statistics and with fine-grained control over how the toy datasets are generated internally. To calculate LHC-style profile likelihood limits (i.e. the same as we did with the asymptotic) we set the option <code>--LHCmode LHC-limits</code>. 
You can read more about the different options in the Combine documentation.</p> <p>Run the following command: <pre><code>combine -M HybridNew datacard_part1.txt --LHCmode LHC-limits -n .part1B --saveHybridResult\n</code></pre> In contrast to <code>AsymptoticLimits</code> this will only determine the observed limit, and will take a few minutes. There will not be much output to the screen while combine is running. You can add the option <code>-v 1</code> to get a better idea of what is going on. You should see Combine stepping around in <code>r</code>, trying to find the value for which CLs = 0.05, i.e. the 95% CL limit. The <code>--saveHybridResult</code> option will cause the test statistic distributions that are generated at each tested value of <code>r</code> to be saved in the output ROOT file.</p> <p>To get an expected limit add the option <code>--expectedFromGrid X</code>, where <code>X</code> is the desired quantile, e.g. for the median:</p> <pre><code>combine -M HybridNew datacard_part1.txt --LHCmode LHC-limits -n .part1B --saveHybridResult --expectedFromGrid 0.500\n</code></pre> <p>Calculate the median expected limit and the 68% range. The 95% range could also be done, but note it will take much longer to run the 0.025 quantile. While Combine is running you can move on to the next steps below.</p> <p>Tasks and questions:</p> <ul> <li>In contrast to <code>AsymptoticLimits</code>, with <code>HybridNew</code> each limit comes with an uncertainty. What is the origin of this uncertainty?</li> <li>How good is the agreement between the asymptotic and toy-based methods?</li> <li>Why does it take longer to calculate the lower expected quantiles (e.g. 0.025, 0.16)? Think about how the statistical uncertainty on the CLs value depends on Pmu and Pb.</li> </ul> <p>Next plot the test statistic distributions stored in the output file: <pre><code>python3 $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/test/plotTestStatCLs.py --input higgsCombine.part1B.HybridNew.mH120.root --poi r --val all --mass 120\n</code></pre></p> <p>This produces a new ROOT file <code>test_stat_distributions.root</code> containing the plots, to save them as pdf/png files run this small script and look at the resulting figures:</p> <pre><code>python3 printTestStatPlots.py test_stat_distributions.root\n</code></pre>"},{"location":"part5/longexercise/#advanced-section-b-asymptotic-approximation-limitations","title":"Advanced section: B: Asymptotic approximation limitations","text":"<p>These distributions can be useful in understanding features in the CLs limits, especially in the low statistics regime. To explore this, try reducing the observed and expected yields in the datacard by a factor of 10, and rerun the above steps to compare the observed and expected limits with the asymptotic approach, and plot the test statistic distributions.</p> <p>Tasks and questions:</p> <ul> <li>Is the asymptotic limit still a good approximation?</li> <li>You might notice that the test statistic distributions are not smooth but rather have several \"bump\" structures? Where might this come from? Try reducing the size of the systematic uncertainties to make them more pronounced.</li> </ul> <p>Note that for more complex models the fitting time can increase significantly, making it infeasible to run all the toy-based limits interactively like this. 
An alternative strategy is documented here</p>"},{"location":"part5/longexercise/#part-2-a-shape-based-analysis","title":"Part 2: A shape-based analysis","text":"<p>Topics covered in this section:</p> <ul> <li>A: Setting up the datacard</li> <li>B: Running Combine for a blind analysis</li> <li>C: Using FitDiagnostics</li> <li>D: MC statistical uncertainties</li> </ul>"},{"location":"part5/longexercise/#a-setting-up-the-datacard","title":"A: Setting up the datacard","text":"<p>Now we move to the next step: instead of a one-bin counting experiment we will fit a binned distribution. In a typical analysis we will produce TH1 histograms of some variable sensitive to the presence of signal: one for the data and one for each signal and background processes. Then we add a few extra lines to the datacard to link the declared processes to these shapes which are saved in a ROOT file, for example:</p> Show datacard <pre><code>imax 1\njmax 1\nkmax *\n---------------\nshapes * * simple-shapes-TH1_input.root $PROCESS $PROCESS_$SYSTEMATIC\nshapes signal * simple-shapes-TH1_input.root $PROCESS$MASS $PROCESS$MASS_$SYSTEMATIC\n---------------\nbin bin1\nobservation 85\n------------------------------\nbin             bin1       bin1\nprocess         signal     background\nprocess         0          1\nrate            10         100\n--------------------------------\nlumi     lnN    1.10       1.0\nbgnorm   lnN    1.00       1.3\nalpha  shape    -          1\n</code></pre> <p>Note that as with the one-bin card, the total nominal rate of a given process must be specified in the <code>rate</code> line of the datacard. This should agree with the value returned by <code>TH1::Integral</code>. However, we can also put a value of <code>-1</code> and the Integral value will be substituted automatically.</p> <p>There are two other differences with respect to the one-bin card:</p> <ul> <li>A new block of lines at the top defining how channels and processes are mapped to the histograms (more than one line can be used)</li> <li>In the list of systematic uncertainties some are marked as shape instead of lnN</li> </ul> <p>The syntax of the \"shapes\" line is: <code>shapes [process] [channel] [file] [histogram] [histogram_with_systematics]</code>. It is possible to use the <code>*</code> wildcard to map multiple processes and/or channels with one line. The histogram entries can contain the <code>$PROCESS</code>, <code>$CHANNEL</code> and <code>$MASS</code> place-holders which will be substituted when searching for a given (process, channel) combination. The value of <code>$MASS</code> is specified by the <code>-m</code> argument when combine. By default the observed data process name will be <code>data_obs</code>.</p> <p>Shape uncertainties can be added by supplying two additional histograms for a process, corresponding to the distribution obtained by shifting that parameter up and down by one standard deviation. These shapes will be interpolated (see the template shape uncertainties section for details) for shifts within \\(\\pm1\\sigma\\) and linearly extrapolated beyond. The normalizations are interpolated linearly in log scale just like we do for log-normal uncertainties.</p> <p></p> <p>The final argument of the \"shapes\" line above should contain the <code>$SYSTEMATIC</code> place-holder which will be substituted by the systematic name given in the datacard.</p> <p>In the list of uncertainties the interpretation of the values for <code>shape</code> lines is a bit different from <code>lnN</code>. 
The effect can be \"-\" or 0 for no effect, 1 for normal effect, and possibly something different from 1 to test larger or smaller effects (in that case, the unit Gaussian is scaled by that factor before using it as parameter for the interpolation).</p> <p>In this section we will use a datacard corresponding to the full distribution that was shown at the start of section 1, not just the high mass region. Have a look at <code>datacard_part2.txt</code>: this is still currently a one-bin counting experiment, however the yields are much higher since we now consider the full range of \\(M_{\\mathrm{T}}^{\\mathrm{tot}}\\). If you run the asymptotic limit calculation on this you should find the sensitivity is significantly worse than before.</p> <p>The first task is to convert this to a shape analysis: the file <code>datacard_part2.shapes.root</code> contains all the necessary histograms, including those for the relevant shape systematic uncertainties. Add the relevant <code>shapes</code> lines to the top of the datacard (after the <code>kmax</code> line) to map the processes to the correct TH1s in this file. Hint: you will need a different line for the signal process.</p> <p>Compared to the counting experiment we must also consider the effect of uncertainties that change the shape of the distribution. Some, like <code>CMS_eff_t_highpt</code>, were present before, as it has both a shape and normalisation effect. Others are primarily shape effects so were not included before.</p> <p>Add the following shape uncertainties: <code>top_pt_ttbar_shape</code> affecting <code>ttbar</code>,the tau energy scale uncertainties <code>CMS_scale_t_1prong0pi0_13TeV</code>, <code>CMS_scale_t_1prong1pi0_13TeV</code> and <code>CMS_scale_t_3prong0pi0_13TeV</code> affecting all processes except <code>jetFakes</code>, and <code>CMS_eff_t_highpt</code> also affecting the same processes.</p> <p>Once this is done you can run the asymptotic limit calculation on this datacard. From now on we will convert the text datacard into a RooFit workspace ourselves instead of combine doing it internally every time we run. This is a good idea for more complex analyses since the conversion step can take a notable amount of time. For this we use the <code>text2workspace.py</code> command:</p> <p><pre><code>text2workspace.py datacard_part2.txt -m 800 -o workspace_part2.root\n</code></pre> And then we can use this as input to combine instead of the text datacard: <pre><code>combine -M AsymptoticLimits workspace_part2.root -m 800\n</code></pre> Tasks and questions:</p> <ul> <li>Verify that the sensitivity of the shape analysis is indeed improved over the counting analysis in the first part.</li> <li>Advanced task: You can open the workspace ROOT file interactively and print the contents: <code>w-&gt;Print();</code>. Each process is represented by a PDF object that depends on the shape morphing nuisance parameters. From the workspace, choose a process and shape uncertainty, and make a plot overlaying the nominal shape with different values of the shape morphing nuisance parameter. You can change the value of a parameter with <code>w-&gt;var(\"X\")-&gt;setVal(Y)</code>, and access a particular pdf with <code>w-&gt;pdf(\"Z\")</code>. PDF objects in RooFit have a createHistogram method that requires the name of the observable (the variable defining the x-axis) - this is called <code>CMS_th1x</code> in combine datacards. 
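As a starting point, here is a minimal PyROOT sketch; the pdf name used below is only an illustrative guess, so replace it (and the nuisance parameter name) with the objects you actually find from <code>w.Print()</code>: <pre><code>import ROOT\n\nf = ROOT.TFile.Open('workspace_part2.root')\nw = f.Get('w')\n# replace these names with ones listed by w.Print() - they are guesses for illustration\npdf = w.pdf('shapeSig_bbHtautau_signal_region_morph')\nnuis = w.var('CMS_scale_t_1prong0pi0_13TeV')\nshapes = {}\nfor val in [-1.0, 0.0, 1.0]:\n    nuis.setVal(val)\n    hist = pdf.createHistogram('CMS_th1x')\n    hist.SetName('shape_at_%+.1f' % val)\n    shapes[val] = hist\n# shapes now holds the templates at -1, 0 and +1 sigma, ready to overlay on a canvas\n</code></pre> 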
Feel free to ask for help with this!</li> </ul>"},{"location":"part5/longexercise/#b-running-combine-for-a-blind-analysis","title":"B: Running combine for a blind analysis","text":"<p>Most analyses are developed and optimised while we are \"blind\" to the region of data where we expect our signal to be. With <code>AsymptoticLimits</code> we can choose just to run the expected limit (<code>--run expected</code>), so as not to calculate the observed. However the data is still used, even for the expected, since in the frequentist approach a background-only fit to the data is performed to define the Asimov dataset used to calculate the expected limits. To skip this fit to data and use the pre-fit state of the model the option <code>--run blind</code> or <code>--noFitAsimov</code> can be used. Task: Compare the expected limits calculated with <code>--run expected</code> and <code>--run blind</code>. Why are they different?</p> <p>A more general way of blinding is to use combine's toy and Asimov dataset generating functionality. You can read more about this here. These options can be used with any method in combine, not just <code>AsymptoticLimits</code>.</p> <p>Task: Calculate a blind limit by generating a background-only Asimov with the <code>-t -1</code> option instead of using the <code>AsymptoticLimits</code> specific options. You should find the observed limit is the same as the expected. Then see what happens if you inject a signal into the Asimov dataset using the <code>--expectSignal [X]</code> option.</p>"},{"location":"part5/longexercise/#c-using-fitdiagnostics","title":"C: Using FitDiagnostics","text":"<p>We will now explore one of the most commonly used modes of Combine: <code>FitDiagnostics</code> . As well as allowing us to make a measurement of some physical quantity (as opposed to just setting a limit on it), this method is useful to gain additional information about the model and the behaviour of the fit. It performs two fits:</p> <ul> <li>A \"background-only\" (b-only) fit: first POI (usually \"r\") fixed to zero</li> <li>A \"signal+background\" (s+b) fit: all POIs are floating</li> </ul> <p>With the s+b fit Combine will report the best-fit value of our signal strength modifier <code>r</code>. As well as the usual output file, a file named <code>fitDiagnosticsTest.root</code> is produced which contains additional information. In particular it includes two <code>RooFitResult</code> objects, one for the b-only and one for the s+b fit, which store the fitted values of all the nuisance parameters (NPs) and POIs as well as estimates of their uncertainties. The covariance matrix from both fits is also included, from which we can learn about the correlations between parameters. 
Run the <code>FitDiagnostics</code> method on our workspace:</p> <p><pre><code>combine -M FitDiagnostics workspace_part2.root -m 800 --rMin -20 --rMax 20\n</code></pre> Open the resulting <code>fitDiagnosticsTest.root</code> interactively and print the contents of the s+b RooFitResult:</p> <pre><code>root [1] fit_s-&gt;Print()\n</code></pre> Show output <pre><code>RooFitResult: minimized FCN value: -2.55338e-05, estimated distance to minimum: 7.54243e-06\n                covariance matrix quality: Full, accurate covariance matrix\n                Status : MINIMIZE=0 HESSE=0\n\n    Floating Parameter    FinalValue +/-  Error\n  --------------------  --------------------------\n             CMS_eff_b   -4.5380e-02 +/-  9.93e-01\n             CMS_eff_t   -2.6311e-01 +/-  7.33e-01\n      CMS_eff_t_highpt   -4.7146e-01 +/-  9.62e-01\n  CMS_scale_t_1prong0pi0_13TeV   -1.5989e-01 +/-  5.93e-01\n  CMS_scale_t_1prong1pi0_13TeV   -1.6426e-01 +/-  4.94e-01\n  CMS_scale_t_3prong0pi0_13TeV   -3.0698e-01 +/-  6.06e-01\n    acceptance_Ztautau   -3.1262e-01 +/-  8.62e-01\n        acceptance_bbH   -2.8676e-05 +/-  1.00e+00\n      acceptance_ttbar    4.9981e-03 +/-  1.00e+00\n            lumi_13TeV   -5.6366e-02 +/-  9.89e-01\n         norm_jetFakes   -9.3327e-02 +/-  2.56e-01\n                     r   -2.7220e+00 +/-  2.59e+00\n    top_pt_ttbar_shape    1.7586e-01 +/-  7.00e-01\n          xsec_Ztautau   -1.6007e-01 +/-  9.66e-01\n          xsec_diboson    3.9758e-02 +/-  1.00e+00\n            xsec_ttbar    5.7794e-02 +/-  9.46e-01\n</code></pre> <p>There are several useful pieces of information here. At the top the status codes from the fits that were performed is given. In this case we can see that two algorithms were run: <code>MINIMIZE</code> and <code>HESSE</code>, both of which returned a successful status code (0). Both of these are routines in the Minuit2 minimization package - the default minimizer used in RooFit. The first performs the main fit to the data, and the second calculates the covariance matrix at the best-fit point. It is important to always check this second step was successful and the message \"Full, accurate covariance matrix\" is printed, otherwise the parameter uncertainties can be very inaccurate, even if the fit itself was successful.</p> <p>Underneath this the best-fit values (\\(\\theta\\)) and symmetrised uncertainties for all the floating parameters are given. For all the constrained nuisance parameters a convention is used by which the nominal value (\\(\\theta_I\\)) is zero, corresponding to the mean of a Gaussian constraint PDF with width 1.0, such that the parameter values \\(\\pm 1.0\\) correspond to the \\(\\pm 1\\sigma\\) input uncertainties.</p> <p>A more useful way of looking at this is to compare the pre- and post-fit values of the parameters, to see how much the fit to data has shifted and constrained these parameters with respect to the input uncertainty. 
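</p> <p>For a quick look at an individual parameter you can also do this comparison directly from the <code>RooFitResult</code>. The following is a minimal PyROOT sketch (the parameter name is just an illustrative choice from the datacard used here); it prints the pre- and post-fit values and the correlation with <code>r</code>:</p> <pre><code>import ROOT\n\nf = ROOT.TFile.Open('fitDiagnosticsTest.root')\nfit_s = f.Get('fit_s')\nname = 'CMS_eff_t'  # any nuisance parameter from the datacard\npre = fit_s.floatParsInit().find(name)\npost = fit_s.floatParsFinal().find(name)\nprint('pre-fit : %+.2f +/- %.2f' % (pre.getVal(), pre.getError()))\nprint('post-fit: %+.2f +/- %.2f' % (post.getVal(), post.getError()))\n# correlation between this parameter and the signal strength, from the covariance matrix\nprint('rho(r, %s) = %+.2f' % (name, fit_s.correlation('r', name)))\n</code></pre> <p>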
The script <code>diffNuisances.py</code> can be used for this:</p> <pre><code>python diffNuisances.py fitDiagnosticsTest.root --all\n</code></pre> Show output <pre><code>name                                              b-only fit            s+b fit         rho\nCMS_eff_b                                        -0.04, 0.99        -0.05, 0.99       +0.01\nCMS_eff_t                                     * -0.24, 0.73*     * -0.26, 0.73*       +0.06\nCMS_eff_t_highpt                              * -0.56, 0.94*     * -0.47, 0.96*       +0.02\nCMS_scale_t_1prong0pi0_13TeV                  * -0.17, 0.58*     * -0.16, 0.59*       -0.04\nCMS_scale_t_1prong1pi0_13TeV                  ! -0.12, 0.45!     ! -0.16, 0.49!       +0.20\nCMS_scale_t_3prong0pi0_13TeV                  * -0.31, 0.61*     * -0.31, 0.61*       +0.02\nacceptance_Ztautau                            * -0.31, 0.86*     * -0.31, 0.86*       -0.05\nacceptance_bbH                                   +0.00, 1.00        -0.00, 1.00       +0.05\nacceptance_ttbar                                 +0.01, 1.00        +0.00, 1.00       +0.00\nlumi_13TeV                                       -0.05, 0.99        -0.06, 0.99       +0.01\nnorm_jetFakes                                 ! -0.09, 0.26!     ! -0.09, 0.26!       -0.05\ntop_pt_ttbar_shape                            * +0.24, 0.69*     * +0.18, 0.70*       +0.22\nxsec_Ztautau                                     -0.16, 0.97        -0.16, 0.97       -0.02\nxsec_diboson                                     +0.03, 1.00        +0.04, 1.00       -0.02\nxsec_ttbar                                       +0.08, 0.95        +0.06, 0.95       +0.02\n</code></pre> <p>The numbers in each column are respectively \\(\\frac{\\theta-\\theta_I}{\\sigma_I}\\) (This is often called the pull, but note that this is a misnomer. In this tutorial we will refer to it as the fitted value of the nuisance parameter relative to the input uncertainty. The true pull is defined as discussed under <code>diffPullAsym</code> here ), where \\(\\sigma_I\\) is the input uncertainty; and the ratio of the post-fit to the pre-fit uncertainty \\(\\frac{\\sigma}{\\sigma_I}\\).</p> <p>Tasks and questions:</p> <ul> <li>Which parameter has the largest shift from the nominal value (0) in the fitted value of the nuisance parameter relative to the input uncertainty? Which has the tightest constraint?</li> <li>Should we be concerned when a parameter is more strongly constrained than the input uncertainty (i.e. \\(\\frac{\\sigma}{\\sigma_I}&lt;1.0\\))?</li> <li>Check the fitted values of the nuisance parameters and constraints on a b-only and s+b asimov dataset instead. This check is required for all analyses in the Higgs PAG. It serves both as a closure test (do we fit exactly what signal strength we input?) and a way to check whether there are any infeasibly strong constraints while the analysis is still blind (typical example: something has probably gone wrong if we constrain the luminosity uncertainty to 10% of the input!)</li> <li>Advanced task: Sometimes there are problems in the fit model that aren't apparent from only fitting the Asimov dataset, but will appear when fitting randomised data. Follow the exercise on toy-by-toy diagnostics here to explore the tools available for this.</li> </ul>"},{"location":"part5/longexercise/#d-mc-statistical-uncertainties","title":"D: MC statistical uncertainties","text":"<p>So far there is an important source of uncertainty we have neglected. 
Our estimates of the backgrounds come either from MC simulation or from sideband regions in data, and in both cases these estimates are subject to a statistical uncertainty on the number of simulated or data events. In principle we should include an independent statistical uncertainty for every bin of every process in our model. It's important to note that Combine/<code>RooFit</code> does not take this into account automatically - statistical fluctuations of the data are implicitly accounted for in the likelihood formalism, but statistical uncertainties in the model must be specified by us.</p> <p>One way to implement these uncertainties is to create a <code>shape</code> uncertainty for each bin of each process, in which the up and down histograms have the contents of the bin shifted up and down by the \\(1\\sigma\\) uncertainty. However, this makes the likelihood evaluation computationally inefficient, and can lead to a large number of nuisance parameters in more complex models. Instead we will use a feature in Combine called <code>autoMCStats</code> that creates these automatically from the datacard, and uses a technique called \"Barlow-Beeston-lite\" to reduce the number of systematic uncertainties that are created. This works on the assumption that for high MC event counts we can model the uncertainty with a Gaussian distribution. Given that the statistical uncertainties of the different processes in a particular bin are independent, the total uncertainty in that bin is just the sum of \\(N\\) individual Gaussians, which is itself a Gaussian distribution. So instead of \\(N\\) nuisance parameters we need only one. This breaks down when the number of events is small and we are not in the Gaussian regime. The <code>autoMCStats</code> tool has a threshold setting on the number of events below which the Barlow-Beeston-lite approach is not used, and instead a Poisson PDF is used to model per-process uncertainties in that bin.</p> <p>After reading the full documentation on <code>autoMCStats</code> here, add the corresponding line to your datacard. Start by setting a threshold of 0, i.e. <code>[channel] autoMCStats 0</code>, to force the use of Barlow-Beeston-lite in all bins.</p> <p>Tasks and questions:</p> <ul> <li>Check how much the cross section measurement and uncertainties change using <code>FitDiagnostics</code>.</li> <li>It is also useful to check how the expected uncertainty changes using an Asimov dataset, say with <code>r=10</code> injected.</li> <li>Advanced task: See what happens if the Poisson threshold is increased. Based on your results, what threshold would you recommend for this analysis?</li> </ul>"},{"location":"part5/longexercise/#part-3-adding-control-regions","title":"Part 3: Adding control regions","text":"<p>Topics covered in this section:</p> <ul> <li>A: Use of rateParams</li> <li>B: Nuisance parameter impacts</li> <li>C: Post-fit distributions</li> <li>D: Calculating the significance</li> <li>E: Signal strength measurement and uncertainty breakdown</li> <li>F: Use of channel masking</li> </ul> <p>In a modern analysis it is typical for some or all of the backgrounds to be estimated using the data, instead of relying purely on MC simulation. This can take many forms, but a common approach is to use \"control regions\" (CRs) that are pure and/or have higher statistics for a given process. These are defined by event selections that are similar to, but non-overlapping with, the signal region. 
In our \\(\\phi\\rightarrow\\tau\\tau\\) example the \\(\\text{Z}\\rightarrow\\tau\\tau\\) background normalisation can be calibrated using a \\(\\text{Z}\\rightarrow\\mu\\mu\\) CR, and the \\(\\text{t}\\bar{\\text{t}}\\) background using an \\(e+\\mu\\) CR. By comparing the number of data events in these CRs to our MC expectation we can obtain scale factors to apply to the corresponding backgrounds in the signal region (SR). The idea is that the data will give us a more accurate prediction of the background, with fewer systematic uncertainties. For example, we can remove the cross section and acceptance uncertainties in the SR, since we are no longer using the MC prediction (with a caveat discussed below). While we could simply derive these correction factors and apply them to our signal region datacard, a better way is to include these regions in our fit model and tie the normalisations of the backgrounds in the CR and SR together. This has a number of advantages:</p> <ul> <li>Automatically handles the statistical uncertainty due to the number of data events in the CR</li> <li>Allows for the presence of some signal contamination in the CR to be handled correctly</li> <li>The CRs are typically not 100% pure in the background they're meant to control - other backgrounds may be present, with their own systematic uncertainties, some of which may be correlated with the SR or other CRs. Propagating these effects through to the SR \"by hand\" can become very challenging.</li> </ul> <p>In this section we will continue to use the same SR as in the previous one, however we will switch to a lower signal mass hypothesis, \\(m_{\\phi}=200\\) GeV, as its sensitivity depends more strongly on the background prediction than that of the high mass signal, so it is better for illustrating the use of CRs. Here the nominal signal (<code>r=1</code>) has been normalised to a cross section of 1 pb.</p> <p>The SR datacard for the 200 GeV signal is <code>datacard_part3.txt</code>. Two further datacards are provided: <code>datacard_part3_ttbar_cr.txt</code> and <code>datacard_part3_DY_cr.txt</code>, which represent the CRs for the Drell-Yan and \\(\\text{t}\\bar{\\text{t}}\\) processes as described above. The cross section and acceptance uncertainties for these processes have pre-emptively been removed from the SR card. However, we cannot get away with neglecting acceptance effects altogether. We are still implicitly using the MC simulation to predict the ratio of events in the CR and SR, and this ratio will in general carry a theoretical acceptance uncertainty. If the CRs are well chosen, however, this uncertainty should be smaller than the direct acceptance uncertainty in the SR. The uncertainties <code>acceptance_ttbar_cr</code> and <code>acceptance_DY_cr</code> have been added to these datacards to cover this effect. Task: Calculate the ratio of CR to SR events for these two processes, as well as their CR purity, to verify that these are useful CRs.</p> <p>The next step is to combine these datacards into one, which is done with the <code>combineCards.py</code> script:</p> <pre><code>combineCards.py signal_region=datacard_part3.txt ttbar_cr=datacard_part3_ttbar_cr.txt DY_cr=datacard_part3_DY_cr.txt &amp;&gt; part3_combined.txt\n</code></pre> <p>Each argument is of the form <code>[new channel name]=[datacard.txt]</code>. The new datacard is written to the screen by default, so we redirect the output into our new datacard file. 
The output looks like:</p> Show datacard <pre><code>imax 3 number of bins\njmax 8 number of processes minus 1\nkmax 15 number of nuisance parameters\n----------------------------------------------------------------------------------------------------------------------------------\nshapes *              DY_cr          datacard_part3_DY_cr.shapes.root DY_control_region/$PROCESS DY_control_region/$PROCESS_$SYSTEMATIC\nshapes *              signal_region  datacard_part3.shapes.root signal_region/$PROCESS signal_region/$PROCESS_$SYSTEMATIC\nshapes bbHtautau      signal_region  datacard_part3.shapes.root signal_region/bbHtautau$MASS signal_region/bbHtautau$MASS_$SYSTEMATIC\nshapes *              ttbar_cr       datacard_part3_ttbar_cr.shapes.root tt_control_region/$PROCESS tt_control_region/$PROCESS_$SYSTEMATIC\n----------------------------------------------------------------------------------------------------------------------------------\nbin          signal_region  ttbar_cr       DY_cr\nobservation  3416           79251          365754\n----------------------------------------------------------------------------------------------------------------------------------\nbin                                               signal_region  signal_region  signal_region  signal_region  signal_region  ttbar_cr       ttbar_cr       ttbar_cr       ttbar_cr       ttbar_cr       DY_cr          DY_cr          DY_cr          DY_cr          DY_cr          DY_cr\nprocess                                           bbHtautau      ttbar          diboson        Ztautau        jetFakes       W              QCD            ttbar          VV             Ztautau        W              QCD            Zmumu          ttbar          VV             Ztautau\nprocess                                           0              1              2              3              4              5              6              1              7              3              5              6              8              1              7              3\nrate                                              198.521        683.017        96.5185        742.649        2048.94        597.336        308.965        67280.4        10589.6        150.025        59.9999        141.725        305423         34341.1        5273.43        115.34\n----------------------------------------------------------------------------------------------------------------------------------\nCMS_eff_b               lnN                       1.02           1.02           1.02           1.02           -              -              -              -              -              -              -              -              -              -              -              -\nCMS_eff_e               lnN                       -              -              -              -              -              1.02           -              -              1.02           1.02           -              -              -              -              -              -\n...\n</code></pre> <p>The <code>[new channel name]=</code> part of the input arguments is not required, but it gives us control over how the channels in the combined card will be named, otherwise default values like <code>ch1</code>, <code>ch2</code> etc will be used.</p>"},{"location":"part5/longexercise/#a-use-of-rateparams","title":"A: Use of rateParams","text":"<p>We now have a combined datacard that we can run text2workspace.py on and start doing fits, however there is still one important ingredient missing. 
Right now the yields of the <code>Ztautau</code> process in the SR and <code>Zmumu</code> in the CR are not connected to each other in any way, and similarly for the <code>ttbar</code> processes. In the fit both would be adjusted by the nuisance parameters only, and constrained to the nominal yields. To remedy this we introduce <code>rateParam</code> directives to the datacard. A <code>rateParam</code> is a new free parameter that multiplies the yield of a given process, in just the same way that the signal strength <code>r</code> multiplies the signal yield. The syntax of a <code>rateParam</code> line in the datacard is</p> <pre><code>[name] rateParam [channel] [process] [init] [min,max]\n</code></pre> <p>where <code>name</code> is the chosen name for the parameter, <code>channel</code> and <code>process</code> specify which (channel, process) combination it should affect, <code>init</code> gives the initial value, and optionally <code>[min,max]</code> specifies the range of the RooRealVar that will be created. The <code>channel</code> and <code>process</code> arguments support the use of the wildcard <code>*</code> to match multiple entries. Task: Add two <code>rateParam</code>s, named <code>rate_ttbar</code> and <code>rate_Zll</code>, with nominal values of <code>1.0</code> to the end of the combined datacard. The former should affect the <code>ttbar</code> process in all channels, and the latter should affect the <code>Ztautau</code> and <code>Zmumu</code> processes in all channels. Set ranges of <code>[0,5]</code> for both. Note that a <code>rateParam</code> name can be repeated to apply it to multiple processes, e.g.:</p> <pre><code>rateScale rateParam * procA 1.0\nrateScale rateParam * procB 1.0\n</code></pre> <p>is perfectly valid and only one <code>rateParam</code> will be created. These parameters will allow the yields to float in the fit without prior constraint (unlike a regular <code>lnN</code> or <code>shape</code> systematic), with the yields in the CRs and SR tied together.</p> <p>Tasks and questions:</p> <ul> <li>Run <code>text2workspace.py</code> on this combined card (don't forget to set the mass and output name <code>-m 200 -o workspace_part3.root</code>) and then use <code>FitDiagnostics</code> on an Asimov dataset with <code>r=1</code> to get the expected uncertainty. Suggested command line options: <code>--rMin 0 --rMax 2</code></li> <li>Using the RooFitResult in the <code>fitDiagnosticsTest.root</code> file, check the post-fit value of the rateParams. To what level are the normalisations of the DY and ttbar processes constrained?</li> <li>To compare to the previous approach of fitting the SR only, with cross section and acceptance uncertainties restored, an additional card is provided: <code>datacard_part3_nocrs.txt</code>. Run the same fit on this card to verify the improvement of the SR+CR approach.</li> </ul>"},{"location":"part5/longexercise/#b-nuisance-parameter-impacts","title":"B: Nuisance parameter impacts","text":"<p>It is often useful to examine in detail the effects the systematic uncertainties have on the signal strength measurement. This is often referred to as calculating the \"impact\" of each uncertainty. What this means is to determine the shift in the signal strength, with respect to the best-fit, that is induced if a given nuisance parameter is shifted by its \\(\\pm1\\sigma\\) post-fit uncertainty values. If the signal strength shifts a lot, it tells us that it has a strong dependency on this systematic uncertainty. 
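</p> <p>As an aside, the post-fit values and uncertainties referred to here, including the freely floating <code>rate_ttbar</code> and <code>rate_Zll</code> parameters from the previous section, can be inspected directly from the RooFitResult stored by <code>FitDiagnostics</code>. A minimal PyROOT sketch, assuming the default <code>fitDiagnosticsTest.root</code> output name and the <code>fit_s</code> result it contains, is:</p> <pre><code>import ROOT\n\n# Assumes the default FitDiagnostics output file produced earlier in this exercise\nf = ROOT.TFile.Open(\"fitDiagnosticsTest.root\")\nres = f.Get(\"fit_s\")  # RooFitResult of the signal+background fit\n\n# Post-fit value and uncertainty of every floating parameter\npars = res.floatParsFinal()\nfor i in range(pars.getSize()):\n    p = pars.at(i)\n    print(p.GetName(), p.getVal(), p.getError())\n</code></pre> <p>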
In fact, what we are measuring here is strongly related to the correlation coefficient between the signal strength and the nuisance parameter. The <code>MultiDimFit</code> method has an algorithm for calculating the impact for a given systematic: <code>--algo impact -P [parameter name]</code>, but it is typical to use a higher-level script, <code>combineTool.py</code>, to automatically run the impacts for all parameters. Full documentation on this is given here. There is a three step process for running this. First we perform an initial fit for the signal strength and its uncertainty:</p> <p><pre><code>combineTool.py -M Impacts -d workspace_part3.root -m 200 --rMin -1 --rMax 2 --robustFit 1 --doInitialFit\n</code></pre> Then we run the impacts for all the nuisance parameters: <pre><code>combineTool.py -M Impacts -d workspace_part3.root -m 200 --rMin -1 --rMax 2 --robustFit 1 --doFits\n</code></pre> This will take a little bit of time. When finished we collect all the output and convert it to a json file: <pre><code>combineTool.py -M Impacts -d workspace_part3.root -m 200 --rMin -1 --rMax 2 --robustFit 1 --output impacts.json\n</code></pre> We can then make a plot showing the fitted values of the nuisance parameters, relative to the input uncertainty, and parameter impacts, sorted by the largest impact: <pre><code>plotImpacts.py -i impacts.json -o impacts\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Identify the most important uncertainties using the impacts tool.</li> <li>In the plot, some parameters do not show a fitted value of the nuisance parameter relative to the input uncertainty, but rather just a numerical value - why?</li> </ul>"},{"location":"part5/longexercise/#c-post-fit-distributions","title":"C: Post-fit distributions","text":"<p>Another thing the <code>FitDiagnostics</code> mode can help us with is visualising the distributions we are fitting, and the uncertainties on those distributions, both before the fit is performed (\"pre-fit\") and after (\"post-fit\"). The pre-fit can give us some idea of how well our uncertainties cover any data-MC discrepancy, and the post-fit if discrepancies remain after the fit to data (as well as possibly letting us see the presence of a significant signal!).</p> <p>To produce these distributions add the <code>--saveShapes</code> and <code>--saveWithUncertainties</code> options when running <code>FitDiagnostics</code>:</p> <pre><code>combine -M FitDiagnostics workspace_part3.root -m 200 --rMin -1 --rMax 2 --saveShapes --saveWithUncertainties -n .part3B\n</code></pre> <p>Combine will produce pre- and post-fit distributions (for fit_s and fit_b) in the fitDiagnosticsTest.root output file:</p> <p></p> <p>Tasks and questions:</p> <ul> <li> <p>Make a plot showing the expected background and signal contributions using the output from <code>FitDiagnostics</code> - do this for both the pre-fit and post-fit. You will find a script <code>postFitPlot.py</code> in the <code>longexercise</code> directory that can help you get started.  The bin errors on the TH1s in the fitDiagnostics file are determined from the systematic uncertainties. 
In the post-fit these take into account the additional constraints on the nuisance parameters as well as any correlations.</p> </li> <li> <p>Why is the uncertainty on the post-fit so much smaller than on the pre-fit?</p> </li> </ul>"},{"location":"part5/longexercise/#d-calculating-the-significance","title":"D: Calculating the significance","text":"<p>In the event that you observe a deviation from your null hypothesis, in this case the b-only hypothesis, Combine can be used to calculate the p-value or significance. To do this using the asymptotic approximation simply do:</p> <p><pre><code>combine -M Significance workspace_part3.root -m 200 --rMin -1 --rMax 2\n</code></pre> To calculate the expected significance for a given signal strength we can just generate an Asimov dataset first:</p> <p><pre><code>combine -M Significance workspace_part3.root -m 200 --rMin -1 --rMax 5 -t -1 --expectSignal 1.5\n</code></pre> Note that the Asimov dataset generated this way uses the nominal values of all model parameters to define the dataset. Another option is to add <code>--toysFrequentist</code>, which causes a fit to the data to be performed first (with <code>r</code> frozen to the <code>--expectSignal</code> value) and then any subsequent Asimov datasets or toys are generated using the post-fit values of the model parameters. In general this will result in a different value for the expected significance due to changes in the background normalisation and shape induced by the fit to data:</p> <pre><code>combine -M Significance workspace_part3.root -m 200 --rMin -1 --rMax 5 -t -1 --expectSignal 1.5 --toysFrequentist\n</code></pre> <p>Tasks and questions:</p> <ul> <li>Note how much the expected significance changes with the --toysFrequentist option. Does the change make sense given the difference in the post-fit and pre-fit distributions you looked at in the previous section?</li> <li>Advanced task It is also possible to calculate the significance using toys with <code>HybridNew</code> (details here) if we are in a situation where the asymptotic approximation is not reliable or if we just want to verify the result. Why might this be challenging for a high significance, say larger than \\(5\\sigma\\)?</li> </ul>"},{"location":"part5/longexercise/#e-signal-strength-measurement-and-uncertainty-breakdown","title":"E: Signal strength measurement and uncertainty breakdown","text":"<p>We have seen that with <code>FitDiagnostics</code> we can make a measurement of the best-fit signal strength and uncertainty. In the asymptotic approximation we find an interval at the \\(\\alpha\\) CL around the best fit by identifying the parameter values at which our test statistic \\(q=\u22122\\Delta \\ln L\\) equals a critical value. This value is the \\(\\alpha\\) quantile of the \\(\\chi^2\\) distribution with one degree of freedom. In the expression for q we calculate the difference in the profile likelihood between some fixed point and the best-fit.</p> <p>Depending on what we want to do with the measurement, e.g. whether it will be published in a journal, we may want to choose a more precise method for finding these intervals. 
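</p> <p>For reference, the critical value of \\(q\\) for a given CL is just the corresponding \\(\\chi^2\\) quantile, and can be computed with a couple of lines of PyROOT (a sketch assuming only a working ROOT installation). For a 68.3% CL interval this gives \\(q \\approx 1\\), i.e. the familiar crossing at <code>deltaNLL</code> \\(= q/2 = 0.5\\), and \\(q \\approx 3.84\\) for 95%:</p> <pre><code>import ROOT\n\n# Critical value of q = -2*Delta(ln L) for a CL interval on one parameter:\n# the alpha quantile of the chi-squared distribution with one degree of freedom\nfor cl in (0.683, 0.95):\n    q_crit = ROOT.TMath.ChisquareQuantile(cl, 1)\n    print(\"CL = %.3f : q_crit = %.2f (deltaNLL = %.2f)\" % (cl, q_crit, q_crit / 2.0))\n</code></pre> <p>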
There are a number of ways that parameter uncertainties are estimated in combine, and some are more precise than others:</p> <ul> <li>Covariance matrix: calculated by the Minuit HESSE routine, this gives a symmetric uncertainty by definition and is only accurate when the profile likelihood for this parameter is symmetric and parabolic.</li> <li>Minos error: calculated by the Minuit MINOS routine - this performs a search for the upper and lower values of the parameter that give the critical value of \\(q\\) for the desired CL, and returns an asymmetric interval. This is what <code>FitDiagnostics</code> does by default, but only for the parameter of interest. It is usually accurate, but is prone to fail on more complex models, and it is not easy to control the tolerance for terminating the search.</li> <li>RobustFit error: a custom implementation in combine similar to Minos that returns an asymmetric interval, but with more control over the precision. Enabled by adding <code>--robustFit 1</code> when running <code>FitDiagnostics</code>.</li> <li>Explicit scan of the profile likelihood on a chosen grid of parameter values, interpolating between the points to find the parameter values corresponding to the critical value of \\(q\\). It is a good idea to use this for important measurements since we can see by eye that there are no unexpected features in the shape of the likelihood curve.</li> </ul> <p>In this section we will look at the last approach, using the <code>MultiDimFit</code> mode of combine. By default this mode just performs a single fit to the data:</p> <pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E -m 200 --rMin -1 --rMax 2\n</code></pre> <p>You should see the best-fit value of the signal strength reported and nothing else. By adding the <code>--algo X</code> option combine will run an additional algorithm after this best fit. Here we will use <code>--algo grid</code>, which performs a scan of the likelihood with <code>r</code> fixed to a set of different values. 
The set of points will be equally spaced between the <code>--rMin</code> and <code>--rMax</code> values, and the number of points is controlled with <code>--points N</code>:</p> <pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E -m 200 --rMin -1 --rMax 2 --algo grid --points 30\n</code></pre> <p>The results of the scan are written into the output file, if opened interactively should see:</p> Show output <pre><code>root [1] limit-&gt;Scan(\"r:deltaNLL\")\n************************************\n*    Row   *         r *  deltaNLL *\n************************************\n*        0 * 0.5399457 *         0 *\n*        1 * -0.949999 * 5.6350698 *\n*        2 * -0.850000 * 4.9482779 *\n*        3 *     -0.75 * 4.2942519 *\n*        4 * -0.649999 * 3.6765284 *\n*        5 * -0.550000 * 3.0985388 *\n*        6 * -0.449999 * 2.5635135 *\n*        7 * -0.349999 * 2.0743820 *\n*        8 *     -0.25 * 1.6337506 *\n*        9 * -0.150000 * 1.2438088 *\n*       10 * -0.050000 * 0.9059833 *\n*       11 * 0.0500000 * 0.6215767 *\n*       12 * 0.1500000 * 0.3910581 *\n*       13 *      0.25 * 0.2144184 *\n*       14 * 0.3499999 * 0.0911308 *\n*       15 * 0.4499999 * 0.0201983 *\n*       16 * 0.5500000 * 0.0002447 *\n*       17 * 0.6499999 * 0.0294311 *\n*       18 *      0.75 * 0.1058298 *\n*       19 * 0.8500000 * 0.2272539 *\n*       20 * 0.9499999 * 0.3912534 *\n*       21 * 1.0499999 * 0.5952836 *\n*       22 * 1.1499999 * 0.8371513 *\n*       23 *      1.25 * 1.1142146 *\n*       24 * 1.3500000 * 1.4240909 *\n*       25 * 1.4500000 * 1.7644306 *\n*       26 * 1.5499999 * 2.1329684 *\n*       27 * 1.6499999 * 2.5273966 *\n*       28 *      1.75 * 2.9458723 *\n*       29 * 1.8500000 * 3.3863399 *\n*       30 * 1.9500000 * 3.8469560 *\n************************************\n</code></pre> <p>To turn this into a plot run: <pre><code>python plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root -o single_scan\n</code></pre> This script will also perform a spline interpolation of the points to give accurate values for the uncertainties.</p> <p>In the next step we will split this total uncertainty into two components. It is typical to separate the contribution from statistics and systematics, and sometimes even split the systematic part into different components. This gives us an idea of which aspects of the uncertainty dominate. The statistical component is usually defined as the uncertainty we would have if all the systematic uncertainties went to zero. We can emulate this effect by freezing all the nuisance parameters when we do the scan in <code>r</code>, such that they do not vary in the fit. This is achieved by adding the <code>--freezeParameters allConstrainedNuisances</code> option. It would also work if the parameters are specified explicitly, e.g. <code>--freezeParameters CMS_eff_t,lumi_13TeV,...,</code> but the <code>allConstrainedNuisances</code> option is more concise. 
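</p> <p>If you want to inspect the scan points yourself, rather than relying on <code>plot1DScan.py</code>, the output tree can be read back with a few lines of PyROOT. This is only a sketch, using the file name, tree name and branches from the commands and output shown above:</p> <pre><code>import ROOT\n\n# Output of the --algo grid scan above; the results are stored in the \"limit\" tree\nf = ROOT.TFile.Open(\"higgsCombine.part3E.MultiDimFit.mH200.root\")\ntree = f.Get(\"limit\")\n\n# Collect (r, q) pairs; the first entry is the best fit, with deltaNLL = 0\npoints = sorted((entry.r, 2 * entry.deltaNLL) for entry in tree)\nfor r, q in points:\n    print(\"r = %+.3f   q = %.3f\" % (r, q))\n</code></pre> <p>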
Run the scan again with the systematics frozen, and use the plotting script to overlay this curve with the previous one:</p> <pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E.freezeAll -m 200 --rMin -1 --rMax 2 --algo grid --points 30 --freezeParameters allConstrainedNuisances\npython plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root --others 'higgsCombine.part3E.freezeAll.MultiDimFit.mH200.root:FreezeAll:2' -o freeze_first_attempt\n</code></pre> <p></p> <p>This doesn't look quite right - the best-fit has been shifted because unfortunately the <code>--freezeParameters</code> option acts before the initial fit, whereas we only want to add it for the scan after this fit. To remedy this we can use a feature of Combine that lets us save a \"snapshot\" of the best-fit parameter values, and reuse this snapshot in subsequent fits. First we perform a single fit, adding the <code>--saveWorkspace</code> option:</p> <p><pre><code>combine -M MultiDimFit workspace_part3.root -n .part3E.snapshot -m 200 --rMin -1 --rMax 2 --saveWorkspace\n</code></pre> The output file will now contain a copy of our workspace from the input, and this copy will contain a snapshot of the best-fit parameter values. We can now run the frozen scan again, but instead using this copy of the workspace as input, and restoring the snapshot that was saved:</p> <pre><code>combine -M MultiDimFit higgsCombine.part3E.snapshot.MultiDimFit.mH200.root -n .part3E.freezeAll -m 200 --rMin -1 --rMax 2 --algo grid --points 30 --freezeParameters allConstrainedNuisances --snapshotName MultiDimFit\npython plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root --others 'higgsCombine.part3E.freezeAll.MultiDimFit.mH200.root:FreezeAll:2' -o freeze_second_attempt --breakdown Syst,Stat\n</code></pre> <p>Now the plot should look correct:</p> <p></p> <p>We added the <code>--breakdown Syst,Stat</code> option to the plotting script to make it calculate the systematic component, which is defined simply as \\(\\sigma_{\\text{syst}} = \\sqrt{\\sigma^2_{\\text{tot}} - \\sigma^2_{\\text{stat}}}\\). To split the systematic uncertainty into different components we just need to run another scan with a subset of the systematics frozen. For example, say we want to split this into experimental and theoretical uncertainties, we would calculate the uncertainties as:</p> <p>\\(\\sigma_{\\text{theory}} = \\sqrt{\\sigma^2_{\\text{tot}} - \\sigma^2_{\\text{fr.theory}}}\\)</p> <p>\\(\\sigma_{\\text{expt}} = \\sqrt{\\sigma^2_{\\text{fr.theory}} - \\sigma^2_{\\text{fr.theory+expt}}}\\)</p> <p>\\(\\sigma_{\\text{stat}} = \\sigma_{\\text{fr.theory+expt}}\\)</p> <p>where fr.=freeze.</p> <p>While it is perfectly fine to just list the relevant nuisance parameters in the <code>--freezeParameters</code> argument for the \\(\\sigma_{\\text{fr.theory}}\\) scan, a convenient way can be to define a named group of parameters in the text datacard and then freeze all parameters in this group with <code>--freezeNuisanceGroups</code>. The syntax for defining a group is:</p> <pre><code>[group name] group = uncertainty_1 uncertainty_2 ... 
uncertainty_N\n</code></pre> <p>Tasks and questions:</p> <ul> <li>Take our stat+syst split one step further and separate the systematic part into two: one part for hadronic tau uncertainties and one for all others.</li> <li>Do this by defining a <code>tauID</code> group in the datacard including the following parameters: <code>CMS_eff_t</code>, <code>CMS_eff_t_highpt</code>, and the three <code>CMS_scale_t_X</code> uncertainties.</li> <li>To plot this and calculate the split via the relations above you can just add further arguments to the <code>--others</code> option in the <code>plot1DScan.py</code> script. Each is of the form: <code>'[file]:[label]:[color]'</code>. The <code>--breakdown</code> argument should also be extended to three terms.</li> <li>How important are these tau-related uncertainties compared to the others?</li> </ul>"},{"location":"part5/longexercise/#f-use-of-channel-masking","title":"F: Use of channel masking","text":"<p>We will now return briefly to the topic of blinding. We've seen that we can compute expected results by performing any Combine method on an Asimov dataset generated using <code>-t -1</code>. This is useful, because we can optimise our analysis without introducing any accidental bias that might come from looking at the data in the signal region. However our control regions have been chosen specifically to be signal-free, and it would be useful to use the data here to set the normalisation of our backgrounds even while the signal region remains blinded. Unfortunately there's no easy way to generate a partial Asimov dataset just for the signal region, but instead we can use a feature called \"channel masking\" to remove specific channels from the likelihood evaluation. One useful application of this feature is to make post-fit plots of the signal region from a control-region-only fit.</p> <p>To use the masking we first need to rerun <code>text2workspace.py</code> with an extra option that will create variables named like <code>mask_[channel]</code> in the workspace:</p> <pre><code>text2workspace.py part3_combined.txt -m 200 -o workspace_part3_with_masks.root --channel-masks\n</code></pre> <p>These parameters have a default value of 0 which means the channel is not masked. By setting it to 1 the channel is masked from the likelihood evaluation. Task: Run the same <code>FitDiagnostics</code> command as before to save the post-fit shapes, but add an option <code>--setParameters mask_signal_region=1</code>. Note that the s+b fit will probably fail in this case, since we are no longer fitting a channel that contains signal, however the b-only fit should work fine. Task: Compare the expected background distribution and uncertainty to the pre-fit, and to the background distribution from the full fit you made before.</p>"},{"location":"part5/longexercise/#part-4-physics-models","title":"Part 4: Physics models","text":"<p>Topics covered in this section:</p> <ul> <li>A: Writing a simple physics model</li> <li>B: Performing and plotting 2D likelihood scans</li> </ul> <p>With Combine we are not limited to parametrising the signal with a single scaling parameter <code>r</code>. In fact we can define any arbitrary scaling using whatever functions and parameters we would like. For example, when measuring the couplings of the Higgs boson to the different SM particles we would introduce a POI for each coupling parameter, for example \\(\\kappa_{\\text{W}}\\), \\(\\kappa_{\\text{Z}}\\), \\(\\kappa_{\\tau}\\) etc. 
We would then generate scaling terms for each \\(i\\rightarrow \\text{H}\\rightarrow j\\) process in terms of how the cross section (\\(\\sigma_i(\\kappa)\\)) and branching ratio (\\(\\frac{\\Gamma_i(\\kappa)}{\\Gamma_{\\text{tot}}(\\kappa)}\\)) scale relative to the SM prediction.</p> <p>This parametrisation of the signal (and possibly backgrounds too) is specified in a physics model. This is a Python class that is used by <code>text2workspace.py</code> to construct the model in terms of RooFit objects. There is documentation on using physics models here.</p>"},{"location":"part5/longexercise/#a-writing-a-simple-physics-model","title":"A: Writing a simple physics model","text":"<p>An example physics model that just implements a single parameter <code>r</code> is given in <code>DASModel.py</code>:</p> Show DASModel.py <pre><code>from HiggsAnalysis.CombinedLimit.PhysicsModel import PhysicsModel\n\n\nclass DASModel(PhysicsModel):\n    def doParametersOfInterest(self):\n        \"\"\"Create POI and other parameters, and define the POI set.\"\"\"\n        self.modelBuilder.doVar(\"r[0,0,10]\")\n        self.modelBuilder.doSet(\"POI\", \",\".join([\"r\"]))\n\n    def getYieldScale(self, bin, process):\n        \"Return the name of a RooAbsReal to scale this yield by or the two special values 1 and 0 (don't scale, and set to zero)\"\n        if self.DC.isSignal[process]:\n            print(\"Scaling %s/%s by r\" % (bin, process))\n            return \"r\"\n        return 1\n\n\ndasModel = DASModel()\n</code></pre> <p>In this we override two methods of the basic <code>PhysicsModel</code> class: <code>doParametersOfInterest</code> and <code>getYieldScale</code>. In the first we define our POI variables, using the doVar function which accepts the RooWorkspace factory syntax for creating variables, and then define all our POIs in a set via the doSet function. The second function will be called for every process in every channel (bin), and using the corresponding strings we have to specify how that process should be scaled. Here we check if the process was declared as signal in the datacard, and if so scale it by <code>r</code>; otherwise, if it is a background, no scaling is applied (<code>1</code>). To use the physics model with <code>text2workspace.py</code> first copy it to the python directory in the Combine package: <pre><code>cp DASModel.py $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/python/\n</code></pre></p> <p>In this section we will use the full datacards from the MSSM analysis. Have a look in <code>part4/200/combined.txt</code>. You will notice that there are now two signal processes declared: <code>ggH</code> and <code>bbH</code>. In the MSSM these cross sections can vary independently depending on the exact parameters of the model, so it is useful to be able to measure them independently too. 
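</p> <p>To illustrate the mechanism before tackling the tasks below (this is not the exercise solution; the process names <code>sigA</code> and <code>sigB</code> and the POIs <code>r_A</code> and <code>r_B</code> are purely hypothetical), a model with one POI per signal process could look like the following sketch, built in the same way as <code>DASModel.py</code> above:</p> <pre><code>from HiggsAnalysis.CombinedLimit.PhysicsModel import PhysicsModel\n\n\nclass TwoPOIModel(PhysicsModel):\n    def doParametersOfInterest(self):\n        # One scaling parameter per (hypothetical) signal process\n        self.modelBuilder.doVar(\"r_A[1,0,10]\")\n        self.modelBuilder.doVar(\"r_B[1,0,10]\")\n        self.modelBuilder.doSet(\"POI\", \",\".join([\"r_A\", \"r_B\"]))\n\n    def getYieldScale(self, bin, process):\n        # Map each signal process to its own POI; leave everything else unscaled\n        if process == \"sigA\":\n            return \"r_A\"\n        if process == \"sigB\":\n            return \"r_B\"\n        return 1\n\n\ntwoPOIModel = TwoPOIModel()\n</code></pre> <p>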
First run <code>text2workspace.py</code> as follows, adding the <code>-P</code> option to specify the physics model, then verify the result of the fit:</p> <pre><code>text2workspace.py part4/200/combined.txt -P HiggsAnalysis.CombinedLimit.DASModel:dasModel -m 200 -o workspace_part4.root\ncombine -M MultiDimFit workspace_part4.root -n .part4A -m 200 --rMin 0 --rMax 2\n</code></pre> <p>Tasks and questions:</p> <ul> <li>Modify the physics model to scale the ggH and bbH processes by <code>r_ggH</code> and <code>r_bbH</code> separately.</li> <li>Then rerun the <code>MultiDimFit</code> command - you should see the result for both signal strengths printed.</li> </ul>"},{"location":"part5/longexercise/#b-performing-and-plotting-2d-likelihood-scans","title":"B: Performing and plotting 2D likelihood scans","text":"<p>For a model with two POIs it is often useful to look at the how well we are able to measure both simultaneously. A natural extension of determining 1D confidence intervals on a single parameter like we did in part 3D is to determine confidence level regions in 2D. To do this we also use combine in a similar way, with <code>-M MultiDimFit --algo grid</code>. When two POIs are found, Combine will scan a 2D grid of points instead of a 1D array.</p> <p>Tasks and questions:</p> <ul> <li>Run a 2D likelihood scan in <code>r_ggH</code> and <code>r_bbH</code>. You can start with around 100 points but may need to increase this later too see more detail in the resulting plot.</li> <li>Have a look at the output limit tree, it should have branches for each POI as well as the usual deltaNLL value. You can use TTree::Draw to plot a 2D histogram of deltaNLL with <code>r_ggH</code> and <code>r_bbH</code> on the axes.</li> </ul>"},{"location":"part5/longexerciseanswers/","title":"Answers to tasks and questions","text":""},{"location":"part5/longexerciseanswers/#part-1-a-one-bin-counting-experiment","title":"Part 1: A one-bin counting experiment","text":""},{"location":"part5/longexerciseanswers/#a-computing-limits-using-the-asymptotic-approximation","title":"A: Computing limits using the asymptotic approximation","text":"<p>Tasks and questions:</p> <ul> <li>There are some important uncertainties missing from the datacard above. Add the uncertainty on the luminosity (name: <code>lumi_13TeV</code>) which has a 2.5% effect on all processes (except the <code>jetFakes</code>, which are taken from data), and uncertainties on the inclusive cross sections of the <code>Ztautau</code> and <code>ttbar</code> processes (with names <code>xsec_Ztautau</code> and <code>xsec_diboson</code>) which are 4% and 6% respectively.</li> <li>Try changing the values of some uncertainties (up or down, or removing them altogether) - how do the expected and observed limits change?</li> </ul> Show answer  Larger uncertainties make the limits worse (ie, higher values of the limit); smaller uncertainties improve the limit (lower values of the limit).  <ul> <li>Now try changing the number of observed events. The observed limit will naturally change, but the expected does too - why might this be?</li> </ul> Show answer  This is because the expected limit relies on a background-only Asimov dataset that is created  after  a background-only fit to the data. 
By changing the observed number of events, the pulls on the NPs in this fit also change, and therefore so does the expected sensitivity."},{"location":"part5/longexerciseanswers/#advanced-section-b-computing-limits-with-toys","title":"Advanced section: B: Computing limits with toys","text":"<p>Tasks and questions:</p> <ul> <li>In contrast to <code>AsymptoticLimits</code>, with <code>HybridNew</code> each limit comes with an uncertainty. What is the origin of this uncertainty?</li> </ul> Show answer  The uncertainty is caused by the limited number of toys: the values of Pmu and Pb come from counting the number of toys in the tails of the test statistic distributions. The number of toys used can be adjusted with the option <code> --toysH </code> <ul> <li>How good is the agreement between the asymptotic and toy-based methods?</li> </ul> Show answer  The agreement should be pretty good in this example, but will generally break down once we get to the level of 0-5 events.  <ul> <li>Why does it take longer to calculate the lower expected quantiles (e.g. 0.025, 0.16)? Think about how the statistical uncertainty on the CLs value depends on Pmu and Pb.</li> </ul> Show answer  For this we need the definition of CLs = Pmu / (1-Pb). The 0.025 expected quantile is by definition where Pb = 0.025, so for a 95% CL limit we have CLs = 0.05, implying we are looking for the value of r where Pmu = 0.00125. With 1000 s+b toys we would then only expect `1000 * 0.00125 = 1.25` toys in the tail region we have to integrate over. Contrast this to the median limit where 25 toys would be in this region. This means we have to generate a much larger number of toys to get the same statistical power."},{"location":"part5/longexerciseanswers/#advanced-section-b-asymptotic-approximation-limitations","title":"Advanced section: B: Asymptotic approximation limitations","text":"<p>Tasks and questions:</p> <ul> <li>Is the asymptotic limit still a good approximation?</li> </ul> Show answer  A \"good\" approximation is not well defined, but the difference is clearly larger here.  <ul> <li>You might notice that the test statistic distributions are not smooth but rather have several \"bump\" structures. Where might this come from? Try reducing the size of the systematic uncertainties to make them more pronounced.</li> </ul> Show answer  This bump structure comes from the discreteness of the Poisson sampling of the toy datasets. Systematic uncertainties then smear these bumps out, but without systematics we would see delta functions corresponding to the possible integer numbers of events that could be observed. Once we go to more typical multi-bin analyses with more events and systematic uncertainties this discreteness washes out very quickly."},{"location":"part5/longexerciseanswers/#part-2-a-shape-based-analysis","title":"Part 2: A shape-based analysis","text":""},{"location":"part5/longexerciseanswers/#a-setting-up-the-datacard","title":"A: Setting up the datacard","text":"<p>Only tasks, no questions in this section</p>"},{"location":"part5/longexerciseanswers/#b-running-combine-for-a-blind-analysis","title":"B: Running combine for a blind analysis","text":"<p>Tasks and questions:</p> <ul> <li>Compare the expected limits calculated with --run expected and --run blind. Why are they different?</li> </ul> Show answer  When using --run blind combine will create a background-only Asimov dataset without performing a fit to data first. 
With --run expected, the observed limit isn't shown, but the background-only Asimov dataset used for the limit calculation is still created after a background-only fit to the data.  <ul> <li>Calculate a blind limit by generating a background-only Asimov with the -t option instead of using the AsymptoticLimits specific options. You should find the observed limit is the same as the expected. Then see what happens if you inject a signal into the Asimov dataset using the --expectSignal [X] option.</li> </ul> Show answer  You should see that with a signal injected the observed limit is worse (has a higher value) than the expected limit: for the expected limit the b-only Asimov dataset is still used, but the observed limit is now calculated on the signal + background Asimov dataset, with a signal at the specified cross section [X]."},{"location":"part5/longexerciseanswers/#c-using-fitdiagnostics","title":"C: Using FitDiagnostics","text":"<p>Tasks and questions:</p> <ul> <li>Which parameter has the largest shift from the nominal value? Which has the tightest constraint?</li> </ul> Show answer <code> CMS_eff_t_highpt </code> should have the largest shift from the nominal value (around 0.47), <code> norm_jetFakes </code> has the tightest constraint (to 25% of the input uncertainty).  <ul> <li>Should we be concerned when a parameter is more strongly constrained than the input uncertainty (i.e. \\(\\frac{\\sigma}{\\sigma_I}&lt;1.0\\))?</li> </ul> Show answer  This is still a hot topic in CMS analyses today, and there isn't a right or wrong answer. Essentially we have to judge if our analysis should really be able to provide more information about this parameter than the external measurement that gave us the input uncertainty. So we would not expect to be able to constrain the luminosity uncertainty for example, but uncertainties specific to the analysis might legitimately be constrained."},{"location":"part5/longexerciseanswers/#d-mc-statistical-uncertainties","title":"D: MC statistical uncertainties","text":"<p>Tasks and questions:</p> <ul> <li>Check how much the cross section measurement and uncertainties change using <code>FitDiagnostics</code>.</li> </ul> Show answer  Without autoMCStats we find: <code> Best fit r: -2.73273  -2.13428/+3.38185</code>, with autoMCStats: <code> Best fit r: -3.07825  -3.17742/+3.7087 </code> <ul> <li>It is also useful to check how the expected uncertainty changes using an Asimov dataset, say with <code>r=10</code> injected.</li> </ul> Show answer  Without autoMCStats we find: <code> Best fit r: 9.99978  -4.85341/+6.56233 </code>, with autoMCStats: <code> Best fit r: 9.99985  -5.24634/+6.98266 </code> <ul> <li>Advanced task: See what happens if the Poisson threshold is increased. Based on your results, what threshold would you recommend for this analysis?</li> </ul> Show answer  At first the uncertainties increase, as the threshold increases, and at some point they stabilise. A Poisson threshold at 10 is probably reasonable for this analysis."},{"location":"part5/longexerciseanswers/#part-3-adding-control-regions","title":"Part 3: Adding control regions","text":""},{"location":"part5/longexerciseanswers/#a-use-of-rateparams","title":"A: Use of rateParams","text":"<p>Tasks and questions:</p> <ul> <li>Run <code>text2workspace.py</code> on this combined card and then use <code>FitDiagnostics</code> on an Asimov dataset with <code>r=1</code> to get the expected uncertainty. 
Suggested command line options: <code>--rMin 0 --rMax 2</code></li> </ul> Show answer  As expected uncertainty you should get <code> -0.417238/+0.450593 </code> <ul> <li>Using the RooFitResult in the <code>fitDiagnosticsTest.root</code> file, check the post-fit value of the rateParams. To what level are the normalisations of the DY and ttbar processes constrained?</li> </ul> Show answer  They are constrained to around 1-2%  <ul> <li>To compare to the previous approach of fitting the SR only, with cross section and acceptance uncertainties restored, an additional card is provided: <code>datacard_part3_nocrs.txt</code>. Run the same fit on this card to verify the improvement of the SR+CR approach</li> </ul> Show answer  The expected uncertainty is larger with only the SR: <code> -0.465799/+0.502088 </code> compared with <code> -0.417238/+0.450593 </code> in the SR+CR approach."},{"location":"part5/longexerciseanswers/#b-nuisance-parameter-impacts","title":"B: Nuisance parameter impacts","text":"<p>Tasks and questions:</p> <ul> <li>Identify the most important uncertainties using the impacts tool.</li> </ul> Show answer  The most important uncertainty is <code>norm_jetFakes</code>, followed by two MC statistical uncerainties (<code>prop_binsignal_region_bin8</code> and <code>prop_binsignal_region_bin9</code>). <ul> <li>In the plot, some parameters do not show a plotted point for the fitted value, but rather just a numerical value - why?</li> </ul> Show answer  These are freely floating parameters (<code> rate_ttbar </code> and <code> rate_Zll </code>). They have no prior constraint (and so no shift from the nominal value relative to the input uncertainty) - we show the best-fit value + uncertainty directly."},{"location":"part5/longexerciseanswers/#c-post-fit-distributions","title":"C: Post-fit distributions","text":"<p>Tasks and questions:</p> <p>The bin errors on the TH1s in the fitdiagnostics file are determined from the systematic uncertainties. In the post-fit these take into account the additional constraints on the nuisance parameters as well as any correlations.</p> <ul> <li>Why is the uncertainty on the post-fit so much smaller than on the pre-fit?</li> </ul> Show answer  There are two effects at play here: the nuisance parameters get constrained, and there are anti-correlations between the parameters which also have the effect of reducing the total uncertainty. Note: the post-fit uncertainty could become larger when rateParams are present as they are not taken into account in the pre-fit uncertainty but do enter in the post-fit uncertainty."},{"location":"part5/longexerciseanswers/#d-calculating-the-significance","title":"D: Calculating the significance","text":"<p>Tasks and questions:</p> <ul> <li>Advanced task It is also possible to calculate the significance using toys with <code>HybridNew</code> (details here) if we are in a situation where the asymptotic approximation is not reliable or if we just want to verify the result. 
Why might this be challenging for a high significance, say larger than \\(5\\sigma\\)?</li> </ul> Show answer  A significance of $5\\sigma$ corresponds to a p-value of around $3\\cdot 10^{-7}$ - so we need to populate the very tail of the test statistic distribution and this requires generating a large number of toys."},{"location":"part5/longexerciseanswers/#e-signal-strength-measurement-and-uncertainty-breakdown","title":"E: Signal strength measurement and uncertainty breakdown","text":"<p>Tasks and questions:</p> <ul> <li>Take our stat+syst split one step further and separate the systematic part into two: one part for hadronic tau uncertainties and one for all others. Do this by defining a <code>tauID</code> group in the datacard including the following parameters: <code>CMS_eff_t</code>, <code>CMS_eff_t_highpt</code>, and the three <code>CMS_scale_t_X</code> uncertainties.</li> </ul> Show datacard line  You should add this line to the end of the datacard: <pre><code>tauID group = CMS_eff_t CMS_eff_t_highpt CMS_scale_t_1prong0pi0_13TeV CMS_scale_t_1prong1pi0_13TeV CMS_scale_t_3prong0pi0_13TeV\n</code></pre> <ul> <li>To plot this and calculate the split via the relations above you can just add further arguments to the <code>--others</code> option in the <code>plot1DScan.py</code> script. Each is of the form: <code>'[file]:[label]:[color]'</code>. The <code>--breakdown</code> argument should also be extended to three terms.</li> </ul> Show code  This can be done as: <pre><code>python plot1DScan.py higgsCombine.part3E.MultiDimFit.mH200.root --others 'higgsCombine.part3E.freezeTauID.MultiDimFit.mH200.root:FreezeTauID:4' 'higgsCombine.part3E.freezeAll.MultiDimFit.mH200.root:FreezeAll:2' -o freeze_third_attempt --breakdown TauID,OtherSyst,Stat\n</code></pre> <ul> <li>How important are these tau-related uncertainties compared to the others?</li> </ul> Show answer  They are smaller than both the statistical uncertainty and the remaining systematic uncertainties."},{"location":"part5/roofit/","title":"RooFit Basics","text":"<p><code>RooFit</code> is an OO analysis environment built on <code>ROOT</code>. It has a collection of classes designed to augment <code>ROOT</code> for data modeling.</p> <p>This section covers a few of the basics of <code>RooFit</code>. There are many more tutorials available at this link: https://root.cern.ch/root/html600/tutorials/roofit/index.html</p>"},{"location":"part5/roofit/#objects","title":"Objects","text":"<p>In <code>RooFit</code>, any variable, data point, function, PDF (etc.) is represented by a C++ object. The most basic of these is the <code>RooRealVar</code>. We will create one that will represent the mass of some hypothetical particle; we name it and give it an initial starting value and range.</p> <p><pre><code>RooRealVar MH(\"MH\",\"mass of the Hypothetical Boson (H-boson) in GeV\",125,120,130);\nMH.Print();\n</code></pre> <pre><code>RooRealVar::MH = 125  L(120 - 130)\n</code></pre></p> <p>Ok, great. This variable is now an object we can play around with. We can access this object and modify its properties, such as its value. </p> <pre><code>MH.setVal(130);\nMH.getVal();\n</code></pre> <p>In particle detectors we typically do not observe this particle mass, but usually define some observable which is sensitive to this mass. We will assume we can detect and reconstruct the decay products of the H-boson and measure the invariant mass of those particles. 
We need to make another variable that represents that invariant mass.</p> <pre><code>RooRealVar mass(\"m\",\"m (GeV)\",100,80,200);\n</code></pre> <p>In the perfect world we would perfectly measure the exact mass of the particle in every single event. However, our detectors are usually far from perfect so there will be some resolution effect. We will assume the resolution of our measurement of the invariant mass is 10 GeV and call it \"sigma\"</p> <pre><code>RooRealVar sigma(\"resolution\",\"#sigma\",10,0,20);\n</code></pre> <p>More exotic variables can be constructed out of these <code>RooRealVar</code>s using <code>RooFormulaVars</code>. For example, suppose we wanted to make a function out of the variables that represented the relative resolution as a function of the hypothetical mass MH. </p> <pre><code>RooFormulaVar func(\"R\",\"@0/@1\",RooArgList(sigma,mass));\nfunc.Print(\"v\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes: \n  Address: 0x10e878068\n  Clients: \n  Servers: \n    (0x10dcd47b0,V-) RooRealVar::resolution \"#sigma\"\n    (0x10dcd4278,V-) RooRealVar::m \"m (GeV)\"\n  Proxies: \n    actualVars -&gt; \n      1)  resolution\n      2)           m\n--- RooAbsReal ---\n\n  Plot label is \"R\"\n    --- RooFormula ---\n    Formula: \"@0/@1\"\n    (resolution,m)\n</code></pre> <p>Notice how there is a list of the variables we passed (the servers or \"actual vars\"). We can now plot the function. <code>RooFit</code> has a special plotting object <code>RooPlot</code> which keeps track of the objects (and their normalisations) that we want to draw. Since <code>RooFit</code> does not know the difference between objects that are and are not dependent, we need to tell it. </p> <p>Right now, we have the relative resolution as \\(R(m,\\sigma)\\), whereas we want to plot  \\(R(m,\\sigma(m))\\)!</p> <p><pre><code>TCanvas *can = new TCanvas();\n\n//make the x-axis the \"mass\"\nRooPlot *plot = mass.frame(); \nfunc.plotOn(plot);\n\nplot-&gt;Draw();\ncan-&gt;Draw();\n</code></pre> </p> <p>The main objects we are interested in using from <code>RooFit</code> are probability denisty functions or (PDFs). We can construct the PDF,</p> \\[ f(m|M_{H},\\sigma) \\] <p>as a simple Gaussian shape for example or a <code>RooGaussian</code> in <code>RooFit</code> language (think McDonald's logic, everything is a <code>RooSomethingOrOther</code>)</p> <pre><code>RooGaussian gauss(\"gauss\",\"f(m|M_{H},#sigma)\",mass,MH,sigma);\ngauss.Print(\"V\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes: \n  Address: 0x10ecf4188\n  Clients: \n  Servers: \n    (0x10dcd4278,V-) RooRealVar::m \"m (GeV)\"\n    (0x10a08a9d8,V-) RooRealVar::MH \"mass of the Hypothetical Boson (H-boson) in GeV\"\n    (0x10dcd47b0,V-) RooRealVar::resolution \"#sigma\"\n  Proxies: \n    x -&gt; m\n    mean -&gt; MH\n    sigma -&gt; resolution\n--- RooAbsReal ---\n\n  Plot label is \"gauss\"\n--- RooAbsPdf ---\nCached value = 0\n</code></pre> <p>Notice how the gaussian PDF, like the <code>RooFormulaVar</code> depends on our <code>RooRealVar</code> objects, these are its servers.  Its evaluation will depend on their values. </p> <p>The main difference between PDFs and Functions in RooFit is that PDFs are automatically normalised to unitiy, hence they represent a probability density, you don't need to normalise yourself. 
Lets plot it for the different values of \\(m\\).</p> <pre><code>plot = mass.frame();\n\ngauss.plotOn(plot);\n\nMH.setVal(120);\ngauss.plotOn(plot,RooFit::LineColor(kBlue));\n\nMH.setVal(125);\ngauss.plotOn(plot,RooFit::LineColor(kRed));\n\nMH.setVal(135);\ngauss.plotOn(plot,RooFit::LineColor(kGreen));\n\nplot-&gt;Draw();\n\ncan-&gt;Update();\ncan-&gt;Draw();\n</code></pre> <p></p> <p>Note that as we change the value of <code>MH</code>, the PDF gets updated at the same time.</p> <p>PDFs can be used to generate Monte Carlo data. One of the benefits of <code>RooFit</code> is that to do so only uses a single line of code! As before, we have to tell <code>RooFit</code> which variables to generate in (e.g which are the observables for an experiment). In this case, each of our events will be a single value of \"mass\" \\(m\\). The arguments for the function are the set of observables, follwed by the number of events,</p> <pre><code>RooDataSet *gen_data = (RooDataSet*) gauss.generate(RooArgSet(mass),500); \n</code></pre> <p>Now we can plot the data as with other RooFit objects.</p> <pre><code>plot = mass.frame();\n\ngen_data-&gt;plotOn(plot);\ngauss.plotOn(plot);\ngauss.paramOn(plot);\n\nplot-&gt;Draw();\ncan-&gt;Update();\ncan-&gt;Draw();\n</code></pre> <p></p> <p>Of course we are not in the business of generating MC events, but collecting real data!. Next we will look at using real data in <code>RooFit</code>.</p>"},{"location":"part5/roofit/#datasets","title":"Datasets","text":"<p>A dataset is essentially just a collection of points in N-dimensional (N-observables) space. There are two basic implementations in <code>RooFit</code>, </p> <p>1) an \"unbinned\" dataset - <code>RooDataSet</code></p> <p>2) a \"binned\" dataset - <code>RooDataHist</code></p> <p>both of these use the same basic structure as below</p> <p></p> <p>We will create an empty dataset where the only observable is the mass. Points can be added to the dataset one by one ...</p> <pre><code>RooDataSet mydata(\"dummy\",\"My dummy dataset\",RooArgSet(mass)); \n// We've made a dataset with one observable (mass)\n\nmass.setVal(123.4);\nmydata.add(RooArgSet(mass));\nmass.setVal(145.2);\nmydata.add(RooArgSet(mass));\nmass.setVal(170.8);\nmydata.add(RooArgSet(mass));\n\nmydata.Print();\n</code></pre> <pre><code>RooDataSet::dummy[m] = 3 entries\n</code></pre> <p>There are also other ways to manipulate datasets in this way as shown in the diagram below </p> <p></p> <p>Luckily there are also Constructors for a <code>RooDataSet</code> from a <code>TTree</code> and for a <code>RooDataHist</code> from a <code>TH1</code> so its simple to convert from your usual ROOT objects.</p> <p>We will take an example dataset put together already. The file <code>tutorial.root</code> can be downloaded here.</p> <pre><code>TFile *file = TFile::Open(\"tutorial.root\");\nfile-&gt;ls();\n</code></pre> Show file contents <pre><code>TFile**     tutorial.root\n TFile*     tutorial.root\n  KEY: RooWorkspace workspace;1 Tutorial Workspace\n  KEY: TProcessID   ProcessID0;1    48737500-e7e5-11e6-be6f-0d0011acbeef\n</code></pre> <p>Inside the file, there is something called a <code>RooWorkspace</code>. This is just the <code>RooFit</code> way of keeping a persistent link between the objects for a model. It is a very useful way to share data and PDFs/functions etc among CMS collaborators.</p> <p>We will now take a look at it. It contains a <code>RooDataSet</code> and one variable. 
This time we called our variable (or observable) <code>CMS_hgg_mass</code>, we will assume that this is the invariant mass of photon pairs where we assume our H-boson decays to photons.  </p> <pre><code>RooWorkspace *wspace = (RooWorkspace*) file-&gt;Get(\"workspace\");\nwspace-&gt;Print(\"v\");\n</code></pre> Show <pre><code>RooWorkspace(workspace) Tutorial Workspace contents\n\nvariables\n---------\n(CMS_hgg_mass)\n\ndatasets\n--------\nRooDataSet::dataset(CMS_hgg_mass)\n</code></pre> <p>Now we will have a look at the data. The <code>RooWorkspace</code> has several accessor functions, we will use the <code>RooWorkspace::data</code> one.  There are also <code>RooWorkspace::var</code>, <code>RooWorkspace::function</code> and <code>RooWorkspace::pdf</code> with (hopefully) obvious purposes.</p> <p><pre><code>RooDataSet *hgg_data = (RooDataSet*) wspace-&gt;data(\"dataset\");\nRooRealVar *hgg_mass = (RooRealVar*) wspace-&gt;var(\"CMS_hgg_mass\");\n\nplot = hgg_mass-&gt;frame();\n\nhgg_data-&gt;plotOn(plot,RooFit::Binning(160)); \n// Here we've picked a certain number of bins just for plotting purposes \n\nTCanvas *hggcan = new TCanvas();\nplot-&gt;Draw();\nhggcan-&gt;Update();\nhggcan-&gt;Draw();\n</code></pre> </p>"},{"location":"part5/roofit/#likelihoods-and-fitting-to-data","title":"Likelihoods and Fitting to data","text":"<p>The data we have in our file does not look like a Gaussian distribution. Instead, we could probably use something like an exponential to describe it. </p> <p>There is an exponential PDF already in <code>RooFit</code> (yes, you guessed it) <code>RooExponential</code>. For a PDF, we only need one parameter which is the exponential slope \\(\\alpha\\) so our pdf is,  </p> \\[ f(m|\\alpha) = \\dfrac{1}{N} e^{-\\alpha m}\\] <p>Where of course, \\(N = \\int_{110}^{150} e^{-\\alpha m} dm\\) is the normalisation constant.</p> <p>You can find several available <code>RooFit</code> functions here: https://root.cern.ch/root/html/ROOFIT_ROOFIT_Index.html</p> <p>There is also support for a generic PDF in the form of a <code>RooGenericPdf</code>, check this link: https://root.cern.ch/doc/v608/classRooGenericPdf.html</p> <p>Now we will create an exponential PDF for our background, </p> <pre><code>RooRealVar alpha(\"alpha\",\"#alpha\",-0.05,-0.2,0.01);\nRooExponential expo(\"exp\",\"exponential function\",*hgg_mass,alpha);\n</code></pre> <p>We can use <code>RooFit</code> to tell us to estimate the value of \\(\\alpha\\) using this dataset. You will learn more about parameter estimation, but for now we will just assume you know about maximizing likelihoods. This maximum likelihood estimator is common in HEP and is known to give unbiased estimates for things like distribution means etc. </p> <p>This also introduces the other main use of PDFs in <code>RooFit</code>. They can be used to construct likelihoods easily.</p> <p>The likelihood \\(\\mathcal{L}\\) is defined for a particluar dataset (and model) as being proportional to the probability to observe the data assuming some pdf. 
For our data, the probability to observe an event with a value in an interval bounded by a and b is given by,</p> \\[ P\\left(m~\\epsilon~[a,b] \\right) = \\int_{a}^{b} f(m|\\alpha)dm  \\] <p>As that interval shrinks we can say this probability just becomes equal to \\(f(m|\\alpha)dm\\).</p> <p>The probability to observe the dataset we have is given by the product of such probabilities for each of our data points, so that </p> \\[\\mathcal{L}(\\alpha) \\propto \\prod_{i} f(m_{i}|\\alpha)\\] <p>Note that for a specific dataset, the \\(dm\\) factors which should be there are constnant. They can therefore be absorbed into the constant of proportionality!</p> <p>The maximum likelihood esitmator for \\(\\alpha\\), usually written as \\(\\hat{\\alpha}\\), is found by maximising \\(\\mathcal{L}(\\alpha)\\).</p> <p>Note that this will not depend on the value of the constant of proportionality so we can ignore it. This is true in most scenarios because usually only the ratio of likelihoods is needed, in which the constant factors out. </p> <p>Obviously this multiplication of exponentials can lead to very large (or very small) numbers which can lead to numerical instabilities. To avoid this, we can take logs of the likelihood. Its also common to multiply this by -1 and minimize the resulting Negative Log Likelihood : \\(\\mathrm{-Log}\\mathcal{L}(\\alpha)\\).</p> <p><code>RooFit</code> can construct the NLL for us.</p> <pre><code>RooNLLVar *nll = (RooNLLVar*) expo.createNLL(*hgg_data);\nnll-&gt;Print(\"v\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes:\n  Address: 0x7fdddbe46200\n  Clients:\n  Servers:\n    (0x11eab5638,V-) RooRealVar::alpha \"#alpha\"\n  Proxies:\n    paramSet -&gt;\n      1)  alpha\n--- RooAbsReal ---\n\n  Plot label is \"nll_exp_dataset\"\n</code></pre> <p>Notice that the NLL object knows which RooRealVar is the parameter because it doesn't find that one in the dataset. This is how <code>RooFit</code> distiguishes between observables and parameters.</p> <p><code>RooFit</code> has an interface to Minuit via the <code>RooMinimizer</code> class which takes the NLL as an argument. To minimize, we just call the <code>RooMinimizer::minimize()</code> function. <code>Minuit2</code> is the program and <code>migrad</code> is the minimization routine which uses gradient descent.</p> <pre><code>RooMinimizer minim(*nll);\nminim.minimize(\"Minuit2\",\"migrad\");  \n</code></pre> Show <pre><code> **********\n **    1 **SET PRINT           1\n **********\n **********\n **    2 **SET NOGRAD\n **********\n PARAMETER DEFINITIONS:\n    NO.   NAME         VALUE      STEP SIZE      LIMITS\n     1 alpha       -5.00000e-02  2.10000e-02   -2.00000e-01  1.00000e-02\n **********\n **    3 **SET ERR         0.5\n **********\n **********\n **    4 **SET PRINT           1\n **********\n **********\n **    5 **SET STR           1\n **********\n NOW USING STRATEGY  1: TRY TO BALANCE SPEED AGAINST RELIABILITY\n **********\n **    6 **MIGRAD         500           1\n **********\n FIRST CALL TO USER FUNCTION AT NEW START POINT, WITH IFLAG=4.\n START MIGRAD MINIMIZATION.  STRATEGY  1.  CONVERGENCE WHEN EDM .LT. 1.00e-03\n FCN=3589.52 FROM MIGRAD    STATUS=INITIATE        4 CALLS           5 TOTAL\n                     EDM= unknown      STRATEGY= 1      NO ERROR MATRIX\n  EXT PARAMETER               CURRENT GUESS       STEP         FIRST\n  NO.   
NAME      VALUE            ERROR          SIZE      DERIVATIVE\n   1  alpha       -5.00000e-02   2.10000e-02   2.24553e-01  -9.91191e+01\n                               ERR DEF= 0.5\n MIGRAD MINIMIZATION HAS CONVERGED.\n MIGRAD WILL VERIFY CONVERGENCE AND ERROR MATRIX.\n COVARIANCE MATRIX CALCULATED SUCCESSFULLY\n FCN=3584.68 FROM MIGRAD    STATUS=CONVERGED      18 CALLS          19 TOTAL\n                     EDM=1.4449e-08    STRATEGY= 1      ERROR MATRIX ACCURATE\n  EXT PARAMETER                                   STEP         FIRST\n  NO.   NAME      VALUE            ERROR          SIZE      DERIVATIVE\n   1  alpha       -4.08262e-02   2.91959e-03   1.33905e-03  -3.70254e-03\n                               ERR DEF= 0.5\n EXTERNAL ERROR MATRIX.    NDIM=  25    NPAR=  1    ERR DEF=0.5\n  8.527e-06\n</code></pre> <p><code>RooFit</code> has found the best fit value of alpha for this dataset. It also estimates an uncertainty on alpha using the Hessian matrix from the fit.</p> <p><pre><code>alpha.Print(\"v\");\n</code></pre> <pre><code>--- RooAbsArg ---\n  Value State: clean\n  Shape State: clean\n  Attributes:\n  Address: 0x11eab5638\n  Clients:\n    (0x11eab5978,V-) RooExponential::exp \"exponential function\"\n    (0x7fdddbe46200,V-) RooNLLVar::nll_exp_dataset \"-log(likelihood)\"\n    (0x7fdddbe95600,V-) RooExponential::exp \"exponential function\"\n    (0x7fdddbe5a400,V-) RooRealIntegral::exp_Int[CMS_hgg_mass] \"Integral of exponential function\"\n  Servers:\n  Proxies:\n--- RooAbsReal ---\n\n  Plot label is \"alpha\"\n--- RooAbsRealLValue ---\n  Fit range is [ -0.2 , 0.01 ]\n--- RooRealVar ---\n  Error = 0.00291959\n</code></pre></p> <p>We will plot the resulting exponential on top of the data. Notice that the value of \\(\\hat{\\alpha}\\) is used for the exponential. </p> <pre><code>expo.plotOn(plot);\nexpo.paramOn(plot);\nplot-&gt;Draw();\nhggcan-&gt;Update();\nhggcan-&gt;Draw();\n</code></pre> <p></p> <p>It looks like there could be a small region near 125 GeV for which our fit does not quite go through the points. Maybe our hypothetical H-boson is not so hypothetical after all!</p> <p>We will now see what happens if we include some resonant signal into the fit. We can take our Gaussian function again and use that as a signal model. A reasonable value for the resolution of a resonant signal with a mass around 125 GeV decaying to a pair of photons is around a GeV.</p> <pre><code>sigma.setVal(1.);\nsigma.setConstant();\n\nMH.setVal(125);\nMH.setConstant();\n\nRooGaussian hgg_signal(\"signal\",\"Gaussian PDF\",*hgg_mass,MH,sigma);\n</code></pre> <p>By setting these parameters constant, <code>RooFit</code> knows (either when creating the NLL by hand or when using <code>fitTo</code>) that there is not need to fit for these parameters. </p> <p>We need to add this to our exponential model and fit a \"Sigmal+Background model\" by creating a <code>RooAddPdf</code>. In <code>RooFit</code> there are two ways to add PDFs, recursively where the fraction of yields for the signal and background is a parameter or absolutely where each PDF has its own normalization. 
We're going to use the second one.</p> <pre><code>RooRealVar norm_s(\"norm_s\",\"N_{s}\",10,100);\nRooRealVar norm_b(\"norm_b\",\"N_{b}\",0,1000);\n\nconst RooArgList components(hgg_signal,expo);\nconst RooArgList coeffs(norm_s,norm_b);\n\nRooAddPdf model(\"model\",\"f_{s+b}\",components,coeffs);\nmodel.Print(\"v\");\n</code></pre> Show <pre><code>--- RooAbsArg ---\n  Value State: DIRTY\n  Shape State: DIRTY\n  Attributes: \n  Address: 0x11ed5d7a8\n  Clients: \n  Servers: \n    (0x11ed5a0f0,V-) RooGaussian::signal \"Gaussian PDF\"\n    (0x11ed5d058,V-) RooRealVar::norm_s \"N_{s}\"\n    (0x11eab5978,V-) RooExponential::exp \"exponential function\"\n    (0x11ed5d398,V-) RooRealVar::norm_b \"N_{b}\"\n  Proxies: \n    !refCoefNorm -&gt; \n    !pdfs -&gt; \n      1)  signal\n      2)     exp\n    !coefficients -&gt; \n      1)  norm_s\n      2)  norm_b\n--- RooAbsReal ---\n\n  Plot label is \"model\"\n--- RooAbsPdf ---\nCached value = 0\n</code></pre> <p>OK, now we will fit the model. Note that this time we add the option <code>Extended()</code>, which tells <code>RooFit</code> that we care about the overall number of observed events in the data, \\(n\\), too. It will add an additional Poisson term to the likelihood to account for this, so our likelihood this time looks like,</p> \\[L_{s+b}(N_{s},N_{b},\\alpha) = \\dfrac{ (N_{s}+N_{b})^{n} e^{-(N_{s}+N_{b})} }{n!} \\cdot \\prod_{i}^{n} \\left[ c f_{s}(m_{i}|M_{H},\\sigma)+ (1-c)f_{b}(m_{i}|\\alpha)  \\right] \\] <p>where \\(c = \\dfrac{ N_{s} }{ N_{s} + N_{b} }\\),   \\(f_{s}(m|M_{H},\\sigma)\\) is the Gaussian signal pdf and \\(f_{b}(m|\\alpha)\\) is the exponential pdf. Remember that \\(M_{H}\\) and \\(\\sigma\\) are fixed so that they are no longer parameters of the likelihood.</p> <p>There is a simpler interface for maximum-likelihood fits: the <code>RooAbsPdf::fitTo</code> method. With this method, <code>RooFit</code> will construct the negative log-likelihood function from the pdf and minimize all of the free parameters in one step.</p> <pre><code>model.fitTo(*hgg_data,RooFit::Extended());\n\nmodel.plotOn(plot,RooFit::Components(\"exp\"),RooFit::LineColor(kGreen));\nmodel.plotOn(plot,RooFit::LineColor(kRed));\nmodel.paramOn(plot);\n\nhggcan-&gt;Clear();\nplot-&gt;Draw();\nhggcan-&gt;Update();\nhggcan-&gt;Draw();\n</code></pre> <p></p> <p>What if we also fit for the mass (\\(M_{H}\\))? We can easily do this by removing the constant setting on MH.</p> <pre><code>MH.setConstant(false);\nmodel.fitTo(*hgg_data,RooFit::Extended());\n</code></pre> Show output <pre><code>[#1] INFO:Minization -- RooMinimizer::optimizeConst: activating const optimization\n[#1] INFO:Minization --  The following expressions will be evaluated in cache-and-track mode: (signal,exp)\n **********\n **    1 **SET PRINT           1\n **********\n **********\n **    2 **SET NOGRAD\n **********\n PARAMETER DEFINITIONS:\n    NO.   
NAME         VALUE      STEP SIZE      LIMITS\n     1 MH           1.25000e+02  1.00000e+00    1.20000e+02  1.30000e+02\n     2 alpha       -4.08793e-02  2.96856e-03   -2.00000e-01  1.00000e-02\n     3 norm_b       9.67647e+02  3.25747e+01    0.00000e+00  1.00000e+03\n MINUIT WARNING IN PARAMETR\n ============== VARIABLE3 BROUGHT BACK INSIDE LIMITS.\n     4 norm_s       3.22534e+01  1.16433e+01    1.00000e+01  1.00000e+02\n **********\n **    3 **SET ERR         0.5\n **********\n **********\n **    4 **SET PRINT           1\n **********\n **********\n **    5 **SET STR           1\n **********\n NOW USING STRATEGY  1: TRY TO BALANCE SPEED AGAINST RELIABILITY\n **********\n **    6 **MIGRAD        2000           1\n **********\n FIRST CALL TO USER FUNCTION AT NEW START POINT, WITH IFLAG=4.\n START MIGRAD MINIMIZATION.  STRATEGY  1.  CONVERGENCE WHEN EDM .LT. 1.00e-03\n FCN=-2327.53 FROM MIGRAD    STATUS=INITIATE       10 CALLS          11 TOTAL\n                     EDM= unknown      STRATEGY= 1      NO ERROR MATRIX       \n  EXT PARAMETER               CURRENT GUESS       STEP         FIRST   \n  NO.   NAME      VALUE            ERROR          SIZE      DERIVATIVE \n   1  MH           1.25000e+02   1.00000e+00   2.01358e-01   1.12769e+01\n   2  alpha       -4.08793e-02   2.96856e-03   3.30048e-02  -1.22651e-01\n   3  norm_b       9.67647e+02   3.25747e+01   2.56674e-01  -1.96463e-02\n   4  norm_s       3.22534e+01   1.16433e+01   3.10258e-01  -8.97036e-04\n                               ERR DEF= 0.5\n MIGRAD MINIMIZATION HAS CONVERGED.\n MIGRAD WILL VERIFY CONVERGENCE AND ERROR MATRIX.\n COVARIANCE MATRIX CALCULATED SUCCESSFULLY\n FCN=-2327.96 FROM MIGRAD    STATUS=CONVERGED      65 CALLS          66 TOTAL\n                     EDM=1.19174e-05    STRATEGY= 1      ERROR MATRIX ACCURATE \n  EXT PARAMETER                                   STEP         FIRST   \n  NO.   NAME      VALUE            ERROR          SIZE      DERIVATIVE \n   1  MH           1.24628e+02   3.98153e-01   2.66539e-03   2.46327e-02\n   2  alpha       -4.07708e-02   2.97195e-03   1.10093e-03   8.33780e-02\n   3  norm_b       9.66105e+02   3.25772e+01   5.96627e-03   1.83523e-03\n   4  norm_s       3.39026e+01   1.17380e+01   9.60816e-03  -2.32681e-03\n                               ERR DEF= 0.5\n EXTERNAL ERROR MATRIX.    NDIM=  25    NPAR=  4    ERR DEF=0.5\n  1.589e-01 -3.890e-05  1.462e-01 -1.477e-01 \n -3.890e-05  8.836e-06 -2.020e-04  2.038e-04 \n  1.462e-01 -2.020e-04  1.073e+03 -1.072e+02 \n -1.477e-01  2.038e-04 -1.072e+02  1.420e+02 \n PARAMETER  CORRELATION COEFFICIENTS  \n       NO.  GLOBAL      1      2      3      4\n        1  0.04518   1.000 -0.033  0.011 -0.031\n        2  0.03317  -0.033  1.000 -0.002  0.006\n        3  0.27465   0.011 -0.002  1.000 -0.275\n        4  0.27610  -0.031  0.006 -0.275  1.000\n **********\n **    7 **SET ERR         0.5\n **********\n **********\n **    8 **SET PRINT           1\n **********\n **********\n **    9 **HESSE        2000\n **********\n COVARIANCE MATRIX CALCULATED SUCCESSFULLY\n FCN=-2327.96 FROM HESSE     STATUS=OK             23 CALLS          89 TOTAL\n                     EDM=1.19078e-05    STRATEGY= 1      ERROR MATRIX ACCURATE \n  EXT PARAMETER                                INTERNAL      INTERNAL  \n  NO.   
NAME      VALUE            ERROR       STEP SIZE       VALUE   \n   1  MH           1.24628e+02   3.98106e-01   5.33077e-04  -7.45154e-02\n   2  alpha       -4.07708e-02   2.97195e-03   2.20186e-04   5.42722e-01\n   3  norm_b       9.66105e+02   3.26003e+01   2.38651e-04   1.20047e+00\n   4  norm_s       3.39026e+01   1.17445e+01   3.84326e-04  -4.87967e-01\n                               ERR DEF= 0.5\n EXTERNAL ERROR MATRIX.    NDIM=  25    NPAR=  4    ERR DEF=0.5\n  1.588e-01 -3.888e-05  1.304e-01 -1.304e-01 \n -3.888e-05  8.836e-06 -1.954e-04  1.954e-04 \n  1.304e-01 -1.954e-04  1.074e+03 -1.082e+02 \n -1.304e-01  1.954e-04 -1.082e+02  1.421e+02 \n PARAMETER  CORRELATION COEFFICIENTS  \n       NO.  GLOBAL      1      2      3      4\n        1  0.04274   1.000 -0.033  0.010 -0.027\n        2  0.03314  -0.033  1.000 -0.002  0.006\n        3  0.27694   0.010 -0.002  1.000 -0.277\n        4  0.27806  -0.027  0.006 -0.277  1.000\n[#1] INFO:Minization -- RooMinimizer::optimizeConst: deactivating const optimization\n</code></pre> <p>Notice the result for the fitted MH is not 125 and is included in the list of fitted parameters.  We can get more information about the fit, via the <code>RooFitResult</code>, using the option <code>Save()</code>. </p> <pre><code>RooFitResult *fit_res = (RooFitResult*) model.fitTo(*hgg_data,RooFit::Extended(),RooFit::Save());\n</code></pre> <p>For example, we can get the Correlation Matrix from the fit result... Note that the order of the parameters are the same as listed in the \"Floating Parameter\" list above</p> <p><pre><code>TMatrixDSym cormat = fit_res-&gt;correlationMatrix();\ncormat.Print();\n</code></pre> <pre><code>4x4 matrix is as follows\n\n     |      0    |      1    |      2    |      3    |\n---------------------------------------------------------\n   0 |          1    -0.03282    0.009538    -0.02623 \n   1 |   -0.03282           1   -0.001978    0.005439 \n   2 |   0.009538   -0.001978           1     -0.2769 \n   3 |   -0.02623    0.005439     -0.2769           1 \n</code></pre></p> <p>A nice feature of <code>RooFit</code> is that once we have a PDF, data and results like this, we can import this new model into our <code>RooWorkspace</code> and show off our new discovery to our LHC friends (if we weren't already too late!). We can also save the \"state\" of our parameters for later, by creating a snapshot of the current values. 
</p> <pre><code>wspace-&gt;import(model);  \nRooArgSet *params = model.getParameters(*hgg_data);\nwspace-&gt;saveSnapshot(\"nominal_values\",*params);\n\nwspace-&gt;Print(\"V\");\n</code></pre> Show output <pre><code>RooWorkspace(workspace) Tutorial Workspace contents\n\nvariables\n---------\n(CMS_hgg_mass,MH,alpha,norm_b,norm_s,resolution)\n\np.d.f.s\n-------\nRooExponential::exp[ x=CMS_hgg_mass c=alpha ] = 0.00248636\nRooAddPdf::model[ norm_s * signal + norm_b * exp ] = 0.00240205\nRooGaussian::signal[ x=CMS_hgg_mass mean=MH sigma=resolution ] = 5.34013e-110\n\ndatasets\n--------\nRooDataSet::dataset(CMS_hgg_mass)\n\nparameter snapshots\n-------------------\nnominal_values = (MH=124.627 +/- 0.398094,resolution=1[C],norm_s=33.9097 +/- 11.7445,alpha=-0.040779 +/- 0.00297195,norm_b=966.109 +/- 32.6025)\n</code></pre> <p>This is exactly what needs to be done when you want to use shape based datacards in Combine with parametric models.</p>"},{"location":"part5/roofit/#a-likelihood-for-a-counting-experiment","title":"A likelihood for a counting experiment","text":"<p>An introductory presentation about likelihoods and interval estimation is available here.</p> <p>Note: We will use python syntax in this section; you should use a .py script. Make sure to do <code>import ROOT</code> at the top of your script</p> <p>We have seen how to create variables and PDFs, and how to fit a PDF to data. But what if we have a counting experiment, or a histogram template shape? And what about systematic uncertainties?  We are going to build a likelihood  for this:</p> <p>\\(\\mathcal{L} \\propto p(\\text{data}|\\text{parameters})\\)</p> <p>where our parameters are parameters of interest, \\(\\mu\\), and nuisance parameters, \\(\\nu\\). The nuisance parameters are constrained by external measurements, so we add constraint terms \\(\\pi(\\vec{\\nu}_0|\\vec{\\nu})\\)</p> <p>So we have \\(\\mathcal{L} \\propto p(\\text{data}|\\mu,\\vec{\\nu})\\cdot \\pi(\\vec{\\nu}_0|\\vec{\\nu})\\)</p> <p>now we will try to build the likelihood by hand for a 1-bin counting experiment. The data is the number of observed events \\(N\\), and the probability is just a Poisson probability \\(p(N|\\lambda) = \\frac{\\lambda^N e^{-\\lambda}}{N!}\\), where \\(\\lambda\\) is the number of events expected in our signal+background model: \\(\\lambda = \\mu\\cdot s(\\vec{\\nu}) + b(\\vec{\\nu})\\). </p> <p>In the expression, s and b are the numbers of expected signal and background events, which both depend on the nuisance parameters. We will start by building a simple likelihood function with one signal process and one background process. We will assume there are no nuisance parameters for now. The number of observed events in data is 15, the expected number of signal events is 5 and the expected number of background events 8.1.</p> <p>It is easiest to use the <code>RooFit</code> workspace factory to build our model (this tutorial has more information on the factory syntax).</p> <p><pre><code>import ROOT\nw = ROOT.RooWorkspace(\"w\")\n</code></pre> We need to create an expression for the number of events in our model, \\(\\mu s +b\\):</p> <p><pre><code>w.factory('expr::n(\"mu*s +b\", mu[1.0,0,4], s[5],b[8.1])')\n</code></pre> Now we can build the likelihood, which is just our Poisson PDF: <pre><code>w.factory('Poisson::poisN(N[15],n)')\n</code></pre></p> <p>To find the best fit value for our parameter of interest \\(\\mu\\) we need to maximize the likelihood. 
In practice it is actually easier to minimize the Negative log of the likelihood, or NLL:</p> <pre><code>w.factory('expr::NLL(\"-log(@0)\",poisN)')\n</code></pre> <p>We can now use the <code>RooMinimizer</code> to find the minimum of the NLL</p> <p><pre><code>nll = w.function(\"NLL\")\nminim = ROOT.RooMinimizer(nll)\nminim.setErrorLevel(0.5)\nminim.minimize(\"Minuit2\",\"migrad\")\nbestfitnll = nll.getVal()\n</code></pre> Notice that we need to set the error level to 0.5 to get the uncertainties (relying on Wilks' theorem!) - note that there is a more reliable way of extracting the confidence interval (explicitly rather than relying on migrad). We will discuss this a bit later in this section.</p> <p>Now we will add a nuisance parameter, lumi, which represents the luminosity uncertainty. It has a 2.5% effect on both the signal and the background. The parameter will be log-normally distributed: when it's 0, the normalization of the signal and background are not modified; at \\(+1\\sigma\\) the signal and background normalizations will be multiplied by 1.025 and at \\(-1\\sigma\\) they will be divided by 1.025.  We should modify the expression for the number of events in our model:</p> <pre><code>w.factory('expr::n(\"mu*s*pow(1.025,lumi) +b*pow(1.025,lumi)\", mu[1.0,0,4], s[5],b[8.1],lumi[0,-4,4])')\n</code></pre> <p>And we add a unit gaussian constraint  <pre><code>w.factory('Gaussian::lumiconstr(lumi,0,1)')\n</code></pre></p> <p>Our full likelihood will now be <pre><code>w.factory('PROD::likelihood(poisN,lumiconstr)')\n</code></pre> and the NLL <pre><code>w.factory('expr::NLL(\"-log(@0)\",likelihood)')\n</code></pre></p> <p>Which we can minimize in the same way as before. </p> <p>Now we will extend our model a bit. </p> <ul> <li>Expanding on what was demonstrated above, build the likelihood for \\(N=15\\), a signal process s with expectation 5 events, a background ztt with expectation 3.7 events and a background tt with expectation 4.4 events. The luminosity uncertainty applies to all three processes. The signal process is further subject to a 5% log-normally distributed uncertainty sigth, tt is subject to a 6% log-normally distributed uncertainty ttxs, and ztt is subject to a 4% log-normally distributed uncertainty zttxs. Find the best-fit value and the associated uncertainty</li> <li>Also perform an explicit scan of the \\(\\Delta\\) NLL ( = log of profile likelihood ratio) and make a graph of the scan. Some example code can be found below to get you started. Hint: you'll need to perform fits for different values of mu, where mu is fixed. In <code>RooFit</code> you can set a variable to be constant as <code>var(\"VARNAME\").setConstant(True)</code></li> <li>From the curve that you have created by performing an explicit scan, we can extract the 68% CL interval. You can do so by eye or by writing some code to find the relevant intersections of the curve. </li> </ul> <pre><code>gr = ROOT.TGraph()\n\nnpoints = 0\nfor i in range(0,60):\n  npoints+=1\n  mu=0.05*i\n  ...\n  [perform fits for different values of mu with mu fixed]\n  ...\n  deltanll = ...\n  gr.SetPoint(npoints,mu,deltanll)\n\n\ncanv = ROOT.TCanvas()\ngr.Draw(\"ALP\")\ncanv.SaveAs(\"likelihoodscan.pdf\")\n</code></pre> <p>Well, this is doable - but we were only looking at a simple one-bin counting experiment. This might become rather cumbersome for large models... 
\\([*]\\)</p> <p>For the next set of tutorials, we will switch to working with Combine, which will help in building the statistical model and performing the statistical analysis, instead of building the likelihood with <code>RooFit</code>.</p> <p>Info</p> <p><code>RooFit</code> does have additional functionality to help with statistical model building, but we will not go into detail in these tutorials.</p>"},{"location":"tutorial2023/parametric_exercise/","title":"Parametric Models in Combine","text":""},{"location":"tutorial2023/parametric_exercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>; please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>Now let's move to the working directory for this tutorial, which contains all of the inputs and scripts needed to run the parametric fitting exercise: <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/parametric_exercise\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#session-structure","title":"Session structure","text":"<p>The exercise is split into six parts which cover:</p> <p>1) Parametric model building</p> <p>2) Simple fits</p> <p>3) Systematic uncertainties</p> <p>4) Toy generation</p> <p>5) Discrete profiling</p> <p>6) Multi-signal hypothesis</p> <p>Throughout the tutorial there are a number of questions and exercises for you to complete. These are shown by the bullet points in this markdown file.</p> <p>All the code required to run the different parts is available in python scripts. We have purposely commented out the code to encourage you to open the scripts and take a look at what is inside. Each block is separated by a divider and a blank line. When you are happy and understand the code, you can uncomment (block by block) and then run the scripts (using python3) e.g.: <pre><code>python3 construct_models_part1.py\n</code></pre> A number of scripts will produce plots (as .png files). The default path to store these plots is in the current working directory. You can change this (e.g. pipe to an eos webpage) by changing the <code>plot_dir</code> variable in the <code>config.py</code> script.</p> <p>There's also a set of combine (.txt) datacards which will help you get through the various parts of the exercise. The exercises should help you become familiar with the structure of parametric fitting datacards.</p> <p>Finally, this exercise is heavily based on the <code>RooFit</code> package. So if you find yourself using the python interpreter for any checks, don't forget to... <pre><code>import ROOT\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#jupyter-notebooks","title":"Jupyter notebooks","text":"<p>Alternatively, we have provided <code>Jupyter</code> notebooks to run the different parts of the exercise e.g. <code>part1.ipynb</code>. You will have already downloaded these notebooks when cloning the tutorial gitlab repo. To open Jupyter notebooks on lxplus within a CMSSW environment, you can add the following option when you <code>ssh</code> into lxplus: <pre><code>ssh -X -Y username@lxplus.cern.ch -L8xxx:localhost:8xxx\n</code></pre> where you should replace <code>xxx</code> with some three-digit number. 
Then <code>cd</code> into the <code>combinetutorial-2023-parametric</code> directory and set up the CMSSW environment with: <pre><code>cmsenv\n</code></pre> You can then open the Jupyter notebook inside the environment with: <pre><code>jupyter notebook --no-browser --port 8xxx\n</code></pre> replacing <code>xxx</code> with the same three digit number. You should now be able to copy the url it provides into a browser and access the various exercise notebooks.</p>"},{"location":"tutorial2023/parametric_exercise/#analysis-overview","title":"Analysis overview","text":"<p>In this exercise we will look at one of the most famous parametric fitting analyses at the LHC: the Higgs boson decaying to two photons (H \\(\\rightarrow \\gamma\\gamma\\)). This decay channel is key in understanding the properties of the Higgs boson due to its clean final state topology. The excellent energy resolution- of the CMS electromagnetic calorimeter leads to narrow signal peak in the diphoton invariant mass spectrum, \\(m_{\\gamma\\gamma}\\), above a smoothly falling background continuum. The mass spectrum for the legacy Run 2 analysis is shown below.</p> <p></p> <p>In the analysis, we construct parametric models (analytic functions) of both signal and background events to fit the \\(m_{\\gamma\\gamma}\\) spectrum in data. From the fit we can extract measurements of Higgs boson properties including its rate of production, its mass (\\(m_H\\)), its coupling behaviour, to name a few. This exercise will show how to construct parametric models using RooFit, and subsequently how to use combine to extract the results.</p>"},{"location":"tutorial2023/parametric_exercise/#part-1-parametric-model-building","title":"Part 1: Parametric model building","text":"<p>As with any fitting exercise, the first step is to understand the format of the input data, explore its contents and construct a model. The python script which performs the model construction is <code>construct_models_part1.py</code>. This section will explain what the various lines of code are doing. If you are not very familiar with <code>RooFit</code>, you may want to refer to our <code>RooFit</code> Basics tutorial  here.</p>"},{"location":"tutorial2023/parametric_exercise/#signal-modelling","title":"Signal modelling","text":"<p>Firstly, we will construct a model to fit the signal (H \\(\\rightarrow\\gamma\\gamma\\)) mass peak using a Monte Carlo simulation sample of gluon-gluon fusion production (ggH) events with \\(m_H=125\\) GeV. This production mode has the largest cross section in the SM, and the LO Feynman diagram is shown below.</p> <p></p> <p>There has already been a dedicated selection performed on the events to increase the signal-to-background ratio (e.g. using some ML event classifier). Events passing this selection enter the analysis category, Tag0. Events entering Tag0 are used for the parametric fitting of the \\(m_{\\gamma\\gamma}\\) spectrum.</p> <p>The events are stored in a ROOT <code>TTree</code>, where the diphoton mass, <code>CMS_hgg_mass</code>, and the event weight, <code>weight</code>, are saved. 
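Before building any <code>RooFit</code> objects it can be useful to inspect the input file directly; a minimal sketch using only standard ROOT calls (the file and tree names are the ones used in this part) would be: <pre><code>import ROOT\n\nf = ROOT.TFile(\"mc_part1.root\")\nf.ls()                 # list the trees stored in the file\nt = f.Get(\"ggH_Tag0\")\nt.Print()              # show the branches, e.g. CMS_hgg_mass and weight\nprint(t.GetEntries())  # number of simulated events entering Tag0\n</code></pre> 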
Let's begin by loading the MC, and converting the <code>TTree</code> data into <code>RooDataSet</code>: <pre><code>import ROOT\nROOT.gROOT.SetBatch(True)\n\nf = ROOT.TFile(\"mc_part1.root\",\"r\")\n# Load TTree\nt = f.Get(\"ggH_Tag0\")\n\n# Define mass and weight variables\nmass = ROOT.RooRealVar(\"CMS_hgg_mass\", \"CMS_hgg_mass\", 125, 100, 180)\nweight = ROOT.RooRealVar(\"weight\",\"weight\",0,0,1)\n\n# Convert to RooDataSet\nmc = ROOT.RooDataSet(\"ggH_Tag0\",\"ggH_Tag0\", t, ROOT.RooArgSet(mass,weight), \"\", \"weight\" )\n\n# Lets plot the signal mass distribution\ncan = ROOT.TCanvas()\nplot = mass.frame()\nmc.plotOn(plot)\nplot.Draw()\ncan.Update()\ncan.SaveAs(\"part1_signal_mass.png\")\n</code></pre></p> <p></p> <p>The plot shows a peak centred on the Higgs mass at 125 GeV. Let's use a simple Gaussian to model the peak. <pre><code># Introduce a RooRealVar into the workspace for the Higgs mass\nMH = ROOT.RooRealVar(\"MH\", \"MH\", 125, 120, 130 )\nMH.setConstant(True)\n\n# Signal peak width\nsigma = ROOT.RooRealVar(\"sigma_ggH_Tag0\", \"sigma_ggH_Tag0\", 2, 1, 5)\n\n# Define the Gaussian with mean=MH and width=sigma\nmodel = ROOT.RooGaussian( \"model_ggH_Tag0\", \"model_ggH_Tag0\", mass, MH, sigma )\n\n# Fit Gaussian to MC events and plot\nmodel.fitTo(mc,ROOT.RooFit.SumW2Error(True))\n\ncan = ROOT.TCanvas()\nplot = mass.frame()\nmc.plotOn(plot)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(2) )\nplot.Draw()\ncan.Update()\ncan.Draw()\ncan.SaveAs(\"part1_signal_model_v0.png\")\n</code></pre></p> <p></p> <p>It looks like a good fit!</p> <p>Tasks and questions:</p> <ul> <li>Run the code above for yourself (or uncomment the relevant sections in <code>python3 construct_models_part1.py</code>) to produce the plots of the signal mass distribution and the signal model.</li> <li>Do you understand the output from the <code>fitTo</code> command (i.e the mimimization)? From now on we will add the option <code>ROOT.RooFit.PrintLevel(-1)</code> when fitting the models to surpress the minimizer output.</li> </ul> <p>But what if the mean of the model does not correspond directly to the Higgs boson mass i.e. there are some reconstruction effects. Let's instead define the mean of the model as:</p> \\[\\mu = m_H + \\delta\\] <p>and we can fit for \\(\\delta\\) in the model construction. For this we introduce a <code>RooFormulaVar</code>. <pre><code>dMH = ROOT.RooRealVar(\"dMH_ggH_Tag0\", \"dMH_ggH_Tag0\", 0, -1, 1 )\nmean = ROOT.RooFormulaVar(\"mean_ggH_Tag0\", \"mean_ggH_Tag0\", \"(@0+@1)\", ROOT.RooArgList(MH,dMH))\nmodel = ROOT.RooGaussian( \"model_ggH_Tag0\", \"model_ggH_Tag0\", mass, mean, sigma )\n\n# Fit the new model with a variable mean\nmodel.fitTo(mc,ROOT.RooFit.SumW2Error(True),ROOT.RooFit.PrintLevel(-1))\n\n# Model is parametric in MH. Let's show this by plotting for different values of MH\ncan = ROOT.TCanvas()\nplot = mass.frame()\nMH.setVal(120)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(2) )\nMH.setVal(125)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(3) )\nMH.setVal(130)\nmodel.plotOn( plot, ROOT.RooFit.LineColor(4) )\nplot.Draw()\ncan.Update()\ncan.SaveAs(\"part1_signal_model_v1.png\")\n</code></pre></p> <p></p> <p>Tasks and questions:</p> <ul> <li>Run the code above (or uncomment the relevant sections in the script) to produce the plots.</li> <li>This choice of setting the shape parameters to constant means we believe our MC will perfectly model the Higgs boson events in data. Is this the case? How could we account for the MC mis-modelling in the fit? 
(See part 3).</li> <li>Let's now save the model inside a <code>RooWorkspace</code>. Combine will load this model when performing the fits. Crucially, we need to freeze the fit parameters of the signal model, otherwise they will be freely floating in the final results extraction. Run the code below to save the model to a workspace.</li> </ul> <p><pre><code>MH.setVal(125)\ndMH.setConstant(True)\nsigma.setConstant(True)\n\nf_out = ROOT.TFile(\"workspace_sig.root\", \"RECREATE\")\nw_sig = ROOT.RooWorkspace(\"workspace_sig\",\"workspace_sig\")\ngetattr(w_sig, \"import\")(model)\nw_sig.Print()\nw_sig.Write()\nf_out.Close()\n</code></pre> We have successfully constructed a parametric model to fit the shape of the signal peak. But we also need to know the yield/normalisation of the ggH signal process. In the SM, the ggH event yield in Tag0 is equal to:</p> \\[ N = \\sigma_{ggH} \\cdot \\mathcal{B}^{\\gamma\\gamma} \\cdot \\epsilon \\cdot \\mathcal{L}\\] <p>Where \\(\\sigma_{ggH}\\) is the SM ggH cross section, \\(\\mathcal{B}^{\\gamma\\gamma}\\) is the SM branching fraction of the Higgs boson to two photons, \\(\\epsilon\\) is the efficiency factor and corresponds to the fraction of the total ggH events landing in the Tag0 analysis category. Finally \\(\\mathcal{L}\\) is the integrated luminosity.</p> <p>In this example, the ggH MC events are normalised before any selection is performed to \\(\\sigma_{ggH} \\cdot \\mathcal{B}^{\\gamma\\gamma}\\), taking the values from the LHCHWG twiki. Note this does not include the lumi scaling, which may be different to what you have in your own analyses! We can then calculate the efficiency factor, \\(\\epsilon\\), by taking the sum of weights in the MC dataset and dividing through by \\(\\sigma_{ggH} \\cdot \\mathcal{B}^{\\gamma\\gamma}\\). This will tell us what fraction of ggH events land in Tag0. <pre><code># Define SM cross section and branching fraction values\nxs_ggH = 48.58 #in [pb]\nbr_gamgam = 2.7e-3\n\n# Calculate the efficiency and print output\nsumw = mc.sumEntries()\neff = sumw/(xs_ggH*br_gamgam)\nprint(\"Efficiency of ggH events landing in Tag0 is: %.2f%%\"%(eff*100))\n\n# Calculate the total yield (assuming full Run 2 lumi) and print output\nlumi = 138000\nN = xs_ggH*br_gamgam*eff*lumi\nprint(\"For 138fb^-1, total normalisation of signal is: N = xs * br * eff * lumi = %.2f events\"%N)\n</code></pre> Gives the output: <pre><code>Efficiency of ggH events landing in Tag0 is: 1.00%\nFor 138fb^-1, total normalisation of signal is: N = xs * br * eff * lumi = 181.01 events\n</code></pre> So we find 1% of all ggH events enter Tag0. And the total expected yield of ggH events in Tag0 (with lumi scaling) is <code>181.01</code>. Lets make a note of this for later!</p>"},{"location":"tutorial2023/parametric_exercise/#background-modelling","title":"Background modelling","text":"<p>In the H \\(\\rightarrow\\gamma\\gamma\\) analysis we construct the background model directly from data. To avoid biasing our background estimate, we remove the signal region from the model construction and fit the mass sidebands. Let's begin by loading the data <code>TTree</code> and converting to a <code>RooDataSet</code>. We will then plot the mass sidebands. 
<pre><code>f = ROOT.TFile(\"data_part1.root\",\"r\")\nt = f.Get(\"data_Tag0\")\n\n# Convert TTree to a RooDataSet\ndata = ROOT.RooDataSet(\"data_Tag0\", \"data_Tag0\", t, ROOT.RooArgSet(mass), \"\", \"weight\")\n\n# Define mass sideband ranges on the mass variable: 100-115 and 135-180\nn_bins = 80\nbinning = ROOT.RooFit.Binning(n_bins,100,180)\nmass.setRange(\"loSB\", 100, 115 )\nmass.setRange(\"hiSB\", 135, 180 )\nmass.setRange(\"full\", 100, 180 )\nfit_range = \"loSB,hiSB\"\n\n# Plot the data in the mass sidebands\ncan = ROOT.TCanvas()\nplot = mass.frame()\ndata.plotOn( plot, ROOT.RooFit.CutRange(fit_range), binning )\nplot.Draw()\ncan.Update()\ncan.Draw()\ncan.SaveAs(\"part1_data_sidebands.png\")\n</code></pre></p> <p></p> <p>By eye, it looks like an exponential function would fit the data sidebands well.</p> <p>Tasks and questions:</p> <ul> <li>Run the code above to produce the plot.</li> <li>Construct the background model using a <code>RooExponential</code> and fit the data sidebands. You can use something like the code below to do this (or uncomment the relevant section of the script.)</li> </ul> <pre><code>alpha = ROOT.RooRealVar(\"alpha\", \"alpha\", -0.05, -0.2, 0 )\nmodel_bkg = ROOT.RooExponential(\"model_bkg_Tag0\", \"model_bkg_Tag0\", mass, alpha )\n\n# Fit model to data sidebands\nmodel_bkg.fitTo( data, ROOT.RooFit.Range(fit_range),  ROOT.RooFit.PrintLevel(-1))\n\n# Let's plot the model fit to the data\ncan = ROOT.TCanvas()\nplot = mass.frame()\n# We have to be careful with the normalisation as we only fit over sidebands\n# First do an invisible plot of the full data set\ndata.plotOn( plot, binning, ROOT.RooFit.MarkerColor(0), ROOT.RooFit.LineColor(0) )\nmodel_bkg.plotOn( plot, ROOT.RooFit.NormRange(fit_range), ROOT.RooFit.Range(\"full\"), ROOT.RooFit.LineColor(2))\ndata.plotOn( plot, ROOT.RooFit.CutRange(fit_range), binning )\nplot.Draw()\ncan.Update()\ncan.Draw()\ncan.SaveAs(\"part1_bkg_model.png\")\n</code></pre> <p></p> <p>Tasks and questions:</p> <ul> <li>As the background model is extracted from data, we want to introduce a freely floating normalisation term in the fit that enters the likelihood as an additional Poisson term (this is known as performing an extended maximum likelihood fit). We use the total number of data events (including in the signal region) as the initial prefit value of this normalisation object i.e. assuming no signal in the data. The syntax to name this normalisation object is <code>{model}_norm</code> which will the be picked up automatically by combine. Note we also allow the shape parameter to float in the final fit to data (by not setting to constant). Include the code below to include this parameter in the workspace.</li> </ul> <p><pre><code>norm = ROOT.RooRealVar(\"model_bkg_Tag0_norm\", \"Number of background events in Tag0\", data.numEntries(), 0, 3*data.numEntries() )\nalpha.setConstant(False)\n</code></pre> Let's then save the background model, the normalisation object, and the data distribution to a new <code>RooWorkspace</code>: <pre><code>f_out = ROOT.TFile(\"workspace_bkg.root\", \"RECREATE\")\nw_bkg = ROOT.RooWorkspace(\"workspace_bkg\",\"workspace_bkg\")\ngetattr(w_bkg, \"import\")(data)\ngetattr(w_bkg, \"import\")(norm)\ngetattr(w_bkg, \"import\")(model_bkg)\nw_bkg.Print()\nw_bkg.Write()\nf_out.Close()\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#datacard","title":"Datacard","text":"<p>The model workspaces have now been constructed. 
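As a quick sanity check (a sketch assuming the file and object names used above), you can re-open the background workspace and confirm that the model, the normalisation object and the dataset were all imported: <pre><code>import ROOT\n\nf = ROOT.TFile(\"workspace_bkg.root\")\nw = f.Get(\"workspace_bkg\")\nw.Print()   # should list model_bkg_Tag0, model_bkg_Tag0_norm and data_Tag0\nf.Close()\n</code></pre> 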
But before we can run any fits in combine we need to build the so-called datacard. This is a text file which defines the different processes entering the fit and their expected yields, and maps these processes to the corresponding (parametric) models. We also store information on the systematic uncertainties in the datacard (see part 3). Given the low complexity of this example, the datacard is reasonably short. The datacard for this section is titled <code>datacard_part1.txt</code>. Take some time to understand the different lines. In particular, the values for the process normalisations:</p> <p>Tasks and questions:</p> <ul> <li>Where does the signal (ggH) normalisation come from?</li> <li>Why do we use a value of 1.0 for the background model normalisation in this analysis?</li> </ul> <p><pre><code># Datacard example for combine tutorial 2023 (part 1)\n---------------------------------------------\nimax 1\njmax 1\nkmax *\n---------------------------------------------\n\nshapes      ggH          Tag0      workspace_sig.root      workspace_sig:model_ggH_Tag0\nshapes      bkg_mass     Tag0      workspace_bkg.root      workspace_bkg:model_bkg_Tag0\nshapes      data_obs     Tag0      workspace_bkg.root      workspace_bkg:data_Tag0\n\n---------------------------------------------\nbin             Tag0\nobservation     -1\n---------------------------------------------\nbin             Tag0         Tag0\nprocess         ggH          bkg_mass\nprocess         0            1\nrate            181.01       1.0\n---------------------------------------------\n</code></pre> To compile the datacard we run the following command, using a value of the Higgs mass of 125.0: <pre><code>text2workspace.py datacard_part1.txt -m 125\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Run the command above. his compiles the datacard into a RooWorkspace, effectively building the likelihood function. Try opening the compiled workspace (<code>root datacard_part1.root</code>) and print the contents (use <code>w-&gt;Print()</code>).</li> <li>Do you understand what all the different objects are? What does the variable <code>r</code> correspond to? Try (verbose) printing with <code>w-&gt;var(\"r\")-&gt;Print(\"v\")</code>.</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-signal-normalisation-object","title":"Extension: signal normalisation object","text":"<p>In the example above, the signal model normalisation is input by hand in the datacard. We can instead define the signal normalisation components in the model in a similar fashion to the background model normalisation object. Let's build the cross section (ggH), branching fraction (H-&gt;gamgam), and efficiency variables. 
It's important to set these terms to be constant for the final fit to data: <pre><code>xs_ggH = ROOT.RooRealVar(\"xs_ggH\", \"Cross section of ggH in [pb]\", 48.58 )\nbr_gamgam = ROOT.RooRealVar(\"BR_gamgam\", \"Branching ratio of Higgs to gamma gamma\", 0.0027 )\neff_ggH_Tag0 = ROOT.RooRealVar(\"eff_ggH_Tag0\", \"Efficiency for ggH events to land in Tag0\", eff )\n\nxs_ggH.setConstant(True)\nbr_gamgam.setConstant(True)\neff_ggH_Tag0.setConstant(True)\n</code></pre> The normalisation component is then defined as the product of these three variables: <pre><code>norm_sig = ROOT.RooProduct(\"model_ggH_Tag0_norm\", \"Normalisation term for ggH in Tag 0\", ROOT.RooArgList(xs_ggH,br_gamgam,eff_ggH_Tag0))\n</code></pre></p> <p>Again the syntax <code>{model}_norm</code> has been used so that combine will automatically assign this object as the normalisation for the model (<code>model_ggH_Tag0</code>). Firstly we need to save a new version of the signal model workspace with the normalisation term included. <pre><code>f_out = ROOT.TFile(\"workspace_sig_with_norm.root\", \"RECREATE\")\nw_sig = ROOT.RooWorkspace(\"workspace_sig\",\"workspace_sig\")\ngetattr(w_sig, \"import\")(model)\ngetattr(w_sig, \"import\")(norm_sig)\nw_sig.Print()\nw_sig.Write()\nf_out.Close()\n</code></pre> We then need to modify the datacard to account for this normalisation term. Importantly, the <code>{model}_norm</code> term in our updated signal model workspace does not contain the integrated luminosity. Therefore, the <code>rate</code> term in the datacard must be set equal to the integrated luminosity in [pb^-1] (as the cross section was defined in [pb]). The total normalisation for the signal model is then the product of the <code>{model}_norm</code> and the <code>rate</code> value.</p> <p>Tasks and questions:</p> <ul> <li>You can find the example datacard here: <code>datacard_part1_with_norm.txt</code> with the signal normalisation object included. Check if it compiles successfully using <code>text2workspace</code>? If so, try printing out the contents of the workspace. Can you see the normalisation component?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-unbinned-vs-binned","title":"Extension: unbinned vs binned","text":"<p>In a parametric analysis, the fit can be performed using a binned or unbinned likelihood function. The consequences of binned vs unbinned likelihoods were discussed in the morning session. In combine, we can simply toggle between binned and unbinned fits by changing how the data set is stored in the workspace. In the example above, the data was saved as a <code>RooDataSet</code>. This means that an unbinned maximum likelihood function would be used.</p> <p>To switch to a binned maximum likelihood fit, we need to store the data set in the workspace as a <code>RooDataHist</code>.</p> <p>Tasks and questions:</p> <ul> <li>First load the data as a <code>RooDataSet</code> as before:</li> </ul> <p><pre><code>f = ROOT.TFile(\"data_part1.root\",\"r\")\nt = f.Get(\"data_Tag0\")\n\n# Convert TTree to a RooDataSet\ndata = ROOT.RooDataSet(\"data_Tag0\", \"data_Tag0\", t, ROOT.RooArgSet(mass, weight), \"\", \"weight\")\n</code></pre>   -   Now set the number of bins in the observable and convert the data to a <code>RooDataHist</code> (as below). In the example below we will use 320 bins over the full mass range (0.25 GeV per bin). It is important that the binning is sufficiently granular so that we do not lose information in the data by switching to a binned likelihood fit. 
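For reference, with the roughly 2 GeV signal width fitted earlier, this 320-bin choice corresponds to about 8 bins per unit of mass resolution; a back-of-the-envelope sketch: <pre><code>bin_width = (180.0 - 100.0) / 320   # = 0.25 GeV per bin\nsigma_approx = 2.0                  # approximate signal mass resolution in GeV\nprint(sigma_approx / bin_width)     # ~8 bins per unit of resolution\n</code></pre> 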
When fitting a signal peak over a background we want the bin width to be sufficiently smaller than the signal model mass resolution. Try changing (reducing) the number of bins: at what point do you start to see differences in the fit results?</p> <pre><code># Set bin number for mass variables\nmass.setBins(320)\ndata_hist = ROOT.RooDataHist(\"data_hist_Tag0\", \"data_hist_Tag0\", mass, data)\n\n# Save the background model with the RooDataHist instead\nf_out = ROOT.TFile(\"workspace_bkg_binned.root\", \"RECREATE\")\nw_bkg = ROOT.RooWorkspace(\"workspace_bkg\",\"workspace_bkg\")\ngetattr(w_bkg, \"import\")(data_hist)\ngetattr(w_bkg, \"import\")(norm)\ngetattr(w_bkg, \"import\")(model_bkg)\nw_bkg.Print()\nw_bkg.Write()\nf_out.Close()\n</code></pre>"},{"location":"tutorial2023/parametric_exercise/#part-2-simple-fits","title":"Part 2: Simple fits","text":"<p>Now that the parametric models have been constructed and the datacard has been compiled, we are ready to start using combine to run fits. In CMS analyses we begin by blinding ourselves to the data in the signal region, and looking only at the expected results based on toy datasets (Asimov or pseudo-experiments). In this exercise, we will look straight away at the observed results. Note: the python commands in this section are taken from the script called <code>simple_fits.py</code>.</p> <p>Tasks and questions:</p> <ul> <li>Run a simple best fit for the signal strength, <code>r</code>, fixing the Higgs mass to 125 GeV, by running the following command in the terminal: <pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH --saveWorkspace -n .bestfit\n</code></pre> We obtain a best-fit signal strength of <code>r = 1.548</code>, i.e. the observed signal yield is 1.548 times the SM prediction.</li> </ul> <p>The option <code>--saveWorkspace</code> stores a snapshot of the postfit workspace in the output file (<code>higgsCombine.bestfit.MultiDimFit.mH125.root</code>). We can load the postfit workspace and look at how the values of all the fit parameters change (compare the <code>clean</code> and <code>MultiDimFit</code> parameter snapshots): <pre><code>import ROOT\n\nf = ROOT.TFile(\"higgsCombine.bestfit.MultiDimFit.mH125.root\")\nw = f.Get(\"w\")\nw.Print(\"v\")\n</code></pre> We can even plot the postfit signal-plus-background model using the workspace snapshot: <pre><code>n_bins = 80\nbinning = ROOT.RooFit.Binning(n_bins,100,180)\n\ncan = ROOT.TCanvas()\nplot = w.var(\"CMS_hgg_mass\").frame()\nw.data(\"data_obs\").plotOn( plot, binning )\n\n# Load the S+B model\nsb_model = w.pdf(\"model_s\").getPdf(\"Tag0\")\n\n# Prefit\nsb_model.plotOn( plot, ROOT.RooFit.LineColor(2), ROOT.RooFit.Name(\"prefit\") )\n\n# Postfit\nw.loadSnapshot(\"MultiDimFit\")\nsb_model.plotOn( plot, ROOT.RooFit.LineColor(4), ROOT.RooFit.Name(\"postfit\") )\nr_bestfit = w.var(\"r\").getVal()\n\nplot.Draw()\n\nleg = ROOT.TLegend(0.55,0.6,0.85,0.85)\nleg.AddEntry(\"prefit\", \"Prefit S+B model (r=1.00)\", \"L\")\nleg.AddEntry(\"postfit\", \"Postfit S+B model (r=%.2f)\"%r_bestfit, \"L\")\nleg.Draw(\"Same\")\n\ncan.Update()\ncan.SaveAs(\"part2_sb_model.png\")\n</code></pre></p> <p></p>"},{"location":"tutorial2023/parametric_exercise/#confidence-intervals","title":"Confidence intervals","text":"<p>We not only want to find the best-fit value of the signal strength, r, but also the confidence intervals. 
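As an aside, if you prefer to read the best-fit value of <code>r</code> back from the output file of the previous step rather than from the terminal output, a short sketch like the following should work, since combine stores its results in a <code>TTree</code> called <code>limit</code>: <pre><code>import ROOT\n\nf = ROOT.TFile(\"higgsCombine.bestfit.MultiDimFit.mH125.root\")\nt = f.Get(\"limit\")\nt.Scan(\"r\")   # prints the best-fit signal strength (about 1.548 here)\n</code></pre> 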
The <code>singles</code> algorithm will find the 68% CL intervals: <pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH -n .singles --algo singles\n</code></pre> To perform a likelihood scan (i.e. calculate 2NLL at fixed values of the signal strength, profiling the other parameters), we use the <code>grid</code> algorithm. We can control the number of points in the scan using the <code>--points</code> option. Also, it is important to set a suitable range for the signal strength parameter. The <code>singles</code> algorithm has shown us that the 1 stdev interval on r is around +/-0.2.</p> <p>Tasks and questions:</p> <ul> <li>Use these intervals to define a suitable range for the scan, and change <code>lo,hi</code> in the following options accordingly: <code>--setParameterRanges r=lo,hi</code>.</li> </ul> <p><pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH -n .scan --algo grid --points 20 --setParameterRanges r=lo,hi\n</code></pre> We can use the <code>plot1DScan.py</code> function from CombineTools to plot the likelihood scan: <pre><code>plot1DScan.py higgsCombine.scan.MultiDimFit.mH125.root -o part2_scan\n</code></pre> </p> <ul> <li>Do you understand what the plot is showing? What information about the signal strength parameter can be inferred from the plot?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-expected-fits","title":"Extension: expected fits","text":"<p>To run expected fits we simply add <code>-t N</code> to the combine command. For <code>N&gt;0</code>, this will generate N random toys from the model and fit each one independently. For <code>N=-1</code>, this will generate an asimov toy in which all statistical fluctuations from the model are suppressed.</p> <p>You can use the <code>--expectSignal 1</code> option to set the signal strength parameter to 1 when generating the toy. Alternatively, <code>--expectSignal 0</code> will generate a toy from the background-only model. For multiple parameter models you can set the initial values when generating the toy(s) using the <code>--setParameters</code> option of combine. For example, if you want to throw a toy where the Higgs mass is at 124 GeV and the background slope parameter <code>alpha</code> is equal to -0.05, you would add <code>--setParameters MH=124.0,alpha=-0.05</code>.</p> <p>Tasks and questions:</p> <ul> <li>Try running the asimov likelihood scan for <code>r=1</code> and <code>r=0</code>, and plotting them using the <code>plot1DScan.py</code> script.</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#extension-goodness-of-fit-tests","title":"Extension: goodness-of-fit tests","text":"<p>The goodness-of-fit tests available in combine are only well-defined for binned maximum likelihood fits. Therefore, to perform a goodness-of-fit test with a parametric datacard, make sure to save the data object as a <code>RooDataHist</code>, as in <code>workspace_bkg_binned.root</code>.</p> <p>Tasks and questions:</p> <ul> <li>Try editing the <code>datacard_part1_with_norm.txt</code> file to pick up the correct binned workspace file, and the <code>RooDataHist</code>. The goodness-of-fit method requires at-least one nuisance parameter in the model to run successfully. 
Append the following line to the end of the datacard: <pre><code>lumi_13TeV      lnN          1.01         -\n</code></pre></li> <li>Does the datacard compile with the <code>text2workspace.py</code> command?</li> </ul> <p>We use the <code>GoodnessOfFit</code> method in combine to evaluate how compatible the observed data are with the model pdf. There are three types of GoF algorithm within combine, this example will use the <code>saturated</code> algorithm. You can find more information about the other algorithms here.</p> <p>Firstly, we want to calculate the value of the test statistic for the observed data: <pre><code>combine -M GoodnessOfFit datacard_part1_binned.root --algo saturated -m 125 --freezeParameters MH -n .goodnessOfFit_data\n</code></pre></p> <p>Now lets calculate the test statistic value for many toys thrown from the model: <pre><code>combine -M GoodnessOfFit datacard_part1_binned.root --algo saturated -m 125 --freezeParameters MH -n .goodnessOfFit_toys -t 1000\n</code></pre></p> <p>To make a plot of the GoF test-statistic distribution you can run the following commands, which first collect the values of the test-statistic into a json file, and then plots from the json file: <pre><code>combineTool.py -M CollectGoodnessOfFit --input higgsCombine.goodnessOfFit_data.GoodnessOfFit.mH125.root higgsCombine.goodnessOfFit_toys.GoodnessOfFit.mH125.123456.root -m 125.0 -o gof.json\n\nplotGof.py gof.json --statistic saturated --mass 125.0 -o part2_gof\n</code></pre></p> <p></p> <ul> <li>What does the plot tell us? Does the model fit the data well?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#part-3-systematic-uncertainties","title":"Part 3: Systematic uncertainties","text":"<p>In this section, we will learn how to add systematic uncertainties to a parametric fit analysis. The python commands are taken from the <code>systematics.py</code> script.</p> <p>For uncertainties which only affect the process normalisation, we can simply implement these as <code>lnN</code> uncertainties in the datacard. The file <code>mc_part3.root</code> contains the systematic-varied trees i.e. Monte-Carlo events where some systematic uncertainty source <code>{photonID,JEC,scale,smear}</code> has been varied up and down by \\(1\\sigma\\). 
<pre><code>import ROOT\n\nf = ROOT.TFile(\"mc_part3.root\")\nf.ls()\n</code></pre> Gives the output: <pre><code>TFile**     mc_part3.root\n TFile*     mc_part3.root\n  KEY: TTree    ggH_Tag0;1  ggH_Tag0\n  KEY: TTree    ggH_Tag0_photonIDUp01Sigma;1    ggH_Tag0_photonIDUp01Sigma\n  KEY: TTree    ggH_Tag0_photonIDDown01Sigma;1  ggH_Tag0_photonIDDown01Sigma\n  KEY: TTree    ggH_Tag0_scaleUp01Sigma;1   ggH_Tag0_scaleUp01Sigma\n  KEY: TTree    ggH_Tag0_scaleDown01Sigma;1 ggH_Tag0_scaleDown01Sigma\n  KEY: TTree    ggH_Tag0_smearUp01Sigma;1   ggH_Tag0_smearUp01Sigma\n  KEY: TTree    ggH_Tag0_smearDown01Sigma;1 ggH_Tag0_smearDown01Sigma\n  KEY: TTree    ggH_Tag0_JECUp01Sigma;1 ggH_Tag0_JECUp01Sigma\n  KEY: TTree    ggH_Tag0_JECDown01Sigma;1   ggH_Tag0_JECDown01Sigma\n</code></pre> Let's first load the systematic-varied trees as RooDataSets and store them in a python dictionary, <code>mc</code>:</p> <p>Tasks and questions:</p> <ul> <li>Run the code below (or uncomment the relevant section of the script for this part).</li> </ul> <pre><code># Define mass and weight variables\nmass = ROOT.RooRealVar(\"CMS_hgg_mass\", \"CMS_hgg_mass\", 125, 100, 180)\nweight = ROOT.RooRealVar(\"weight\",\"weight\",0,0,1)\n\nmc = {}\n\n# Load the nominal dataset\nt = f.Get(\"ggH_Tag0\")\nmc['nominal'] = ROOT.RooDataSet(\"ggH_Tag0\",\"ggH_Tag0\", t, ROOT.RooArgSet(mass,weight), \"\", \"weight\" )\n\n# Load the systematic-varied datasets\nfor syst in ['JEC','photonID','scale','smear']:\n    for direction in ['Up','Down']:\n        key = \"%s%s01Sigma\"%(syst,direction)\n        name = \"ggH_Tag0_%s\"%(key)\n        t = f.Get(name)\n        mc[key] = ROOT.RooDataSet(name, name, t, ROOT.RooArgSet(mass,weight), \"\", \"weight\" )\n</code></pre> <p>The jet energy scale (JEC) and photon identification (photonID) uncertainties do not affect the shape of the \\(m_{\\gamma\\gamma}\\) distribution i.e. they only effect the signal yield estimate. We can calculate their impact by comparing the sum of weights to the nominal dataset. Note, the photonID uncertainty changes the weight of the events in the tree, whereas the JEC varied trees contain a different set of events, generated by shifting the jet energy scale in the simulation. 
In any case, the means for calculating the yield variations is equivalent: <pre><code>for syst in ['JEC','photonID']:\n    for direction in ['Up','Down']:\n        yield_variation = mc['%s%s01Sigma'%(syst,direction)].sumEntries()/mc['nominal'].sumEntries()\n        print(\"Systematic varied yield (%s,%s): %.3f\"%(syst,direction,yield_variation))\n</code></pre> <pre><code>Systematic varied yield (JEC,Up): 1.056\nSystematic varied yield (JEC,Down): 0.951\nSystematic varied yield (photonID,Up): 1.050\nSystematic varied yield (photonID,Down): 0.950\n</code></pre> We can write these yield variations in the datacard with the lines: <pre><code>CMS_scale_j           lnN      0.951/1.056      -\nCMS_hgg_phoIdMva      lnN      1.05             -\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Run the code (or uncomment the relevant lines of code in the script) to produce the systematic variations in the workspace and add the datacard lines above to the datacard.</li> <li>Why is the photonID uncertainty expressed as one number, whereas the JEC uncertainty is defined by two?</li> </ul> <p>Note in this analysis there are no systematic uncertainties affecting the background estimate (<code>-</code> in the datacard), as the background model has been derived directly from data.</p>"},{"location":"tutorial2023/parametric_exercise/#parametric-shape-uncertainties","title":"Parametric shape uncertainties","text":"<p>What about systematic uncertainties which affect the shape of the mass distribution?</p> <p>In a parametric analysis, we need to build the dependence directly into the model parameters. The example uncertainty sources in this tutorial are the photon energy scale and smearing uncertainties. From the names alone we can expect that the scale uncertainty will affect the mean of the signal Gaussian, and the smear uncertainty will impact the resolution (sigma). Let's first take a look at the <code>scaleUp01Sigma</code> dataset:</p> <p><pre><code># Build the model to fit the systematic-varied datasets\nmean = ROOT.RooRealVar(\"mean\", \"mean\", 125, 124, 126)\nsigma = ROOT.RooRealVar(\"sigma\", \"sigma\", 2, 1.5, 2.5)\ngaus = ROOT.RooGaussian(\"model\", \"model\", mass, mean, sigma)\n\n# Run the fits twice (second time from the best-fit of first run) to obtain more reliable results\ngaus.fitTo(mc['scaleUp01Sigma'], ROOT.RooFit.SumW2Error(True),ROOT.RooFit.PrintLevel(-1))\ngaus.fitTo(mc['scaleUp01Sigma'], ROOT.RooFit.SumW2Error(True),ROOT.RooFit.PrintLevel(-1))\nprint(\"Mean = %.3f +- %.3f GeV, Sigma = %.3f +- %.3f GeV\"%(mean.getVal(),mean.getError(),sigma.getVal(),sigma.getError()) )\n</code></pre> Gives the output: <pre><code>Mean = 125.370 +- 0.009 GeV, Sigma = 2.011 +- 0.006 GeV\n</code></pre> Now let's compare the values to the nominal fit for all systematic-varied trees. We observe a significant variation in the mean for the scale uncertainty, and a significant variation in sigma for the smear uncertainty. 
<pre><code># First fit the nominal dataset\ngaus.fitTo(mc['nominal'], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1) )\ngaus.fitTo(mc['nominal'], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1) )\n# Save the mean and sigma values and errors to python dicts\nmean_values, sigma_values = {}, {}\nmean_values['nominal'] = [mean.getVal(),mean.getError()]\nsigma_values['nominal'] = [sigma.getVal(),sigma.getError()]\n\n# Next for the systematic varied datasets\nfor syst in ['scale','smear']:\n    for direction in ['Up','Down']:\n        key = \"%s%s01Sigma\"%(syst,direction)\n        gaus.fitTo(mc[key] , ROOT.RooFit.SumW2Error(True),  ROOT.RooFit.PrintLevel(-1))\n        gaus.fitTo(mc[key], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1))\n        mean_values[key] = [mean.getVal(), mean.getError()]\n        sigma_values[key] = [sigma.getVal(), sigma.getError()]\n\n# Print the variations in mean and sigma\nfor key in mean_values.keys():\n    print(\"%s: mean = %.3f +- %.3f GeV, sigma = %.3f +- %.3f GeV\"%(key,mean_values[key][0],mean_values[key][1],sigma_values[key][0],sigma_values[key][1]))\n</code></pre> Prints the output: <pre><code>nominal: mean = 125.001 +- 0.009 GeV, sigma = 1.996 +- 0.006 GeV\nscaleUp01Sigma: mean = 125.370 +- 0.009 GeV, sigma = 2.011 +- 0.006 GeV\nscaleDown01Sigma: mean = 124.609 +- 0.009 GeV, sigma = 2.005 +- 0.006 GeV\nsmearUp01Sigma: mean = 125.005 +- 0.009 GeV, sigma = 2.097 +- 0.007 GeV\nsmearDown01Sigma: mean = 125.007 +- 0.009 GeV, sigma = 1.912 +- 0.006 GeV\n</code></pre> The values tell us that the scale uncertainty (at \\(\\pm 1 \\sigma\\)) varies the signal peak mean by around 0.3%, and the smear uncertainty (at \\(\\pm 1 \\sigma\\)) varies the signal width (sigma) by around 4.5% (average of up and down variations).</p> <p>Now we need to bake these effects into the parametric signal model. The mean of the Gaussian was previously defined as:</p> \\[ \\mu = m_H + \\delta\\] <p>We introduce the nuisance parameter <code>nuisance_scale</code> = \\(\\eta\\) to account for a shift in the signal peak mean using:</p> \\[ \\mu = (m_H + \\delta) \\cdot (1+0.003\\eta)\\] <p>At \\(\\eta = +1 (-1)\\) the signal peak mean will shift up (down) by 0.3%. 
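If you want to see where the 0.3% (and the roughly 4.5% used below for the width) come from, a small sketch using the <code>mean_values</code> and <code>sigma_values</code> dictionaries filled above: <pre><code># Average relative shift of the mean under the scale systematic\nscale_up = abs(mean_values['scaleUp01Sigma'][0]/mean_values['nominal'][0] - 1)\nscale_dn = abs(mean_values['scaleDown01Sigma'][0]/mean_values['nominal'][0] - 1)\nprint(\"scale: %.4f\"%(0.5*(scale_up+scale_dn)))   # ~0.003\n\n# Average relative change of the width under the smear systematic\nsmear_up = abs(sigma_values['smearUp01Sigma'][0]/sigma_values['nominal'][0] - 1)\nsmear_dn = abs(sigma_values['smearDown01Sigma'][0]/sigma_values['nominal'][0] - 1)\nprint(\"smear: %.4f\"%(0.5*(smear_up+smear_dn)))   # ~0.046, i.e. the ~4.5% quoted in the text\n</code></pre> 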
To build this into the RooFit signal model we simply define a new parameter, \\(\\eta\\), and update the definition of the mean formula variable: <pre><code># Building the workspace with systematic variations\nMH = ROOT.RooRealVar(\"MH\", \"MH\", 125, 120, 130 )\nMH.setConstant(True)\n\n# Define formula for mean of Gaussian\ndMH = ROOT.RooRealVar(\"dMH_ggH_Tag0\", \"dMH_ggH_Tag0\", 0, -5, 5 )\neta = ROOT.RooRealVar(\"nuisance_scale\", \"nuisance_scale\", 0, -5, 5)\neta.setConstant(True)\nmean_formula = ROOT.RooFormulaVar(\"mean_ggH_Tag0\", \"mean_ggH_Tag0\", \"(@0+@1)*(1+0.003*@2)\", ROOT.RooArgList(MH,dMH,eta))\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Update the workspace with the new mean formula variable (using the code above, or uncomment in the script)</li> <li>Why do we set the nuisance parameter to constant at this stage?</li> </ul> <p>Similarly for the width introducing a nuisance parameter, \\(\\chi\\):</p> \\[ \\sigma = \\sigma \\cdot (1+0.045\\chi)\\] <p><pre><code>sigma = ROOT.RooRealVar(\"sigma_ggH_Tag0_nominal\", \"sigma_ggH_Tag0_nominal\", 2, 1, 5)\nchi = ROOT.RooRealVar(\"nuisance_smear\", \"nuisance_smear\", 0, -5, 5)\nchi.setConstant(True)\nsigma_formula = ROOT.RooFormulaVar(\"sigma_ggH_Tag0\", \"sigma_ggH_Tag0\", \"@0*(1+0.045*@1)\", ROOT.RooArgList(sigma,chi))\n</code></pre> Let's now fit the new model to the signal Monte-Carlo dataset, build the normalisation object and save the workspace. <pre><code># Define Gaussian\nmodel = ROOT.RooGaussian( \"model_ggH_Tag0\", \"model_ggH_Tag0\", mass, mean_formula, sigma_formula )\n\n# Fit model to MC\nmodel.fitTo( mc['nominal'], ROOT.RooFit.SumW2Error(True), ROOT.RooFit.PrintLevel(-1) )\n\n# Build signal model normalisation object\nxs_ggH = ROOT.RooRealVar(\"xs_ggH\", \"Cross section of ggH in [pb]\", 48.58 )\nbr_gamgam = ROOT.RooRealVar(\"BR_gamgam\", \"Branching ratio of Higgs to gamma gamma\", 0.0027 )\neff = mc['nominal'].sumEntries()/(xs_ggH.getVal()*br_gamgam.getVal())\neff_ggH_Tag0 = ROOT.RooRealVar(\"eff_ggH_Tag0\", \"Efficiency for ggH events to land in Tag0\", eff )\n# Set values to be constant\nxs_ggH.setConstant(True)\nbr_gamgam.setConstant(True)\neff_ggH_Tag0.setConstant(True)\n# Define normalisation component as product of these three variables\nnorm_sig = ROOT.RooProduct(\"model_ggH_Tag0_norm\", \"Normalisation term for ggH in Tag 0\", ROOT.RooArgList(xs_ggH,br_gamgam,eff_ggH_Tag0))\n\n# Set shape parameters of model to be constant (i.e. fixed in fit to data)\ndMH.setConstant(True)\nsigma.setConstant(True)\n\n# Build new signal model workspace with signal normalisation term.\nf_out = ROOT.TFile(\"workspace_sig_with_syst.root\", \"RECREATE\")\nw_sig = ROOT.RooWorkspace(\"workspace_sig\",\"workspace_sig\")\ngetattr(w_sig, \"import\")(model)\ngetattr(w_sig, \"import\")(norm_sig)\nw_sig.Print()\nw_sig.Write()\nf_out.Close()\n</code></pre> The final step is to add the parametric uncertainties as Gaussian-constrained nuisance parameters into the datacard. The syntax means the Gaussian constraint term in the likelihood function will have a mean of 0 and a width of 1. <pre><code>nuisance_scale        param    0.0    1.0\nnuisance_smear        param    0.0    1.0\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Run the python code above to include the smearing uncertainty too.</li> <li>Try adding these lines to <code>datacard_part1_with_norm.txt</code>, along with the lines for the JEC and photonID yield uncertainties above, and compiling with the <code>text2workspace</code> command. 
Open the workspace and look at its contents. You will need to change the signal process workspace file name in the datacard to point to the new workspace (<code>workspace_sig_with_syst.root</code>).</li> <li>Can you see the new objects in the compiled datacard that have been created for the systematic uncertainties? What do they correspond to?</li> </ul> <p>We can now run a fit with the systematic uncertainties included. The option <code>--saveSpecifiedNuis</code> can be called to save the postfit nuisance parameter values in the combine output limit tree. <pre><code>combine -M MultiDimFit datacard_part1_with_norm.root -m 125 --freezeParameters MH --saveWorkspace -n .bestfit.with_syst --saveSpecifiedNuis CMS_scale_j,CMS_hgg_phoIdMva,nuisance_scale,nuisance_smear\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>What do the postfit values of the nuisances tell us here? You can check them by opening the output file (<code>root higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root</code>) and running <code>limit-&gt;Show(0)</code>.</li> <li>Try plotting the postfit mass distribution (as detailed in part 2). Do you notice any difference?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#uncertainty-breakdown","title":"Uncertainty breakdown","text":"<p>A more complete datacard with additional nuisance parameters is stored in <code>datacard_part3.txt</code>. We will use this datacard for the rest of part 3. Open the text file and have a look at the contents.</p> <p>The following line has been appended to the end of the datacard to define the set of theory nuisance parameters. This will come in handy when calculating the uncertainty breakdown. <pre><code>theory group = BR_hgg QCDscale_ggH pdf_Higgs_ggH alphaS_ggH UnderlyingEvent PartonShower\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li> <p>Compile the datacard and run an observed <code>MultiDimFit</code> likelihood scan over the signal strength, r: <pre><code>text2workspace.py datacard_part3.txt -m 125\n\ncombine -M MultiDimFit datacard_part3.root -m 125 --freezeParameters MH -n .scan.with_syst --algo grid --points 20 --setParameterRanges r=0.5,2.5\n</code></pre></p> </li> <li> <p>Our aim is to break down the total uncertainty into the systematic and statistical components. To get the statistical-uncertainty-only scan it should be as simple as freezing the nuisance parameters in the fit... right? Try it by adding <code>,allConstrainedNuisances</code> to the <code>--freezeParameters</code> option. This will freeze all (constrained) nuisance parameters in the fit. You can also feed in regular expressions with wildcards using <code>rgx{.*}</code>. For instance to freeze only the <code>nuisance_scale</code> and <code>nuisance_smear</code> you could run with <code>--freezeParameters MH,rgx{nuisance_.*}</code>.</p> </li> </ul> <p><pre><code>combine -M MultiDimFit datacard_part3.root -m 125 --freezeParameters MH,allConstrainedNuisances -n .scan.with_syst.statonly --algo grid --points 20 --setParameterRanges r=0.5,2.5\n</code></pre>   -   Plot the two likelihood scans on the same axis. You can use the plotting script provided or write your own. 
<pre><code>plot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label \"With systematics\" --main-color 1 --others higgsCombine.scan.with_syst.statonly.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_v0\n</code></pre></p> <p></p> <ul> <li>Can you spot the problem?</li> </ul> <p>The nuisance parameters introduced into the model have pulled the best-fit signal strength point! Therefore we cannot simply subtract the uncertainties in quadrature to get an estimate for the systematic/statistical uncertainty breakdown.</p> <p>The correct approach is to freeze the nuisance parameters to their respective best-fit values in the stat-only scan.</p> <ul> <li>We can do this by first saving a postfit workspace with all nuisance parameters profiled in the fit. Then we load the postfit snapshot values of the nuisance parameters (with the option <code>--snapshotName MultiDimFit</code>) from the combine output of the previous step, and then freeze the nuisance parameters for the stat-only scan. <pre><code>combine -M MultiDimFit datacard_part3.root -m 125 --freezeParameters MH -n .bestfit.with_syst --setParameterRanges r=0.5,2.5 --saveWorkspace\n\ncombine -M MultiDimFit higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root -m 125 --freezeParameters MH,allConstrainedNuisances -n .scan.with_syst.statonly_correct --algo grid --points 20 --setParameterRanges r=0.5,2.5 --snapshotName MultiDimFit\n</code></pre></li> <li>Adding the option <code>--breakdown syst,stat</code> to the <code>plot1DScan.py</code> command will automatically calculate the uncertainty breakdown for you. <pre><code>plot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label \"With systematics\" --main-color 1 --others higgsCombine.scan.with_syst.statonly_correct.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_v1 --breakdown syst,stat\n</code></pre></li> </ul> <p></p> <p>We can also freeze groups of nuisance parameters defined in the datacard with the option <code>--freezeNuisanceGroups</code>. Let's run a scan freezing only the theory uncertainties (using the nuisance group we defined in the datacard): <pre><code>combine -M MultiDimFit higgsCombine.bestfit.with_syst.MultiDimFit.mH125.root -m 125 --freezeParameters MH --freezeNuisanceGroups theory -n .scan.with_syst.freezeTheory --algo grid --points 20 --setParameterRanges r=0.5,2.5 --snapshotName MultiDimFit\n</code></pre> To breakdown the total uncertainty into the theory, experimental and statistical components we can then use: <pre><code>plot1DScan.py higgsCombine.scan.with_syst.MultiDimFit.mH125.root --main-label Total --main-color 1 --others higgsCombine.scan.with_syst.freezeTheory.MultiDimFit.mH125.root:\"Freeze theory\":4 higgsCombine.scan.with_syst.statonly_correct.MultiDimFit.mH125.root:\"Stat-only\":2 -o part3_scan_v2 --breakdown theory,exp,stat\n</code></pre></p> <p></p> <p>These methods are not limited to this particular grouping of systematics. We can use the above procedure to assess the impact of any nuisance parameter(s) on the signal strength confidence interval.</p> <p>Tasks and questions:</p> <ul> <li>Try and calculate the contribution to the total uncertainty from the luminosity estimate using this approach.</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#impacts","title":"Impacts","text":"<p>It is often useful/required to check the impacts of the nuisance parameters (NP) on the parameter of interest, r. 
The impact of a NP is defined as the shift \\(\\Delta r\\) induced as the NP, \\(\\theta\\), is fixed to its \\(\\pm1\\sigma\\) values, with all other parameters profiled as normal. More information can be found in the combine documentation via this link.</p> <p>Let's calculate the impacts for our analysis. We can use the <code>combineTool.py</code> to automate the scripts. The impacts are calculated in a few stages:</p> <p>1) Do an initial fit for the parameter of interest, adding the <code>--robustFit 1</code> option: <pre><code>combineTool.py -M Impacts -d datacard_part3.root -m 125 --freezeParameters MH -n .impacts --setParameterRanges r=0.5,2.5 --doInitialFit --robustFit 1\n</code></pre></p> <p>2) Next perform a similar scan for each NP with the <code>--doFits</code> option. This may take a few minutes: <pre><code>combineTool.py -M Impacts -d datacard_part3.root -m 125 --freezeParameters MH -n .impacts --setParameterRanges r=0.5,2.5 --doFits --robustFit 1\n</code></pre></p> <p>3) Collect the outputs from the previous step and write the results to a json file: <pre><code>combineTool.py -M Impacts -d datacard_part3.root -m 125 --freezeParameters MH -n .impacts --setParameterRanges r=0.5,2.5 -o impacts_part3.json\n</code></pre></p> <p>4) Produce a plot summarising the nuisance parameter values and impacts: <pre><code>plotImpacts.py -i impacts_part3.json -o impacts_part3\n</code></pre></p> <p></p> <p>Tasks and questions:</p> <ul> <li>Run the commands 1-4 above. There is a lot of information in these plots, which can be of invaluable use to analysers in understanding the fit. Do you understand everything that the plot is showing?</li> <li>Which NP has the highest impact on the signal strength measurement?</li> <li>Which NP is pulled the most in the fit to data? What does this information imply about the signal model mean in relation to the data?</li> <li>Which NP is the most constrained in the fit to the data? What does it mean for a nuisance parameter to be constrained?</li> <li>Try adding the option <code>--summary</code> to the impacts plotting command. This is a nice new feature in combine!</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#part-4-toy-generation-and-bias-studies","title":"Part 4: Toy generation and bias studies","text":"<p>With combine we can generate toy datasets from the compiled datacard workspace. Please read this section in the combine manual before proceeding.</p> <p>An interesting use case of toy generation is when performing bias studies. In the Higgs to two photon (Hgg) analysis, the background is fit with some functional form. However (due to the complexities of QCD) the exact form of this function is unknown. Therefore, we need to understand how our choice of background function may impact the fitted signal strength. This is performed using a bias study, which will indicate how much potential bias is present given a certain choice of functional form.</p> <p>In the classical bias studies we begin by building a set of workspaces which correspond to different background function choices. In addition to the <code>RooExponential</code> constructed in Section 1, let's also try a (4th order) <code>RooChebychev</code> polynomial and a simple power law function to fit the background \\(m_{\\gamma\\gamma}\\) distribution.</p> <p>The script used to fit the different functions and build the workspaces is <code>construct_models_bias_study_part4.py</code>. Take some time to look at the script and understand what the code is doing. 
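For reference, the key step of binning the data before saving it looks roughly like the following (a minimal sketch with illustrative variable names, binning and ranges, not necessarily those used in the script): <pre><code>import ROOT\n\n# Observable (illustrative name and range) and a binned copy of the dataset\nmass = ROOT.RooRealVar(\"CMS_hgg_mass\", \"CMS_hgg_mass\", 125, 100, 180)\nmass.setBins(320)\ndata = ROOT.RooDataSet(\"data\", \"data\", ROOT.RooArgSet(mass))  # stands in for the dataset read from file\ndata_hist = ROOT.RooDataHist(\"data_hist_Tag0\", \"data_hist_Tag0\", ROOT.RooArgSet(mass), data)\n\n# Import the binned dataset into the workspace instead of the unbinned one\nw_bkg = ROOT.RooWorkspace(\"workspace_bkg\", \"workspace_bkg\")\ngetattr(w_bkg, \"import\")(data_hist)\n</code></pre> 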
In particular notice how we have saved the data as a <code>RooDataHist</code> in the workspace. This means we are now performing binned maximum likelihood fits (this is useful for part 4 to speed up fitting the many toys). If the binning is sufficiently granular, then there will be no noticeable difference in the results to the unbinned likelihood fits. Run the script with: <pre><code>python3  construct_models_bias_study_part4.py\n</code></pre></p> <p>The outputs are a set of workspaces which correspond to different choices of background model functions, and a plot showing fits of the different functions to the data mass sidebands.</p> <p></p> <p>The datacards for the different background model functions are saved as <code>datacard_part4_{pdf}.txt</code> where <code>pdf = {exp,poly,pow}</code>. Have a look inside the .txt files and understand what changes have been made to pick up the different functions. Compile the datacards with: <pre><code>for pdf in {exp,poly,pow}; do text2workspace.py datacard_part4_${pdf}.txt -m 125; done\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#bias-studies","title":"Bias studies","text":"<p>For the bias studies we want to generate (\"throw\") toy datasets with some choice of background function and fit back with another. The toys are thrown with a known value of the signal strength (r=1 in this example), which we will call \\(r_{truth}\\). The fitted value of r is defined as \\(r_{fit}\\), with some uncertainty \\(\\sigma_{fit}\\). A pull value, \\(P\\), is calculated for each toy dataset according to,</p> \\[ P = (r_{truth}-r_{fit})/\\sigma_{fit}\\] <p>By repeating the process for many toys we can build up a pull distribution. If there is no bias present then we would expect to obtain a normal distribution centred at 0, with a standard deviation of 1. Let's calculate the bias for our analysis.</p> <p>Firstly,  we generate N=1000 toys from each of the background function choices and save them in a ROOT file. For this we use the <code>GenerateOnly</code> method of combine. We will inject signal in the toys by setting <code>r=1</code> using the <code>--expectSignal 1</code> option.</p> <p>Tasks and questions:</p> <ul> <li>Repeat the bias studies as outlined above with <code>--expectSignal 0</code>. This will inform us of the potential bias in the signal strength measurement given that there is no true signal.</li> </ul> <p>The following commands show the example of throwing 1000 toys from the exponential function, and then fitting back with the 4th-order Chebychev polynomial. We use the <code>singles</code> algorithm to obtain a value for \\(r_{fit}\\) and \\(\\sigma_{fit}\\) simultaneously. <pre><code>combine -M GenerateOnly datacard_part4_exp.root -m 125 --freezeParameters MH -t 1000 -n .generate_exp --expectSignal 1 --saveToys\n\ncombine -M MultiDimFit datacard_part4_poly.root -m 125 --freezeParameters MH -t 1000 -n .bias_truth_exp_fit_poly --expectSignal 1 --toysFile higgsCombine.generate_exp.GenerateOnly.mH125.123456.root --algo singles\n</code></pre> The script <code>plot_bias_pull.py</code> will plot the pull distribution and fit a Gaussian to it: <pre><code>python3 plot_bias_pull.py\n</code></pre></p> <p></p> <p>The potential bias is defined as the (fitted) mean of the pull distribution.</p> <p>Tasks and questions:</p> <ul> <li>What is our bias value? Have we generated enough toys to be confident of the bias value? 
You could try generating more toys if not.</li> <li>What threshold do we use to define \"acceptable\" bias?</li> </ul> <p>From the pull definition, we see the bias value is defined relative to the total uncertainty in the signal strength (the denominator, \\(\\sigma_{fit}\\)). Some analyses use 0.14 as the threshold because a bias below this value would change the total uncertainty (when added in quadrature) by less than 1% (see equation below). Other analyses use 0.2 as this will change the total uncertainty by less than 2%. We should define the threshold before performing the bias study.</p> \\[ \\sqrt{ 1^2 + 0.14^2} = 1.0098 \\] <ul> <li>How does our bias value compare to the thresholds? If the bias is outside the acceptable region we should account for this using a spurious signal method (see advanced exercises TBA).</li> <li>Repeat the bias study for each possible combination of truth and fitted background functions. Do the bias values induced by the choice of background function merit adding a spurious signal component into the fit?</li> <li>What would you expect the bias value to be for a background function that does not fit the data well? Should we be worried about such functions? What test could we use to reject such functions from the study beforehand?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#part-5-discrete-profiling","title":"Part 5: Discrete-profiling","text":"<p>If multiple pdfs exist to fit some distribution, we can store all pdfs in a single workspace by using a <code>RooMultiPdf</code> object. The script <code>construct_models_multipdf_part5.py</code> shows how to store the exponential, (4th order) Chebychev polynomial and the power law function from the previous section in a <code>RooMultiPdf</code> object. This requires a <code>RooCategory</code> index, which controls which pdf is active at any one time. Look at the contents of the script and then run with: <pre><code>python3 construct_models_multipdf_part5.py\n</code></pre> The file <code>datacard_part5.txt</code> will load the multipdf as the background model. Notice the line at the end of the datacard (see below). This tells combine about the <code>RooCategory</code> index. <pre><code>pdfindex_Tag0         discrete\n</code></pre> Compile the datacard with: <pre><code>text2workspace.py datacard_part5.txt -m 125\n</code></pre></p> <p>The <code>RooMultiPdf</code> is a handy object for performing bias studies as all functions can be stored in a single workspace. You can then set which function is used for generating the toys with the <code>--setParameters pdfindex_Tag0=i</code> option, and which function is used for fitting with the <code>--setParameters pdfindex_Tag0=j --freezeParameters pdfindex_Tag0</code> options.</p> <p>Tasks and questions:</p> <ul> <li>It would be a useful exercise to repeat the bias studies from part 4 but using the RooMultiPdf workspace. What happens when you do not freeze the index in the fitting step?</li> </ul> <p>But simpler bias studies are not the only benefit of using the <code>RooMultiPdf</code>! It also allows us to apply the discrete profiling method in our analysis. In this method, the index labelling which pdf is active (a discrete nuisance parameter) is left floating in the fit, and will be profiled by looping through all the possible index values and finding the pdf which gives the best fit. 
In this manner, we are able to account for the uncertainty in the choice of the background function.</p> <p>Note, by default, the multipdf will tell combine to add 0.5 to the NLL for each parameter in the pdf. This is known as the penalty term (or correction factor) for the discrete profiling method. You can toggle this term when building the workspace with the command <code>multipdf.setCorrectionFactor(0.5)</code>. You may need to change the value of this term to obtain an acceptable bias in your fit!</p> <p>Let's run a likelihood scan using the compiled datacard with the <code>RooMultiPdf</code>: <pre><code>combine -M MultiDimFit datacard_part5.root -m 125 --freezeParameters MH -n .scan.multidimfit --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveSpecifiedIndex pdfindex_Tag0 --setParameterRanges r=0.5,2.5\n</code></pre> The option <code>--cminDefaultMinimizerStrategy 0</code> is required to prevent HESSE being called as this cannot handle discrete nuisance parameters. HESSE is the full calculation of the second derivative matrix (Hessian) of the likelihood using finite difference methods.</p> <p>The option <code>--saveSpecifiedIndex pdfindex_Tag0</code> saves the value of the index at each point in the likelihood scan. Let's have a look at how the index value changes as a function of the signal strength. You can make the following plot by running: <pre><code>python3 plot_pdfindex.py\n</code></pre></p> <p></p> <p>By floating the discrete nuisance parameter <code>pdfindex_Tag0</code>, at each point in the likelihood scan the pdfs will be iterated over and the one which gives the max likelihood (lowest 2NLL) including the correction factor will be used. The plot above shows that the <code>pdfindex_Tag0=0</code> (exponential) is chosen for the majority of r values, but this switches to <code>pdfindex_Tag0=1</code> (Chebychev polynomial) at the lower edge of the r range. We can see the impact on the likelihood scan by fixing the pdf to the exponential: <pre><code>combine -M MultiDimFit datacard_part5.root -m 125 --freezeParameters MH,pdfindex_Tag0 --setParameters pdfindex_Tag0=0 -n .scan.multidimfit.fix_exp --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveSpecifiedIndex pdfindex_Tag0 --setParameterRanges r=0.5,2.5\n</code></pre> Plotting the two scans on the same axis: <pre><code>plot1DScan.py higgsCombine.scan.multidimfit.MultiDimFit.mH125.root --main-label \"Pdf choice floating\" --main-color 1 --others higgsCombine.scan.multidimfit.fix_exp.MultiDimFit.mH125.root:\"Pdf fixed to exponential\":2 -o part5_scan --y-cut 35 --y-max 35\n</code></pre></p> <p></p> <p>The impact on the likelihood scan is evident at the lower edge, where the scan in which the index is floating flattens out. In this example, neither the \\(1\\sigma\\) or \\(2\\sigma\\) intervals are affected. But this is not always the case! Ultimately, this method allows us to account for the uncertainty in the choice of background function in the signal strength measurement.</p> <p>Coming back to the bias studies. Do you now understand what you are testing if you do not freeze the index in the fitting stage? In this case you are fitting the toys back with the discrete profiling method. This is the standard approach for the bias studies when we use the discrete-profiling method in an analysis.</p> <p>There are a number of options which can be added to the combine command to improve the performance when using discrete nuisance parameters. 
These are detailed at the end of this section in the combine manual.</p>"},{"location":"tutorial2023/parametric_exercise/#part-6-multi-signal-model","title":"Part 6: Multi-signal model","text":"<p>In reality, there are multiple Higgs boson processes which contribute to the total signal model, not only ggH. This section will explain how we can add an additional signal process (VBF) into the fit. Following this, we will add a second analysis category (Tag1), which has a higher purity of VBF events. To put this in context, the selection for Tag1 may require two jets with a large pseudorapidity separation and high invariant mass, which are typical properties of the VBF topology. By including this additional category with a different relative yield of VBF to ggH production, we are able to simultaneously constrain the rate of the two production modes.</p> <p>In the SM, the VBF process has a cross section which is roughly 10 times smaller than the ggH cross section. This explains why we need to use certain features of the event to boost the purity of VBF events. The LO Feynman diagram for VBF production is shown below.</p> <p></p>"},{"location":"tutorial2023/parametric_exercise/#building-the-models","title":"Building the models","text":"<p>Firstly, lets build the necessary inputs for this section using <code>construct_models_part6.py</code>. This script uses everything we have learnt in the previous sections: 1) Signal models (Gaussians) are built separately for each process (ggH and VBF) in each analysis category (Tag0 and Tag1). This uses separate <code>TTrees</code> for each contribution in the <code>mc_part6.root</code> file. The mean and width of the Gaussians include the effect of the parametric shape uncertainties, <code>nuisance_scale</code> and <code>nuisance_smear</code>. Each signal model is normalised according to the following equation, where \\(\\epsilon_{ij}\\) labels the fraction of process, \\(i\\) (=ggH,VBF), landing in analysis category, \\(j\\) (=Tag0,Tag1), and \\(\\mathcal{L}\\) is the integrated luminosity (defined in the datacard).</p> \\[ N_{ij} = \\sigma_i \\cdot \\mathcal{B}^{\\gamma\\gamma} \\cdot \\epsilon_{ij} \\cdot \\mathcal{L}\\] <p>2) A background model is constructed for each analysis category by fitting the mass sidebands in data. The input data is stored in the <code>data_part6.root</code> file. The models are <code>RooMultiPdfs</code> which contain an exponential, a 4th-order Chebychev polynomial and a power law function. The shape parameters and normalisation terms of the background models are freely floating in the final fit.</p> <p>Tasks and questions:</p> <ul> <li>Have a look through the <code>construct_models_part6.py</code> script and try to understand all parts of the model construction. When you are happy, go ahead and construct the models with: <pre><code>python3 construct_models_part6.py\n</code></pre></li> </ul> <p>The datacards for the two analysis categories are saved separately as <code>datacard_part6_Tag0.txt</code> and <code>datacard_part6_Tag1.txt</code>.</p> <ul> <li>Do you understand the changes made to include multiple signal processes in the datacard? What value in the <code>process</code> line is used to label VBF as a signal?</li> <li>Try compiling the individual datacards. What are the prefit ggH and VBF yields in each analysis category? You can find these by opening the workspace and printing the contents.</li> <li>Run the best fits and plot the prefit and postfit S+B models along with the data (see code in part 2). 
How does the absolute number of data events in Tag1 compare to Tag0? What about the signal-to-background ratio, S/B?</li> </ul> <p>In order to combine the two categories into a single datacard, we make use of the <code>combineCards.py</code> script: <pre><code>combineCards.py datacard_part6_Tag0.txt datacard_part6_Tag1.txt &gt; datacard_part6_combined.txt\n</code></pre></p>"},{"location":"tutorial2023/parametric_exercise/#running-the-fits","title":"Running the fits","text":"<p>If we use the default <code>text2workspace</code> command on the combined datacard, then this will introduce a single signal strength modifer which modifies the rate of all signal processes (ggH and VBF) by the same factor.</p> <p>Tasks and questions:</p> <ul> <li>Try compiling the combined datacard and running a likelihood scan. Does the sensitivity to the global signal strength improve by adding the additional analysis category \"Tag1\"?</li> </ul> <p>If we want to measure the independent rates of both processes simultaneously, then we need to introduce a separate signal strength for ggH and VBF. To do this we use the <code>multiSignalModel</code> physics model in combine by adding the following options to the <code>text2workspace</code> command: <pre><code>text2workspace.py datacard_part6_combined.txt -m 125 -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO \"map=.*/ggH:r_ggH[1,0,2]\" --PO \"map=.*/VBF:r_VBF[1,0,3]\" -o datacard_part6_combined_multiSignalModel.root\n</code></pre> The syntax for the parameter to process mapping is <code>map=category/process/POI[default,min,max]</code>. We have used the wildcard <code>.*</code> to tell combine that the POI (parameter of interest) should scale all cases of that process, regardless of the analysis category. The output of this command tells us what is scaled by the two signal strengths: <pre><code>Will scale  ch1/ggH  by  r_ggH\nWill scale  ch1/VBF  by  r_VBF\nWill scale  ch1/bkg_mass  by  1\nWill scale  ch2/ggH  by  r_ggH\nWill scale  ch2/VBF  by  r_VBF\nWill scale  ch2/bkg_mass  by  1\nWill scale  ch1/ggH  by  r_ggH\nWill scale  ch1/VBF  by  r_VBF\nWill scale  ch1/bkg_mass  by  1\nWill scale  ch2/ggH  by  r_ggH\nWill scale  ch2/VBF  by  r_VBF\nWill scale  ch2/bkg_mass  by  1\n</code></pre> Exactly what we require!</p> <p>To run a 1D \"profiled\" likelihood scan for ggH we use the following command: <pre><code>combine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .scan.part6_multiSignalModel_ggH --algo grid --points 20 --cminDefaultMinimizerStrategy 0 --saveInactivePOI 1 -P r_ggH --floatOtherPOIs 1\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>\"Profiled\" here means we are profiling over the other parameter of interest, <code>r_VBF</code> in the fit. In other words, we are treating <code>r_VBF</code> as an additional nuisance parameter. The option <code>--saveInactivePOI 1</code> stores the value of <code>r_VBF</code> in the combine output. Take a look at the fit output. Does the value of <code>r_VBF</code> depend on <code>r_ggH</code>? Are the two parameters of interest correlated? Remember, to look at the contents of the TTree you can use <code>limit-&gt;Show(i)</code>, where i is an integer labelling the point in the likelihood scan.</li> <li>Run the profiled scan for the VBF signal strength. Plot the <code>r_ggH</code> and <code>r_VBF</code> likelihood scans using the <code>plot1DScan.py</code> script. 
You will need to change some of the input options, in particular the <code>--POI</code> option. You can list the full set of options by running: <pre><code>plot1DScan.py --help\n</code></pre></li> </ul>"},{"location":"tutorial2023/parametric_exercise/#two-dimensional-likelihood-scan","title":"Two-dimensional likelihood scan","text":"<p>We can also run the fit at fixed points in (<code>r_ggH</code>,<code>r_VBF</code>) space. By using a sufficient number of points, we are able to map out the 2D likelihood surface. Let's change the ranges of the parameters of interest to match what we have found in the profiled scans: <pre><code>combine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .scan2D.part6_multiSignalModel --algo grid --points 800 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --setParameterRanges r_ggH=0.5,2.5:r_VBF=-1,2\n</code></pre> To plot the output you can use the <code>plot_2D_scan.py</code> script: <pre><code>python3 plot_2D_scan.py\n</code></pre> This script interpolates the 2NLL value between the scanned points so that the plot shows a smooth likelihood surface. You may find that in some cases the number of scanned points and the interpolation parameters need to be tuned to get a sensible-looking surface. This basically depends on how complicated the likelihood surface is.</p> <p></p> <p>Tasks and questions:</p> <ul> <li>The plot shows that the data is in agreement with the SM within the \\(2\\sigma\\) CL. Here, the \\(1\\sigma\\) and \\(2\\sigma\\) confidence interval contours correspond to 2NLL values of 2.3 and 5.99, respectively. Do you understand why this is? Think about Wilks' theorem.</li> <li>Does the plot show any correlation between the ggH and VBF signal strengths? Are the two positively or negatively correlated? Does this make sense for this pair of parameters given the analysis setup? Try repeating the 2D likelihood scan using the \"Tag0\" only datacard. How does the correlation behaviour change?</li> <li>How can we read off the \"profiled\" 1D likelihood scan constraints from this plot?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#correlations-between-parameters","title":"Correlations between parameters","text":"<p>For template-based analyses we can use the <code>FitDiagnostics</code> method in combine to extract the covariance matrix for the fit parameters. Unfortunately, this method is not compatible with the use of discrete nuisance parameters (<code>RooMultiPdf</code>). Instead, we can use the <code>robustHesse</code> method to find the Hessian matrix by finite difference methods. The matrix is then inverted to get the covariance. Subsequently, we can use the covariance to extract the correlations between fit parameters. <pre><code>combine -M MultiDimFit datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .robustHesse.part6_multiSignalModel --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --setParameterRanges r_ggH=0.5,2.5:r_VBF=-1,2 --robustHesse 1 --robustHesseSave 1 --saveFitResult\n</code></pre> The output file <code>robustHesse.robustHesse.part6_multiSignalModel.root</code> stores the correlation matrix (<code>h_correlation</code>). This contains the correlations between all parameters including the nuisances. 
So if we are interested in the correlation between <code>r_ggH</code> and <code>r_VBF</code>, we first need to find which bin corresponds to these parameters: <pre><code>root robustHesse.robustHesse.part6_multiSignalModel.root\n\nroot [1] h_correlation-&gt;GetXaxis()-&gt;GetBinLabel(19)\n(const char *) \"r_VBF\"\nroot [2] h_correlation-&gt;GetYaxis()-&gt;GetBinLabel(20)\n(const char *) \"r_ggH\"\nroot [3] h_correlation-&gt;GetBinContent(19,20)\n(double) -0.19822058\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>The two parameters of interest have a correlation coefficient of -0.198. This means the two parameters are somewhat anti-correlated. Does this match what we see in the 2D likelihood scan?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#impacts_1","title":"Impacts","text":"<p>We extract the impacts for each parameter of interest using the following commands: <pre><code>combineTool.py -M Impacts -d datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .impacts_part6_multiSignal --robustFit 1 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --doInitialFit\n\ncombineTool.py -M Impacts -d datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .impacts_part6_multiSignal --robustFit 1 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF --doFits\n\ncombineTool.py -M Impacts -d datacard_part6_combined_multiSignalModel.root -m 125 --freezeParameters MH -n .impacts_part6_multiSignal --robustFit 1 --cminDefaultMinimizerStrategy 0 -P r_ggH -P r_VBF -o impacts_part6.json\n\nplotImpacts.py -i impacts_part6.json -o impacts_part6_r_ggH --POI r_ggH\nplotImpacts.py -i impacts_part6.json -o impacts_part6_r_VBF --POI r_VBF\n</code></pre></p> <p>Tasks and questions:</p> <ul> <li>Look at the output PDF files. How does the ranking of the nuisance parameters change for the different signal strengths?</li> </ul>"},{"location":"tutorial2023/parametric_exercise/#advanced-exercises-to-be-added","title":"Advanced exercises (to be added)","text":"<p>The combine experts will include additional exercises here in due course. These will include:</p> <ul> <li>Convolution of model pdfs: <code>RooAddPdf</code></li> <li>Application of the spurious signal method</li> <li>Advanced physics models including parametrised signal strengths e.g. SMEFT</li> <li>Mass fits</li> <li>Two-dimensional parametric models</li> </ul>"},{"location":"tutorial2023_unfolding/unfolding_exercise/","title":"Likelihood Based Unfolding Exercise in Combine","text":""},{"location":"tutorial2023_unfolding/unfolding_exercise/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>, please follow the instructions from the home page. 
Make sure to use the latest recommended release.</p> <p>After setting up <code>Combine</code>, you can access the working directory for this tutorial which contains all of the inputs and scripts needed to run the unfolding fitting exercise:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/tutorial_unfolding_2023/\n</code></pre>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#exercise-outline","title":"Exercise outline","text":"<p>The hands-on exercise is split into seven parts: </p> <p>1) \"Simple\" Unfolding Experiment</p> <p>2) Producing the Migration matrix from the datacards</p> <p>3) Advanced Unfolding with more detector-level information and control regions</p> <p>4) Extracting the expected intervals</p> <p>5) Producing Impacts for multiple POIs</p> <p>6) Unfold to the generator-level quantities </p> <p>7) Extracting POI correlations from the FitDiagnostics output</p> <p>Throughout the tutorial there are a number of questions and exercises for you to complete. These are shown in boxes like this one. </p> <p>Note that some additional information on unfolding in <code>Combine</code> is available here, which also includes some information on regularization, which is not discussed in this tutorial. </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#analysis-overview","title":"Analysis overview","text":"<p>In this tutorial we will look at the cross section measurements of one of the SM Higgs processes, VH, in the \\(H\\to b\\bar{b}\\) (VHbb) final state. </p> <p>The measurement is performed within the Simplified Template Cross Section (STXS) framework, which provides the prediction in bins of the generator-level quantities \\(p_{T}(V)\\) and number of additional jets. The maximum likelihood based unfolding is performed to measure the cross section in the generator-level bins defined by the STXS scheme. At the detector-level we define appropriate categories to match the STXS bins as closely as possible so that there is a good correspondence between the detector-level observable and the underlying generator-level quantity we are interested in.  </p> <p> </p> <p>Note that for this STXS measurement, as well as measuring the cross-section as a function of the \\(p_{T}\\) of the vector boson, the measurement includes some information on the number of additional jets and is performed over multiple different production processes. However, it is common to focus on a single distribution (e.g. \\(p_{T}\\)) for a single process (e.g. \\(t\\bar{t}\\)).</p> <p>In this tutorial we will focus on the ZH production, with the Z boson decaying to charged leptons, and the Higgs boson reconstructed from the resolved \\(b\\bar{b}\\) pair. We will also use only a part of the Run 2 categories, so we will not achieve the same sensitivity as the full analysis. Note that ggZH and ZH production modes are combined in the fit, since it is not possible to resolve them at this stage of the analysis. The STXS categories are defined independently of the Higgs decay channel, to streamline the combination of the cross section measurements. </p> <p>In the first part of the tutorial, we will set up a relatively simple unfolding, where there is a single detector-level bin for every generator-level bin we are trying to measure. We will then perform a blind analysis using this setup to see the expected sensitivity. </p> <p>In this simple version of the analysis, we use a series of datacards, one for each detector-level bin, implemented as a counting experiment. 
We then combine the datacards for the full measurement. It is also possible to implement the same analysis as a single datacard, passing a histogram with each of the detector-level bins. Either method can be used, depending on which is more practical for the analysis being considered. </p> <p>In the second part of the tutorial we will perform the same measurement with a more advanced setup, making use of differential distributions per generator-level bin we are trying to measure, as well as control regions. By providing this additional information to the fit, we are able to achieve a better and more robust unfolding result. After checking the expected sensitivity, we will take a look at the impacts and pulls of the nuisance parameters. Then we will unblind and look at the results of the measurement, produce generator-level plots and provide the correlation matrix for our measured observables.</p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#simplified-unfolding","title":"Simplified unfolding","text":"<p>When determining the detector-level binning for any differential analysis the main goal is to choose a binning that distinguishes contributions from the various generator-level bins well. In the simplest case this can be done with a cut-based approach, i.e. applying the same binning for the detector-level observables as is being applied to the generator-level quantities being measured. In this case, that means binning in \\(p_{T}(Z)\\) and \\(n_{\\text{add. jets}}\\).  Due to the good lepton \\(p_{T}\\) resolution we can follow the original STXS scheme quite closely with the detector-level selection, with one exception: it is not possible to access the very low transverse momentum bin \\(p_{T}(Z)&lt;75\\) GeV.  </p> <p>In the <code>counting/regions</code> directory you can find the datacards with five detector-level categories, each targeting a corresponding generator-level bin. Below you can find an example of the datacard for the detector-level bin with \\(p_{T}(Z)&gt;400\\) GeV. 
</p> <pre><code>imax    1 number of bins\njmax    9 number of processes minus 1\nkmax    * number of nuisance parameters\n--------------------------------------------------------------------------------\n--------------------------------------------------------------------------------\nbin          vhbb_Zmm_gt400_13TeV\nobservation  12.0\n--------------------------------------------------------------------------------\nbin                                   vhbb_Zmm_gt400_13TeV   vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV   vhbb_Zmm_gt400_13TeV     vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV vhbb_Zmm_gt400_13TeV\nprocess                               ggZH_lep_PTV_GT400_hbb ZH_lep_PTV_GT400_hbb ZH_lep_PTV_250_400_hbb ggZH_lep_PTV_250_400_hbb Zj1b            Zj0b_c          Zj0b_udsg       VVLF            Zj2b            VVHF\nprocess                               -3                     -2                   -1                     0                        1               2               3               4               5               6\nrate                                  0.0907733              0.668303             0.026293               0.00434588               3.78735         2.58885         4.09457         0.413716        7.02731         0.642605\n--------------------------------------------------------------------------------\n</code></pre> <p>You can see the contributions from various background processes, namely Z+jets, \\(t\\bar{t}\\) and the single top, as well as the signal processes (ggZH and ZH) corresponding to the STXS scheme discussed above. Note that for each generator-level bin being measured, we assign a different process in combine. This is so that the signal strengths for each of their contributions can float independently in the measurement. Also note, that due to migrations, each detector-level bin will receive contributions from multiple generator-level bins.</p> <p>One of the most important stages in the analysis design, is to make sure that the detector-level categories are well-chosen to target the corresponding generator-level processes.</p> <p>To explicitly check the correspondance between detector- and generator-level, one can plot the contributions of each of the generator-level bins in all of the detector-level bins. You can use the script provided in the tutorial git-lab page. This script uses <code>CombineHarvester</code> to loop over detector-level bins, and get the rate at which each of the signal processes (generator-level bins) contributes to that detector-level bin; which is then used to plot the migration matrix. </p> <p><pre><code>python scripts/get_migration_matrix.py counting/combined_ratesOnly.txt\n</code></pre> </p> <p>The migration matrix shows the generator-level bins on the x-axis and the corresponding detector-level bins on the y-axis. The entries are normalized such that the sum of all contributions for a given generator-level bin sum up to 1. With this convention, the numbers in each bin represent the probability that an event from a given generator-level bin is reconstructed in a given detector-level bin if it is reconstructed at all within the considered bins.</p> <p>Now that we checked the response matrix we can attempt the maximum likelihood unfolding. 
We can use the <code>multiSignalModel</code> physics model available in <code>Combine</code>, which assigns a parameter of interest <code>poi</code> to a process <code>p</code> within a bin <code>b</code> using the syntax <code>--PO 'map=b/p:poi[init, min, max]'</code> to linearly scale the normalisation of this process under the parameter of interest (POI) variations. To create the workspace we can run the following command:  <pre><code>text2workspace.py -m 125  counting/combined_ratesOnly.txt -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/.*ZH_lep_PTV_75_150_hbb:r_zh_75_150[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_150_250_0J_hbb:r_zh_150_250noj[1,-5,5]'  --PO 'map=.*/.*ZH_lep_PTV_150_250_GE1J_hbb:r_zh_150_250wj[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_250_400_hbb:r_zh_250_400[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_GT400_hbb:r_zh_gt400[1,-5,5]' -o ws_counting.root\n</code></pre> In the example given above a signal POI is assigned to each generator-level bin independently of the detector-level bin. This allows the measurement to take into account migrations. </p> <p>To extract the measurement let's run the initial fit first using the <code>MultiDimFit</code> method implemented in <code>Combine</code> to extract the best-fit values and uncertainties on all floating parameters:  </p> <pre><code>combineTool.py -M MultiDimFit --datacard ws_counting.root --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 -t -1 \n</code></pre> <p>With the option <code>-t -1</code> we set <code>Combine</code> to fit the Asimov dataset instead of the actual data. The <code>--setParameters &lt;param&gt;=&lt;value&gt;</code> option sets the initial value of the parameter named <code>&lt;param&gt;</code>. <code>--redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400</code> sets the POIs to the comma-separated list, instead of the default one, <code>r</code>.</p> <p>While the uncertainties on the parameters of interest (POIs) can be extracted in multiple ways, the most robust way is to run the likelihood scans for a POI corresponding to each generator-level bin; this allows you to spot discontinuities in the likelihood shape in case of problems with the fit or the model. </p> <p><pre><code>combineTool.py -M MultiDimFit --datacard ws_counting.root -t -1 --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --algo=grid --points=100 -P r_zh_75_150 --floatOtherPOIs=1 -n scan_r_zh_75_150\n</code></pre> Now we can plot the likelihood scan and extract the expected intervals.</p> <p><pre><code>python scripts/plot1DScan.py higgsCombinescan_r_zh_75_150.MultiDimFit.mH120.root -o r_zh_75_150 --POI r_zh_75_150\n</code></pre> * Repeat for all POIs</p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#shape-analysis-with-control-regions","title":"Shape analysis with control regions","text":"<p>One of the advantages of the maximum likelihood unfolding is the flexibility to choose the analysis observable and include more information on the event kinematics, consequently improving the analysis sensitivity. This analysis benefits from the shape information of the DNN output trained to differentiate the VH(bb) signal from the SM backgrounds. 
</p> <p>The datacards for this part of the exercise are located in <code>full_model_datacards/</code>, where you can find a separate datacard for each region within the <code>full_model_datacards/regions</code> directory and also a combined datacard <code>full_model_datacards/comb_full_model.txt</code>. In this case, each of the detector-level bins being used in the unfolding above is now split into multiple bins according to the DNN output score. This provides extra discrimination power to separate the signal from background and improve the measurement.</p> <p>As you will find, the datacards also contain several background processes. To control them properly we will also add regions enriched in the respective backgrounds. Then we can define a common set of rate parameters for signal and control regions to scale the rates or other parameters affecting their shape.  </p> <p>For the shape datacards one has to specify the mapping of histograms and channels/processes as described below:</p> <p><pre><code>shapes [process] [channel] [file] [nominal] [systematics_templates]\n</code></pre> Then the <code>shape</code> nuisance parameters can be defined in the systematics block in the datacard. More details can be found in the <code>Combine</code> documentation pages.</p> <p>In many CMS analyses there are hundreds of nuisance parameters corresponding to various sources of systematic uncertainty. </p> <p>When we unfold to the generator-level quantities we should remove the nuisances affecting the rate of the generator-level bins, i.e. when measuring a given cross-section such as \\(\\sigma_{\\textrm{gen1}}\\), the nuisance parameters should not change the value of that parameter itself; they should only change the relationship between that parameter and the observations.  This means that, for example, effects of renormalization and factorization scales on the generator-level cross section within each bin need to be removed. Only their effects on the detector-level distribution through changes of shape within each bin as well as acceptances and efficiencies should be considered. </p> <p>For this analysis, that means removing the <code>lnN</code> nuisance parameters: <code>THU_ZH_mig*</code> and  <code>THU_ZH_inc</code>; keeping only the acceptance <code>shape</code> uncertainties: <code>THU_ZH_acc</code> and <code>THU_ggZH_acc</code>, which do not scale the inclusive cross sections by construction. In this analysis the normalisation effects in the <code>THU_ZH_acc</code> and <code>THU_ggZH_acc</code> templates were already removed from the shape histograms. Removing the normalization effects can be achieved by removing the corresponding nuisance parameters from the datacard. Alternatively, you can freeze the respective nuisance parameters with the option <code>--freezeParameters par_name1,par_name2</code>, or create a group following the syntax given below at the end of the combined datacard and freeze the parameters with the <code>--freezeNuisanceGroups group_name</code> option.</p> <pre><code>[group_name] group = uncertainty_1 uncertainty_2 ... 
uncertainty_N\n</code></pre> <p>Now we can create the workspace using the same <code>multiSignalmodel</code> as before:</p> <pre><code>text2workspace.py -m 125  full_model_datacards/comb_full_model.txt -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel  --PO verbose --PO 'map=.*/.*ZH_lep_PTV_75_150_hbb:r_zh_75_150[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_150_250_0J_hbb:r_zh_150_250noj[1,-5,5]'  --PO 'map=.*/.*ZH_lep_PTV_150_250_GE1J_hbb:r_zh_150_250wj[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_250_400_hbb:r_zh_250_400[1,-5,5]' --PO 'map=.*/.*ZH_lep_PTV_GT400_hbb:r_zh_gt400[1,-5,5]' --for-fits --no-wrappers --X-pack-asympows --optimize-simpdf-constraints=cms --use-histsum -o ws_full.root\n</code></pre> <p>As you might have noticed we are using a few extra versions <code>--for-fits --no-wrappers --X-pack-asympows --optimize-simpdf-constraints=cms --use-histsum</code> to create a workspace. They are needed to construct a more optimised pdf using the <code>CMSHistSum</code> class implemented in Combine to significantly lower the memory consumption.</p> <ul> <li>Following the instructions given earlier, create the workspace and run the initial fit with <code>-t -1</code>. </li> </ul> <p>Since this time the datacards include shape uncertainties as well as additional categories to improve the background description the fit might take much longer, but we can submit jobs to a batch system by using the combine tool and have results ready to look at in a few minutes.  </p> <pre><code>combineTool.py -M MultiDimFit -d ws_full.root --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400  -t -1 --X-rtd FAST_VERTICAL_MORPH --algo=grid --points=50 --floatOtherPOIs=1 -n .scans_blinded --job-mode condor --task-name scans_zh  --split-points 1 --generate P:n::r_zh_gt400,r_zh_gt400:r_zh_250_400,r_zh_250_400:r_zh_150_250wj,r_zh_150_250wj:r_zh_150_250noj,r_zh_150_250noj:r_zh_75_150,r_zh_75_150\n</code></pre> <p>The option <code>--X-rtd FAST_VERTICAL_MORPH</code> is added here and for all <code>combineTool.py -M MultiDimFit ...</code> to speed up the minimisation. </p> <p>The job submission is handled by the <code>CombineHarvester</code>, the combination of options <code>--job-mode condor --task-name scans_zh  --split-points 1 --generate P:n::r_zh_gt400,r_zh_gt400:r_zh_250_400,r_zh_250_400:r_zh_150_250wj,r_zh_150_250wj:r_zh_150_250noj,r_zh_150_250noj:r_zh_75_150,r_zh_75_150</code> will submit the jobs to HTCondor for each POI.  The <code>--generate</code> option is is being used to automatically generate jobs attaching the options <code>-P &lt;POI&gt; -n &lt;name&gt;</code> with each of the pairs of values <code>&lt;POI&gt;,&lt;name&gt;</code> specified between the colons. You can add <code>--dry-run</code> option to create the submissions files first and check them, and then submit the jobs with <code>condor_submit condor_scans_zh.sub</code>. </p> <p>If you are running the tutorial from a cluster where HTCondor is not available you can also submit the jobs to the slurm system, just change the <code>--job-mode condor</code> to <code>--job-mode slurm</code>. 
</p> <p>After all jobs are completed we can combine the files for each POI: </p> <pre><code>for p in r_zh_75_150 r_zh_150_250noj r_zh_150_250wj r_zh_250_400 r_zh_gt400\ndo\n    hadd -k -f scan_${p}_blinded.root higgsCombine.scans_blinded.${p}.POINTS.*.MultiDimFit.mH120.root\ndone\n</code></pre> <p>And finally plot the likelihood scans: </p> <p><pre><code>python scripts/plot1DScan.py scan_r_zh_75_150_blinded.root  -o scan_r_zh_75_150_blinded --POI r_zh_75_150 --json summary_zh_stxs_blinded.json\n</code></pre> </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#impacts","title":"Impacts","text":"<p>One of the important tests before we move to the unblinding stage is to check the impacts of nuisance parameters on each POI. For this we can run <code>combineTool.py</code> with the <code>-M Impacts</code> method. We start with the initial fit, which should take about 20 minutes (good time to have a coffee break!):</p> <pre><code>combineTool.py -M Impacts -d ws_full.root -m 125 --robustFit 1 --doInitialFit --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --X-rtd FAST_VERTICAL_MORPH\n</code></pre> <p>Note that it is important to add the option <code>--redefineSignalPOIs [list of parameters]</code>, to produce the impacts for all POIs we defined when the workspace was created with the <code>multiSignalModel</code>.</p> <p>After the initial fit is completed we can perform the likelihood scans for each nuisance parameter.  We will submit the jobs to HTCondor to speed up the process.</p> <pre><code>combineTool.py -M Impacts -d ws_full.root -m 125 --robustFit 1 --doFits --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --job-mode condor --task-name impacts_zh --X-rtd FAST_VERTICAL_MORPH \n</code></pre> <p>Now we can combine the results into the <code>.json</code> format and use it to produce the impact plots.</p> <pre><code>combineTool.py -M Impacts -d ws_full.root -m 125 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400 --output impacts.json \n\nplotImpacts.py -i impacts.json -o impacts_r_zh_75_150 --POI r_zh_75_150\n</code></pre> <p> * Do you observe differences in the impacts plots for the different POIs? Do these differences make sense to you? </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#unfolded-measurements","title":"Unfolded measurements","text":"<p>Now that we have studied the nuisance parameter impacts for each POI, we can finally perform the measurement. Note that for the purposes of the tutorial, we are skipping further checks and validation that you should do on your analysis. Namely, the goodness-of-fit test and the post-fit plots of the folded observables. Both of these checks were detailed in the previous exercises, which you can find under the following link. </p> <p>At this stage we'll run the <code>MultiDimFit</code> again scanning each POI to calculate the intervals, but this time we'll remove the <code>-t -1</code> option to extract the unblinded results. </p> <p>Also, since we want to unfold the measurements to the generator-level observables, i.e. extract the cross sections, we remove the theoretical uncertainties affecting the rates of the signal processes. We can do this by freezing them with <code>--freezeNuisanceGroups &lt;group_name&gt;</code>, using the <code>group_name</code> you assigned earlier in the tutorial. </p> <p>Now plot the scans and collect the measurements in the json file <code>summary_zh_stxs.json</code>. 
</p> <pre><code>python scripts/plot1DScan.py scan_r_zh_75_150.root -o r_zh_75_150 --POI r_zh_75_150 --json summary_zh_stxs.json  \n</code></pre> <p></p> <p>Repeat the same command for other POIs to fill the <code>summary_zh_stxs.json</code>, which can then be used to make the cross section plot by multiplying the standard model cross sections by the signal strengths' best-fit values as shown below. </p> <p><pre><code>python scripts/make_XSplot.py summary_zh_stxs.json\n</code></pre> </p>"},{"location":"tutorial2023_unfolding/unfolding_exercise/#poi-correlations","title":"POI correlations","text":"<p>In addition to the cross-section measurements it is very important to publish covariance or correlation information of the measured cross sections.  This allows the measurement to be properly interpreted or reused in combined fits.  </p> <p>The correlation matrix or covariance matrix can be extracted from the results after the fit. Here we can use the <code>FitDiagnostics</code> or <code>MultiDimFit</code> method.</p> <pre><code>combineTool.py -M FitDiagnostics --datacard ws_full.root --setParameters r_zh_250_400=1,r_zh_150_250noj=1,r_zh_75_150=1,r_zh_150_250wj=1,r_zh_gt400=1 --redefineSignalPOIs r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400  --robustHesse 1 -n .full_model --X-rtd FAST_VERTICAL_MORPH\n</code></pre> <p>Then the <code>RooFitResult</code>, containing the correlation matrix, can be found in the <code>fitDiagnostics.full_model.root</code> file under the name <code>fit_s</code>. The script <code>plotCorrelations_pois.py</code> from the exercise git-lab repository can help to plot the correlation matrix.</p> <p><pre><code>python scripts/plotCorrelations_pois.py -i fitDiagnostics.full_model.root:fit_s -p r_zh_75_150,r_zh_150_250noj,r_zh_150_250wj,r_zh_250_400,r_zh_gt400\n</code></pre> </p>"},{"location":"tutorial_stat_routines/stat_routines/","title":"Understanding Statistical Routines in Combine","text":""},{"location":"tutorial_stat_routines/stat_routines/#getting-started","title":"Getting started","text":"<p>To get started, you should have a working setup of <code>Combine</code>; please follow the instructions from the home page. Make sure to use the latest recommended release.</p> <p>After setting up <code>Combine</code>, you can access the working directory for this tutorial which contains all of the inputs and scripts needed to run this exercise:</p> <pre><code>cd $CMSSW_BASE/src/HiggsAnalysis/CombinedLimit/data/tutorials/statistical_routines_tutorial\n</code></pre>"},{"location":"tutorial_stat_routines/stat_routines/#the-model","title":"The model","text":"<p>This tutorial will go through various statistical routines in combine in detail using a very simple counting experiment model. 
There is a single channel with contributions from Higgs production and WW production, and three nuisance parameters.</p> <p>The model details can be seen in the <code>datacard.txt</code> file in the tutorial directory.</p> <p>The parameter of interest (POI) for this model is a single signal strength parameter (called <code>r</code> or \\(\\mu\\)) which scales the total yield of the signal (Higgs) process.</p> <p>We will use this model to run statistical tests such as estimating the Higgs boson cross section, attempting to discover the Higgs boson, and setting limits on the cross section.</p>"},{"location":"tutorial_stat_routines/stat_routines/#estimating-a-best-fit-value","title":"Estimating a best fit value","text":"<p>The most commonly used statistical routines in combine are frequentist maximum-likelihood based routines. For these routines, \"best-fit\" values of a parameter, \\(\\mu\\), are denoted \\(\\hat{\\mu}\\) and they are determined by finding the value of that parameter which maximizes the likelihood, \\(\\mathrm{L}(\\mu)\\).</p> <p>In combine you can find the best-fit value of your parameter of interest with the <code>MultiDimFit</code> routine:</p> <pre><code>combine -M MultiDimFit datacard.txt\n</code></pre> <p>You should get some output, which prints the best fit signal strength</p> <pre><code>Doing initial fit: \n\n --- MultiDimFit ---\nbest fit parameter values: \n   r :    +0.266\n</code></pre>"},{"location":"tutorial_stat_routines/stat_routines/#uncertainty-intervals","title":"Uncertainty Intervals","text":"<p>To get more information, you can add the <code>--algo singles</code> flag, which will also calculate the uncertainty on the parameter. In order to get the full uncertainty, let's also change the limits on <code>r</code>, which are \\([0,20]\\) by default:</p> <pre><code>combine -M MultiDimFit datacard.txt --algo singles --rMin -10 --rMax 10\n</code></pre> <p>Now the output should contain the uncertainties as well as the best fit value</p> <pre><code>Doing initial fit: \n\n --- MultiDimFit ---\nbest fit parameter values and profile-likelihood uncertainties: \n   r :    +0.260   -1.004/+1.265 (68%)\nDone in 0.00 min (cpu), 0.00 min (real)\n</code></pre> <p>These uncertainty intervals have been computed internally as part of the fit. What do they mean, and how are they determined?</p> <p>These are frequentist confidence intervals, which means that if our statistical model is good enough and we were to perform repeated experiments of this type, we would expect that 68% of the confidence intervals we produce would contain the true value of our signal strength parameter, <code>r</code>.</p> <p>These can be constructed from first principles using the Neyman Construction, but in practice they are usually constructed assuming Wilks' theorem. Wilks' theorem tells us what the expected distribution of the likelihood ratio \\(\\Lambda = \\frac{\\mathrm{L}(r)}{\\mathrm{L}(\\hat{r})}\\) is, and from this we can construct confidence intervals. In practice, we use the log-likelihood ratio \\(t_r \\equiv -2 \\ln( \\Lambda )\\), rather than the likelihood ratio itself. 
The confidence interval is constructed by finding all values of <code>r</code> for which \\(-2 \\ln(\\Lambda)\\) is below a threshold value which depends on the confidence level we are using.</p> <p>We can also calculate the best fit value and confidence interval using the <code>FitDiagnostics</code> routine:</p> <pre><code>combine -M FitDiagnostics datacard.txt --rMin -10 --rMax 10\n</code></pre> <p>which should give you a compatible result.  The <code>FitDiagnostics</code> routine also produces an output file called <code>fitDiagnosticsTest.root</code> which contains the full result of fits of both the background-only and signal + background model.</p> <p>You can see the results of the signal + background model fit by opening the file and checking the fit result:</p> <pre><code>root -l fitDiagnosticsTest.root\nroot [1]&gt; fit_s-&gt;Print()\n  RooFitResult: minimized FCN value: -2.92406e-05, estimated distance to minimum: 7.08971e-07\n                covariance matrix quality: Full, accurate covariance matrix\n                Status : MINIMIZE=0 HESSE=0 \n\n    Floating Parameter    FinalValue +/-  Error   \n  --------------------  --------------------------\n                  lumi    3.1405e-04 +/-  1.00e+00\n                     r    2.6039e-01 +/-  1.12e+00\n                 xs_WW    8.6964e-04 +/-  1.00e+00\n                xs_ggH    7.3756e-04 +/-  1.00e+00\n</code></pre> <p>Notice that in this case, the uncertainty interval for <code>r</code> is reported as a symmetric interval.  What's the difference between this interval and the asymmetric one?</p> <p>In both cases, the interval is found by determining the values of <code>r</code> for which \\(-2 \\ln(\\Lambda)\\) is below the threshold value, which in this case for the 68% interval is 1. Both algorithms take the best fit value \\(\\hat{r}\\), for which \\(-2 \\ln(\\Lambda)\\) will always be 0, and then try to find the interval by estimating the crossing points where \\(-2 \\ln(\\Lambda) = 1\\).</p> <p>However, the different intervals estimate this value in different ways. The asymmetric intervals are \"minos errors\", which means that the crossing points were determined by explicitly scanning the likelihood as a function of <code>r</code> to look for the crossing, while minimizing other parameters at each step (profiling). The symmetric intervals are \"hesse errors\", which means that the crossing points were determined by taking the matrix of second-order partial derivatives (Hessian) at the minimum, and inverting it to estimate the crossing assuming all other derivatives vanish.</p> <p>The information printed under the <code>Status</code> section of the <code>RooFitResult</code> shows that the minimization succeeded and that the Hessian was positive definite, i.e. that all of its eigenvalues are positive, as they should be at the minimum of a function.  
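</p> <p>You can also inspect the same fit result programmatically. Below is a minimal PyROOT sketch (assuming the <code>fitDiagnosticsTest.root</code> file produced above and a Python-enabled ROOT installation); it is an illustration rather than part of the tutorial scripts:</p> <pre><code>import ROOT\n\n# open the FitDiagnostics output and retrieve the signal+background fit result\nf = ROOT.TFile.Open(\"fitDiagnosticsTest.root\")\nfit_s = f.Get(\"fit_s\")\n\n# overall fit health: minimizer status and covariance matrix quality\nprint(\"status:\", fit_s.status(), \"covQual:\", fit_s.covQual())\n\n# best fit values and symmetric (hesse) uncertainties of the floating parameters\npars = fit_s.floatParsFinal()\nfor i in range(pars.getSize()):\n    par = pars.at(i)\n    print(par.GetName(), par.getVal(), \"+/-\", par.getError())\n\n# correlation between the POI and one of the nuisance parameters\nprint(\"corr(r, lumi):\", fit_s.correlation(\"r\", \"lumi\"))\n</code></pre> <p>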
If the HESSE status is not 0 or the covariance matrix quality indicates it had to be forced positive definite, this indicates that there are problems with the fit.</p>"},{"location":"tutorial_stat_routines/stat_routines/#running-an-explicit-likelihood-scan","title":"Running an explicit likelihood scan","text":"<p>You can see that the minos errors should match the crossing points of the likelihood-ratio by explicitly scanning the likelihood function with <code>MultiDimFit</code>, using the <code>--algo grid</code> option and specifying the range and how many points to scan:</p> <pre><code>combine -M MultiDimFit datacard.txt --algo grid --points 100 --rMin -2 --rMax 6\n</code></pre> <p>The results can then be plotted using the <code>plot1Dscan.py</code> script, using the file <code>higgsCombineTest.MultiDimFit.mH120.root</code> which was output by the scan:</p> <pre><code>python3 ../../../scripts/plot1DScan.py --POI r higgsCombineTest.MultiDimFit.mH120.root\n</code></pre> <p>it should produce an output pdf and png files which look like the one shown below. You can see the best fit values, as well as the crossing points for the \\(1\\sigma\\) and \\(2\\sigma\\) intervals.</p> <p></p>"},{"location":"tutorial_stat_routines/stat_routines/#uncertainty-intervals-from-first-principles","title":"Uncertainty intervals from first principles","text":"<p>All the methods mentioned above rely on Wilks' theorem, which only holds under certain conditions. In some cases, particularly those of low statistics or some other cases, such as sometimes when the yield depends quadratically on the parameter of interest, Wilks' theorem will not be a good approximation.</p> <p>One thing you can do is check the uncertainty intervals explicitly, following the Neyman Construction. In order to do this you would scan your signal strength parameter <code>r</code>, at each value generating a set of pseudodata toys to determine the expected distribution of the test statistic \\(t_\\mu\\) under the hypothesis that the true signal strength is \\(\\mu\\). Then, you can check the distribution of \\(t_\\mu\\) and find the critical value of the test statistic, \\(t'\\), such that:</p> \\[ \\int_{t'}^{\\infty} p(t_{\\mu}) \\mathrm{d}t_{\\mu} =(1 - \\mathrm{CL}) \\] <p>where \\(\\mathrm{CL}\\) is the confidence level you are trying to reach (e.g. 68%).</p> <p>For a given value of <code>r</code>, we can check the test statistic distribution explicitly and determine the crossing point of the interval. Let's do that for the upper end of our confidence interval, using the <code>MultiDimFit</code> method.</p> <p>We do it by generating a number of toy datasets with <code>r = 1.525</code>, which is our upper bound value.  Then we calculate the test statistic: </p> \\[ t_{\\mu=1.525} = -2 \\ln (\\frac{\\mathrm{L}(r=1.525)}{\\mathrm{L}(r=\\hat{r})}) \\] <p>on each of these toy datasets, and fill a histogram with the results to determine the expect distribution under the the null hypothesis (in this case that <code>r</code> = 1.525). We could do this all in one command using the <code>-t &lt;n_toys&gt;</code> functionality of combine, but let's split it into two steps to make the logic more clear.</p> <p>First, lets generate a set of 1000 toys from our model with <code>r</code> = 1.525. 
Since we want to generate frequentist toys (since we are calculating a frequentist confidence interval), we also need the <code>--toysFrequentist</code> option.</p> <pre><code>combine -M GenerateOnly datacard.txt -t 1000 --toysFrequentist --setParameters r=1.525 --saveToys \n</code></pre> <p>Now we can tell <code>MultiDimFit</code> to run over these toys by using the output from the previous step, with the command line argument <code>--toysFile &lt;output_file_from_toy_generation&gt;</code>. To calculate the test statistic with MultiDimFit we will use <code>--algo fixed --fixedPointPOIs r=1.525</code> to tell <code>MultiDimFit</code> to calculate the log-likelihood ratio using that point in the numerator. The full command is then:</p> <pre><code>combine -M MultiDimFit datacard.txt --rMin -10 --rMax 10 --algo fixed --fixedPointPOIs r=1.525 -t 500 --toysFrequentist   --toysFile higgsCombineTest.GenerateOnly.mH120.123456.root\n</code></pre> <p>We can inspect the results of all of our toy fits by opening the <code>higgsCombineTest.MultiDimFit.mH120.123456.root</code> file our command created, and looking at the <code>limit</code> tree contained in it. The log-likelihood ratio \\(-\\ln(\\Lambda)\\) is stored in the <code>deltaNLL</code> branch of the tree. For the <code>fixed</code> algorithm, there are two entries stored in the tree for every dataset: one for the best fit point, and one for the fixed point passed as the aregument to <code>--fixedPointPOIs</code>. In order to select only the values we are interest in we can pass the requirement <code>quantileExpected &gt;= 0</code> to our TTree selection, because combine uses the value <code>-1</code> for <code>quantileExpected</code> to indicate best fit points.</p> <p>You can draw the \\(t_{\\mu}\\) distribution with:</p> <pre><code>root -l higgsCombineTest.MultiDimFit.mH120.123456.root\nroot [1] &gt; limit-&gt;Draw(\"2*deltaNLL\",\"quantileExpected &gt;= 0\")\n</code></pre> <p>To test wether or not this point should be rejected, we first define the confidence level of our rejection, say \\(1\\sigma\\) (approximately 68%), then we use the empirical distribution of the test statistic to estimate the cut-off value of the test statistic. This is done for you in the script <code>get_quantile.py</code>, which you can run:</p> <pre><code>python3 get_quantile.py --input higgsCombineTest.MultiDimFit.mH120.123456.root\n</code></pre> <ul> <li>How does the value estimated from this method compare to the value using Wilks' theorem and the methods above?</li> <li>How does the value change if you generate more toys?</li> <li>Check the observed value of the test statistic on the data, how does it compare to threshold value for this point? Is the point accepted or rejected?</li> </ul> <p>You can do the toy data generation and the fits in one step for a given parameter value with the command:</p> <pre><code>combine -M MultiDimFit datacard.txt --rMin -10 --rMax 10 --algo fixed --fixedPointPOIs r=&lt;r_val&gt; --setParameters r=&lt;r_val&gt; -t &lt;ntoys&gt; --toysFrequentist\n</code></pre> <p>Test out a few values of <code>r</code> and see if they all give you the same result.  What happens for <code>r</code> less than about -1? Can you explain why? (hint: look at the values in the datacard)</p>"},{"location":"tutorial_stat_routines/stat_routines/#significance-testing","title":"Significance Testing","text":"<p>For significance testing, we want to test the compatibility of our model with the background-only hypothesis \\(\\mu = 0\\). 
However, when performing significance testing we are typically only interested in rejecting the null hypothesis if the confidence level is very high (e.g. \\(5\\sigma\\)). Furthermore, we typically use a modified test-statistic \\(q_0\\) which is equal to 0 whenever the best-fit signal strength is less than 0, to avoid rejecting the null hypothesis due to a deficit of events.</p> <p>A typical significance test can be run with combine using the <code>Significance</code> method:</p> <pre><code>combine -M Significance datacard.txt\n</code></pre> <p>for this datacard, we get a very modest significance of about <code>0.24</code>, meaning we fail to reject the null hypothesis. This method is run using the asymptotic approximation, which relies on Wilks' theorem, similar to as it was used above. Under this approximation the significance is directly related to our test statistic, \\(q_0\\) by: Significance = \\(\\sqrt{q_0}\\). So for positive values of \\(\\hat{r}\\) we can read the Significance from the likelihood scan, by checking the value at the origin.</p> <pre><code>combine -M MultiDimFit datacard.txt --algo grid --points 100 --rMin -0.1 --rMax 1\npython3 ../../../scripts/plot1DScan.py --POI r higgsCombineTest.MultiDimFit.mH120.root --y-max 0.5\n</code></pre> <p>This will produce the same likelihood scan as before, but where we've restricted the range to be able to see the value of the curve at <code>r</code> = 0 more clearly. As expected, the crossing happens at around \\(0.24^2\\)</p> <p></p>"},{"location":"tutorial_stat_routines/stat_routines/#going-beyond-the-asymptotic-approximation-with-hybrid-new","title":"Going beyond the Asymptotic Approximation with Hybrid New","text":"<p>We could move beyond the asymptotic approximation as we did before by generating toys and explicitly calculating the test statistic. In order to do this, we would simply run <code>MultiDimFit</code> using:</p> <pre><code>combine -M MultiDimFit datacard.txt --rMin -10 --rMax 10 --algo fixed --fixedPointPOIs r=0 --setParameters r=0 -t 500 --toysFrequentist\n</code></pre> <p>and then calculate the value of \\(q_0\\) for every toy, check their distribution and compare the observed value in data to the distribution from the toys.</p> <p>However, we can also use the <code>HybridNew</code> method, which has a built-in routine to do this for us, and save us the work of calculating the test-statistic values ourselves.</p> <pre><code>combine -M HybridNew --LHCmode LHC-significance datacard.txt\n</code></pre> <p>We see that in this case, the value is a little different from the asymptotic approximation, though not drastically so.</p> <pre><code> -- Hybrid New -- \nSignificance: 0.306006  -0.0127243/+0.012774\nNull p-value: 0.3798 +/- 0.00485337\n</code></pre>"},{"location":"tutorial_stat_routines/stat_routines/#limit-setting","title":"Limit Setting","text":"<p>NOTE: This section explores several methods which are not recommended to be used in limit setting, in order to better understand their limitations before getting to the commonly used procedure</p> <p>One might be tempted to set limits by simply setting a confidence level (e.g. 95%), using the profile-likelihood ratio test statistic, \\(t_\\mu\\), and finding the values of the signal strength which are rejected. 
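</p> <p>For reference, the thresholds on \\(-2 \\ln(\\Lambda)\\) implied by Wilks' theorem for a single parameter of interest can be computed directly. The short Python sketch below (using <code>scipy</code>, which is not part of the tutorial scripts) gives the 68% and 95% values:</p> <pre><code>from scipy.stats import chi2\n\n# Under Wilks' theorem, t = -2 ln(Lambda) is asymptotically chi-squared\n# distributed with one degree of freedom for a single parameter of interest.\nfor cl in (0.68, 0.95):\n    print(f\"{cl:.0%} CL threshold on -2 ln(Lambda): {chi2.ppf(cl, df=1):.2f}\")\n\n# prints roughly 0.99 for 68% and 3.84 for 95%\n</code></pre> <p>In a likelihood scan, the values of <code>r</code> rejected at 95% CL would then be those for which the curve exceeds the 3.84 threshold.</p> <p>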
Such a procedure is not what is typically done for setting limits, but let's try to set some limits this way as an exercise.</p> <p>Under the asymptotic approximation, then, we can read off the values which we would reject from our earlier likelihood scan: they are all the values above about 4.</p> <p>Let's see what happens if we were to have observed 12 events instead of 6. There is a modified datacard <code>datacard_obs12.txt</code> with this already set for you.</p> <pre><code>combine -M MultiDimFit datacard_obs12.txt --algo grid --points 100 --rMin -2 --rMax 8\npython3 ../../../scripts/plot1DScan.py --POI r higgsCombineTest.MultiDimFit.mH120.root\n</code></pre> <p></p> <p>In this case we would reject values of <code>r</code> above about 6.7, but also values of <code>r</code> below about 0.3, at the 95% CL. However, despite rejecting <code>r</code> = 0, our 95% CL is far below typical particle physics standards for claiming discovery. We therefore prefer to set only an upper bound, which we can do by modifying the test statistic to be 0 for all values below the best fit value.</p> <p>However, even with such a modification, there is another problem: with a large enough under-fluctuation of the background we will set our limit below <code>r</code> = 0. You can check this with the <code>HybridNew</code> method, and the slightly modified datacard <code>datacard_underfluctuation.txt</code>:</p> <pre><code>combine -M HybridNew --frequentist --testStat=Profile datacard_underfluctuation.txt  --rule Pmu --rMin -5 --rMax 10\n</code></pre> <p>The above command tells combine to calculate the limit, but we have to pass the non-standard arguments <code>--testStat=Profile --rule Pmu</code> to tell combine to use the profile likelihood ratio test statistic \\(t_{\\mu}\\) directly, and not to use the \\(\\mathrm{CL}_\\mathrm{s}\\) criterion which is normally applied.</p> <p>Usually at the LHC, for upper limits we use the modified test statistic \\(\\tilde{q}_{\\mu}\\), which is set to 0 for \\(\\mu &lt; \\hat{\\mu}\\) and which also replaces \\(\\hat{\\mu}\\) with 0 whenever \\(\\hat{\\mu} &lt; 0\\), so that upper limits are always positive.</p> <p>If we use the standard LHC test statistic, we will get a positive limit. The command</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule Pmu  datacard_underfluctuation.txt  \n</code></pre> <p>gives the result:</p> <pre><code> -- Hybrid New -- \nLimit: r &lt; 0.0736126 +/- 0.0902187 @ 95% CL\n</code></pre> <p>But this is an extremely tight bound on our signal strength, given that a signal strength of 1 is still within the statistical uncertainty of the background.</p>"},{"location":"tutorial_stat_routines/stat_routines/#the-cl_s-criterion","title":"the CL_s criterion","text":"<p>With the limit setting procedure above we set a limit of <code>r</code> &lt; 0.07, due to an underfluctuation in the observed data. However, if we had designed our experiment better so that the expected background were lower, we never would have been able to set a limit that strong. We can see this with the datacard <code>datacard_lowbackground.txt</code>, where the expected background is 1 event, but 0 events are observed.  
In this case, we only manage to set a limit of <code>r</code> &lt; 0.5:</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule Pmu  datacard_lowbackground.txt  \n</code></pre> <pre><code> -- Hybrid New -- \nLimit: r &lt; 0.509028 +/- 0.0188724 @ 95% CL\n</code></pre> <p>The CL_s criterion takes into account how likely the data are under the background only hypothesis as well as under the signal + background hypothesis. This has the effect of increasing the limit if the data are unlikely under the background-only hypothesis as well as the signal hypothesis. This prevents setting limits below the expected sensitivity of the experiment, as we can see by rerunning our cards with the option <code>--rule CLs</code>, which is actually the default value.</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule CLs datacard_lowbackground.txt  \n</code></pre> <p>gives a limit around 1.2, whereas</p> <pre><code>combine -M HybridNew --frequentist --testStat=LHC --rule CLs datacard_underfluctuation.txt \n</code></pre> <p>sets a limit around 2.7. This is reasonable, given that we should expect a better limit when we have a better experimental design which manages to reduce backgrounds without any change in the signal acceptance.</p> <p>These are the default settings for setting limits at the LHC and the arguments <code>--frequentist --testStat LHC --rule CLs</code> can be replaced by <code>--LHCmode LHC-limits</code>.</p>"},{"location":"tutorial_stat_routines/stat_routines/#asymptotic-limits","title":"Asymptotic Limits","text":"<p>The <code>HybridNew</code> method generates pseudodata to estimate the distributions of the test statistics and set a limit, however often the asymptotic distributions, which have analytically known forms, can be used. This is computationally much faster than running <code>HybridNew</code> and can be run with:</p> <pre><code>combine -M AsymptoticLimits &lt;datacard&gt;\n</code></pre> <ul> <li>Try comparing the asymptotic limits to the results with HybridNew computed above,  how do they compare for the two cases?</li> </ul> <p>As well as the observed limit, the <code>AsymptoticLimits</code> method will automatically tell you the 2.5%, 16.0%, 50.0%, 84.0% and 97.5% expected limits. These are calculated by taking the appropriate quantiles from the distribution of the test-statistic under the background-only hypothesis.</p>"},{"location":"tutorial_stat_routines/stat_routines/#the-limit-setting-algorithm","title":"The limit setting algorithm","text":"<p>Because the \\(CL_s\\) value as a function of \\(r\\) is not known analytically, the limit setting algorithm follows an iterative process. It starts by picking a value of the signal strength, then it calculates the expected distributions of the test-statistics for that signal strength \\(\\tilde{q}_{\\mu}\\) under both the background-only and signal+background hypotheses.</p> <p>We can try this ourselves by running the <code>HybridNew</code> method with the <code>--singlePoint</code> option. 
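</p> <p>At each such point, combine compares the observed value of the test statistic with the two toy distributions and forms the CLs ratio from the corresponding tail probabilities. A minimal sketch of that calculation in Python (using <code>numpy</code>; the array names here are placeholders rather than combine outputs) is:</p> <pre><code>import numpy as np\n\ndef cls_value(q_obs, q_toys_sb, q_toys_b):\n    \"\"\"Toy-based CLs for one tested signal strength.\n\n    q_toys_sb, q_toys_b: test-statistic values from toys generated under the\n    signal+background and background-only hypotheses, respectively.\n    \"\"\"\n    # p-value of the signal+background hypothesis (CL_s+b)\n    cl_sb = np.mean(np.asarray(q_toys_sb) &gt;= q_obs)\n    # probability under background-only of a test statistic at least as large (CL_b)\n    cl_b = np.mean(np.asarray(q_toys_b) &gt;= q_obs)\n    return cl_sb / cl_b if cl_b &gt; 0 else float(\"inf\")\n</code></pre> <p>A tested value of <code>r</code> is excluded at 95% CL if this ratio falls below 0.05.</p> <p>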
We can also use the <code>--saveHybridResult</code> argument to save the distribution of the test-statistic:</p> <pre><code>combine -M HybridNew --LHCmode LHC-limits --singlePoint r=2 --saveHybridResult datacard.txt\n</code></pre> <p>Then the test-statistic distributions can be plotted:</p> <pre><code>python3  ../../../test/plotTestStatCLs.py --input higgsCombineTest.HybridNew.mH120.root --poi r --val all --mass 120\n</code></pre> <p></p> <p>This plot shows the expected distribution of the test statistic under the signal hypothesis (blue histogram) and the background-only hypothesis (red histogram), as well as the actual observed value in the data (black arrow). The p-values of each of the hypotheses are calculated, as is the CLs value.</p> <p>This particular point is not rejected, and so to set the limit, we'd need to increase the value of <code>r</code>, find the expected distributions and observed value, and calculate CLs again. Repeating this many times, we could find the value of <code>r</code> at which the CLs value crosses our threshold (in this case 0.05, for a 95% CL).</p> <p>When we run without the <code>--singlePoint</code> option, combine does this all for us internally, but running individual points manually can be helpful for debugging or splitting up fits of large models across parallel jobs.</p> <p>You can see the various <code>r</code> values at which combine calculated the CLs and the interpolation it performs to find the crossing by using the <code>--plot</code> option:</p> <pre><code>combine -M HybridNew --LHCmode LHC-limits --plot r_scan.png datacard.txt\n</code></pre> <p></p> <ul> <li>Where do the uncertainties on the CLs value at each value of <code>r</code> come from in the plot?</li> <li>How could the precision of the limit be increased?</li> </ul>"},{"location":"tutorial_stat_routines/stat_routines/#debugging","title":"Debugging","text":"<p>If you see some strange or unexpected behaviour in your limits, you can look at the distributions of the test statistic, or at the various CLs values computed, in order to better understand where the problem might be coming from.</p> <p>One situation you might encounter is observing the discrete nature of the counts when you are in the low statistics regime.</p> <pre><code>combine -M HybridNew --LHCmode LHC-limits --singlePoint r=1 --saveHybridResult datacard_lowbackground.txt\npython3  ../../../test/plotTestStatCLs.py --input higgsCombineTest.HybridNew.mH120.root --poi r --val all --mass 120\n</code></pre> <p></p> <p>There is nothing wrong with this distribution, but noting its features may help you understand the results you are seeing and whether they are reasonable, or whether there might be an issue with the fit. In a case like this, we can certainly expect the asymptotic approximation not to be very reliable. With low backgrounds, the shapes of the background-only and signal+background hypothesis distributions can also start to look very similar. In such cases, some of the quantiles of the expected limits may be very compressed, and statistical fluctuations in the empirical distributions may be more apparent.</p>"},{"location":"what_combine_does/fitting_concepts/","title":"Likelihood based fitting","text":"<p>\"Fitting\" simply means estimating some parameters of a model (or really a set of models) based on data. Likelihood-based fitting does this through the likelihood function.</p> <p>In frequentist frameworks, this typically means doing maximum likelihood estimation. 
In bayesian frameworks, usually posterior distributions of the parameters are calculated from the likelihood.</p>"},{"location":"what_combine_does/fitting_concepts/#fitting-frameworks","title":"Fitting Frameworks","text":"<p>Likelihood fits typically either follow a frequentist framework of maximum likelihood estimation, or a bayesian framework of updating estimates to find posterior distributions given the data.</p>"},{"location":"what_combine_does/fitting_concepts/#maximum-likelihood-fits","title":"Maximum Likelihood fits","text":"<p>A maximum likelihood fit means finding the values of the model parameters \\((\\vec{\\mu}, \\vec{\\nu})\\) which maximize the likelihood, \\(\\mathcal{L}(\\vec{\\mu},\\vec{\\nu})\\) The values which maximize the likelihood, are the parameter estimates, denoted with a \"hat\" (\\(\\hat{}\\)):</p> \\[(\\vec{\\hat{\\mu}}, \\vec{\\hat{\\nu}}) \\equiv \\underset{\\vec{\\mu},\\vec{\\nu}}{\\operatorname{argmax}} \\mathcal{L}(\\vec{\\mu}, \\vec{\\nu})\\] <p>These values provide point estimates for the parameter values.</p> <p>Because the likelihood is equal to the probability of observing the data given the model, the maximum likelihood estimate finds the parameter values for which the data is most probable.</p>"},{"location":"what_combine_does/fitting_concepts/#bayesian-posterior-calculation","title":"Bayesian Posterior Calculation","text":"<p>In a bayesian framework, the likelihood represents the probability of observing the data given the model and some prior probability distribution over the model parameters.</p> <p>The prior probability of the parameters, \\(\\pi(\\vec{\\Phi})\\), are updated based on the data to provide a posterior distributions</p> \\[ p(\\vec{\\Phi};\\mathrm{data}) = \\frac{ p(\\mathrm{data};\\vec{\\Phi}) \\pi(\\vec{\\Phi}) }{\\int p(\\mathrm{data};\\vec{\\Phi}') \\pi(\\vec{\\Phi}') \\mathrm{d}\\vec{\\Phi}' } = \\frac{ \\mathcal{L}(\\vec{\\Phi}) \\pi(\\vec{\\Phi}) }{ \\int \\mathcal{L}(\\vec{\\Phi}') \\pi(\\vec{\\Phi}') \\mathrm{d}\\vec{\\Phi}' }\\] <p>The posterior distribution \\(p(\\vec{\\Phi};\\mathrm{data})\\) defines the updated belief about the parameters \\(\\vec{\\Phi}\\).</p>"},{"location":"what_combine_does/fitting_concepts/#methods-for-considering-subsets-of-models","title":"Methods for considering subsets of models","text":"<p>Often, one is interested in some particular aspect of a model. This may be for example information related to the parameters of interest, but not the nuisance parameters. In this case, one needs a method for specifying precisely what is meant by a model considering only those parameters of interest.</p> <p>There are several methods for considering sub models which each have their own interpretations and use cases.</p>"},{"location":"what_combine_does/fitting_concepts/#conditioning","title":"Conditioning","text":"<p>Conditional Sub-models can be made by simply restricting the values of some parameters. 
The conditional likelihood of the parameters \\(\\vec{\\mu}\\) conditioned on particular values of the parameters \\(\\vec{\\nu}\\) is:</p> \\[ \\mathcal{L}(\\vec{\\mu},\\vec{\\nu}) \\xrightarrow{\\mathrm{conditioned\\ on\\ } \\vec{\\nu} = \\vec{\\nu}_0} \\mathcal{L}(\\vec{\\mu}) = \\mathcal{L}(\\vec{\\mu},\\vec{\\nu}_0) \\]"},{"location":"what_combine_does/fitting_concepts/#profiling","title":"Profiling","text":"<p>The profiled likelihood \\(\\mathcal{L}(\\vec{\\mu})\\) is defined from the full likelihood, \\(\\mathcal{L}(\\vec{\\mu},\\vec{\\nu})\\), such that for every point \\(\\vec{\\mu}\\) it is equal to the full likelihood at \\(\\vec{\\mu}\\) maximized over \\(\\vec{\\nu}\\).</p> \\[ \\mathcal{L}(\\vec{\\mu},\\vec{\\nu}) \\xrightarrow{\\mathrm{profiling\\ } \\vec{\\nu}} \\mathcal{L}({\\vec{\\mu}}) = \\max_{\\vec{\\nu}} \\mathcal{L}(\\vec{\\mu},\\vec{\\nu})\\] <p>In some sense, the profiled likelihood is the best estimate of the likelihood at every point \\(\\vec{\\mu}\\), it is sometimes also denoted with a double hat notation \\(\\mathcal{L}(\\vec{\\mu},\\vec{\\hat{\\hat{\\nu}}}(\\vec{\\mu}))\\).</p>"},{"location":"what_combine_does/fitting_concepts/#marginalization","title":"Marginalization","text":"<p>Marginalization is a procedure for producing a probability distribution \\(p(\\vec{\\mu};\\mathrm{data})\\) for a set of parameters \\(\\vec{\\mu}\\), which are only a subset of the parameters in the full distribution \\(p(\\vec{\\mu},\\vec{\\nu};\\mathrm{data})\\). The marginal probability density \\(p(\\vec{\\mu})\\) is defined such that for every point \\(\\vec{\\mu}\\) it is equal to the probability at \\(\\vec{\\mu}\\) integrated over \\(\\vec{\\nu}\\).</p> \\[ p(\\vec{\\mu},\\vec{\\nu}) \\xrightarrow{\\mathrm{marginalizing\\ } \\vec{\\nu}} p({\\vec{\\mu}}) = \\int_{\\vec{\\nu}} p(\\vec{\\mu},\\vec{\\nu})\\] <p>The marginalized probability \\(p(\\vec{\\mu})\\) is the probability for the parameter values \\(\\vec{\\mu}\\) taking into account all possible values of \\(\\vec{\\nu}\\).</p> <p>Marginalized likelihoods can also be defined, by their relationship to the probability distributions.</p>"},{"location":"what_combine_does/fitting_concepts/#parameter-uncertainties","title":"Parameter Uncertainties","text":"<p>Parameter uncertainties describe regions of parameter values which are considered reasonable parameter values, rather than single estimates. These can be defined either in terms of frequentist confidence regions or bayesian credibility regions.</p> <p>In both cases the region is defined by a confidence or credibility level \\(CL\\), which quantifies the meaning of the region. For frequentist confidence regions, the confidence level \\(CL\\) describes how often the confidence region will contain the true parameter values if the model is a sufficiently accurate approximation of the truth. For bayesian credibility regions, the credibility level \\(CL\\) describes the bayesian probability that the true parameter value is in that region for under the given model.</p> <p>The confidence or credibility regions are described by a set of points \\(\\{ \\vec{\\mu} \\}_{\\mathrm{CL}}\\) which meet some criteria. 
In most situations of interest, the credibility region or confidence region for a single parameter, \\(\\mu\\), is effectively described by an interval:</p> \\[ \\{ \\mu \\}_{\\mathrm{CL}} = [ \\mu^{-}_{\\mathrm{CL}}, \\mu^{+}_{\\mathrm{CL}} ] \\] <p>Typically indicated as:</p> \\[ \\mu = X^{+\\mathrm{up}}_{-\\mathrm{down}} \\] <p>or, if symmetric intervals are used:</p> \\[ \\mu = X \\pm \\mathrm{unc.} \\]"},{"location":"what_combine_does/fitting_concepts/#frequentist-confidence-regions","title":"Frequentist Confidence Regions","text":"<p>Frequentist confidence regions are random variables of the observed data. These are very often the construction used to define the uncertainties reported on a parameter.</p> <p>If the same experiment is repeated multiple times, different data will be observed each time and a different confidence set \\(\\{ \\vec{\\mu}\\}_{\\mathrm{CL}}^{i}\\) will be found for each experiment. If the data are generated by the model with some set of values \\(\\vec{\\mu}_{\\mathrm{gen}}\\), then the fraction of the regions \\(\\{ \\vec{\\mu}\\}_{\\mathrm{CL}}^{i}\\) which contain the values \\(\\vec{\\mu}_{\\mathrm{gen}}\\) will be equal to the confidence level \\({\\mathrm{CL}}\\). The fraction of intervals which contain the generating parameter value is referred to as the \"coverage\".</p> <p>From first principles, the intervals can be constructed using the Neyman construction.</p> <p>In practice, the likelihood can be used to construct confidence regions for a set of parameters \\(\\vec{\\mu}\\) by using the profile likelihood ratio:</p> \\[ \\Lambda \\equiv \\frac{\\mathcal{L}(\\vec{\\mu},\\vec{\\hat{\\nu}}(\\vec{\\mu}))}{\\mathcal{L}(\\vec{\\hat{\\mu}},\\vec{\\hat{\\nu}})} \\] <p>i.e. the ratio of the profile likelihood at point \\(\\vec{\\mu}\\) to the maximum likelihood. For technical reasons, the negative logarithm of this quantity is typically used in practice.</p> <p>Each point \\(\\vec{\\mu}\\) can be tested to see if it is in the confidence region, by checking the value of the likelihood ratio at that point and comparing it to the expected distribution if that point were the true generating value of the data.</p> \\[ \\{ \\vec{\\mu} \\}_{\\mathrm{CL}} = \\{ \\vec{\\mu} : -\\log(\\Lambda) \\lt  \\gamma_{\\mathrm{CL}}(\\vec{\\mu}) \\} \\] <p>The cutoff value \\(\\gamma_{\\mathrm{CL}}\\) must be chosen to match the desired coverage of the confidence set.</p> <p>Under some conditions, the value of \\(\\gamma_{\\mathrm{CL}}\\) is known analytically for any desired confidence level, and is independent of \\(\\vec{\\mu}\\), which greatly simplifies estimating confidence regions.</p> Constructing Frequentist Confidence Regions in Practice <p>When a single fit is performed by some numerical minimization program and parameter values are reported along with some uncertainty values, they are usually reported as frequentist intervals. The MINUIT minimizer which evaluates likelihood functions has two methods for estimating parameter uncertainties.</p> <p>These two methods are the most commonly used methods for estimating confidence regions in a fit; they are the minos method and the hessian method. In both cases, Wilks' theorem is assumed to hold at all points in parameter space, such that \\(\\gamma_{\\mathrm{CL}}\\) is independent of \\(\\vec{\\mu}\\).</p> <p>When \\(\\gamma_{\\mathrm{CL}}\\) is independent of \\(\\vec{\\mu}\\) the problem simplifies to finding the boundaries where \\(-\\log(\\Lambda) = \\gamma_{\\mathrm{CL}}\\). 
This boundary point is referred to as the \"crossing\", i.e. where \\(-\\log(\\Lambda)\\) crosses the threshold value.</p>"},{"location":"what_combine_does/fitting_concepts/#the-minos-method-for-estimating-confidence-regions","title":"The Minos method for estimating confidence regions","text":"<p>In the minos method, once the best fit point \\(\\vec{\\hat{\\mu}}\\) is determined, the confidence region for any parameter \\(\\mu_i\\) can be found by moving away from its best fit value \\(\\hat{\\mu}_i\\). At each value of \\(\\mu_i\\), the other parameters are profiled, and \\(-\\log{\\Lambda}\\) is calculated.</p> <p>Following this procedure, \\(\\mu_i\\) is searched for the boundary of the confidence regions, where \\(-\\log{\\Lambda} = \\gamma_{\\mathrm{CL}}\\).</p> <p>The search is performed in both directions, away from the best fit value of the parameter, and the two crossings are taken as the borders of the confidence region.</p> <p>This procedure has to be followed separately for each parameter \\(\\mu_i\\) for which a confidence interval is calculated.</p>"},{"location":"what_combine_does/fitting_concepts/#the-hessian-method-for-estimating-confidence-regions","title":"The Hessian method for estimating confidence regions","text":"<p>The Hessian method relies on the second derivatives (i.e. the hessian) of the likelihood at the best fit point.</p> <p>By assuming that the shape of the likelihood function is well described by its second-order approximation, the values at which \\(-\\log(\\Lambda) = \\gamma_{\\mathrm{CL}}\\) can be calculated analytically without the need for a search:</p> \\[ \\mu_i^{\\mathrm{crossing}} - \\hat{\\mu}_i \\propto \\left(\\frac{\\partial^2{(-\\log\\mathcal{L}(\\vec{\\hat{\\mu}}))}}{\\partial\\mu_i^2}\\right)^{-\\frac{1}{2}} \\] <p>By computing and then inverting the full hessian matrix, all individual confidence regions and the full covariance matrix are determined. By construction, this method always reports symmetric confidence intervals, as it assumes that the likelihood is well described by a second order expansion.</p>"},{"location":"what_combine_does/fitting_concepts/#bayesian-credibility-regions","title":"Bayesian Credibility Regions","text":"<p>Often the full posterior probability distribution is summarized in terms of some credible region which contains some specified portion of the posterior probability of the parameter.</p> \\[ \\{ \\vec{\\mu} \\}_{\\mathrm{CL}} =  \\{ \\vec{\\mu} : \\vec{\\mu} \\in \\Omega, \\int_{\\Omega} p(\\vec{\\mu};\\mathrm{data}) = \\mathrm{CL}  \\}\\] <p>The credible region represents a region in which the bayesian probability of the parameter being in that region is equal to the chosen Credibility Level.</p>"},{"location":"what_combine_does/introduction/","title":"Introduction And Capabilities","text":"<p>Combine is a tool for making statistical analyses based on a model of expected observations and a dataset. Example statistical analyses are claiming discovery of a new particle or process, setting limits on the existence of new physics, and measuring cross sections.</p> <p>The package has no physics-specific knowledge; it is completely agnostic to the interpretation of the analysis being performed, but its usage and development is based around common cases in High Energy Physics. 
This documentation is a description of what combine does and how you can use it to run your analyses.</p> <p>Roughly, combine does three things:</p> <ol> <li>Helps you to build a statistical model of expected observations;</li> <li>Runs statistical tests on the model and observed data;</li> <li>Provides tools for validating, inspecting, and understanding the model and the statistical tests.</li> </ol> <p>Combine can be used for analyses in HEP ranging from simple counting experiments to unfolded measurements, new physics searches, combinations of measurements, and EFT fits.</p>"},{"location":"what_combine_does/introduction/#model-building","title":"Model Building","text":"<p>Combine provides a powerful, human-readable, and lightweight interface for building likelihood models for both binned and unbinned data. The likelihood definition allows the user to define many processes which contribute to the observation, as well as multiple channels which may be fit simultaneously.</p> <p>Furthermore, combine provides a powerful and intuitive interface for combining models, as it was originally developed for combinations of Higgs boson analyses at the CMS experiment.</p> <p>The interface simplifies many common tasks, while providing many options for customization. Common nuisance parameter types are defined for easy use, while user-defined functions can also be provided. Input histograms defining the model can be provided in ROOT format, or in other tabular formats compatible with pandas.</p> <p>Custom physics models can be defined in python which determine how the parameters of interest alter the model, and a number of predefined models are provided by default.</p> <p>A number of tools are also provided for run-time alterations of the model, allowing for straightforward comparisons of alternative models.</p>"},{"location":"what_combine_does/introduction/#statistical-tests","title":"Statistical Tests","text":"<p>Combine can be used for statistical tests in frequentist or bayesian frameworks, as well as to perform some hybrid frequentist-bayesian analysis tasks.</p> <p>Combine implements various methods for commonly used statistical tests in high energy physics, including for discovery, limit setting, and parameter estimation. Statistical tests can be customized to use various test statistics and confidence levels, as well as providing different output formats.</p> <p>A number of asymptotic methods, relying on Wilks' theorem and valid under appropriate conditions, are implemented for fast evaluation. Generation of pseudo-data from the model can also be performed, and tests are implemented to automatically run over empirical distributions without relying on asymptotic approximations. 
Pseudo-data generation and fitting over the pseudo-data can be customized in a number of ways.</p>"},{"location":"what_combine_does/introduction/#validation-and-inspection","title":"Validation and Inspection","text":"<p>Combine provides tools for inspecting the model for things like potentially problematic input templates.</p> <p>Various methods are provided for inspecting the likelihood function and the performance of the fits.</p> <p>Methods are provided for comparing pre-fit and postfit results of all values including nuisance parameters, and summaries of the results can produced.</p> <p>Plotting utilities allow the pre- and post-fit model expectations and their uncertainties to be plotted, as well as plotted summaries of debugging steps such as the nuisance parameter values and likelihood scans.</p>"},{"location":"what_combine_does/model_and_likelihood/","title":"Observation Models and Likelihoods","text":""},{"location":"what_combine_does/model_and_likelihood/#the-observation-model","title":"The Observation Model","text":"<p>The observation model, \\(\\mathcal{M}( \\vec{\\Phi})\\) defines the probability for any set of observations given specific values of the input parameters of the model \\(\\vec{\\Phi}\\). The probability for any observed data is denoted:</p> \\[ p_{\\mathcal{M}}(\\mathrm{data}; \\vec{\\Phi} ) \\] <p>where the subscript \\(\\mathcal{M}\\) is given here to remind us that these are the probabilities according to this particular model (though usually we will omit it for brevity).</p> <p>Combine is designed for counting experiments, where the number of events with particular features are counted. The events can either be binned, as in histograms, or unbinned, where continuous values are stored for each event. The event counts are assumed to be of independent events, such as individual proton-proton collisions, which are not correlated with each other.</p> <p>The event-count portion of the model consists of a sum over different processes. The expected observations, \\(\\vec{\\lambda}\\), are then the sum of the expected observations for each of the processes, \\(\\vec{\\lambda} =\\sum_{p} \\vec{\\lambda}_{p}\\).</p> <p>The model can also be composed of multiple channels, in which case the expected observation is the set of all expected observations from the various channels \\(\\vec{\\lambda} = \\{ \\vec{\\lambda}_{c1}, \\vec{\\lambda}_{c2}, .... \\vec{\\lambda}_{cN}\\}\\).</p> <p>The model can also include data and parameters related to non-count values, such as the observed luminosity or detector calibration constant. These non-count data are usually considered as auxiliary information which are used to constrain our expectations about the observed event counts.</p> <p>The full model therefore defines the probability of any given observations over all the channels, given all the processes and model parameters.</p> <p>Combining full models is possible by combining their channels, assuming that the channels are mutually independent.</p> A Simple Example <p>Consider performing an analysis searching for a Higgs boson by looking for events where the Higgs decays into two photons.</p> <p>The event count data may be binned histograms of the number of events with two photons with different bins of invariant mass of the photons. The expected counts would include signal contributions from processes where a Higgs boson is produced, as well as background contributions from processes where two photons are produced through other mechanisms, like radiation off a quark. 
The expected counts may also depend on parameters such as the energy resolution of the measured photons and the total luminosity of collisions being considered in the dataset; these can be parameterized in the model as auxiliary information.</p> <p>The analysis itself might be split into multiple channels, targeting different Higgs production modes with different event selection criteria. Furthermore, the analysis may eventually be combined with other analyses, such as a measurement targeting Higgs production where the Higgs boson decays into four leptons, rather than two photons.</p> <p>Combine provides the functionality for building the statistical models and combining all the channels or analyses together into one common analysis.</p>"},{"location":"what_combine_does/model_and_likelihood/#sets-of-observation-models","title":"Sets of Observation Models","text":"<p>We are typically not interested in a single model, but in a set of models, parameterized by a set of real numbers representing possible versions of the model.</p> <p>Model parameters include the parameters of interest ( \\(\\vec{\\mu}\\), those being measured such as a cross section) as well as nuisance parameters (\\(\\vec{\\nu}\\)), which may not be of interest but still affect the model expectation.</p> <p>Combine provides tools and interfaces for defining the model as pre-defined or user-defined functions of the input parameters. In practice, however, there are a number of most commonly used functional forms which define how the expected events depend on the model parameters. These are discussed in detail in the context of the full likelihood below.</p>"},{"location":"what_combine_does/model_and_likelihood/#the-likelihood","title":"The Likelihood","text":"<p>For any given model, \\(\\mathcal{M}(\\vec{\\Phi})\\), the likelihood defines the probability of observing a given dataset. It is numerically equal to the probability of observing the data, given the model.</p> \\[ \\mathcal{L}_\\mathcal{M}(\\vec{\\Phi}) = p_{\\mathcal{M}}(\\mathrm{data};\\vec{\\Phi}) \\] <p>Note, however, that the likelihood is a function of the model parameters, not the data, which is why we distinguish it from the probability itself.</p> <p>The likelihood in combine takes the general form:</p> \\[ \\mathcal{L} =  \\mathcal{L}_{\\textrm{primary}} \\cdot \\mathcal{L}_{\\textrm{auxiliary}} \\] <p>Where \\(\\mathcal{L}_{\\mathrm{primary}}\\) is equal to the probability of observing the event count data for a given set of model parameters, and \\(\\mathcal{L}_{\\mathrm{auxiliary}}\\) represents some external constraints on the parameters. The constraint terms may come from previous measurements (such as Jet Energy Scales) or prior beliefs about the value some parameter in the model should have.</p> <p>Both \\(\\mathcal{L}_{\\mathrm{primary}}\\) and \\(\\mathcal{L}_{\\mathrm{auxiliary}}\\) can be composed of many sublikelihoods, for example for observations of different bins and constraints on different nuisance parameters.</p> <p>This form is entirely general. However, as with the model itself, there are typical forms that the likelihood takes which will cover most use cases, and for which combine is primarily designed.</p>"},{"location":"what_combine_does/model_and_likelihood/#primary-likelihoods-for-binned-data","title":"Primary Likelihoods for binned data","text":"<p>For a binned likelihood, the probability of observing a certain number of counts, given a model, takes on a simple form. 
For each bin:</p> \\[ \\mathcal{L}_{\\mathrm{bin}}(\\vec{\\Phi}) = \\mathrm{Poiss}(n_{\\mathrm{obs}}; n_{\\mathrm{exp}}(\\vec{\\Phi})) \\] <p>i.e. it is a poisson distribution with the mean given by the expected number of events in that bin. The full primary likelihood for binned data is simply the product of each of the bins' likelihoods:</p> \\[ \\mathcal{L}_\\mathrm{primary} = \\prod_\\mathrm{bins} \\mathcal{L}_\\mathrm{bin}. \\] <p>This is the underlying likelihood model used for every binned analysis. The freedom in the analysis comes in how \\(n_\\mathrm{exp}\\) depends on the model parameters, and the constraints that are placed on those parameters.</p>"},{"location":"what_combine_does/model_and_likelihood/#primary-likelihoods-for-unbinned-data","title":"Primary Likelihoods for unbinned data","text":"<p>For unbinned likelihood models, a likelihood can be given to each data point. It is proportional to the probability density function at that point, \\(\\vec{x}\\). For the full set of observed data points, information about the total number of data points is also included:</p> \\[ \\mathcal{L}_\\mathrm{data} = \\mathrm{Poiss}(n_{\\mathrm{obs}} ; n_{\\mathrm{exp}}(\\vec{\\Phi})) \\prod_{i}^{N_{\\mathrm{obs}}} \\mathrm{pdf}(\\vec{x}_i ; \\vec{\\Phi} ) \\] <p>Where \\(n_{\\mathrm{obs}}\\) and \\(n_{\\mathrm{exp}}\\) are the total number of observed and expected events, respectively. This is sometimes referred to as an 'extended' likelihood, as the probability density has been 'extended' to include information about the total number of observations.</p>"},{"location":"what_combine_does/model_and_likelihood/#auxiliary-likelihoods","title":"Auxiliary Likelihoods","text":"<p>The auxiliary likelihood terms encode the probability of model nuisance parameters taking on a certain value, without regards to the primary data. In frequentist frameworks, this usually represents the result of a previous measurement (such as of the jet energy scale). We will write in a mostly frequentist framework, though combine can be used for either frequentist or bayesian analyses[^1].</p> <p>[^1]: see: the first paragraphs of the PDGs statistics review for more information on these two frameworks</p> <p>In this framework, each auxiliary term represents the likelihood of some parameter, \\(\\nu\\), given some previous observation \\(y\\); the quantity \\(y\\) is sometimes referred to as a \"global observable\".</p> \\[ \\mathcal{L}_{\\mathrm{auxiliary}}( \\nu ) = p( y ; \\nu ) \\] <p>In principle the form of the likelihood can be any function where the corresponding \\(p\\) is a valid probability distribution. In practice, most of the auxiliary terms are gaussian, and the definition of \\(\\nu\\) is chosen such that the central observation \\(y = 0\\) , and the width of the gaussian is one.</p> <p>Note that on its own, the form of the auxiliary term is not meaningful; what is meaningful is the relationship between the auxiliary term and how the model expectation is altered by the parameter. Any co-ordinate transformation of the parameter values can be absorbed into the definition of the parameter. A reparameterization would change the mathematical form of the auxiliary term, but would also simultaneously change how the model depends on the parameter in such a way that the total likelihood is unchanged. e.g. 
if you define  \\(\\nu = \\sigma(tt)\\) or \\(\\nu = \\sigma(tt) - \\sigma_0\\) you will change the form of the constraint term, but the you will not change the overall likelihood.</p>"},{"location":"what_combine_does/model_and_likelihood/#likelihoods-implemented-in-combine","title":"Likelihoods implemented in Combine","text":"<p>Combine builds on the generic forms of the likelihood for counting experiments given above to provide specific functional forms which are commonly most useful in high energy physics, such as separating contributions between different processes.</p>"},{"location":"what_combine_does/model_and_likelihood/#binned-likelihoods-using-templates","title":"Binned Likelihoods using Templates","text":"<p>Binned likelihood models can be defined by the user by providing simple inputs such as a set of histograms and systematic uncertainties. These likelihood models are referred to as template-based because they rely heavily on histograms as templates for building the full likelihood function.</p> <p>Here, we describe the details of the mathematical form of these likelihoods. As already mentioned, the likelihood can be written as a product of two parts:</p> \\[ \\mathcal{L} =  \\mathcal{L}_\\mathrm{primary} \\cdot \\mathcal{L}_\\mathrm{auxiliary} = \\prod_{c=1}^{N_c} \\prod_{b=1}^{N_b^c} \\mathrm{Poiss}(n_{cb}; n^\\mathrm{exp}_{cb}(\\vec{\\mu},\\vec{\\nu})) \\cdot \\prod_{e=1}^{N_E}  p_e(y_e ; \\nu_e) \\] <p>Where \\(c\\) indexes the channel, \\(b\\) indexes the histogram bin, and \\(e\\) indexes the nuisance parameter.</p>"},{"location":"what_combine_does/model_and_likelihood/#model-of-expected-event-counts-per-bin","title":"Model of expected event counts per bin","text":"<p>The generic model of the expected event count in a given bin, \\(n^\\mathrm{exp}_{cb}\\), implemented in combine for template based analyses is given by:</p> \\[n^\\mathrm{exp}_{cb} = \\mathrm{max}(0, \\sum_{p} M_{cp}(\\vec{\\mu})N_{cp}(\\nu_G, \\vec{\\nu}_L,\\vec{\\nu}_S,\\vec{\\nu}_{\\rho})\\omega_{cbp}(\\vec{\\nu}_S) + E_{cb}(\\vec{\\nu}_B) ) \\] <p>where here:</p> <ul> <li>\\(p\\) indexes the processes contributing to the channel;</li> <li>\\(\\nu_{G}, \\vec{\\nu}_L, \\vec{\\nu}_S, \\vec{\\nu}_{\\rho}\\) and \\(\\vec{\\nu}_B\\) are different types of nuisance parameters which modify the processes with different functional forms;<ul> <li>\\(\\nu_{G}\\) is a gamma nuisances,</li> <li>\\(\\vec{\\nu}_{L}\\) are log-normal nuisances,</li> <li>\\(\\vec{\\nu}_{S}\\) are \"shape\" nuisances,</li> <li>\\(\\vec{\\nu}_{\\rho}\\) are user defined rate parameters, and</li> <li>\\(\\vec{\\nu}_{B}\\) are nuisance parameters related to the statistical uncertainties in the simulation used to build the model.</li> </ul> </li> <li>\\(M\\) defines the effect of the parameters of interest on the signal process;</li> <li>\\(N\\) defines the overall normalization effect of the nuisance parameters;</li> <li>\\(\\omega\\) defines the shape effects (i.e. 
bin-dependent effects) of the nuisance parameters; and</li> <li>\\(E\\) defines the impact of statistical uncertainties from the samples used to derive the histogram templates used to build the model.</li> </ul>"},{"location":"what_combine_does/model_and_likelihood/#parameter-of-interest-model","title":"Parameter of Interest Model","text":"<p>The function \\(M\\) can take on custom functional forms, as defined by the user, but in the most common case, the parameter of interest \\(\\mu\\) simply scales the contributions from signal processes:</p> \\[\\label{eq:sig_param} M_{cp}(\\mu) = \\begin{cases}     \\mu  &amp;\\mathrm{if\\ } p \\in \\mathrm{signal} \\\\     1    &amp;\\mathrm{otherwise} \\end{cases} \\] <p>However, combine supports many more models beyond this. As well as built-in support for models with multiple parameters of interest, combine comes with many pre-defined models which go beyond simple process normalization, which are targetted at various types of searches and measurements.</p>"},{"location":"what_combine_does/model_and_likelihood/#normalization-effects","title":"Normalization Effects","text":"<p>The overall normalization \\(N\\) is affected differently by the different types of nuisances parameters, and takes the general form</p> \\[N = \\prod_X \\prod_i f_X(\\vec{\\nu}_{X}^{i})\\mathrm{,}\\] <p>With \\(X\\) identifying a given nuisance parameter type; i.e. \\(N\\) multiplies together the morphings from each of the individual nuisance parameters from each of the nuisance types.</p> Normalization Parameterization Details <p>The full functional form of the normalization term is given by:</p> \\[ N_{cp} = N_{\\mathrm{0}}(\\nu_{G})\\prod_{n} {\\kappa_{n}}^{\\nu_{L,n}}\\prod_{a} {\\kappa^{\\mathrm{A}}_{a}(\\nu_{L(S)}^{a},\\kappa^{+}_{a}, \\kappa^{-}_{a})}^{\\nu_{L(S)}^{a}} \\prod_{r}F_{r}(\\nu_\\rho) \\] <p>where:</p> <ul> <li>\\(N_{\\mathrm{0}}(\\nu_{G}) \\equiv \\frac{\\nu_{G}}{y_{G}}\\), is the normalization effect of a gamma uncertainty. \\(y_{G}\\) is taken as the observed number of events in some external control region and \\(\\nu_{G}\\) has a constraint pdf \\(\\mathrm{Poiss}(\\nu; y)\\)</li> <li>\\(\\kappa_{n}^{\\nu_{L,n}}\\), are log-normal uncertainties specified by a fixed value \\(\\kappa\\);</li> <li>\\(\\kappa^{\\mathrm{A}}_{a}(\\nu_{L(S)}^{a},\\kappa^{+}_{a}, \\kappa^{-}_{a})^{\\nu_{L(S)}^{a}}\\) are asymmetric log-normal uncertainties, in which the value of \\(\\kappa^{\\mathrm{A}}\\) depends on the nuisance parameter and two fixed values \\(\\kappa^{+}_{a}\\) and \\(\\kappa^{-}_{a}\\). 
The functions, \\(\\kappa^A\\), define a smooth interpolation for the asymmetric uncertainty; and</li> <li>\\(F_{r}(\\vec{\\nu}_\\rho)\\) are user-defined functions of the user defined nuisance parameters which may have uniform or gaussian constraint terms.</li> </ul> <p>The function for the asymmetric normalization modifier, \\(\\kappa^A\\), is</p> \\[     \\kappa^{\\mathrm{A}}(\\nu,\\kappa^{+}, \\kappa^{-}) =     \\begin{cases}         \\kappa^{+}, &amp;\\mathrm{for\\,} \\nu \\geq 0.5 \\\\         \\frac{1}{\\kappa^{-}}, &amp;\\mathrm{for\\,} \\nu \\leq -0.5 \\\\         \\exp\\left(\\frac{1}{2} \\left( (\\ln{\\kappa^{+}}-\\ln{\\kappa^{-}}) + \\frac{1}{4}(\\ln{\\kappa^{+}}+\\ln{\\kappa^{-}})I(\\nu)\\right)\\right), &amp;\\mathrm{otherwise}\\end{cases} \\] <p>where \\(I(\\nu) = 48\\nu^5 - 40\\nu^3 + 15\\nu\\), which ensures \\(\\kappa^{\\mathrm{A}}\\) and its first and second derivatives are continuous for all values of \\(\\nu\\).</p> <p>and \\(\\kappa^{+}\\) and \\(\\kappa^{-}\\) are the relative normalizations of the two systematic variations; i.e.:</p> \\[ \\kappa^{\\pm}_{s} = \\frac{\\sum_{b}\\omega_{b}^{s,\\pm}}{\\sum_{b}\\omega_{b}^{0}}. \\] <p>where \\(\\omega_{b}^{s,\\pm}\\) is the bin yield as defined by the two shifted values  \\(\\nu_{S} = \\nu_{S}^{\\pm}\\), and \\(\\omega_{b}^{0}\\) is the bin yield when \\(\\nu_{S} = 0\\).</p>"},
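As an illustrative aside, the asymmetric normalization modifier defined above is straightforward to transcribe into code. The sketch below is a minimal Python version of the interpolation formula only, not the implementation used in Combine; the function and variable names, and the numbers in the usage line, are chosen purely for illustration.

```python
import math

def kappa_asym(nu, kappa_plus, kappa_minus):
    """Smooth asymmetric log-normal modifier kappa^A(nu, kappa+, kappa-).

    Beyond |nu| = 0.5 the modifier is simply kappa+ (or 1/kappa-); in between,
    the polynomial I(nu) = 48 nu^5 - 40 nu^3 + 15 nu interpolates smoothly.
    """
    if nu >= 0.5:
        return kappa_plus
    if nu <= -0.5:
        return 1.0 / kappa_minus
    lkp, lkm = math.log(kappa_plus), math.log(kappa_minus)
    pol = 48.0 * nu**5 - 40.0 * nu**3 + 15.0 * nu
    return math.exp(0.5 * ((lkp - lkm) + 0.25 * (lkp + lkm) * pol))

# normalization effect of a +10%/-5% asymmetric uncertainty, evaluated at nu = 0.3:
nu = 0.3
print(kappa_asym(nu, 1.10, 0.95) ** nu)
```

At nu = +0.5 and nu = -0.5 the interpolation reproduces kappa+ and 1/kappa- exactly, which is what makes the piecewise definition continuous.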
{"location":"what_combine_does/model_and_likelihood/#shape-morphing-effects","title":"Shape Morphing Effects","text":"<p>The number of events in a given bin \\(b\\), \\(\\omega_{cbp}\\), is a function of the shape parameters \\(\\vec{\\nu}_{S}\\). The shape interpolation works with the fractional yields in each bin, where the interpolation can be performed either directly on the fractional yield, or on the logarithm of the fractional yield, which is then exponentiated again.</p> Shape parameterization Details <p>In the following, the channel and process labels \\(c\\) and \\(p\\) apply to every term, and so are omitted.</p> <p>The fixed nominal number of events is denoted \\(\\omega_{b}^{0}\\). For each applicable shape uncertainty \\(s\\), two additional predictions are specified, \\(\\omega_{b}^{s,+}\\) and \\(\\omega_{b}^{s,-}\\), typically corresponding to the \\(+1\\sigma\\) and \\(-1\\sigma\\) variations, respectively. These may change both the shape and normalization of the process. The two effects are separated; the shape transformation is constructed in terms of the fractional event counts in the templates via a smooth vertical interpolation, and the normalization is treated as an asymmetric log-normal uncertainty, as described above in the description of the \\(N\\) term in the likelihood.</p> <p>For a given process, the shape may be interpolated either directly in terms of the fractional bin yields, \\(f_b = \\omega_b / \\sum \\omega_{b}\\) or their logarithms, \\(\\ln(f_b)\\). The transformed yield is then given as, respectively,</p> \\[ \\omega_{b}(\\vec{\\nu}) = \\begin{cases} \\max\\left(0, y^{0}\\left(f^{0}_{b} + \\sum_{s} F(\\nu_{s}, \\delta^{s,+}_{b}, \\delta^{s,-}_{b}, \\epsilon_{s})\\right)\\right) &amp; \\text{(direct),}\\\\ \\max\\left(0, y^{0}\\exp\\left(\\ln(f^{0}_{b}) + \\sum_{s} F(\\nu_{s}, \\Delta^{s,+}_{b}, \\Delta^{s,-}_{b}, \\epsilon_{s})\\right) \\right) &amp; \\text{(logarithmic)}, \\end{cases} \\] <p>where \\(y^{0} = \\sum \\omega_{b}^{0}\\), \\(\\delta^{\\pm}_{b} = f^{\\pm}_{b} - f^{0}_{b}\\), and \\(\\Delta^{\\pm}_{b} = \\ln\\left(\\frac{f^{\\pm}_{b}}{f^{0}_{b}}\\right)\\).</p> <p>The smooth interpolating function \\(F\\), defined below, depends on a set of coefficients, \\(\\epsilon_{s}\\). These are assumed to be unity by default, but may be set to different values, for example if the \\(\\omega_{b}^{s,\\pm}\\) correspond to the \\(\\pm X\\sigma\\) variations, then \\(\\epsilon_{s} = 1/X\\) is typically set. The minimum value of \\(\\epsilon\\) over the shape uncertainties for a given process is  \\(q = \\min_{s}(\\epsilon_{s})\\). The function \\({F}\\) is then defined as</p> \\[ F(\\nu, \\delta^{+}, \\delta^{-}, \\epsilon) = \\begin{cases} \\frac{1}{2}\\nu^{'} \\left( (\\delta^{+}-\\delta^{-}) + \\frac{1}{8}(\\delta^{+}+\\delta^{-})(3\\bar{\\nu}^5 - 10\\bar{\\nu}^3 + 15\\bar{\\nu}) \\right), &amp; \\text{for } -q &lt; \\nu' &lt; q; \\\\ \\nu^{'}\\delta^{+}, &amp; \\text{for } \\nu' \\ge q;\\\\ -\\nu^{'}\\delta^{-}, &amp; \\text{for } \\nu' \\le -q;\\\\ \\end{cases} \\] <p>where \\(\\nu^{'} = \\nu\\epsilon\\), \\(\\bar{\\nu} = \\nu^{'} / q\\), and the label \\(s\\) has been omitted. This function ensures the yield and its first and second derivatives are continuous for all values of \\(\\nu\\).</p>"},
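As another illustrative aside, the vertical-interpolation function F and the "direct" morphing of a single bin can be sketched as follows. This is a simplified single-nuisance, single-bin Python illustration with invented numbers, not the code used by Combine; eps and q are taken as 1, as in the default case described above.

```python
def smooth_step(nu, delta_plus, delta_minus, eps=1.0, q=1.0):
    """Interpolating function F(nu, delta+, delta-, eps) defined above."""
    nu_p = nu * eps
    if nu_p >= q:
        return nu_p * delta_plus
    if nu_p <= -q:
        return -nu_p * delta_minus
    nu_bar = nu_p / q
    poly = 3.0 * nu_bar**5 - 10.0 * nu_bar**3 + 15.0 * nu_bar
    return 0.5 * nu_p * ((delta_plus - delta_minus)
                         + 0.125 * (delta_plus + delta_minus) * poly)

# one bin, one shape nuisance, "direct" interpolation of the fractional yield:
y0 = 100.0                            # nominal total yield of the process
f0, f_up, f_dn = 0.20, 0.24, 0.17     # nominal, +1 sigma and -1 sigma bin fractions
nu = 0.5
omega_b = max(0.0, y0 * (f0 + smooth_step(nu, f_up - f0, f_dn - f0)))
print(omega_b)
```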
{"location":"what_combine_does/model_and_likelihood/#statistical-uncertainties-in-the-simulation-used-to-build-the-model","title":"Statistical Uncertainties in the Simulation used to build the Model","text":"<p>Since the histograms used in a binned shape analysis are typically created from simulated samples, the yields in each bin are also subject to statistical uncertainties on the bin yields. These are taken into account by either assigning one nuisance parameter per bin, or as many parameters as contributing processes per bin.</p> Model Statistical Uncertainty Details <p>If the uncertainty in each bin is modelled as a single nuisance parameter it takes the form:</p> \\[ E_{cb}(\\vec{\\mu},\\vec{\\nu},\\nu) = \\nu\\left(\\sum_{p} (e_{cpb}N_{cp}M_{cp}(\\vec{\\mu},\\vec{\\nu}))^{2}\\right)^{\\frac{1}{2}}. \\] <p>where \\(e_{cpb}\\) is the uncertainty in the bin content for the histogram defining process \\(p\\) in the channel \\(c\\).</p> <p>Alternatively, one parameter is assigned per process, which may be modelled with either a Poisson or Gaussian constraint pdf:</p> \\[     E_{cb}(\\vec{\\mu},\\vec{\\nu},\\vec{\\nu}_{\\alpha},\\vec{\\nu}_{\\beta}) = \\sum_{\\alpha}^{\\text{Poisson}} \\left(\\frac{\\nu_{\\alpha}}{\\omega_{\\alpha}} - 1\\right)\\omega_{c\\alpha b}N_{c\\alpha}(\\vec{\\nu})M_{c\\alpha}(\\vec{\\mu},\\vec{\\nu}) + \\sum_{\\beta}^{\\text{Gaussian}} \\nu_{\\beta}e_{c\\beta b}N_{c\\beta}(\\vec{\\nu})M_{c\\beta}(\\vec{\\mu},\\vec{\\nu}), \\] <p>where the indices \\(\\alpha\\) and \\(\\beta\\) run over the Poisson- and Gaussian-constrained processes, respectively. The parameters \\(\\omega_{\\alpha}\\) represent the nominal unweighted numbers of events, and are treated as the external measurements; \\(N_{cp}\\) and \\(\\omega_{c\\alpha b}\\) are defined as above.</p>"},{"location":"what_combine_does/model_and_likelihood/#customizing-the-form-of-the-expected-event-counts","title":"Customizing the form of the expected event counts","text":"<p>Although the above likelihood defines some specific functional forms, users are also able to implement custom functional forms for \\(M\\),  \\(N\\), and  \\(\\omega_{cbp}\\). In practice, this makes the functional form much more general than the default forms used above.</p> <p>However, some constraints do exist, such as the requirement that bin contents be positive, and that the function \\(M\\) only depends on \\(\\vec{\\mu}\\), whereas \\(N\\), and \\(\\omega_{cbp}\\) only depend on \\(\\vec{\\nu}\\).</p>"},{"location":"what_combine_does/model_and_likelihood/#auxiliary-likelihood-terms","title":"Auxiliary Likelihood terms","text":"<p>The auxiliary constraint terms implemented in combine are Gaussian, Poisson or Uniform:</p> \\[ p_{e} \\propto \\exp{\\left(-0.5 \\left(\\frac{(\\nu_{e} - y_{e})}{\\sigma}\\right)^2 \\right)}\\mathrm{;~} \\\\ p_{e} = \\mathrm{Poiss}( \\nu_{e}; y_{e} ) \\mathrm{;\\ or~} \\\\ p_{e} \\propto \\mathrm{constant\\ (on\\ some\\ interval\\ [a,b])}. \\] <p>Which form they have depends on the type of nuisance parameter:</p> <ul> <li>The shape (\\(\\vec{\\nu}_{S}\\)) and log-normal (\\(\\vec{\\nu}_{L}\\)) nuisance parameters always use Gaussian constraint terms;</li> <li>The gamma (\\(\\vec{\\nu}_{G}\\)) nuisance parameters always use Poisson constraints;</li> <li>The rate parameters (\\(\\vec{\\nu}_{\\rho}\\)) may have either Gaussian or Uniform constraints; and</li> <li>The model statistical uncertainties (\\(\\vec{\\nu}_{B}\\)) may use Gaussian or Poisson constraints.</li> </ul> <p>While combine does not provide functionality for user-defined auxiliary pdfs, the effect of nuisance parameters is highly customizable through the form of the dependence of \\(n^\\mathrm{exp}_{cb}\\) on the parameter.</p>"},{"location":"what_combine_does/model_and_likelihood/#overview-of-the-template-based-likelihood-model-in-combine","title":"Overview of the template-based likelihood model in Combine","text":"<p>An overview of the binned likelihood model built by combine is given below. Note that \\(M_{cp}\\) can be chosen by the user from a set of predefined models, or defined by the user themselves.</p> <p></p>"},{"location":"what_combine_does/model_and_likelihood/#parametric-likelihoods-in-combine","title":"Parametric Likelihoods in Combine","text":"<p>As with the template likelihood, the parametric likelihood implemented in combine supports multiple processes and multiple channels. Unlike the template likelihoods, the parametric likelihoods are defined using custom probability density functions, which are functions of continuous observables, rather than discrete, binned counts. Because the pdfs are functions of a continuous variable, the likelihood can be evaluated over unbinned data. 
They can also still be used for analysis of binned data.</p> <p>The unbinned model implemented in combine is given by:</p> \\[ \\mathcal{L} = \\mathcal{L}_\\mathrm{primary} \\cdot \\mathcal{L}_\\mathrm{auxiliary}  = \\\\ \\left(\\prod_c \\mathrm{Poiss}(n_{c,\\mathrm{tot}}^{\\mathrm{obs}} ; n_{c,\\mathrm{tot}}^{\\mathrm{exp}}(\\vec{\\mu},\\vec{\\nu})) \\prod_{i}^{n_c^{\\mathrm{obs}}} \\sum_p f_{cp}^{\\mathrm{exp}} \\mathrm{pdf}_{cp}(\\vec{x}_i ; \\vec{\\mu}, \\vec{\\nu} ) \\right) \\cdot \\prod_e p_e( y_e ; \\nu_e) \\] <p>where \\(c\\) indexes the channel, \\(p\\) indexes the process, and \\(e\\) indexes the nuisance parameter.</p> <ul> <li>\\(n_{c,\\mathrm{tot}}\\) is the total number of expected events in channel \\(c\\);</li> <li>\\(\\mathrm{pdf}_{cp}\\) are user defined probability density functions, which may take on the form of any valid probability density; and</li> <li>\\(f_{cp}^{\\mathrm{exp}}\\) is the fraction of the total events in channel \\(c\\) from process \\(p\\), \\(f_{cp} = \\frac{n_{cp}}{\\sum_p n_{cp}}\\).</li> </ul> <p>For parametric likelihoods on binned data, the data likelihood is first converted into the binned data likelihood format before evaluation, i.e.</p> \\[ \\mathcal{L} = \\prod_c \\prod_b  \\mathrm{Poiss}(n_{cb}^{\\mathrm{obs}}; n_{cb}^{\\mathrm{exp}})  \\prod_e p_e( y_e ; \\nu_e) \\] <p>where \\(n^\\mathrm{exp}\\) is calculated from the input pdf and normalization, based on the model parameters.</p>"},
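To make the unbinned likelihood above concrete, here is a toy negative log-likelihood for a single channel with a Gaussian "signal" pdf and an exponential "background" pdf on an observable x in [0, 10]. The pdf choices, ranges, yields and data points are invented for illustration (the Gaussian is treated as fully contained in the range), constant terms are dropped, and none of the names correspond to Combine's own code.

```python
import math

def nll_unbinned(x_values, n_sig, n_bkg, mean=5.0, width=0.5, slope=0.3):
    """Toy extended unbinned NLL: a Poisson term for the total yield plus the
    per-event log of the mixture of normalized pdfs (constants dropped)."""
    n_tot = n_sig + n_bkg
    f_sig, f_bkg = n_sig / n_tot, n_bkg / n_tot
    nll = n_tot - len(x_values) * math.log(n_tot)   # -log Poiss(n_obs; n_tot), up to a constant
    bkg_norm = 1.0 - math.exp(-10.0 * slope)        # exponential pdf normalized on [0, 10]
    for x in x_values:
        pdf_sig = math.exp(-0.5 * ((x - mean) / width) ** 2) / (width * math.sqrt(2.0 * math.pi))
        pdf_bkg = slope * math.exp(-slope * x) / bkg_norm
        nll -= math.log(f_sig * pdf_sig + f_bkg * pdf_bkg)
    return nll

print(nll_unbinned([4.8, 5.1, 2.3, 7.9, 5.0], n_sig=3.0, n_bkg=2.0))
```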
{"location":"what_combine_does/model_and_likelihood/#model-of-expected-event-counts","title":"Model of expected event counts","text":"<p>The total number of expected events is modelled as:</p> \\[n_{c,\\mathrm{tot}}^\\mathrm{exp} = \\mathrm{max}(0, \\sum_{p} n^{cp}_0 M_{cp}(\\vec{\\mu})N_{cp}(\\nu_{G},\\vec{\\nu}_L,\\vec{\\nu}_{\\rho})) \\] <p>where \\(n^{cp}_0\\) is a default normalization for the process and, as for the binned likelihoods, \\(\\nu_G, \\vec{\\nu}_L\\), and \\(\\vec{\\nu}_{\\rho}\\) are different types of nuisance parameters which modify the process normalizations with different functional forms, as in the binned case.</p> Details of Process Normalization <p>As in the template-based case, the different types of nuisance parameters affecting the process normalizations are:</p> <ul> <li>\\(\\nu_{G}\\) is a gamma nuisance, with linear normalization effects and a Poisson constraint term.</li> <li>\\(\\vec{\\nu}_{L}\\) are log-normal nuisances, with log-normal normalization effects and Gaussian constraint terms.</li> <li>\\(\\vec{\\nu}_{\\rho}\\) are user defined rate parameters, with user-defined normalization effects and Gaussian or uniform constraint terms.</li> <li>\\(N\\) defines the overall normalization effect of the nuisance parameters;</li> </ul> <p>and \\(N\\) is defined as in the template-based case, except that there are no \\(\\vec{\\nu}_S\\) uncertainties.</p> \\[ N_{cp} = N_{\\mathrm{0}}(\\nu_{G})\\prod_{n} {\\kappa_{n}}^{\\nu_{L,n}}\\prod_{a} {\\kappa^{\\mathrm{A}}_{a}(\\nu_{L}^{a},\\kappa^{+}_{a}, \\kappa^{-}_{a})}^{\\nu_{L}^{a}} \\prod_{r}F_{r}(\\nu_\\rho) \\] <p>The function \\(F_{r}\\) is any user-defined mathematical expression. The functions \\(\\kappa(\\nu,\\kappa^+,\\kappa^-)\\) are defined to create smooth asymmetric log-normal uncertainties. The details of the interpolations which are used are found in the section on normalization effects in the binned model.</p>"},{"location":"what_combine_does/model_and_likelihood/#parameter-of-interest-model_1","title":"Parameter of Interest Model","text":"<p>As in the template-based case, the parameter of interest model, \\(M_{cp}(\\vec{\\mu})\\), can take on different forms defined by the user. The default model is one where \\(\\vec{\\mu}\\) simply scales the signal processes' normalizations.</p>"},{"location":"what_combine_does/model_and_likelihood/#shape-morphing-effects_1","title":"Shape Morphing Effects","text":"<p>The user may define any number of nuisance parameters which morph the shape of the pdf according to functional forms defined by the user. These nuisance parameters are included as \\(\\vec{\\nu}_\\rho\\) uncertainties, which may have Gaussian or uniform constraints, and include user-defined process normalization effects.</p>"},{"location":"what_combine_does/model_and_likelihood/#combining-template-based-and-parametric-likelihoods","title":"Combining template-based and parametric Likelihoods","text":"<p>While we presented the likelihoods for the template and parametric models separately, they can also be combined into a single likelihood, by treating them each as separate channels. When combining the models, the data likelihoods of the binned and unbinned channels are multiplied.</p> \\[ \\mathcal{L}_{\\mathrm{combined}} = \\mathcal{L}_{\\mathrm{primary}} \\cdot \\mathcal{L}_\\mathrm{auxiliary} =  \\left(\\prod_{c_\\mathrm{template}} \\mathcal{L}_{\\mathrm{primary}}^{c_\\mathrm{template}}\\right) \\left(\\prod_{c_\\mathrm{parametric}} \\mathcal{L}_{\\mathrm{primary}}^{c_\\mathrm{parametric}}\\right)\\cdot \\mathcal{L}_{\\mathrm{auxiliary}} \\]"},{"location":"what_combine_does/statistical_tests/","title":"Statistical Tests","text":"<p>Combine is a likelihood based statistical tool. That means that it uses the likelihood function to define statistical tests.</p> <p>Combine provides a number of customization options for each test; as always it is up to the user to choose an appropriate test and options.</p>"},{"location":"what_combine_does/statistical_tests/#general-framework","title":"General Framework","text":""},{"location":"what_combine_does/statistical_tests/#statistical-tests_1","title":"Statistical tests","text":"<p>Combine implements a number of different customizable statistical tests. These tests can be used for purposes such as determining the significance of some new physics model over the standard model, setting limits, estimating parameters, and checking goodness of fit.</p> <p>These tests are all performed on a given model (null hypothesis), and often require additional specification of an alternative model. 
The statistical test then typically requires defining some \"test statistic\", \\(t\\), which is simply any real-valued function of the observed data:</p> \\[ t(\\mathrm{data}) \\in \\mathbb{R} \\] <p>For example, in a simple coin-flipping experiment, the number of heads could be used as the test statistic.</p> <p>The distribution of the test statistic should be estimated under the null hypothesis (and the alternative hypothesis, if applicable). Then the value of the test statistic on the actual observed data, \\(t^{\\mathrm{obs}}\\), is compared with its expected value under the relevant hypotheses.</p> <p>This comparison, which depends on the test in question, defines the results of the test, which may be simple binary results (e.g. this model point is rejected at a given confidence level), or continuous (e.g. defining the degree to which the data are considered surprising, given the model). Often, as either a final result or as an intermediate step, the p-value of the observed test statistic under a given hypothesis is calculated.</p> How p-values are calculated <p>The distribution of the test statistic, \\(t\\), under some model hypothesis \\(\\mathcal{M}\\) is:</p> \\[t \\stackrel{\\mathcal{M}}{\\sim} D_{\\mathcal{M}}\\] <p>And the observed value of the test statistic is \\(t_{\\mathrm{obs}}\\). The p-value of the observed result gives the probability of having observed a test statistic at least as extreme as the actual observation. For example, this may be:</p> \\[p = \\int_{t_{\\mathrm{min}}}^{t_\\mathrm{obs}} D_{\\mathcal{M}} \\mathrm{d}t\\] <p>In some cases, the bounds of the integral may be modified, such as \\(( t_{\\mathrm{obs}}, t_{\\mathrm{max}} )\\) or \\((-t_{\\mathrm{obs}}, t_{\\mathrm{obs}} )\\), depending on the details of the test being performed, and specifically on whether, for the distribution in question, an observed value in the right tail, left tail, or either tail of the distribution is considered unexpected.</p> <p>The p-values using the left tail and right tail are related to each other via \\(p_{\\mathrm{left}} = 1 - p_{\\mathrm{right}}\\).</p>"},{"location":"what_combine_does/statistical_tests/#test-statistics","title":"Test Statistics","text":"<p>The test statistic can be any real-valued function of the data. While in principle many valid test statistics can be used, the choice of test statistic is very important as it influences the power of the statistical test.</p> <p>By associating a single real value with every observation, the test statistic allows us to recast the question \"how likely was this observation?\" in the form of a quantitative question about the value of the test statistic. 
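As a brief aside, the p-value machinery described above can be illustrated with the coin-flipping example: the sketch below estimates a right-tail p-value empirically from pseudo-experiments. It is a hypothetical illustration in Python, not a Combine routine, and the observed count of 62 heads is an invented number.

```python
import random

def right_tail_p_value(t_obs, toy_values):
    """Fraction of pseudo-experiments with a test statistic at least as large
    as the observed one (an empirical right-tail p-value)."""
    return sum(1 for t in toy_values if t >= t_obs) / len(toy_values)

# test statistic: number of heads in 100 flips; null hypothesis: a fair coin
random.seed(42)
toys = [sum(random.random() < 0.5 for _ in range(100)) for _ in range(20000)]
print(right_tail_p_value(62, toys))   # how surprising are 62 observed heads?
```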
Ideally a good test statistic should return different values for likely outcomes as compared to unlikely outcomes and the expected distributions under the null and alternate hypotheses should be well-separated.</p> <p>In many situations, extremely useful test statistics, sometimes optimal ones for particular tasks, can be constructed from the likelihood function itself:</p> \\[ t(\\mathrm{data}) = f(\\mathcal{L}) \\] <p>Even for a given statistical test, several likelihood-based test-statistics may be suitable, and for some tests combine implements multiple test-statistics from which the user can choose.</p>"},{"location":"what_combine_does/statistical_tests/#tests-with-likelihood-ratio-test-statistics","title":"Tests with Likelihood Ratio Test Statistics","text":"<p>The likelihood function itself often forms a good basis for building test statistics.</p> <p>Typically the absolute value of the likelihood itself is not very meaningful as it depends on many fixed aspects we are usually not interested in on their own, like the size of the parameter space and the number of observations. However, quantities such as the ratio of the likelihood at two different points in parameter space are very informative about the relative merits of those two models.</p>"},{"location":"what_combine_does/statistical_tests/#the-likelihood-ratio-and-likelihood-ratio-based-test-statistics","title":"The likelihood ratio and likelihood ratio based test statistics","text":"<p>A very useful test statistic is the likelihood ratio of two models:</p> \\[ \\Lambda \\equiv \\frac{\\mathcal{L}_{\\mathcal{M}}}{\\mathcal{L}_{\\mathcal{M}'}} \\] <p>For technical and convenience reasons, often the negative logarithm of the likelihood ratio is used:</p> \\[t \\propto -\\log(\\Lambda) = \\log(\\mathcal{L}_{\\mathcal{M}'}) - \\log(\\mathcal{L}_{\\mathcal{M}})\\] <p>With different proportionality constants being most convenient in different circumstances. The negative sign is used by convention since usually the ratios are constructed so that the larger likelihood value must be in the denominator. This way, \\(t\\) is positive, and larger values of \\(t\\) represent larger differences between the likelihoods of the two models.</p>"},{"location":"what_combine_does/statistical_tests/#sets-of-test-statistics","title":"Sets of test statistics","text":"<p>If the parameters of both likelihoods in the ratio are fixed to a single value, then that defines a single test statistic. Often, however, we are interested in testing \"sets\" of models, parameterized by some set of values \\((\\vec{\\mu}, \\vec{\\nu})\\).</p> <p>This is important in limit setting for example, where we perform statistical tests to exclude entire ranges of the parameter space.</p> <p>In these cases, the likelihood ratio (or a function of it) can be used to define a set of test statistics parameterized by the model parameters. For example, a very useful set of test statistics is:</p> \\[ t_{\\vec{\\mu}} \\propto -\\log\\left(\\frac{\\mathcal{L}(\\vec{\\mu})}{\\mathcal{L}(\\vec{\\hat{\\mu}})}\\right) \\] <p>Where the likelihood parameters in the bottom are fixed to their maximum likelihood values, but the parameter \\(\\vec{\\mu}\\) indexing the test statistic appears in the numerator of the likelihood ratio.</p> <p>When calculating the p-values for these statistical tests, the p-values are calculated at each point in parameter space using the test statistic for that point. 
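As a concrete numerical illustration of a likelihood-ratio test statistic, consider a single-bin counting experiment and two fully specified Poisson hypotheses. The sketch below is a toy Python transcription of t = -log(Lambda); the expected rates and observed count are invented.

```python
import math

def log_poisson(n_obs, rate):
    """log L for a Poisson counting experiment (the log n_obs! constant is dropped)."""
    return n_obs * math.log(rate) - rate

def t_neg_log_lambda(n_obs, rate_num, rate_den):
    """t = -log Lambda, with Lambda = L(rate_num) / L(rate_den)."""
    return log_poisson(n_obs, rate_den) - log_poisson(n_obs, rate_num)

# background-only expectation (b = 5) against signal-plus-background (s + b = 9):
print(t_neg_log_lambda(n_obs=11, rate_num=5.0, rate_den=9.0))
```

With the better-fitting hypothesis in the denominator, t is positive, and larger values indicate a larger difference between the two models, as described above.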
In other words, the observed and expected distributions of the test statistics are computed separately at each parameter point \\(\\vec{\\mu}\\) being considered.</p>"},{"location":"what_combine_does/statistical_tests/#expected-distributions-of-likelihood-ratio-test-statistics","title":"Expected distributions of likelihood ratio test statistics","text":"<p>Under appropriate conditions, the distribution of \\(t_\\vec{\\mu}\\) can be approximated analytically, via Wilks' Theorem or other extensions of that work. Then, the p-value of the observed test statistic can be calculated from the known form of the expected distribution. This is also true for a number of the other test statistics derived from the likelihood ratio, where asymptotic approximations have been derived.</p> <p>Combine provides asymptotic methods for limit setting, significance tests, and computing confidence intervals, which make use of these approximations for fast calculations.</p> <p>In the general case, however, the distribution of the test statistic is not known, and it must be estimated. Typically it is estimated by generating many sets of pseudo-data from the model and using the empirical distribution of the test statistic.</p> <p>Combine also provides methods for limit setting, significance tests, and computing confidence intervals which use pseudodata generation to estimate the expected test-statistic distributions, and therefore don't depend on the asymptotic approximation. Methods are also provided for generating pseudodata without running a particular test, which can be saved and used for estimating expected distributions.</p>"},{"location":"what_combine_does/statistical_tests/#parameter-estimation-using-the-likelihood-ratio","title":"Parameter Estimation using the likelihood ratio","text":"<p>A common use case for likelihood ratios is estimating the values of some parameters, such as the parameters of interest, \\(\\vec{\\mu}\\). The point estimate for the parameters is simply the maximum likelihood estimate, but the likelihood ratio can be used for estimating the uncertainty as a confidence region.</p> <p>A confidence region for the parameters \\(\\vec{\\mu}\\) can be defined by using an appropriate test statistic. Typically, we use the profile likelihood ratio:</p> \\[ t_{\\vec{\\mu}} \\propto -\\log\\left(\\frac{\\mathcal{L}(\\vec{\\mu},\\vec{\\hat{\\nu}}(\\vec{\\mu}))}{\\mathcal{L}(\\vec{\\hat{\\mu}},\\vec{\\hat{\\nu}})}\\right) \\] <p>Where the likelihood in the top is the value of the likelihood at a point \\(\\vec{\\mu}\\) profiled over \\(\\vec{\\nu}\\); and the likelihood on the bottom is at the best fit point.</p> <p>Then the confidence region can be defined as the region where the p-value of the observed test-statistic is less than the confidence level:</p> \\[ \\{ \\vec{\\mu}_{\\mathrm{CL}} \\} =  \\{ \\vec{\\mu} : p_{\\vec{\\mu}} \\lt \\mathrm{CL} \\}.\\] <p>This construction will satisfy the frequentist coverage property that the confidence region contains the parameter values used to generate the data in \\(\\mathrm{CL}\\) fraction of cases.</p> <p>In many cases, Wilks' theorem can be used to calculate the p-value and the criterion on \\(p_{\\vec{\\mu}}\\) can be converted directly into a criterion on \\(t_{\\vec{\\mu}}\\) itself, \\(t_{\\vec{\\mu}} \\lt \\gamma_{\\mathrm{CL}}\\). 
Where \\(\\gamma_{\\mathrm{CL}}\\) is a known function of the confidence level which depends on the parameter space being considered.</p>"},{"location":"what_combine_does/statistical_tests/#discoveries-using-the-likelihood-ratio","title":"Discoveries using the likelihood ratio","text":"<p>A common method for claiming discovery is based on a likelihood ratio test by showing that the new physics model has a \"significantly\" larger likelihood than the standard model.</p> <p>This could be done by using the standard profile likelihood ratio test statistic:</p> \\[ t_{\\mathrm{NP}} = -2\\log\\left(\\frac{\\mathcal{L}(\\mu_{\\mathrm{NP}} = 0, \\vec{\\hat{\\nu}}(\\mu_{\\mathrm{NP}} = 0))}{\\mathcal{L}(\\hat{\\mu}_{\\mathrm{NP}},\\vec{\\hat{\\nu}})}\\right) \\] <p>Where \\(\\mu_{\\mathrm{NP}}\\) represents the strength of some new physics quantity, such as the cross section for creation of a new particle. However, this would also allow for claiming \"discovery\" in cases where the best fit value is negative, i.e. \\(\\hat{\\mu} \\lt 0\\), which in particle physics often corresponds to an unphysical model, such as a negative cross section. In order to avoid such a situation, we typically use a modified test statistic:</p> \\[ q_{0} = \\begin{cases}     0 &amp; \\hat{\\mu} \\lt 0 \\\\     -2\\log\\left(\\frac{\\mathcal{L}(\\mu_{\\mathrm{NP}} = 0)}{\\mathcal{L}(\\hat{\\mu}_{\\mathrm{NP}})}\\right) &amp; \\hat{\\mu} \\geq 0 \\end{cases} \\] <p>which excludes the possibility of claiming discovery when the best fit value of \\(\\mu\\) is negative.</p> <p>As with the likelihood ratio test statistic, \\(t\\), defined above, under suitable conditions, analytic expressions for the distribution of \\(q_0\\) are known.</p> <p>Once the value \\(q_{0}(\\mathrm{data})\\) is calculated, it can be compared to the expected distribution of \\(q_{0}\\) under the standard model hypothesis to calculate the p-value. If the p-value is below some threshold, discovery is often claimed. In high-energy physics the standard threshold is \\(\\sim 5\\times10^{-7}\\).</p>"},
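For a single-bin counting experiment with a known background b and a signal strength that simply scales a unit signal, the discovery statistic q0 defined above takes a closed form. The sketch below uses invented numbers and is only an illustration of the formula, not a substitute for running Combine; the statement that sqrt(q0) approximates the significance relies on the asymptotic approximation mentioned in the text.

```python
import math

def q0_counting(n_obs, b):
    """q0 for a single-bin counting experiment with known background b,
    where the maximum-likelihood signal estimate is s_hat = n_obs - b."""
    if n_obs <= b:          # s_hat <= 0: by construction q0 = 0
        return 0.0
    # -2 log [ L(s = 0) / L(s = s_hat) ] with Poisson likelihoods
    return 2.0 * (n_obs * math.log(n_obs / b) - (n_obs - b))

q0 = q0_counting(n_obs=15, b=5.2)
print(q0, math.sqrt(q0))    # sqrt(q0) approximates the significance asymptotically
```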
{"location":"what_combine_does/statistical_tests/#limit-setting-using-the-likelihood-ratio","title":"Limit Setting using the likelihood ratio","text":"<p>Various test statistics built from likelihood ratios can be used for limit setting, i.e. excluding some parameter values.</p> <p>One could set limits on a parameter \\(\\mu\\) by finding the values of \\(\\mu\\) that are outside the confidence regions defined above by using the likelihood ratio test statistic:</p> \\[ t_{\\mu} = -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\hat{\\mu})}\\right) \\] <p>However, this could \"exclude\" \\(\\mu = 0\\) or small values of \\(\\mu\\) at a typical limit setting confidence level, such as 95%, while still not claiming a discovery. This is considered undesirable, and often we only want to set upper limits on the value of \\(\\mu\\), rather than excluding any possible set of parameters outside our chosen confidence interval.</p> <p>This can be done using a modified test statistic:</p> \\[ \\tilde{t}_{\\mu} = -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\min(\\mu,\\hat{\\mu}))}\\right) = \\begin{cases}     -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\hat{\\mu})}\\right)&amp; \\hat{\\mu} \\lt \\mu  \\\\     0 &amp;  \\mu \\leq \\hat{\\mu} \\end{cases} \\] <p>However, this can also have undesirable properties when the best fit value, \\(\\hat{\\mu}\\), is less than 0. In that case, we may set limits below 0. In order to avoid these situations, another modified test statistic can be used:</p> \\[ \\tilde{q}_{\\mu} = \\begin{cases}     -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\mu = 0)}\\right)&amp; \\hat{\\mu} \\lt 0  \\\\     -2\\log\\left(\\frac{\\mathcal{L}(\\mu)}{\\mathcal{L}(\\hat{\\mu})}\\right)&amp; 0 \\lt \\hat{\\mu} \\lt \\mu  \\\\     0&amp;  \\mu \\lt \\hat{\\mu} \\end{cases} \\] <p>Which also has a known distribution under appropriate conditions, or can be estimated from pseudo-experiments. One can then set a limit at a given confidence level, \\(\\mathrm{CL}\\), by finding the smallest value of \\(\\mu\\) for which \\(p_{\\mu} \\equiv P( t_{\\mu} \\gt t_{\\mu}(\\mathrm{data});\\mathcal{M}_{\\mu}) = 1 - \\mathrm{CL}\\). Larger values of \\(\\mu\\) will have smaller p-values and are considered excluded at the given confidence level.</p> <p>However, this procedure is rarely used; in almost every case we use a modified test procedure which uses the \\(\\mathrm{CL}_{s}\\) criterion, explained below.</p>"},{"location":"what_combine_does/statistical_tests/#the-cls-criterion","title":"The CLs criterion","text":"<p>Regardless of which of these test statistics is used, the standard test methodology has some undesirable properties for limit setting.</p> <p>Even for an experiment with almost no sensitivity to new physics, 5% of the time the experiment is performed we expect the experimenter to find \\(p_{\\mu} \\lt 0.05\\) for small values of \\(\\mu\\) and set limits on parameter values to which the experiment is not sensitive!</p> <p>In order to avoid such situations the \\(\\mathrm{CL}_{s}\\) criterion was developed, as explained in these two papers. Rather than requiring \\(p_{\\mu} \\lt (1-\\mathrm{CL})\\) to exclude \\(\\mu\\), as would be done in the general framework described above, the \\(\\mathrm{CL}_{s}\\) criterion requires:</p> \\[ \\frac{p_{\\mu}}{1-p_{b}} \\lt (1-\\mathrm{CL}) \\] <p>Where \\(p_{\\mu}\\) is the usual probability of observing the observed value of the test statistic under the signal + background model with signal strength \\(\\mu\\), and \\(p_{b}\\) is the p-value for the background-only hypothesis, with the p-value defined using the opposite tail from the definition of \\(p_{\\mu}\\).</p> <p>Using the \\(\\mathrm{CL}_{s}\\) criterion fixes the issue of setting limits much stricter than the experimental sensitivity, because for values of \\(\\mu\\) to which the experiment is not sensitive the distribution of the test statistic under the signal hypothesis is nearly the same as under the background hypothesis. Therefore, given the use of opposite tails in the p-value definition, \\(p_{\\mu} \\approx 1-p_{b}\\), and the ratio approaches 1.</p> <p>Note that this means that a limit set using the \\(\\mathrm{CL}_{s}\\) criterion at a given \\(\\mathrm{CL}\\) will exclude the true parameter value \\(\\mu\\) with a frequency less than the nominal rate of \\(1-\\mathrm{CL}\\). The actual frequency at which it is excluded depends on the sensitivity of the experiment to that parameter value.</p>"},
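The CLs construction above is a one-line calculation once the two p-values are in hand. The toy numbers below are not taken from any real analysis; they simply illustrate why the criterion protects against excluding parameter values the experiment is not sensitive to.

```python
def cls(p_mu, p_b):
    """CLs = p_mu / (1 - p_b); mu is excluded at confidence level CL when CLs < 1 - CL."""
    return p_mu / (1.0 - p_b)

# p_mu = 0.04 alone would exclude mu at 95% CL, but with a background-only
# p-value p_b = 0.50 (i.e. no sensitivity) the CLs criterion does not exclude it:
print(cls(0.04, 0.50), cls(0.04, 0.50) < 0.05)
```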
{"location":"what_combine_does/statistical_tests/#goodness-of-fit-tests-using-the-likelihood-ratio","title":"Goodness of fit tests using the likelihood ratio","text":"<p>The likelihood ratio can also be used as a measure of goodness of fit, i.e. describing, for binned data, how well the data match the model.</p> <p>A standard likelihood-based measure of the goodness of fit is determined by using the log likelihood ratio with the likelihood in the denominator coming from the saturated model.</p> \\[ t_{\\mathrm{saturated}} \\propto -\\log\\left(\\frac{\\mathcal{L}_{\\mathcal{M}}}{\\mathcal{L}_{\\mathcal{M}_\\mathrm{saturated}}}\\right) \\] <p>Here \\(\\mathcal{M}\\) is whatever model one is testing the goodness of fit for, and the saturated model is a model for which the prediction matches the observed value in every bin. Typically, the saturated model would be one in which there are as many free parameters as bins.</p> <p>This ratio then provides a comparison between how well the actual data are fit and a hypothetical optimal fit.</p> <p>Unfortunately, the distribution of \\(t_{\\mathrm{saturated}}\\) usually is not known a priori and has to be estimated by generating pseudodata from the model \\(\\mathcal{L}\\) and calculating the empirical distribution of the statistic.</p> <p>Once the distribution is determined, a p-value for the statistic can be derived which indicates the probability of observing data with that quality of fit given the model, and therefore serves as a measure of the goodness of fit.</p>"},{"location":"what_combine_does/statistical_tests/#channel-compatibility-test-using-the-likelihood-ratio","title":"Channel Compatibility test using the likelihood ratio","text":"<p>When performing an analysis across many different channels (for example, different Higgs decay modes), it is often interesting to check the level of compatibility of the various channels.</p> <p>Combine implements a channel compatibility test, by considering a model, \\(\\mathcal{M}_{\\mathrm{c-independent}}\\), in which the signal is independent in every channel. As a test statistic, this test uses the likelihood ratio between the best fit value of the nominal model and that of the model with independent signal strengths for each channel:</p> \\[ t = -\\log\\left(\\frac{\\mathcal{L}_{\\mathcal{M}}(\\vec{\\hat{\\mu}},\\vec{\\hat{\\nu}})}{\\mathcal{L}_{\\mathcal{M}_{\\mathrm{c-indep}}}(\\vec{\\hat{\\mu}}_{c1}, \\vec{\\hat{\\mu}}_{c2}, ..., \\vec{\\hat{\\nu}})}\\right) \\] <p>The distribution of the test statistic is not known a priori, and needs to be calculated by generating pseudo-data samples.</p>"},{"location":"what_combine_does/statistical_tests/#other-statistical-tests","title":"Other Statistical Tests","text":"<p>While combine is a likelihood based statistical framework, it does not require that all statistical tests use the likelihood ratio.</p>"},{"location":"what_combine_does/statistical_tests/#other-goodness-of-fit-tests","title":"Other Goodness of Fit Tests","text":"<p>As well as the saturated goodness of fit test, defined above, combine implements Kolmogorov-Smirnov and Anderson-Darling goodness of fit tests.</p> <p>For the Kolmogorov-Smirnov (KS) test, the test statistic is the maximum absolute difference between the cumulative distribution functions of the data and the model:</p> \\[ D = \\max_{x} | F_{\\mathcal{M}}(x) - F_{\\mathrm{data}}(x) | \\] <p>Where \\(F(x)\\) is the Cumulative Distribution Function (i.e. cumulative sum) of the model or data at point \\(x\\).</p> 
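For binned inputs, the KS statistic above reduces to comparing cumulative sums. The sketch below normalizes both sets of bin contents and takes the largest absolute difference of the running sums; the bin contents are invented, and this is only an illustration of the definition, not Combine's implementation.

```python
def ks_statistic(model_bins, data_bins):
    """Maximum absolute difference between the normalized cumulative sums of
    the model and data bin contents."""
    norm_m, norm_d = sum(model_bins), sum(data_bins)
    cum_m = cum_d = 0.0
    d_max = 0.0
    for m, d in zip(model_bins, data_bins):
        cum_m += m / norm_m
        cum_d += d / norm_d
        d_max = max(d_max, abs(cum_m - cum_d))
    return d_max

print(ks_statistic([10.0, 20.0, 30.0, 25.0], [8, 24, 27, 29]))
```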
<p>For the Anderson-Darling (AD) test, the test statistic is based on the integral of the square of the difference between the two cumulative distribution functions. The squared difference is modified by a weighting function which gives more importance to differences in the tails:</p> \\[ A^2 = \\int_{x_{\\mathrm{min}}}^{x_{\\mathrm{max}}} \\frac{ (F_{\\mathcal{M}}(x) - F_{\\mathrm{data}}(x))^2}{ F_\\mathcal{M}(x) (1 - F_{\\mathcal{M}}(x)) } \\mathrm{d}F_\\mathcal{M}(x) \\] <p>Notably, both the Anderson-Darling and Kolmogorov-Smirnov tests rely on the cumulative distribution. Because the ordering of different channels of a model is not well defined, the tests themselves are not unambiguously defined over multiple channels.</p>"}]}
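In the same spirit, a discretized version of the Anderson-Darling integral above can be written as a weighted sum over bins, with dF_M replaced by the model's bin probability. This simplified sketch skips the last bin, where F_M = 1 makes the weight singular (there the numerator also vanishes); it only shows the structure of the tail weighting and is not intended to reproduce Combine's implementation, and the bin contents are invented.

```python
def ad_statistic(model_bins, data_bins):
    """Discretized Anderson-Darling sum: squared CDF difference weighted by
    1 / (F_M (1 - F_M)), with dF_M taken as the model's bin probability."""
    norm_m, norm_d = sum(model_bins), sum(data_bins)
    cum_m = cum_d = 0.0
    a2 = 0.0
    for m, d in zip(model_bins[:-1], data_bins[:-1]):  # last bin skipped: F_M = 1 there
        cum_m += m / norm_m
        cum_d += d / norm_d
        a2 += (cum_m - cum_d) ** 2 / (cum_m * (1.0 - cum_m)) * (m / norm_m)
    return a2

print(ad_statistic([10.0, 20.0, 30.0, 25.0], [8, 24, 27, 29]))
```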
\ No newline at end of file
diff --git a/v10.0.X/sitemap.xml.gz b/v10.0.X/sitemap.xml.gz
index 663a0de9267d9d1bdec3c48e831fa76575b86a21..f2ab0f576a2f761fd89235eb46d76b243898b82c 100644
GIT binary patch
delta 13
Ucmb=gXP58h;9$6Td?I@V035soXaE2J

delta 13
Ucmb=gXP58h;9%%JG?Bdm02@jK9RL6T

diff --git a/v10.0.X/tutorial2023/parametric_exercise/index.html b/v10.0.X/tutorial2023/parametric_exercise/index.html
index 1e56a73460f..dcfed9dbe10 100644
--- a/v10.0.X/tutorial2023/parametric_exercise/index.html
+++ b/v10.0.X/tutorial2023/parametric_exercise/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2845,7 +2845,7 @@ <h2 id="advanced-exercises-to-be-added">Advanced exercises (to be added)</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/tutorial2023_unfolding/unfolding_exercise/index.html b/v10.0.X/tutorial2023_unfolding/unfolding_exercise/index.html
index a1f9c005683..4e80cf81996 100644
--- a/v10.0.X/tutorial2023_unfolding/unfolding_exercise/index.html
+++ b/v10.0.X/tutorial2023_unfolding/unfolding_exercise/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1705,7 +1705,7 @@ <h2 id="poi-correlations">POI correlations</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/tutorial_stat_routines/stat_routines/index.html b/v10.0.X/tutorial_stat_routines/stat_routines/index.html
index d6404ee62ba..256bc4daebc 100644
--- a/v10.0.X/tutorial_stat_routines/stat_routines/index.html
+++ b/v10.0.X/tutorial_stat_routines/stat_routines/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1930,7 +1930,7 @@ <h3 id="debugging">Debugging</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/what_combine_does/fitting_concepts/index.html b/v10.0.X/what_combine_does/fitting_concepts/index.html
index 139af067dc1..ecb416b791b 100644
--- a/v10.0.X/what_combine_does/fitting_concepts/index.html
+++ b/v10.0.X/what_combine_does/fitting_concepts/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1759,7 +1759,7 @@ <h3 id="bayesian-credibility-regions">Bayesian Credibility Regions</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/what_combine_does/introduction/index.html b/v10.0.X/what_combine_does/introduction/index.html
index 34c17cf87d8..d56fe7e039b 100644
--- a/v10.0.X/what_combine_does/introduction/index.html
+++ b/v10.0.X/what_combine_does/introduction/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1493,7 +1493,7 @@ <h2 id="validation-and-inspection">Validation and Inspection</h2>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/what_combine_does/model_and_likelihood/index.html b/v10.0.X/what_combine_does/model_and_likelihood/index.html
index cf7289b8352..893a47f4b16 100644
--- a/v10.0.X/what_combine_does/model_and_likelihood/index.html
+++ b/v10.0.X/what_combine_does/model_and_likelihood/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -2102,7 +2102,7 @@ <h1 id="references-and-external-literature">References and External Literature</
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>
       
diff --git a/v10.0.X/what_combine_does/statistical_tests/index.html b/v10.0.X/what_combine_does/statistical_tests/index.html
index 2fb1c4d834b..c93ca12ddca 100644
--- a/v10.0.X/what_combine_does/statistical_tests/index.html
+++ b/v10.0.X/what_combine_does/statistical_tests/index.html
@@ -16,7 +16,7 @@
       
       
       <link rel="icon" href="../../logo.png">
-      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.31">
+      <meta name="generator" content="mkdocs-1.6.0, mkdocs-material-9.5.33">
     
     
       
@@ -1909,7 +1909,7 @@ <h3 id="other-goodness-of-fit-tests">Other Goodness of Fit Tests</h3>
     <script id="__config" type="application/json">{"base": "../..", "features": ["content.code.copy", "navigation.footer", "navigation.indexes", "navigation.expand", "navigation.tracking", "navigation.tabs", "navigation.tabs.sticky", "navigation.top", "search.highlight", "search.suggest", "toc.follow"], "search": "../../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
     
     
-      <script src="../../assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+      <script src="../../assets/javascripts/bundle.af256bd8.min.js"></script>
       
         <script src="../../javascripts/mathjax.js"></script>