+    <div class="rst-versions" data-toggle="rst-versions" role="note">
+      <span class="rst-current-version" data-toggle="rst-current-version">
+        <span class="fa fa-book"> Read the Docs</span>
+        v: ${config.versions.current.slug}
+        <span class="fa fa-caret-down"></span>
+      </span>
+      <div class="rst-other-versions">
+        <div class="injected">
+          ${renderLanguages(config)}
+          ${renderVersions(config)}
+          ${renderDownloads(config)}
+          <dl>
+            <dt>On Read the Docs</dt>
+            <dd>
+              <a href="${config.projects.current.urls.home}">Project Home</a>
+            </dd>
+            <dd>
+              <a href="${config.projects.current.urls.builds}">Builds</a>
+            </dd>
+            <dd>
+              <a href="${config.projects.current.urls.downloads}">Downloads</a>
+            </dd>
+          </dl>
+          <dl>
+            <dt>Search</dt>
+            <dd>
+              <form id="flyout-search-form">
+                <input type="text" name="q" aria-label="Search docs" placeholder="Search docs" />
+              </form>
+            </dd>
+          </dl>
+          <hr />
+          <small>
+            <span>Hosted by <a href="https://about.readthedocs.org/">Read the Docs</a></span>
+          </small>
+        </div>
+      </div>
+    </div>
+  `;
+
+ // Inject the generated flyout into the body HTML element.
+ document.body.insertAdjacentHTML("beforeend", flyout);
+
+ // Trigger the Read the Docs Addons Search modal when clicking on the "Search docs" input from inside the flyout.
+ document
+ .querySelector("#flyout-search-form")
+ .addEventListener("focusin", () => {
+ const event = new CustomEvent("readthedocs-search-show");
+ document.dispatchEvent(event);
+ });
+ })
+}
+
+if (themeLanguageSelector || themeVersionSelector) {
+ function onSelectorSwitch(event) {
+ const option = event.target.selectedIndex;
+ const item = event.target.options[option];
+ window.location.href = item.dataset.url;
+ }
+
+ document.addEventListener("readthedocs-addons-data-ready", function (event) {
+ const config = event.detail.data();
+
+ const versionSwitch = document.querySelector(
+ "div.switch-menus > div.version-switch",
+ );
+ if (themeVersionSelector) {
+ let versions = config.versions.active;
+ if (config.versions.current.hidden || config.versions.current.type === "external") {
+ versions.unshift(config.versions.current);
+ }
+      const versionSelect = `
+        <select>
+          ${versions
+            .map(
+              (version) => `
+            <option
+              value="${version.slug}"
+              ${config.versions.current.slug === version.slug ? 'selected="selected"' : ""}
+              data-url="${version.urls.documentation}">
+              ${version.slug}
+            </option>`,
+            )
+            .join("\n")}
+        </select>
+      `;
+
+ versionSwitch.innerHTML = versionSelect;
+ versionSwitch.firstElementChild.addEventListener("change", onSelectorSwitch);
+ }
+
+ const languageSwitch = document.querySelector(
+ "div.switch-menus > div.language-switch",
+ );
+
+ if (themeLanguageSelector) {
+ if (config.projects.translations.length) {
+ // Add the current language to the options on the selector
+ let languages = config.projects.translations.concat(
+ config.projects.current,
+ );
+ languages = languages.sort((a, b) =>
+ a.language.name.localeCompare(b.language.name),
+ );
+
+        const languageSelect = `
+          <select>
+            ${languages
+              .map(
+                (language) => `
+              <option
+                value="${language.language.code}"
+                ${config.projects.current.slug === language.slug ? 'selected="selected"' : ""}
+                data-url="${language.urls.documentation}">
+                ${language.language.name}
+              </option>`,
+              )
+              .join("\n")}
+          </select>
+        `;
+
+ languageSwitch.innerHTML = languageSelect;
+ languageSwitch.firstElementChild.addEventListener("change", onSelectorSwitch);
+ }
+ else {
+ languageSwitch.remove();
+ }
+ }
+ });
+}
+
+document.addEventListener("readthedocs-addons-data-ready", function (event) {
+ // Trigger the Read the Docs Addons Search modal when clicking on "Search docs" input from the topnav.
+ document
+ .querySelector("[role='search'] input")
+ .addEventListener("focusin", () => {
+ const event = new CustomEvent("readthedocs-search-show");
+ document.dispatchEvent(event);
+ });
+});
\ No newline at end of file
diff --git a/_static/language_data.js b/_static/language_data.js
new file mode 100644
index 0000000..c7fe6c6
--- /dev/null
+++ b/_static/language_data.js
@@ -0,0 +1,192 @@
+/*
+ * This script contains the language-specific data used by searchtools.js,
+ * namely the list of stopwords, stemmer, scorer and splitter.
+ */
+
+var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
+
+
+/* Non-minified version is copied as a separate JS file, if available */
+
+/**
+ * Porter Stemmer
+ */
+var Stemmer = function() {
+
+ var step2list = {
+ ational: 'ate',
+ tional: 'tion',
+ enci: 'ence',
+ anci: 'ance',
+ izer: 'ize',
+ bli: 'ble',
+ alli: 'al',
+ entli: 'ent',
+ eli: 'e',
+ ousli: 'ous',
+ ization: 'ize',
+ ation: 'ate',
+ ator: 'ate',
+ alism: 'al',
+ iveness: 'ive',
+ fulness: 'ful',
+ ousness: 'ous',
+ aliti: 'al',
+ iviti: 'ive',
+ biliti: 'ble',
+ logi: 'log'
+ };
+
+ var step3list = {
+ icate: 'ic',
+ ative: '',
+ alize: 'al',
+ iciti: 'ic',
+ ical: 'ic',
+ ful: '',
+ ness: ''
+ };
+
+ var c = "[^aeiou]"; // consonant
+ var v = "[aeiouy]"; // vowel
+ var C = c + "[^aeiouy]*"; // consonant sequence
+ var V = v + "[aeiou]*"; // vowel sequence
+
+ var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
+ var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
+ var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
+ var s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ this.stemWord = function (w) {
+ var stem;
+ var suffix;
+ var firstch;
+ var origword = w;
+
+ if (w.length < 3)
+ return w;
+
+ var re;
+ var re2;
+ var re3;
+ var re4;
+
+ firstch = w.substr(0,1);
+ if (firstch == "y")
+ w = firstch.toUpperCase() + w.substr(1);
+
+ // Step 1a
+ re = /^(.+?)(ss|i)es$/;
+ re2 = /^(.+?)([^s])s$/;
+
+ if (re.test(w))
+ w = w.replace(re,"$1$2");
+ else if (re2.test(w))
+ w = w.replace(re2,"$1$2");
+
+ // Step 1b
+ re = /^(.+?)eed$/;
+ re2 = /^(.+?)(ed|ing)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = new RegExp(mgr0);
+ if (re.test(fp[1])) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = new RegExp(s_v);
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = /(at|bl|iz)$/;
+ re3 = new RegExp("([^aeiouylsz])\\1$");
+ re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re2.test(w))
+ w = w + "e";
+ else if (re3.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ else if (re4.test(w))
+ w = w + "e";
+ }
+ }
+
+ // Step 1c
+ re = /^(.+?)y$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(s_v);
+ if (re.test(stem))
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step2list[suffix];
+ }
+
+ // Step 3
+ re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step3list[suffix];
+ }
+
+ // Step 4
+ re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ re2 = /^(.+?)(s|t)(ion)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ if (re.test(stem))
+ w = stem;
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = new RegExp(mgr1);
+ if (re2.test(stem))
+ w = stem;
+ }
+
+ // Step 5
+ re = /^(.+?)e$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ re2 = new RegExp(meq1);
+ re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
+ w = stem;
+ }
+ re = /ll$/;
+ re2 = new RegExp(mgr1);
+ if (re.test(w) && re2.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+
+ // and turn initial Y back to y
+ if (firstch == "y")
+ w = firstch.toLowerCase() + w.substr(1);
+ return w;
+ }
+}
+
diff --git a/_static/minus.png b/_static/minus.png
new file mode 100644
index 0000000..d96755f
Binary files /dev/null and b/_static/minus.png differ
diff --git a/_static/plus.png b/_static/plus.png
new file mode 100644
index 0000000..7107cec
Binary files /dev/null and b/_static/plus.png differ
diff --git a/_static/pygments.css b/_static/pygments.css
new file mode 100644
index 0000000..84ab303
--- /dev/null
+++ b/_static/pygments.css
@@ -0,0 +1,75 @@
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+.highlight .hll { background-color: #ffffcc }
+.highlight { background: #f8f8f8; }
+.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */
+.highlight .err { border: 1px solid #FF0000 } /* Error */
+.highlight .k { color: #008000; font-weight: bold } /* Keyword */
+.highlight .o { color: #666666 } /* Operator */
+.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #9C6500 } /* Comment.Preproc */
+.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.highlight .gd { color: #A00000 } /* Generic.Deleted */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */
+.highlight .gr { color: #E40000 } /* Generic.Error */
+.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #008400 } /* Generic.Inserted */
+.highlight .go { color: #717171 } /* Generic.Output */
+.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.highlight .gt { color: #0044DD } /* Generic.Traceback */
+.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { color: #008000 } /* Keyword.Pseudo */
+.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #B00040 } /* Keyword.Type */
+.highlight .m { color: #666666 } /* Literal.Number */
+.highlight .s { color: #BA2121 } /* Literal.String */
+.highlight .na { color: #687822 } /* Name.Attribute */
+.highlight .nb { color: #008000 } /* Name.Builtin */
+.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+.highlight .no { color: #880000 } /* Name.Constant */
+.highlight .nd { color: #AA22FF } /* Name.Decorator */
+.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #0000FF } /* Name.Function */
+.highlight .nl { color: #767600 } /* Name.Label */
+.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */
+.highlight .nv { color: #19177C } /* Name.Variable */
+.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+.highlight .w { color: #bbbbbb } /* Text.Whitespace */
+.highlight .mb { color: #666666 } /* Literal.Number.Bin */
+.highlight .mf { color: #666666 } /* Literal.Number.Float */
+.highlight .mh { color: #666666 } /* Literal.Number.Hex */
+.highlight .mi { color: #666666 } /* Literal.Number.Integer */
+.highlight .mo { color: #666666 } /* Literal.Number.Oct */
+.highlight .sa { color: #BA2121 } /* Literal.String.Affix */
+.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */
+.highlight .sc { color: #BA2121 } /* Literal.String.Char */
+.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */
+.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.highlight .s2 { color: #BA2121 } /* Literal.String.Double */
+.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */
+.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.highlight .sx { color: #008000 } /* Literal.String.Other */
+.highlight .sr { color: #A45A77 } /* Literal.String.Regex */
+.highlight .s1 { color: #BA2121 } /* Literal.String.Single */
+.highlight .ss { color: #19177C } /* Literal.String.Symbol */
+.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */
+.highlight .fm { color: #0000FF } /* Name.Function.Magic */
+.highlight .vc { color: #19177C } /* Name.Variable.Class */
+.highlight .vg { color: #19177C } /* Name.Variable.Global */
+.highlight .vi { color: #19177C } /* Name.Variable.Instance */
+.highlight .vm { color: #19177C } /* Name.Variable.Magic */
+.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/_static/searchtools.js b/_static/searchtools.js
new file mode 100644
index 0000000..2c774d1
--- /dev/null
+++ b/_static/searchtools.js
@@ -0,0 +1,632 @@
+/*
+ * Sphinx JavaScript utilities for the full-text search.
+ */
+"use strict";
+
+/**
+ * Simple result scoring code.
+ */
+if (typeof Scorer === "undefined") {
+ var Scorer = {
+ // Implement the following function to further tweak the score for each result
+ // The function takes a result array [docname, title, anchor, descr, score, filename]
+ // and returns the new score.
+ /*
+ score: result => {
+ const [docname, title, anchor, descr, score, filename, kind] = result
+ return score
+ },
+ */
+
+ // query matches the full name of an object
+ objNameMatch: 11,
+ // or matches in the last dotted part of the object name
+ objPartialMatch: 6,
+ // Additive scores depending on the priority of the object
+ objPrio: {
+ 0: 15, // used to be importantResults
+ 1: 5, // used to be objectResults
+ 2: -5, // used to be unimportantResults
+ },
+ // Used when the priority is not in the mapping.
+ objPrioDefault: 0,
+
+ // query found in title
+ title: 15,
+ partialTitle: 7,
+ // query found in terms
+ term: 5,
+ partialTerm: 2,
+ };
+}
+
+// Global search result kind enum, used by themes to style search results.
+class SearchResultKind {
+ static get index() { return "index"; }
+ static get object() { return "object"; }
+ static get text() { return "text"; }
+ static get title() { return "title"; }
+}
+
+const _removeChildren = (element) => {
+ while (element && element.lastChild) element.removeChild(element.lastChild);
+};
+
+/**
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
+ */
+const _escapeRegExp = (string) =>
+ string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
+
+const _displayItem = (item, searchTerms, highlightTerms) => {
+ const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
+ const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
+ const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
+ const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
+ const contentRoot = document.documentElement.dataset.content_root;
+
+ const [docName, title, anchor, descr, score, _filename, kind] = item;
+
+ let listItem = document.createElement("li");
+ // Add a class representing the item's type:
+ // can be used by a theme's CSS selector for styling
+ // See SearchResultKind for the class names.
+ listItem.classList.add(`kind-${kind}`);
+ let requestUrl;
+ let linkUrl;
+ if (docBuilder === "dirhtml") {
+ // dirhtml builder
+ let dirname = docName + "/";
+ if (dirname.match(/\/index\/$/))
+ dirname = dirname.substring(0, dirname.length - 6);
+ else if (dirname === "index/") dirname = "";
+ requestUrl = contentRoot + dirname;
+ linkUrl = requestUrl;
+ } else {
+ // normal html builders
+ requestUrl = contentRoot + docName + docFileSuffix;
+ linkUrl = docName + docLinkSuffix;
+ }
+ let linkEl = listItem.appendChild(document.createElement("a"));
+ linkEl.href = linkUrl + anchor;
+ linkEl.dataset.score = score;
+ linkEl.innerHTML = title;
+ if (descr) {
+ listItem.appendChild(document.createElement("span")).innerHTML =
+ " (" + descr + ")";
+ // highlight search terms in the description
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ }
+ else if (showSearchSummary)
+ fetch(requestUrl)
+ .then((responseData) => responseData.text())
+ .then((data) => {
+ if (data)
+ listItem.appendChild(
+ Search.makeSearchSummary(data, searchTerms, anchor)
+ );
+ // highlight search terms in the summary
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ });
+ Search.output.appendChild(listItem);
+};
+const _finishSearch = (resultCount) => {
+ Search.stopPulse();
+ Search.title.innerText = _("Search Results");
+ if (!resultCount)
+ Search.status.innerText = Documentation.gettext(
+ "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
+ );
+ else
+ Search.status.innerText = Documentation.ngettext(
+ "Search finished, found one page matching the search query.",
+ "Search finished, found ${resultCount} pages matching the search query.",
+ resultCount,
+ ).replace('${resultCount}', resultCount);
+};
+const _displayNextItem = (
+ results,
+ resultCount,
+ searchTerms,
+ highlightTerms,
+) => {
+ // results left, load the summary and display it
+ // this is intended to be dynamic (don't sub resultsCount)
+ if (results.length) {
+ _displayItem(results.pop(), searchTerms, highlightTerms);
+ setTimeout(
+ () => _displayNextItem(results, resultCount, searchTerms, highlightTerms),
+ 5
+ );
+ }
+ // search finished, update title and status message
+ else _finishSearch(resultCount);
+};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename, kind].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
+};
+
+/**
+ * Default splitQuery function. Can be overridden in ``sphinx.search`` with a
+ * custom function per language.
+ *
+ * The regular expression works by splitting the string on consecutive characters
+ * that are not Unicode letters, numbers, underscores, or emoji characters.
+ * This is the same as ``\W+`` in Python, preserving the surrogate pair area.
+ */
+if (typeof splitQuery === "undefined") {
+ var splitQuery = (query) => query
+ .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
+ .filter(term => term) // remove remaining empty strings
+}
+
+/**
+ * Search Module
+ */
+const Search = {
+ _index: null,
+ _queued_query: null,
+ _pulse_status: -1,
+
+ htmlToText: (htmlString, anchor) => {
+ const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
+ for (const removalQuery of [".headerlink", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
+ const docContent = htmlElement.querySelector('[role="main"]');
+ if (docContent) return docContent.textContent;
+
+ console.warn(
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
+ );
+ return "";
+ },
+
+ init: () => {
+ const query = new URLSearchParams(window.location.search).get("q");
+ document
+ .querySelectorAll('input[name="q"]')
+ .forEach((el) => (el.value = query));
+ if (query) Search.performSearch(query);
+ },
+
+ loadIndex: (url) =>
+ (document.body.appendChild(document.createElement("script")).src = url),
+
+ setIndex: (index) => {
+ Search._index = index;
+ if (Search._queued_query !== null) {
+ const query = Search._queued_query;
+ Search._queued_query = null;
+ Search.query(query);
+ }
+ },
+
+ hasIndex: () => Search._index !== null,
+
+ deferQuery: (query) => (Search._queued_query = query),
+
+ stopPulse: () => (Search._pulse_status = -1),
+
+ startPulse: () => {
+ if (Search._pulse_status >= 0) return;
+
+ const pulse = () => {
+ Search._pulse_status = (Search._pulse_status + 1) % 4;
+ Search.dots.innerText = ".".repeat(Search._pulse_status);
+ if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
+ };
+ pulse();
+ },
+
+ /**
+ * perform a search for something (or wait until index is loaded)
+ */
+ performSearch: (query) => {
+ // create the required interface elements
+ const searchText = document.createElement("h2");
+ searchText.textContent = _("Searching");
+ const searchSummary = document.createElement("p");
+ searchSummary.classList.add("search-summary");
+ searchSummary.innerText = "";
+ const searchList = document.createElement("ul");
+ searchList.setAttribute("role", "list");
+ searchList.classList.add("search");
+
+ const out = document.getElementById("search-results");
+ Search.title = out.appendChild(searchText);
+ Search.dots = Search.title.appendChild(document.createElement("span"));
+ Search.status = out.appendChild(searchSummary);
+ Search.output = out.appendChild(searchList);
+
+ const searchProgress = document.getElementById("search-progress");
+ // Some themes don't use the search progress node
+ if (searchProgress) {
+ searchProgress.innerText = _("Preparing search...");
+ }
+ Search.startPulse();
+
+ // index already loaded, the browser was quick!
+ if (Search.hasIndex()) Search.query(query);
+ else Search.deferQuery(query);
+ },
+
+ _parseQuery: (query) => {
+ // stem the search terms and add them to the correct list
+ const stemmer = new Stemmer();
+ const searchTerms = new Set();
+ const excludedTerms = new Set();
+ const highlightTerms = new Set();
+ const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
+ splitQuery(query.trim()).forEach((queryTerm) => {
+ const queryTermLower = queryTerm.toLowerCase();
+
+ // maybe skip this "word"
+ // stopwords array is from language_data.js
+ if (
+ stopwords.indexOf(queryTermLower) !== -1 ||
+ queryTerm.match(/^\d+$/)
+ )
+ return;
+
+ // stem the word
+ let word = stemmer.stemWord(queryTermLower);
+ // select the correct list
+ if (word[0] === "-") excludedTerms.add(word.substr(1));
+ else {
+ searchTerms.add(word);
+ highlightTerms.add(queryTermLower);
+ }
+ });
+
+ if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
+ localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
+ }
+
+ // console.debug("SEARCH: searching for:");
+ // console.info("required: ", [...searchTerms]);
+ // console.info("excluded: ", [...excludedTerms]);
+
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
+
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename, kind].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
+ _removeChildren(document.getElementById("search-progress"));
+
+ const queryLower = query.toLowerCase().trim();
+ for (const [title, foundTitles] of Object.entries(allTitles)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ for (const [file, id] of foundTitles) {
+ const score = Math.round(Scorer.title * queryLower.length / title.length);
+ const boost = titles[file] === title ? 1 : 0; // add a boost for document titles
+ normalResults.push([
+ docNames[file],
+ titles[file] !== title ? `${titles[file]} > ${title}` : title,
+ id !== null ? "#" + id : "",
+ null,
+ score + boost,
+ filenames[file],
+ SearchResultKind.title,
+ ]);
+ }
+ }
+ }
+
+ // search for explicit entries in index directives
+ for (const [entry, foundEntries] of Object.entries(indexEntries)) {
+ if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
+ docNames[file],
+ titles[file],
+ id ? "#" + id : "",
+ null,
+ score,
+ filenames[file],
+ SearchResultKind.index,
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
+ }
+ }
+ }
+
+ // lookup as object
+ objectTerms.forEach((term) =>
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
+ );
+
+ // lookup as search terms in fulltext
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+
+ // let the scorer override scores with a custom scoring function
+ if (Scorer.score) {
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+ }
+
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
+
+ // remove duplicate search results
+ // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
+ let seen = new Set();
+ results = results.reverse().reduce((acc, result) => {
+ let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
+ if (!seen.has(resultStr)) {
+ acc.push(result);
+ seen.add(resultStr);
+ }
+ return acc;
+ }, []);
+
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
+
+ // for debugging
+ //Search.lastresults = results.slice(); // a copy
+ // console.info("search results:", Search.lastresults);
+
+ // print the results
+ _displayNextItem(results, results.length, searchTerms, highlightTerms);
+ },
+
+ /**
+ * search for object names
+ */
+ performObjectSearch: (object, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const objects = Search._index.objects;
+ const objNames = Search._index.objnames;
+ const titles = Search._index.titles;
+
+ const results = [];
+
+ const objectSearchCallback = (prefix, match) => {
+ const name = match[4]
+ const fullname = (prefix ? prefix + "." : "") + name;
+ const fullnameLower = fullname.toLowerCase();
+ if (fullnameLower.indexOf(object) < 0) return;
+
+ let score = 0;
+ const parts = fullnameLower.split(".");
+
+ // check for different match types: exact matches of full name or
+ // "last name" (i.e. last dotted part)
+ if (fullnameLower === object || parts.slice(-1)[0] === object)
+ score += Scorer.objNameMatch;
+ else if (parts.slice(-1)[0].indexOf(object) > -1)
+ score += Scorer.objPartialMatch; // matches in last name
+
+ const objName = objNames[match[1]][2];
+ const title = titles[match[0]];
+
+ // If more than one term searched for, we require other words to be
+ // found in the name/title/description
+ const otherTerms = new Set(objectTerms);
+ otherTerms.delete(object);
+ if (otherTerms.size > 0) {
+ const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
+ if (
+ [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
+ )
+ return;
+ }
+
+ let anchor = match[3];
+ if (anchor === "") anchor = fullname;
+ else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
+
+ const descr = objName + _(", in ") + title;
+
+ // add custom score for some objects according to scorer
+ if (Scorer.objPrio.hasOwnProperty(match[2]))
+ score += Scorer.objPrio[match[2]];
+ else score += Scorer.objPrioDefault;
+
+ results.push([
+ docNames[match[0]],
+ fullname,
+ "#" + anchor,
+ descr,
+ score,
+ filenames[match[0]],
+ SearchResultKind.object,
+ ]);
+ };
+ Object.keys(objects).forEach((prefix) =>
+ objects[prefix].forEach((array) =>
+ objectSearchCallback(prefix, array)
+ )
+ );
+ return results;
+ },
+
+ /**
+ * search for full-text terms in the index
+ */
+ performTermsSearch: (searchTerms, excludedTerms) => {
+ // prepare search
+ const terms = Search._index.terms;
+ const titleTerms = Search._index.titleterms;
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+
+ const scoreMap = new Map();
+ const fileMap = new Map();
+
+ // perform the search on the required terms
+ searchTerms.forEach((word) => {
+ const files = [];
+ const arr = [
+ { files: terms[word], score: Scorer.term },
+ { files: titleTerms[word], score: Scorer.title },
+ ];
+ // add support for partial matches
+ if (word.length > 2) {
+ const escapedWord = _escapeRegExp(word);
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
+ }
+
+ // no match but word was a required one
+ if (arr.every((record) => record.files === undefined)) return;
+
+ // found search word in contents
+ arr.forEach((record) => {
+ if (record.files === undefined) return;
+
+ let recordFiles = record.files;
+ if (recordFiles.length === undefined) recordFiles = [recordFiles];
+ files.push(...recordFiles);
+
+ // set score for the word in each file
+ recordFiles.forEach((file) => {
+ if (!scoreMap.has(file)) scoreMap.set(file, {});
+ scoreMap.get(file)[word] = record.score;
+ });
+ });
+
+ // create the mapping
+ files.forEach((file) => {
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
+ });
+ });
+
+ // now check if the files don't contain excluded terms
+ const results = [];
+ for (const [file, wordList] of fileMap) {
+ // check if all requirements are matched
+
+ // as search terms with length < 3 are discarded
+ const filteredTermCount = [...searchTerms].filter(
+ (term) => term.length > 2
+ ).length;
+ if (
+ wordList.length !== searchTerms.size &&
+ wordList.length !== filteredTermCount
+ )
+ continue;
+
+ // ensure that none of the excluded terms is in the search result
+ if (
+ [...excludedTerms].some(
+ (term) =>
+ terms[term] === file ||
+ titleTerms[term] === file ||
+ (terms[term] || []).includes(file) ||
+ (titleTerms[term] || []).includes(file)
+ )
+ )
+ break;
+
+ // select one (max) score for the file.
+ const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
+ // add result to the result list
+ results.push([
+ docNames[file],
+ titles[file],
+ "",
+ null,
+ score,
+ filenames[file],
+ SearchResultKind.text,
+ ]);
+ }
+ return results;
+ },
+
+ /**
+ * helper function to return a node containing the
+ * search summary for a given text. keywords is a list
+ * of stemmed words.
+ */
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
+ if (text === "") return null;
+
+ const textLower = text.toLowerCase();
+ const actualStartPosition = [...keywords]
+ .map((k) => textLower.indexOf(k.toLowerCase()))
+ .filter((i) => i > -1)
+ .slice(-1)[0];
+ const startWithContext = Math.max(actualStartPosition - 120, 0);
+
+ const top = startWithContext === 0 ? "" : "...";
+ const tail = startWithContext + 240 < text.length ? "..." : "";
+
+ let summary = document.createElement("p");
+ summary.classList.add("context");
+ summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
+
+ return summary;
+ },
+};
+
+_ready(Search.init);
diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js
new file mode 100644
index 0000000..8a96c69
--- /dev/null
+++ b/_static/sphinx_highlight.js
@@ -0,0 +1,154 @@
+/* Highlighting utilities for Sphinx HTML documentation. */
+"use strict";
+
+const SPHINX_HIGHLIGHT_ENABLED = true
+
+/**
+ * highlight a given string on a node by wrapping it in
+ * span elements with the given class name.
+ */
+const _highlight = (node, addItems, text, className) => {
+ if (node.nodeType === Node.TEXT_NODE) {
+ const val = node.nodeValue;
+ const parent = node.parentNode;
+ const pos = val.toLowerCase().indexOf(text);
+ if (
+ pos >= 0 &&
+ !parent.classList.contains(className) &&
+ !parent.classList.contains("nohighlight")
+ ) {
+ let span;
+
+ const closestNode = parent.closest("body, svg, foreignObject");
+ const isInSVG = closestNode && closestNode.matches("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.classList.add(className);
+ }
+
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ const rest = document.createTextNode(val.substr(pos + text.length));
+ parent.insertBefore(
+ span,
+ parent.insertBefore(
+ rest,
+ node.nextSibling
+ )
+ );
+ node.nodeValue = val.substr(0, pos);
+ /* There may be more occurrences of search term in this node. So call this
+ * function recursively on the remaining fragment.
+ */
+ _highlight(rest, addItems, text, className);
+
+ if (isInSVG) {
+ const rect = document.createElementNS(
+ "http://www.w3.org/2000/svg",
+ "rect"
+ );
+ const bbox = parent.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute("class", className);
+ addItems.push({ parent: parent, target: rect });
+ }
+ }
+ } else if (node.matches && !node.matches("button, select, textarea")) {
+ node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
+ }
+};
+const _highlightText = (thisNode, text, className) => {
+ let addItems = [];
+ _highlight(thisNode, addItems, text, className);
+ addItems.forEach((obj) =>
+ obj.parent.insertAdjacentElement("beforebegin", obj.target)
+ );
+};
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+const SphinxHighlight = {
+
+ /**
+ * highlight the search words provided in localstorage in the text
+ */
+ highlightSearchWords: () => {
+ if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
+
+ // get and clear terms from localstorage
+ const url = new URL(window.location);
+ const highlight =
+ localStorage.getItem("sphinx_highlight_terms")
+ || url.searchParams.get("highlight")
+ || "";
+ localStorage.removeItem("sphinx_highlight_terms")
+ url.searchParams.delete("highlight");
+ window.history.replaceState({}, "", url);
+
+ // get individual terms from highlight string
+ const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
+ if (terms.length === 0) return; // nothing to do
+
+ // There should never be more than one element matching "div.body"
+ const divBody = document.querySelectorAll("div.body");
+ const body = divBody.length ? divBody[0] : document.querySelector("body");
+ window.setTimeout(() => {
+ terms.forEach((term) => _highlightText(body, term, "highlighted"));
+ }, 10);
+
+ const searchBox = document.getElementById("searchbox");
+ if (searchBox === null) return;
+ searchBox.appendChild(
+ document
+ .createRange()
+ .createContextualFragment(
+          '<p class="highlight-link">' +
+            '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
+            _("Hide Search Matches") +
+            "</a></p>"
+ )
+ );
+ },
+
+ /**
+ * helper function to hide the search marks again
+ */
+ hideSearchWords: () => {
+ document
+ .querySelectorAll("#searchbox .highlight-link")
+ .forEach((el) => el.remove());
+ document
+ .querySelectorAll("span.highlighted")
+ .forEach((el) => el.classList.remove("highlighted"));
+ localStorage.removeItem("sphinx_highlight_terms")
+ },
+
+ initEscapeListener: () => {
+ // only install a listener if it is really needed
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
+
+ document.addEventListener("keydown", (event) => {
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
+ if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
+ SphinxHighlight.hideSearchWords();
+ event.preventDefault();
+ }
+ });
+ },
+};
+
+_ready(() => {
+ /* Do not call highlightSearchWords() when we are on the search page.
+ * It will highlight words from the *previous* search query.
+ */
+ if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+ SphinxHighlight.initEscapeListener();
+});
diff --git a/environments.html b/environments.html
new file mode 100644
index 0000000..e9f4ccb
--- /dev/null
+++ b/environments.html
@@ -0,0 +1,115 @@
+
+
+
+
+
+
+
+
+
Environments — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+Environments
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/examples.html b/examples.html
new file mode 100644
index 0000000..38a93a2
--- /dev/null
+++ b/examples.html
@@ -0,0 +1,382 @@
+
+
+
+
+
+
+
+
+
Examples — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+Examples
+TartanAir V2 is a flexible dataset when used with this Python package. Using it, you can download, iterate over, and modify the raw data. Here are some examples of what you can do with it.
+
+Download Example
+
+Download via Python API
+import tartanair as ta
+
+# Initialize TartanAir.
+tartanair_data_root = '/my/path/to/root/folder/for/tartanair-v2'
+ta . init ( tartanair_data_root )
+
+# Download a trajectory.
+ta . download ( env = "ArchVizTinyHouseDay" ,
+ difficulty = [ 'easy' ], # this can be 'easy', and/or 'hard'
+ modality = [ 'image' , 'depth' , 'seg' , 'imu' ], # available modalities are: image', 'depth', 'seg', 'imu', 'lidar', 'flow', 'pose'
+ camera_name = [ 'lcam_front' , 'lcam_left' , 'lcam_right' , 'lcam_back' , 'lcam_top' , 'lcam_bottom' ],
+ unzip = True ) # unzip files autonomously after download
+
+
+
+
+Download via a yaml config file
+ta . download ( config = 'download_config.yaml' )
+
+
+The config file is of the following format:
+env : [ 'ArchVizTinyHouseDay' ]
+difficulty : [ 'easy' ]
+modality : [ 'image' , 'depth' ]
+camera_name : [ 'lcam_front' , 'lcam_left' , 'lcam_right' , 'lcam_back' , 'lcam_top' , 'lcam_bottom' ]
+unzip : True
+
+
+
+
+
+Customization Example
+TartanAir V2 allows you to synthesize your own dataset by modifying the raw data. For example, by specifying a new camera model and generating images using it.
+import tartanair as ta
+
+# For help with rotations.
+from scipy.spatial.transform import Rotation
+
+# Initialize TartanAir.
+tartanair_data_root = '/my/path/to/root/folder/for/tartanair-v2'
+ta . init ( tartanair_data_root )
+
+# Create your camera model(s).
+R_raw_new0 = Rotation . from_euler ( 'y' , 90 , degrees = True ) . as_matrix () . tolist ()
+
+cam_model_0 = { 'name' : 'pinhole' ,
+ 'raw_side' : 'left' , # TartanAir has two cameras, one on the left and one on the right. This parameter specifies which camera to use.
+ 'params' :
+ { 'fx' : 320 ,
+ 'fy' : 320 ,
+ 'cx' : 320 ,
+ 'cy' : 320 ,
+ 'width' : 640 ,
+ 'height' : 640 },
+ 'R_raw_new' : R_raw_new0 }
+
+R_raw_new1 = Rotation . from_euler ( 'xyz' , [ 45 , 0 , 0 ], degrees = True ) . as_matrix () . tolist ()
+
+cam_model_1 = { 'name' : 'doublesphere' ,
+ 'raw_side' : 'left' ,
+ 'params' :
+ { 'fx' : 300 ,
+ 'fy' : 300 ,
+ 'cx' : 500 ,
+ 'cy' : 500 ,
+ 'width' : 1000 ,
+ 'height' : 1000 ,
+ 'alpha' : 0.6 ,
+ 'xi' : - 0.2 ,
+ 'fov_degree' : 195 },
+ 'R_raw_new' : R_raw_new1 }
+
+# Customize the dataset.
+ta . customize ( env = 'ArchVizTinyHouseDay' ,
+ difficulty = 'easy' ,
+ trajectory_id = [ 'P000' ],
+ modality = [ 'image' , 'depth' ],
+ new_camera_models_params = [ cam_model_1 , cam_model_0 ],
+ num_workers = 4 ,
+ device = "cuda" ) # or cpu
+
+
+
+
+Dataloader Example
+TartanAir-V2 includes a powerful parallelized dataloader. It can be used to load data from the dataset and serve mini-batches in parallel, and also to apply (some) transformations to the data on the fly. We highly recommend that you use it for efficient data loading.
+import tartanair as ta
+import numpy as np
+import cv2
+
+# Initialize TartanAir.
+tartanair_data_root = '/my/path/to/root/folder/for/tartanair-v2'
+ta . init ( tartanair_data_root )
+
+# Specify the environments, difficulties, and trajectory ids to load.
+envs = [ 'ArchVizTinyHouseDay' ]
+difficulties = [ 'easy' ]
+trajectory_ids = [ 'P000' , 'P001' ]
+
+# Specify the modalities to load.
+modalities = [ 'image' , 'pose' , 'imu' ]
+camnames = [ 'lcam_front' , 'lcam_left' , 'lcam_right' , 'lcam_back' , 'lcam_top' , 'lcam_bottom' ]
+
+# Specify the dataloader parameters.
+new_image_shape_hw = [ 640 , 640 ] # If None, no resizing is performed. If a value is passed, then the image is resized to this shape.
+subset_framenum = 200 # This is the number of frames in a subset. Notice that this is an upper bound on the batch size. Ideally, make this number large to utilize your RAM efficiently. Information about the allocated memory will be provided in the console.
+seq_length = { 'image' : 2 , 'pose' : 2 , 'imu' : 10 } # This is the length of the data-sequences. For example, if the sequence length is 2, then the dataloader will load pairs of images.
+seq_stride = 1 # This is the stride between the data-sequences. For example, if the sequence length is 2 and the stride is 1, then the dataloader will load pairs of images [0,1], [1,2], [2,3], etc. If the stride is 2, then the dataloader will load pairs of images [0,1], [2,3], [4,5], etc.
+frame_skip = 0 # This is the number of frames to skip between each frame. For example, if the frame skip is 2 and the sequence length is 3, then the dataloader will load frames [0, 3, 6], [1, 4, 7], [2, 5, 8], etc.
+batch_size = 8 # This is the number of data-sequences in a mini-batch.
+num_workers = 4 # This is the number of workers to use for loading the data.
+shuffle = True # Whether to shuffle the data-sequences. Set this to False if you want to view the loaded frames in order, e.g., as a smooth video.
+
+# Create a dataloader object.
+dataloader = ta . dataloader ( env = envs ,
+ difficulty = difficulties ,
+ trajectory_id = trajectory_ids ,
+ modality = modalities ,
+ camera_name = camnames ,
+ new_image_shape_hw = new_image_shape_hw ,
+ seq_length = seq_length ,
+ subset_framenum = subset_framenum ,
+ seq_stride = seq_stride ,
+ frame_skip = frame_skip ,
+ batch_size = batch_size ,
+ num_workers = num_workers ,
+ shuffle = shuffle ,
+ verbose = True )
+
+# Iterate over the batches.
+for i in range ( 100 ):
+ # Get the next batch.
+ batch = dataloader . load_sample ()
+ # Visualize some images.
+ # The shape of an image batch is (B, S, H, W, C), where B is the batch size, S is the sequence length, H is the height, W is the width, and C is the number of channels.
+
+ print ( "Batch number: {} " . format ( i + 1 ), "Loaded {} samples so far." . format (( i + 1 ) * batch_size ))
+
+ for b in range ( batch_size ):
+
+ # Create image cross.
+ left = batch [ 'image_lcam_left' ][ b ][ 0 ] . numpy () . transpose ( 1 , 2 , 0 )
+ front = batch [ 'image_lcam_front' ][ b ][ 0 ] . numpy () . transpose ( 1 , 2 , 0 )
+ right = batch [ 'image_lcam_right' ][ b ][ 0 ] . numpy () . transpose ( 1 , 2 , 0 )
+ back = batch [ 'image_lcam_back' ][ b ][ 0 ] . numpy () . transpose ( 1 , 2 , 0 )
+ top = batch [ 'image_lcam_top' ][ b ][ 0 ] . numpy () . transpose ( 1 , 2 , 0 )
+ bottom = batch [ 'image_lcam_bottom' ][ b ][ 0 ] . numpy () . transpose ( 1 , 2 , 0 )
+ cross_mid = np . concatenate ([ left , front , right , back ], axis = 1 )
+ cross_top = np . concatenate ([ np . zeros_like ( top ), top , np . zeros_like ( top ), np . zeros_like ( top )], axis = 1 )
+ cross_bottom = np . concatenate ([ np . zeros_like ( bottom ), bottom , np . zeros_like ( bottom ), np . zeros_like ( bottom )], axis = 1 )
+ cross = np . concatenate ([ cross_top , cross_mid , cross_bottom ], axis = 0 )
+
+ pose = batch [ 'pose_lcam_front' ] . numpy ()
+ imu = batch [ 'imu' ] . numpy ()
+
+ # Resize.
+ cross = cv2 . resize ( cross , ( cross . shape [ 1 ] // 4 , cross . shape [ 0 ] // 4 ))
+
+ # Show the image cross.
+ cv2 . imshow ( 'cross' , cross )
+ cv2 . waitKey ( 100 )
+
+ print ( " Pose: " , pose [ 0 ][ 0 ])
+ print ( " IMU: " , imu [ 0 ][ 0 ])
+
+dataloader . stop_cachers ()
+
+
+
+
+Data Iteration Example
+Create a data iterator to get samples from the TartanAir V2 dataset. The samples include data in the specified modalities.
+import tartanair as ta
+
+# Initialize TartanAir.
+tartanair_data_root = '/my/path/to/root/folder/for/tartanair-v2'
+ta . init ( tartanair_data_root )
+
+# Create iterator.
+ta_iterator = ta . iterator ( env = [ 'ArchVizTinyHouseDay' ],
+ difficulty = 'easy' ,
+ trajectory_id = [],
+ modality = 'image' ,
+ camera_name = [ 'lcam_left' ])
+
+for i in range ( 100 ):
+ sample = next ( ta_iterator )
+
+
+
+
+Evaluation Example
+TartanAir also provides tools for evaluating estimated trajectories against the ground truth. The evaluation is based on the ATE and RPE metrics, which can be computed for the entire trajectory, for a subset of the trajectory, and also for a scaled and shifted version of the estimated trajectory that matches the ground truth better, if requested.
+import tartanair as ta
+import numpy as np
+
+# Initialize TartanAir.
+tartanair_data_root = '/my/path/to/root/folder/for/tartanair-v2'
+ta . init ( tartanair_data_root )
+
+# Create an example trajectory. This is a noisy version of the ground truth trajectory.
+env = 'ArchVizTinyHouseDay'
+difficulty = 'easy'
+trajectory_id = 'P002'
+camera_name = 'lcam_front'
+gt_traj = ta . get_traj_np ( env , difficulty , trajectory_id , camera_name )
+est_traj = gt_traj + np . random . normal ( 0 , 0.1 , gt_traj . shape )
+
+# Define an output path for the evaluation plot.
+plot_out_path = "evaluator_example_plot.png"
+
+# Pass the ground truth trajectory directly to the evaluation function.
+results = ta . evaluate_traj ( est_traj ,
+ gt_traj = gt_traj ,
+ enforce_length = True ,
+ plot = True ,
+ plot_out_path = plot_out_path ,
+ do_scale = True ,
+ do_align = True )
+
+# Or pass the environment, difficulty, and trajectory id to the evaluation function.
+results = ta . evaluate_traj ( est_traj ,
+ env = env ,
+ difficulty = difficulty ,
+ trajectory_id = trajectory_id ,
+ camera_name = camera_name ,
+ enforce_length = True ,
+ plot = True ,
+ plot_out_path = plot_out_path ,
+ do_scale = True ,
+ do_align = True )
+
+
+
+
+Flow Sampling Example
+TartanAir V2 also includes a powerful dense correspondence sampling tool that can be used to calculate dense correspondences between any viewpoints in the same environment.
+The tool supports sampling dense correspondences between any combination of pinhole, fisheye (doublesphere), and equirectangular cameras. Yes, you can sample dense correspondences between different camera models, such as pinhole and equirectangular.
+Given a pair of RGB and depth image cubes and two viewing directions, the tool computes dense correspondences represented as optical flow, together with an occlusion mask signaling whether each pixel is directly observable.
+Please refer to flow_sampling_example.py for a complete example.
+
+
+To learn more about how the resampling happens, see Flow Sampling .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/flow_sampling.html b/flow_sampling.html
new file mode 100644
index 0000000..19ce5dd
--- /dev/null
+++ b/flow_sampling.html
@@ -0,0 +1,264 @@
+
+
+
+
+
+
+
+
+
Dense Correspondence / Flow Sampling — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+Dense Correspondence / Flow Sampling
+Given a pair of images, computing the dense correspondence means finding, for each pixel location in the first image, the corresponding pixel location in the second image, together with a dense confidence, i.e., the probability that the pixel pair is occluded.
+This is a fundamental problem in computer vision and has many applications, such as optical flow, stereo matching, and image registration.
+
+Introduction
+Based on the need for accurate, pixel-level correspondence information for the Match-Anything project, we choose to leverage existing datasets that contain accurate camera intrinsics/extrinsics and depth maps.
+However, the real problem comes from how we store depth: as depth maps. We therefore only have depth information at discrete pixel locations, which does not capture the full geometry. For example, when reprojecting image points from the first image onto the second image, they will almost always land on a non-integer coordinate for which no depth value is stored. In other words, we only have points at integer pixel coordinates of both images, and it is unlikely that two such points will coincide in 3D space.
+
+
+
+Previous Method: Projection error thresholding
+For each point \(i_1 \in \mathcal{I}_1\) :
+
+Project into 3D space with
+\(x^{\{C_1\}}_{i_1} = \hat{\pi}_1^{-1}(i_1) \cdot \tilde{D}[i_1]\)
+Transform to camera 2’s coordinate with
+\(x^{\{C_2\}}_{i_1} = \mathcal{T}_{\{C_1\}}^{\{C_2\}} \cdot x^{\{C_1\}}_{i_1}\)
+Compute the projected image coordinate \(i_2\) and expected depth \(d\)
+
+\[i_2 = \pi_2(x^{\{C_2\}}_{i_1}), \quad d = \|x^{\{C_2\}}_{i_1}\|\]
+
+Get the depth value at \(i_2\) from \(\tilde{D}_2\) , compute and threshold the error
+
+
+\[e = |d - \tilde{D}_2[i_2]| , \quad \rho^{1 \to 2}[i_1] = P(e < \epsilon), W^{1 \to 2}(i_1) = i_2\]
+
+The problem is that we only have a discrete sample of \(D_2\): \(\tilde{D}_2\). We cannot get the depth at the fractional pixel location \(i_2\) from the discrete \(\tilde{D}_2\).
+
+Therefore, we need to interpolate the depth map to obtain an approximate expected depth. In this step, the expected depth may not be accurate and may lead to aliasing artifacts under large FOV changes.
+
+\[\tilde{D}_2[i_2] \approx \text{interp}(\tilde{D}_2, i_2), \quad \tilde{D}_2[i_2] \approx \tilde{D}_2[\text{nearest}(i_2)]\]
+
+
+
+
+
+The rightmost image shows the occlusion map produced with the above method. The aliasing artifacts appear as the black dots at the far side of the corridor.
+
+
+Nearest Approximation invalidated by large FOV change
+With a large FOV change, walls that appear perpendicular in one view may become highly inclined in the other.
+
+
+As shown in the figure, points projected from C1’s camera model and depth map land on only a few pixels in C2’s image. The nearest approximation then leads to a large error in the depth value, as shown by the difference between the red and black lines.
+
+
+Our Method
+We propose a fix to the above formulation by interpolating wisely:
+
+Use linear interpolation to get the depth value at fractional pixel location \(i_2\) from \(\tilde{D}_2\) :
+
+
+\[\tilde{D}_2[i_2] = \text{bilinear}(\tilde{D}_2, i_2)\]
+
+In most cases, the depth value can be treated as continuous. The usual reason for preferring nearest interpolation on depth images is that depth can change rapidly, and linear interpolation can create non-existing depth values at object edges. However, here we only use depth for verification, which means its effect does not propagate beyond the occlusion calculation, and it is highly unlikely that a non-existing depth value will match the reprojection, since we use a very small threshold, minimizing the risk of doing so.
+
+
+We allow a small error in the pixel space.
+
+
+\[\rho^{1 \to 2}[i_1] = P\left( \min_{i \in B_{r_0}(i_2)}|d - \text{bilinear}(\tilde{D}_2, i)| < \epsilon\right)\]
+
+In other words, we threshold the lower bound of the reprojection error in a small neighborhood of the projected pixel location. This helps to compensate for homography effects in non-pinhole cameras and further reduces aliasing artifacts.
+
+
+
+In conclusion, the full method is:
+
+
+With typical parameters:
+
+\(R_0 = 0.1\) : Maximum search radius in pixels
+\(n = 1\) : Maximum number of iterations
+\(\alpha = 0.2\) : Step size for gradient descent
+\(t_a = 0.04\) : absolute depth error threshold
+\(t_r = 0.005\) : relative depth error threshold
+\(\tau = 0.02\) : temperature for error confidence
+
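+To make the parameters above concrete, here is a minimal NumPy sketch of the per-pixel occlusion check: it bilinearly interpolates the target depth map at the fractional reprojected location and converts the depth error into a soft confidence with temperature \(\tau\). It omits the local search over \(B_{r_0}(i_2)\), and the function name, the way \(t_a\) and \(t_r\) are combined, and the soft-threshold form are illustrative assumptions rather than the released implementation.
+import numpy as np
+
+def occlusion_confidence(d_expected, i2, depth2, t_a=0.04, t_r=0.005, tau=0.02):
+    """Confidence that the point reprojected to fractional pixel i2 = (u, v) is visible in view 2."""
+    u, v = i2
+    u0, v0 = int(np.floor(u)), int(np.floor(v))
+    du, dv = u - u0, v - v0
+    # Bilinear interpolation of the discrete depth map at the fractional location.
+    patch = depth2[v0:v0 + 2, u0:u0 + 2]
+    d_interp = (patch[0, 0] * (1 - du) * (1 - dv) + patch[0, 1] * du * (1 - dv)
+                + patch[1, 0] * (1 - du) * dv + patch[1, 1] * du * dv)
+    # Combine the absolute and relative thresholds into one allowed error (one possible choice).
+    e = abs(d_expected - d_interp)
+    threshold = t_a + t_r * d_expected
+    # Soft threshold with temperature tau: close to 1 when the error is well below the threshold.
+    return 1.0 / (1.0 + np.exp((e - threshold) / tau))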
+
+
+Ablations
+How necessary is the above method? We ablate:
+
+Nearest interpolation: We use nearest interpolation instead of bilinear interpolation.
+
+
+
+Left - with bilinear interpolation. Mid - with nearest interpolation. Right - difference between the occlusion masks.
+In this extreme example we see that bilinear interpolation avoids major aliasing artifacts.
+
+No optimization: We do not optimize for the lower bound of the reprojection error, and instead threshold the error directly.
+
+
+
+Left - with optimization. Mid - without optimization. Right - difference between the occlusion masks.
+Some occlusions are missed in highly distorted regions due to the lack of optimization.
+
+
+Some Hard Cases
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/genindex.html b/genindex.html
new file mode 100644
index 0000000..53e3382
--- /dev/null
+++ b/genindex.html
@@ -0,0 +1,110 @@
+
+
+
+
+
+
+
+
Index — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..becba3c
--- /dev/null
+++ b/index.html
@@ -0,0 +1,171 @@
+
+
+
+
+
+
+
+
+
TartanAir Dataset Documentation — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+TartanAir Dataset Documentation
+Install:
+ pip install tartanair
+Welcome to TartanAir V2!
+Let’s go on an adventure to beautiful mountains, to dark caves, to stylish homes, to the Moon 🚀, and to other exciting places. And there is more! You, your models, and your robots can experience these worlds via a variety of sensors: LiDAR, IMU, optical cameras with any lens configuration you want (we provide customizable fisheye, pinhole, and equirectangular camera models), depth cameras, segmentation “cameras”, and event cameras.
+
+All the environments have recorded trajectories that were designed to be challenging and realistic. Can we improve the state of the art in SLAM, navigation, and robotics?
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/installation.html b/installation.html
new file mode 100644
index 0000000..f0696d4
--- /dev/null
+++ b/installation.html
@@ -0,0 +1,203 @@
+
+
+
+
+
+
+
+
+
Installation — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+Installation
+To install the TartanAir Python package:
+ pip install tartanair
+TartanAir depends on PyTorch for some customization functionalities. Please install it, and Torchvision, on your system (we don’t do it for you to avoid version conflicts). Please follow the instructions on the PyTorch website: https://pytorch.org/get-started/locally/ .
+That’s it! You’re ready to go.
+
+Requirements
+Currently, the TartanAir Python package has only been tested on Ubuntu 20.04. We will be testing on other operating systems in the future.
+
+
+Known Installation Issues and Solutions
+
+Ubuntu
+
+Downloading does not work. It could be that you are missing wget . Get it using the following command:
+
+ sudo apt-get install wget
+
+
+
+
+MacOS
+
+PyYAML fails to install with pip on MacOS. Please install it manually using the following command:
+
+ python3 -m pip install pyyaml
+
+
+
+
+opencv-contrib-python fails to install with pip on MacOS. Please install it manually using the following command:
+
+ python3 -m pip install opencv-contrib-python
+
+
+
This might take a while.
+
+
+pytransform3d fails to install with pip on MacOS. Please install it manually using the following command:
+
+ python3 -m pip install pytransform3d
+
+
+
+
+Pillow can cause trouble with older versions of torchvision . If you are facing issues with Pillow , like ImportError: cannot import name ‘PILLOW_VERSION’ from ‘PIL’ , please reinstall Pillow manually, then remove the torchvision package and install it again, using the following commands:
+
+ python3 -m pip install Pillow
+python3 -m pip uninstall torchvision
+python3 -m pip install torchvision
+
+
+
+
+wget is not installed by default on MacOS. Please install it manually using the following command:
+
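+A standard way to get it is via Homebrew:
+
+ brew install wget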
+
+
+urllib may not find your certificates on MacOS, and you will see an error like the following:
+
+ urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1123)>
+
+
+
+Please run the following in a terminal to fix this (adapt the path to your Python version):
+
+ /Applications/Python\ 3.8/Install\ Certificates.command
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/modalities.html b/modalities.html
new file mode 100644
index 0000000..3a2a602
--- /dev/null
+++ b/modalities.html
@@ -0,0 +1,175 @@
+
+
+
+
+
+
+
+
+
+Modalities — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+Modalities
+We placed 12 cameras in the environment to collect raw data, including RGB images, depth images, semantic segmentation images, and camera poses. All 12 cameras produce pinhole images with a 90° field of view. They are arranged as 2 stereo sets of 6 cameras pointing in 6 directions to cover a full 360° view. The raw data are processed to generate other modalities, including optical flow, fisheye images, LiDAR, and IMU.
+
+Raw RGB image
+For each time step, we provide 12 RGB images from the 12 cameras, covering a stereo pair of 360° views. Each image is sampled with a 90° FoV at a resolution of 640 by 640. The stereo baseline is 0.25 m.
+In addition, we provide tools for adding random noise and motion blur to the images to improve realism.
+
+
+Raw depth image
+The depth images are sampled with the same camera intrinsics and extrinsics as the RGB images, so they are perfectly aligned and synchronized with the RGB images.
+Each pixel of a depth image is stored as a float32 value. Depending on the environment setup, far pixels pointing at the sky usually return a very large value.
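+As a small illustration of working with these values, the sketch below loads one depth image and masks out sky pixels; the file name and the cutoff value are illustrative assumptions, not part of the dataset specification.
+
+ import numpy as np
+
+ depth = np.load("000000_lcam_front_depth.npy")  # hypothetical file name; float32 array of shape (H, W)
+ sky = depth > 1000.0                            # far pixels (e.g., the sky) return very large values
+ valid = np.where(sky, np.nan, depth)            # ignore sky when computing statistics or point clouds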
+
+
+Raw semantic segmentation
+We provide category-level semantic labels. AirSim assigns arbitrary semantic labels to the model types in an environment; we overcome this by manually labeling the model types for all environments, so each label in the semantic segmentation images maps to a semantic class. With 65 highly distinct environments, our data covers a wide range of semantic classes. However, because the data is recorded as consecutive frames, large objects such as buildings, the ground, and the sky take up a much higher percentage of pixels. We provide statistics files for each environment, with which the data can easily be balanced when training semantic models.
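+As an example of how such statistics can be used, the sketch below turns per-class pixel counts (a hypothetical hand-written mapping here, standing in for a parsed statistics file) into inverse-frequency class weights for training.
+
+ import numpy as np
+
+ # Hypothetical per-class pixel counts for one environment.
+ class_counts = {"building": 9.2e8, "ground": 7.5e8, "sky": 6.1e8, "chair": 1.3e6}
+
+ counts = np.array(list(class_counts.values()), dtype=np.float64)
+ weights = counts.sum() / (len(counts) * counts)  # rare classes get larger weights
+ weights /= weights.max()                         # scale so the largest weight is 1.0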
+
+
+Raw camera pose
+Camera poses are in the same format as V1. Each line of the pose file consists of 3 translation values and 4 orientation values in quaternion format, described in the NED frame.
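+A minimal sketch of parsing such a file with NumPy and SciPy is shown below. The file name is hypothetical, and the assumed column order (tx ty tz qx qy qz qw, scalar-last) should be verified against the official tools before relying on it.
+
+ import numpy as np
+ from scipy.spatial.transform import Rotation
+
+ poses = np.loadtxt("pose_lcam_front.txt")     # shape (N, 7): one pose per line
+ translations = poses[:, :3]                   # tx, ty, tz in the NED frame
+ rotations = Rotation.from_quat(poses[:, 3:])  # qx, qy, qz, qw (scalar-last, as SciPy expects)
+ print(translations.shape, rotations.as_matrix().shape)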
+
+
+LiDAR
+The LiDAR data is sampled from the raw depth images of the 6 left-side cameras, following the pattern of the Velodyne Puck (VLP-16). As a result, the LiDAR frame is perfectly aligned with the left camera frame. We do not use the AirSim LiDAR sensor because it relies on collision models and therefore misses many objects that lack one, such as branches and leaves. While sampling from the depth, we carefully balance accuracy and realism: we use linear interpolation, except for points along object edges, to avoid ghost points there. We provide the processing script as well as other LiDAR models, such as the Velodyne Ultra Puck (VLP-32C), allowing users to create their own LiDAR data.
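+To give a feel for the resampling involved, the sketch below back-projects a single 90° pinhole depth image into a point cloud in the camera frame (the released processing script is more involved; the intrinsics here simply follow from a 640 by 640 image with a 90° FoV).
+
+ import numpy as np
+
+ def depth_to_points(depth):
+     # For a 90° FoV and width W, fx = fy = W / (2 * tan(45°)) = W / 2.
+     h, w = depth.shape
+     fx = fy = w / 2.0
+     cx, cy = (w - 1) / 2.0, (h - 1) / 2.0
+     u, v = np.meshgrid(np.arange(w), np.arange(h))
+     x = (u - cx) / fx * depth
+     y = (v - cy) / fy * depth
+     return np.stack([x, y, depth], axis=-1).reshape(-1, 3)  # (N, 3) points in the camera frame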
+
+
+Fisheye and Panorama
+The fisheye and panorama data are sampled from the raw pinhole data, and thus contain all three modalities: RGB, depth, and semantics. One of the biggest challenges with fisheye models is that real-world fisheye cameras have diverse FoVs and distortions. We do two things to address this potential generalization issue. First, we define a standard model, called the Linear Spherical model, for fisheye images. To test real-world fisheye data with different intrinsics and distortion on a model trained on the TartanAir-V2 dataset, one only needs to convert the real-world data into the Linear Spherical model. Second, we open-source our sampling code, together with a rich set of fisheye and pinhole camera models, which allows users to sample their own fisheye images (detailed in the Customization section).
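+As a rough illustration of the idea behind a linear spherical style mapping (this sketch is an assumption about the general concept, not the exact model definition in the released code): the angle between a ray and the optical axis maps linearly to the pixel's radial distance from the image center.
+
+ import numpy as np
+
+ def linear_spherical_project(ray, image_size, fov_rad):
+     # ray: unit 3D direction in the camera frame, z pointing forward.
+     theta = np.arccos(np.clip(ray[2], -1.0, 1.0))     # angle from the optical axis
+     phi = np.arctan2(ray[1], ray[0])                  # azimuth around the axis
+     r = theta / (fov_rad / 2.0) * (image_size / 2.0)  # angle maps linearly to pixel radius
+     cx = cy = (image_size - 1) / 2.0
+     return cx + r * np.cos(phi), cy + r * np.sin(phi)  # (u, v) pixel coordinates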
+
+
+Optical flow
+As in V1, the optical flow is calculated for static environments by image warping, using the camera poses and depth images. The biggest upgrades are that we accelerate the code with a CUDA implementation and provide tools for generating optical flow across any pair of camera models (e.g., between pinhole and fisheye).
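+Conceptually, the warping computation for a pinhole pair looks like the sketch below: unproject each pixel with its depth, transform it by the relative camera pose, reproject it into the second view, and take the pixel displacement. Variable names are illustrative, and the released tools additionally handle fisheye and other camera models.
+
+ import numpy as np
+
+ def flow_from_depth_and_pose(depth1, K, R_12, t_12):
+     # depth1: (H, W) depth of view 1; K: 3x3 intrinsics;
+     # R_12, t_12: pose of view 1 expressed in view 2 (x2 = R_12 @ x1 + t_12).
+     h, w = depth1.shape
+     u, v = np.meshgrid(np.arange(w, dtype=np.float64), np.arange(h, dtype=np.float64))
+     pix = np.stack([u, v, np.ones_like(u)], axis=-1)   # homogeneous pixel coordinates
+     rays = pix @ np.linalg.inv(K).T                    # unproject to normalized rays
+     pts1 = rays * depth1[..., None]                    # 3D points in view 1
+     pts2 = pts1 @ R_12.T + t_12                        # transform into view 2
+     proj = pts2 @ K.T
+     u2 = proj[..., 0] / proj[..., 2]
+     v2 = proj[..., 1] / proj[..., 2]
+     return np.stack([u2 - u, v2 - v], axis=-1)         # optical flow, shape (H, W, 2)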
+
+
+IMU and noise model
+The IMU ground-truth data is generated by interpolating the camera poses; as a result, the IMU frame is perfectly aligned and synchronized with the left camera data. Specifically, we double-differentiate a spline fit to the translation to obtain acceleration, and differentiate a spline fit to the orientation to obtain the angular rate. We provide code for customizing the data generation (such as changing the frequency) as well as code for adding realistic noise.
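+A minimal sketch of the acceleration half of this procedure with SciPy splines is shown below (the angular-rate half is analogous but needs care with rotation interpolation); the toy trajectory and sampling rates are assumptions for illustration only.
+
+ import numpy as np
+ from scipy.interpolate import CubicSpline
+
+ t = np.linspace(0.0, 10.0, 101)                           # camera pose timestamps (toy example)
+ xyz = np.stack([np.sin(t), np.cos(t), 0.1 * t], axis=-1)  # (N, 3) translations in the NED frame
+
+ spline = CubicSpline(t, xyz, axis=0)
+ t_imu = np.arange(t[0], t[-1], 1.0 / 100.0)  # resample at a higher IMU rate, e.g. 100 Hz
+ accel = spline(t_imu, 2)                     # second derivative of position = linear acceleration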
+
+
+Event Camera
+Following the same trajectories as the other modalities, we re-collect the front-facing camera data at 1000 Hz. We use ESIM [Rebecq et al., CoRL 2018; Gehrig et al., CVPR 2020], as it is one of the fastest event camera simulators available and performs close to the state of the art. Although newer simulators such as V2E and DVS Voltmeter produce events closer to real cameras, models trained on ESIM output perform comparably to those trained on the state-of-the-art simulators [Lin et al., 2022]. We sample the 640 by 640 RGB images at 1000 Hz and then generate the events using the simulator. To improve generalization across various event cameras, we use a wide range of contrast thresholds between 0.2 and 1.0.
+Because the event data is collected separately from the other modalities, some frames may be inconsistent with the other modalities in dynamic scenes.
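+For intuition, the toy sketch below generates events between two consecutive frames with a single contrast threshold (ESIM itself operates on a much denser intensity signal and is considerably more sophisticated); the threshold value is just one point in the 0.2 to 1.0 range mentioned above.
+
+ import numpy as np
+
+ def events_between_frames(prev_gray, curr_gray, contrast_threshold=0.5, eps=1e-3):
+     # Event cameras respond to relative brightness changes, so work in log-intensity space.
+     delta = np.log(curr_gray + eps) - np.log(prev_gray + eps)
+     on_events = delta >= contrast_threshold
+     off_events = delta <= -contrast_threshold
+     return on_events, off_events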
+
+
+Occupancy Map
+Occupancy grid maps are built during data collection, as detailed in the data collection pipeline section. The map resolution ranges from 0.125 m to 0.5 m, depending on the size of the environment. The maps can be used to evaluate mapping algorithms.
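+As a small usage sketch (array and variable names are illustrative), a point can be checked against such a grid given its origin and resolution:
+
+ import numpy as np
+
+ def is_occupied(grid, origin, resolution, point):
+     # grid: boolean 3D array; origin: world coordinates of voxel (0, 0, 0); resolution in meters.
+     idx = np.floor((np.asarray(point) - np.asarray(origin)) / resolution).astype(int)
+     if np.any(idx < 0) or np.any(idx >= grid.shape):
+         return False  # outside the mapped volume
+     return bool(grid[tuple(idx)])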
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/objects.inv b/objects.inv
new file mode 100644
index 0000000..7f9c34e
--- /dev/null
+++ b/objects.inv
@@ -0,0 +1,6 @@
+# Sphinx inventory version 2
+# Project: TartanAir
+# Version:
+# The remainder of this file is compressed using zlib.
\ No newline at end of file
diff --git a/search.html b/search.html
new file mode 100644
index 0000000..a3104ea
--- /dev/null
+++ b/search.html
@@ -0,0 +1,125 @@
+
+
+
+
+
+
+
+
+Search — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+
+
+Please activate JavaScript to enable the search functionality.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/searchindex.js b/searchindex.js
new file mode 100644
index 0000000..37c7a07
--- /dev/null
+++ b/searchindex.js
@@ -0,0 +1 @@
+Search.setIndex({"alltitles": {"API Reference": [[7, null]], "Ablations": [[2, "ablations"]], "Customization": [[7, "customization"]], "Customization Example": [[1, "customization-example"]], "Data Iteration": [[7, "data-iteration"]], "Data Iteration Example": [[1, "data-iteration-example"]], "DataLoader": [[7, "dataloader"]], "Dataloader Example": [[1, "dataloader-example"]], "Dense Correspondence / Flow Sampling": [[2, null]], "Download": [[7, "download"]], "Download Example": [[1, "download-example"]], "Download via Python API": [[1, "download-via-python-api"]], "Download via a yaml config file": [[1, "download-via-a-yaml-config-file"]], "Environments": [[0, null]], "Evaluation": [[7, "evaluation"]], "Evaluation Example": [[1, "evaluation-example"]], "Event Camera": [[5, "event-camera"]], "Examples": [[1, null]], "Fisheye and Panorama": [[5, "fisheye-and-panorama"]], "Flow Sampling Example": [[1, "flow-sampling-example"]], "Getting Started:": [[3, null]], "IMU and noise model": [[5, "imu-and-noise-model"]], "Initialization": [[7, "initialization"]], "Installation": [[4, null]], "Introduction": [[2, "introduction"]], "Known Installation Issues and Solutions": [[4, "known-installation-issues-and-solutions"]], "LiDAR": [[5, "lidar"]], "License": [[3, "license"]], "List": [[7, "list"]], "MacOS": [[4, "macos"]], "Modalities": [[5, null]], "Nearest Approximation invalidated by large FOV change": [[2, "nearest-approximation-invalidated-by-large-fov-change"]], "Occupancy Map": [[5, "occupancy-map"]], "Optical flow": [[5, "optical-flow"]], "Our Method": [[2, "our-method"]], "Previous Method: Projection error thresholding": [[2, "previous-method-projection-error-thresholding"]], "Problem Formulation": [[2, "problem-formulation"]], "Raw RGB image": [[5, "raw-rgb-image"]], "Raw camera pose": [[5, "raw-camera-pose"]], "Raw depth image": [[5, "raw-depth-image"]], "Raw semantic segmentation": [[5, "raw-semantic-segmentation"]], "Requirements": [[4, "requirements"]], "Some Hard Cases": [[2, "some-hard-cases"]], "TartanAir Dataset Documentation": [[3, null]], "Trajectories in NumPy": [[7, "trajectories-in-numpy"]], "Troubleshooting": [[6, null]], "Ubuntu": [[4, "ubuntu"]], "Visualization": [[7, "visualization"]]}, "docnames": ["environments", "examples", "flow_sampling", "index", "installation", "modalities", "troubleshooting", "usage"], "envversion": {"sphinx": 64, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2}, "filenames": ["environments.rst", "examples.rst", "flow_sampling.rst", "index.rst", "installation.rst", "modalities.rst", "troubleshooting.rst", "usage.rst"], "indexentries": {}, "objects": {}, "objnames": {}, "objtypes": {}, "terms": {"": [1, 2, 3, 4], "0": [1, 2, 3, 5], "005": 2, "02": 2, "04": [2, 4], "1": [1, 2, 5], "10": 1, "100": 1, "1000": [1, 5], "1123": 4, "12": 5, "125": 5, "16": 5, "195": 1, "2": [1, 2, 5], "20": 4, "200": 1, "25": 5, "3": [1, 2, 3, 4, 5], "300": 1, "320": 1, "32c": 5, "360": 5, "3d": 2, "4": [1, 3, 5], "45": 1, "5": [1, 5], "500": 1, "6": [1, 5], "640": [1, 5], "65": 5, "7": 1, "8": [1, 4], "90": [1, 5], "ATE": 1, "And": 3, "As": [2, 5], "At": 2, "For": [1, 2, 5], "If": [1, 4], "In": [2, 5], "It": [1, 4, 5], "No": 2, "One": 5, "Or": [1, 2], "That": 4, "The": [1, 2, 5], "To": [1, 4, 5], "With": [2, 5], "_": 2, "_1": 2, "_2": 2, "_ssl": 4, 
"about": 1, "abov": 2, "absolut": 2, "acceler": 5, "accur": 2, "accuraci": 5, "across": 5, "ad": 5, "adapt": 4, "add": 5, "addit": 5, "adventur": 3, "after": 1, "again": 4, "against": 1, "airsim": 5, "algorithm": 5, "alias": 2, "align": 5, "all": [2, 3, 5], "alloc": 1, "allow": [1, 2, 5], "almost": 2, "along": 5, "alpha": [1, 2], "also": 1, "although": 2, "alwai": 2, "an": [1, 2, 3, 5], "angular": 5, "ani": [1, 3, 5], "anoth": 2, "anyth": 2, "api": 3, "appli": 1, "applic": [2, 4], "approx": 2, "apt": 4, "ar": [1, 2, 4, 5], "archviztinyhousedai": 1, "argu": 1, "art": 3, "artifact": 2, "as_matrix": 1, "attribut": 3, "autonom": 1, "avail": [1, 5], "avoid": [2, 4, 5], "axi": 1, "b": 1, "b_": 2, "back": 1, "balanc": 5, "base": [1, 2, 5], "baselin": 5, "batch": 1, "batch_siz": 1, "beauti": 3, "becaus": 5, "becom": 2, "better": 1, "between": [1, 2, 5], "beyond": 2, "biggest": 5, "bilinear": 2, "black": 2, "blend": 5, "block": 2, "blur": 5, "both": 2, "bottom": 1, "bound": [1, 2], "branch": 5, "brew": 4, "bsd": 3, "bug": 6, "build": 5, "built": 5, "c": [1, 4], "c1": 2, "c2": 2, "c_1": 2, "c_2": 2, "calcul": [1, 2, 5], "call": 5, "cam_model_0": 1, "cam_model_1": 1, "camera": [1, 2, 3], "camera_nam": 1, "camnam": 1, "can": [1, 2, 3, 4, 5], "cannot": [2, 4], "captur": 2, "carefulli": 5, "categori": 5, "caus": 4, "cave": 3, "cdot": 2, "certif": 4, "certificate_verify_fail": 4, "challeng": [3, 5], "chang": 5, "channel": 1, "check": 5, "circ": 5, "cite": 5, "class": 5, "claus": 3, "close": 5, "closer": 5, "code": 5, "coincid": 2, "collect": 5, "collis": 5, "combin": 1, "come": 2, "command": 4, "common": 3, "compar": 5, "compens": 2, "complet": 1, "comput": [1, 2], "concaten": 1, "conclus": 2, "confid": 2, "configur": 3, "conflict": 4, "consecut": 5, "consist": 5, "consol": 1, "contain": [2, 5], "continu": 2, "contrast": 5, "contrib": 4, "convert": 5, "coordin": 2, "correspond": 1, "correspondens": 1, "corridor": 2, "costom": 5, "could": 4, "cover": 5, "cpu": 1, "creat": [1, 2, 5], "creativ": 3, "cross": 1, "cross_bottom": 1, "cross_mid": 1, "cross_top": 1, "cube": 1, "cuda": [1, 5], "current": 4, "custom": [3, 4, 5], "customiz": 3, "cv2": 1, "cx": 1, "cy": 1, "d": 2, "d_2": 2, "dark": 3, "data": [3, 5], "dataload": 3, "dataset": [1, 2, 5], "default": 4, "defin": 5, "definit": 6, "degre": 1, "dens": 1, "depend": [4, 5], "depth": [1, 2, 3], "descent": 2, "describ": 5, "design": 3, "desns": 1, "detail": 5, "determin": 2, "devic": 1, "didn": 5, "differ": [1, 2, 5], "differenti": 5, "difficulti": 1, "direct": [1, 5], "directli": [1, 2], "disadvantag": 5, "discret": 2, "distinct": 5, "distort": [2, 5], "divers": 5, "do": [1, 2, 4], "do_align": 1, "do_scal": 1, "doe": [2, 4], "don": [1, 4, 5], "done": 5, "dot": 2, "doubl": 5, "doublespher": 1, "download": [3, 4], "download_config": 1, "due": [2, 5], "dv": 5, "dynam": 5, "e": [2, 5], "each": [1, 2, 5], "easi": [1, 2], "easili": 5, "edg": [2, 5], "effect": 2, "effici": 1, "enforce_length": 1, "entir": 1, "env": 1, "environ": [1, 3, 5], "epsilon": 2, "equirectangular": [1, 3], "error": 4, "esim": 5, "est_traj": 1, "estim": 1, "etc": [1, 5], "evalu": [3, 5], "evaluate_traj": 1, "evaluator_example_plot": 1, "event": 3, "exampl": [2, 3], "except": 5, "excit": 3, "exist": 2, "expect": 2, "experi": 3, "exterem": 2, "extrins": [2, 5], "face": [4, 5], "fail": 4, "fals": 1, "far": [1, 2, 5], "fastest": 5, "few": 2, "field": 5, "fig": 5, "figur": [2, 5], "file": 5, "find": [2, 4], "first": [2, 5], "fishey": [1, 3], "fix": [2, 4], "flexibl": 1, "float32": 5, "flow": 3, 
"flow_sampling_exampl": 1, "fly": 1, "folder": 1, "follow": [1, 2, 4, 5], "format": [1, 5], "fov": 5, "fov_degre": 1, "fraction": 2, "frame": [1, 5], "frame_skip": 1, "frequenc": 5, "from": [1, 2, 4, 5], "from_eul": 1, "front": [1, 5], "full": 2, "function": [1, 2, 4], "fundament": 2, "further": 2, "futur": 4, "fx": 1, "fy": 1, "g": 5, "gap": 5, "gehrig_2020_cvpr": 5, "gener": [1, 5], "geomrtric": 2, "get": [1, 2, 4], "get_traj_np": 1, "ghost": 5, "github": 6, "given": [1, 2], "go": [3, 4], "gradient": 2, "grid": 5, "ground": [1, 5], "gt_traj": 1, "h": [1, 2], "ha": [1, 2], "happen": 1, "hard": 1, "hat": 2, "have": [2, 3, 5, 6], "height": 1, "help": [1, 2], "here": 1, "higher": 5, "highli": [1, 2, 5], "hit": 2, "home": 3, "homographi": 2, "how": [1, 2], "howev": [2, 5], "http": 4, "hz": 5, "i": [1, 2, 3, 4, 5], "i_1": 2, "i_2": 2, "id": 1, "ideal": 1, "illustr": 5, "imag": [1, 2, 3], "image_lcam_back": 1, "image_lcam_bottom": 1, "image_lcam_front": 1, "image_lcam_left": 1, "image_lcam_right": 1, "image_lcam_top": 1, "implement": 5, "import": [1, 4], "importerror": 4, "improv": [3, 5], "imshow": 1, "imu": [1, 3], "inclin": 2, "includ": [1, 5], "inconsist": 5, "info": 5, "inform": [1, 2], "init": 1, "initi": [1, 3], "instal": 3, "instead": 2, "instruct": 4, "integ": 2, "intern": 3, "interpol": [2, 5], "intrep": 2, "intrins": [2, 5], "issu": [3, 5, 6], "issuer": 4, "iter": [2, 3], "its": 2, "just": [1, 5], "known": 3, "label": 5, "lack": 2, "land": 2, "larg": [1, 5], "lcam_back": 1, "lcam_bottom": 1, "lcam_front": 1, "lcam_left": 1, "lcam_right": 1, "lcam_top": 1, "lead": 2, "learn": 1, "leav": 5, "left": [1, 2, 5], "length": 1, "lens": 3, "let": [1, 3], "level": [2, 5], "leverag": 2, "lidar": [1, 3], "like": [4, 5], "lin2022dv": 5, "line": [2, 5], "linear": [2, 5], "list": 3, "ll": 4, "load": 1, "load_sampl": 1, "local": 4, "locat": 2, "look": 1, "lot": 5, "lower": 2, "m": [4, 5], "mac": 4, "mai": [2, 4], "major": 2, "make": 1, "mani": 2, "manual": [4, 5], "map": [2, 3], "mask": [1, 2], "match": [1, 2], "mathbb": 2, "mathcal": 2, "maximum": 2, "me": 1, "mean": 2, "memori": 1, "metric": 1, "mid": 2, "might": 4, "min_": 2, "mini": 1, "minim": 2, "miss": [2, 4, 5], "modal": [1, 3], "model": [1, 2, 3], "modifi": 1, "moon": 3, "more": [1, 3], "most": 2, "motion": 5, "mountain": 3, "much": 5, "my": 1, "n": 2, "name": [1, 4], "nameref": 5, "navig": 3, "necessari": 2, "ned": 5, "need": [2, 5], "neighborhood": 2, "new": [1, 5], "new_camera_models_param": 1, "new_image_shape_hw": 1, "next": 1, "nice": 1, "nois": 3, "noisi": 1, "non": 2, "none": 1, "norm": 2, "normal": 1, "notic": 1, "now": 1, "np": 1, "num_work": 1, "number": [1, 2, 5], "numpi": [1, 3], "object": [1, 2, 5], "observ": [1, 2], "occlud": 2, "occlus": [1, 2], "occup": 3, "ofwsnot": 5, "older": 4, "one": [1, 2, 5], "onli": [2, 4], "onto": 2, "open": 5, "opencv": 4, "oper": 4, "optic": [1, 2, 3], "optim": 2, "org": 4, "orient": 5, "other": [2, 3, 4, 5], "our": 5, "over": 1, "overcom": 5, "own": [1, 5], "p": 2, "p000": 1, "p001": 1, "p002": 1, "packag": [1, 4], "pair": [1, 2], "panorama": 3, "parallel": 1, "param": 1, "paramet": [1, 2], "pass": 1, "path": 1, "pattern": 5, "peopl": 5, "perceiv": 2, "percentag": 5, "perfectli": 5, "perform": [1, 5], "perpendicular": 2, "pi": 2, "pi_2": 2, "pil": 4, "pillow": 4, "pillow_vers": 4, "pinhol": [1, 2, 3, 5], "pip": [3, 4], "pipelin": 5, "pixel": [1, 2, 5], "place": [3, 5], "pleas": [1, 4, 5, 6], "plot": 1, "plot_out_path": 1, "png": 1, "point": [1, 2, 5], "pose": [1, 3], "pose_lcam_front": 1, 
"posit": 2, "potenti": 5, "power": 1, "print": 1, "probabl": 2, "process": 5, "produc": 5, "propog": 2, "propos": 2, "provid": [1, 3, 5], "puck": 5, "pull": 6, "py": 1, "python": 4, "python3": 4, "pytorch": 4, "pytransform3d": 4, "pyyaml": 4, "quad": 2, "quarternion": 5, "r": 2, "r_0": 2, "r_raw_new": 1, "r_raw_new0": 1, "r_raw_new1": 1, "radiu": 2, "rai": 2, "ram": 1, "random": [1, 5], "rang": [1, 5], "rapidli": 2, "rate": 5, "raw": [1, 3], "raw_sid": 1, "re": 4, "readi": 4, "real": [2, 5], "realism": 5, "realist": [3, 5], "reason": 2, "rebecq18corl": 5, "recollect": 5, "recommend": 1, "record": 3, "red": 2, "reduc": [2, 5], "ref": 5, "refer": [1, 3], "region": 2, "registr": 2, "rel": 2, "remov": 4, "report": 6, "repres": [1, 5], "reproject": 2, "request": [1, 6], "requir": 3, "resampl": 1, "resiz": 1, "resolut": 5, "resolv": 5, "respect": 5, "result": [1, 5], "return": 5, "revis": 5, "rgb": [1, 3], "rho": 2, "rich": 5, "right": [1, 2], "rightmost": 2, "risk": 2, "robot": 3, "root": 1, "rotat": 1, "rpe": 1, "run": 4, "same": [1, 5], "sampl": [3, 5], "scale": 1, "scene": 5, "scipi": 1, "script": 5, "se": 2, "search": 2, "sec": 5, "second": [2, 5], "section": 5, "see": [1, 2, 4], "seen": 2, "seg": 1, "segment": 3, "select": 2, "semant": 3, "sensor": [3, 5], "separ": 5, "seq_length": 1, "seq_strid": 1, "sequenc": 1, "serv": 1, "set": [1, 5], "setup": 5, "shape": 1, "shift": 1, "show": [1, 2], "shown": 2, "shuffl": 1, "side": [2, 5], "signal": 1, "sim": 5, "similar": 3, "simul": 5, "sinc": 2, "size": [1, 2, 5], "skip": 1, "sky": 5, "slam": 3, "small": 2, "so": [1, 2, 5], "solut": 3, "some": [1, 4, 5], "someth": 4, "sota": 5, "sourc": 5, "space": 2, "spatial": 1, "specif": 5, "specifi": 1, "spheric": 5, "spline": 5, "ssl": 4, "standard": 5, "start": 4, "state": 3, "static": 5, "statist": 5, "step": [2, 5], "stereo": [2, 5], "stop_cach": 1, "store": 2, "stride": 1, "stylish": 3, "submit": 6, "subset": 1, "subset_framenum": 1, "sudo": 4, "support": [1, 5], "synchron": 5, "synthes": 1, "synthet": 2, "system": [2, 4], "t": [1, 2, 4, 5], "t_a": 2, "t_r": 2, "ta": 1, "ta_iter": 1, "take": [4, 5], "tartanair": [1, 2, 4, 5], "tartanair_data_root": 1, "tau": 2, "temperatur": 2, "termin": 4, "test": [4, 5], "text": 2, "thei": 5, "them": 6, "therefor": 2, "thi": [1, 2, 4], "thing": 5, "those": 5, "though": 5, "three": 5, "threshold": 5, "thu": 5, "tild": 2, "time": [2, 5], "togeth": 5, "tolist": 1, "tool": [1, 5], "toolkit": 3, "top": 1, "torchvis": 4, "train": 5, "trajectori": [1, 3, 5], "trajectory_id": 1, "transform": [1, 2], "translat": 5, "transpos": 1, "troubl": 4, "troubleshoot": 3, "true": 1, "truth": [1, 5], "two": [1, 2, 5], "type": 5, "typic": 2, "ultra": 5, "unabl": 4, "under": 3, "uninstal": 4, "unit": 2, "unlik": 2, "unzip": 1, "upgrad": 5, "upper": 1, "urlerror": 4, "urllib": 4, "urlopen": 4, "us": [1, 2, 4, 5], "user": 5, "usual": 5, "util": 1, "v1": [3, 5], "v2": [1, 3, 5], "v2e": 5, "valid": 2, "valu": [1, 2, 5], "varieti": 3, "variou": 5, "velodyn": 5, "verbos": 1, "veri": [2, 5], "verif": 2, "verifi": 4, "version": [1, 4], "via": 3, "video": 1, "view": [2, 5], "vision": 2, "visual": [1, 3], "vlp": 5, "voltmet": 5, "w": [1, 2], "wa": 4, "waitkei": 1, "wall": 2, "want": [2, 3], "warp": [2, 5], "we": [1, 2, 3, 4, 5, 6], "websit": 4, "welcom": 3, "well": 5, "were": 3, "wget": 4, "what": 1, "when": [1, 2, 5], "where": 1, "whether": 1, "which": [1, 2, 5], "while": [4, 5], "wide": 5, "width": 1, "wise": 2, "word": 2, "work": 4, "worker": 1, "world": [3, 5], "wsnote": 5, "x": 2, "xi": 1, 
"xxx": 5, "xyz": 1, "y": 1, "yaoyu": 5, "ye": 1, "yorai": 5, "you": [1, 3, 4], "your": [1, 3, 4], "zeros_lik": 1}, "titles": ["Environments", "Examples", "Dense Correspondence / Flow Sampling", "TartanAir Dataset Documentation", "Installation", "Modalities", "Troubleshooting", "API Reference"], "titleterms": {"ablat": 2, "api": [1, 7], "approxim": 2, "camera": 5, "case": 2, "chang": 2, "config": 1, "correspond": 2, "custom": [1, 7], "data": [1, 7], "dataload": [1, 7], "dataset": 3, "dens": 2, "depth": 5, "document": 3, "download": [1, 7], "environ": 0, "error": 2, "evalu": [1, 7], "event": 5, "exampl": 1, "file": 1, "fishey": 5, "flow": [1, 2, 5], "formul": 2, "fov": 2, "get": 3, "hard": 2, "imag": 5, "imu": 5, "initi": 7, "instal": 4, "introduct": 2, "invalid": 2, "issu": 4, "iter": [1, 7], "known": 4, "larg": 2, "licens": 3, "lidar": 5, "list": 7, "maco": 4, "map": 5, "method": 2, "modal": 5, "model": 5, "nearest": 2, "nois": 5, "numpi": 7, "occup": 5, "optic": 5, "our": 2, "panorama": 5, "pose": 5, "previou": 2, "problem": 2, "project": 2, "python": 1, "raw": 5, "refer": 7, "requir": 4, "rgb": 5, "sampl": [1, 2], "segment": 5, "semant": 5, "solut": 4, "some": 2, "start": 3, "tartanair": 3, "threshold": 2, "trajectori": 7, "troubleshoot": 6, "ubuntu": 4, "via": 1, "visual": 7, "yaml": 1}})
\ No newline at end of file
diff --git a/troubleshooting.html b/troubleshooting.html
new file mode 100644
index 0000000..1e104be
--- /dev/null
+++ b/troubleshooting.html
@@ -0,0 +1,113 @@
+
+
+
+
+
+
+
+
+
+Troubleshooting — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+Troubleshooting
+We definitely have bugs. Please report them by opening a GitHub issue or submitting a pull request!
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/usage.html b/usage.html
new file mode 100644
index 0000000..8520f30
--- /dev/null
+++ b/usage.html
@@ -0,0 +1,152 @@
+
+
+
+
+
+
+
+
+
+API Reference — TartanAir documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ TartanAir
+
+
+
+
+
+
+
+
+
+API Reference
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file