diff --git a/.prettierignore b/.prettierignore
index 94cfebc..e2ed428 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -14,3 +14,5 @@ node_modules
 package-lock.json
 pnpm-lock.yaml
 yarn.lock
+
+src/routes/blog/**/*.md
\ No newline at end of file
diff --git a/src/lib/components/molecules/CodeBlock.svelte b/src/lib/components/molecules/CodeBlock.svelte
index 5ae504b..fde565e 100644
--- a/src/lib/components/molecules/CodeBlock.svelte
+++ b/src/lib/components/molecules/CodeBlock.svelte
@@ -46,22 +46,24 @@
-	{#if filename}
-		{filename}
-	{/if}
-	{#if lang}
-		{lang}
-	{/if}
+	{#if lang}
+		{lang}
+	{/if}
+
 {#if code}
-
+	<svelte:window on:resize={() => (size = window.innerWidth)} />
 	<div class={size >= 1440 ? 'scroll-container' : ''} style={divStyle}>
diff --git a/src/lib/components/singletons/PrevNextPost.svelte b/src/lib/components/singletons/PrevNextPost.svelte
index 8f79391..5c70f71 100644
--- a/src/lib/components/singletons/PrevNextPost.svelte
+++ b/src/lib/components/singletons/PrevNextPost.svelte
@@ -43,7 +43,7 @@
 			{prevPost?.meta?.title}
 	{:else}
-		You are reading our first post.
+		You are reading our most recent post.
 	{/if}
diff --git a/src/lib/data/blog-posts/index.ts b/src/lib/data/blog-posts/index.ts
deleted file mode 100644
index 7d45408..0000000
--- a/src/lib/data/blog-posts/index.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import { filterPosts, importPosts } from './utils';
-
-// utils.ts does all the heavy lifting here, but to sum it up:
-// Posts are fetched from the src/routes/(blog-article)/* folder.
-// The folder name is used as the slug, and the file name MUST me +page.md
-// I've left a sample post in there, but with the "hidden" property so it doesn't show up.
-
-export const allPosts = importPosts(true);
-export const filteredPosts = filterPosts(allPosts);
diff --git a/src/lib/data/blog-posts/utils.ts b/src/lib/data/blog-posts/utils.ts
deleted file mode 100644
index 9307d9c..0000000
--- a/src/lib/data/blog-posts/utils.ts
+++ /dev/null
@@ -1,80 +0,0 @@
-// Disabling eslint because importing Prism is needed
-// even if not directly used in this file
-// eslint-disable-next-line no-unused-vars
-import Prism from 'prismjs';
-// Here we assign it to a variable so the import above
-// is not removed automatically on build
-// eslint-disable-next-line @typescript-eslint/no-unused-vars
-const ifYouRemoveMeTheBuildFails = Prism;
-import 'prism-svelte';
-import readingTime from 'reading-time/lib/reading-time';
-import striptags from 'striptags';
-import type { BlogPost } from '$lib/utils/types';
-
-// Define an interface for the imported modules
-interface ImportedModule {
-	metadata: BlogPost;
-	default: { render: () => { html: string } };
-}
-
-export const importPosts = (render = false) => {
-	const blogImports = import.meta.glob('$routes/*/*/*.md', { eager: true });
-	const innerImports = import.meta.glob('$routes/*/*/*/*.md', { eager: true });
-
-	const imports = { ...blogImports, ...innerImports };
-
-	const posts: BlogPost[] = [];
-	for (const path in imports) {
-		const post = imports[path] as ImportedModule;
-		if (post) {
-			posts.push({
-				...post.metadata,
-				html: render && post.default.render ? post.default.render()?.html : undefined
-			});
-		}
-	}
-
-	return posts;
-};
-
-export const filterPosts = (posts: BlogPost[]) => {
-	return posts
-		.filter((post) => !post.hidden)
-		.sort((a, b) =>
-			new Date(a.date).getTime() > new Date(b.date).getTime()
-				? -1
-				: new Date(a.date).getTime() < new Date(b.date).getTime()
-					? 1
-					: 0
-		)
-		.map((post) => {
-			const readingTimeResult = post.html ? readingTime(striptags(post.html) || '') : undefined;
-			const relatedPosts = getRelatedPosts(posts, post);
-
-			return {
-				...post,
-				readingTime: readingTimeResult ? readingTimeResult.text : '',
-				relatedPosts: relatedPosts
-			} as BlogPost;
-		});
-};
-
-// #region Unexported Functions
-
-const getRelatedPosts = (posts: BlogPost[], post: BlogPost) => {
-	// Get the first 3 posts that have the highest number of tags in common
-	const relatedPosts = posts
-		.filter((p) => p.slug !== post.slug)
-		.sort((a, b) => {
-			const aTags = a.tags?.filter((t) => post.tags?.includes(t));
-			const bTags = b.tags?.filter((t) => post.tags?.includes(t));
-			return aTags?.length > bTags?.length ? -1 : aTags?.length < bTags?.length ? 1 : 0;
-		});
-
-	return relatedPosts.slice(0, 3).map((p) => ({
-		...p,
-		readingTime: p.html ? readingTime(striptags(p.html) || '').text : ''
-	}));
-};
-
-// #endregion
diff --git a/src/lib/utils/index_posts.ts b/src/lib/data/posts.ts
similarity index 99%
rename from src/lib/utils/index_posts.ts
rename to src/lib/data/posts.ts
index 4f95cb9..99c7f25 100644
--- a/src/lib/utils/index_posts.ts
+++ b/src/lib/data/posts.ts
@@ -28,6 +28,5 @@ export const fetchMarkdownPosts = async () => {
 		};
 	})
 	);
-
 	return allPosts;
 };
diff --git a/src/routes/(home)/+page.server.ts b/src/routes/(home)/+page.server.ts
index 39bef38..65324f2 100644
--- a/src/routes/(home)/+page.server.ts
+++ b/src/routes/(home)/+page.server.ts
@@ -1,4 +1,4 @@
-import { fetchMarkdownPosts } from '$lib/utils/index_posts';
+import { fetchMarkdownPosts } from '$lib/data/posts';
 import type { Contributor } from '$lib/utils/types';
 import { fetchWithCache } from '$lib/utils/cache';
diff --git a/src/routes/+layout.svelte b/src/routes/+layout.svelte
index 992ec06..66ebdc0 100644
--- a/src/routes/+layout.svelte
+++ b/src/routes/+layout.svelte
@@ -4,18 +4,27 @@
 	import Header from '$lib/components/organisms/Header.svelte';
 	import Footer from '$lib/components/organisms/Footer.svelte';
 	import { onNavigate } from '$app/navigation';
+	let contentDiv: HTMLElement | null = null;
+	const supportsViewTransition = typeof window !== 'undefined' && 'startViewTransition' in document;
 	onNavigate((navigation) => {
 		return new Promise((resolve) => {
-			const transition = document.startViewTransition(async () => {
+			if (supportsViewTransition) {
+				const transition = document.startViewTransition(async () => {
+					if (contentDiv) {
+						contentDiv.scrollTop = 0;
+					}
+					resolve();
+					await navigation.complete;
+				});
+			} else {
 				if (contentDiv) {
-					// Fix scroll
 					contentDiv.scrollTop = 0;
 				}
 				resolve();
-				await navigation.complete;
-			});
+			}
 		});
 	});
diff --git a/src/routes/+layout.ts b/src/routes/+layout.ts
index 1c3319e..1d3193b 100644
--- a/src/routes/+layout.ts
+++ b/src/routes/+layout.ts
@@ -1,9 +1,10 @@
 export const prerender = true;
 
-import { filteredPosts } from '$lib/data/blog-posts';
+export const load = async ({ fetch }) => {
+	const response = await fetch(`/api`);
+	const posts = await response.json();
 
-export async function load() {
 	return {
-		posts: filteredPosts
+		posts
 	};
-}
+};
diff --git a/src/routes/api/+server.ts b/src/routes/api/+server.ts
index 989cabf..eefbd18 100644
--- a/src/routes/api/+server.ts
+++ b/src/routes/api/+server.ts
@@ -1,4 +1,4 @@
-import { fetchMarkdownPosts } from '$lib/utils/index_posts';
+import { fetchMarkdownPosts } from '$lib/data/posts';
 import { json } from '@sveltejs/kit';
 
 export const GET = async () => {
diff --git a/src/routes/blog/[slug]/+page.ts b/src/routes/blog/[slug]/+page.ts
index 973220b..4412f7e 100644
--- a/src/routes/blog/[slug]/+page.ts
+++ b/src/routes/blog/[slug]/+page.ts
@@ -1,4 +1,4 @@
-import { fetchMarkdownPosts } from '$lib/utils/index_posts';
+import { fetchMarkdownPosts } from '$lib/data/posts';
 
 export async function load({ params }) {
 	const post = await import(`../${params.slug}.md`);
diff --git a/src/routes/blog/benchmarking-the-torrust-bittorrent-tracker.md b/src/routes/blog/benchmarking-the-torrust-bittorrent-tracker.md
index 001bf6c..1291ed5 100644
--- a/src/routes/blog/benchmarking-the-torrust-bittorrent-tracker.md
+++ b/src/routes/blog/benchmarking-the-torrust-bittorrent-tracker.md
@@ -128,41 +128,34 @@ There are many ways to keep this information in memory. The access to this data
 The output at the time of writing this post is:
 
-
-```terminal
-tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, Entry>>
+<CodeBlock lang="terminal" code={`tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, Entry>>
 add_one_torrent: Avg/AdjAvg: (60ns, 59ns)
 update_one_torrent_in_parallel: Avg/AdjAvg: (10.909457ms, 0ns)
 add_multiple_torrents_in_parallel: Avg/AdjAvg: (13.88879ms, 0ns)
-update_multiple_torrents_in_parallel: Avg/AdjAvg: (7.772484ms, 7.782535ms)
-
+update_multiple_torrents_in_parallel: Avg/AdjAvg: (7.772484ms, 7.782535ms)\n
 std::sync::RwLock<std::collections::BTreeMap<InfoHash, Entry>>
 add_one_torrent: Avg/AdjAvg: (43ns, 39ns)
 update_one_torrent_in_parallel: Avg/AdjAvg: (4.020937ms, 4.020937ms)
 add_multiple_torrents_in_parallel: Avg/AdjAvg: (5.896177ms, 5.768448ms)
-update_multiple_torrents_in_parallel: Avg/AdjAvg: (3.883823ms, 3.883823ms)
-
+update_multiple_torrents_in_parallel: Avg/AdjAvg: (3.883823ms, 3.883823ms)\n
 std::sync::RwLock<std::collections::BTreeMap<InfoHash, Arc<std::sync::Mutex<Entry>>>>
 add_one_torrent: Avg/AdjAvg: (51ns, 49ns)
 update_one_torrent_in_parallel: Avg/AdjAvg: (3.252314ms, 3.149109ms)
 add_multiple_torrents_in_parallel: Avg/AdjAvg: (8.411094ms, 8.411094ms)
-update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.106086ms, 4.106086ms)
-
+update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.106086ms, 4.106086ms)\n
 tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, Arc<std::sync::Mutex<Entry>>>>
 add_one_torrent: Avg/AdjAvg: (91ns, 90ns)
 update_one_torrent_in_parallel: Avg/AdjAvg: (3.542378ms, 3.435695ms)
 add_multiple_torrents_in_parallel: Avg/AdjAvg: (15.651172ms, 15.651172ms)
-update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.368189ms, 4.257572ms)
-
+update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.368189ms, 4.257572ms)\n
 tokio::sync::RwLock<std::collections::BTreeMap<InfoHash, Arc<tokio::sync::Mutex<Entry>>>>
 add_one_torrent: Avg/AdjAvg: (111ns, 109ns)
 update_one_torrent_in_parallel: Avg/AdjAvg: (6.590677ms, 6.808535ms)
 add_multiple_torrents_in_parallel: Avg/AdjAvg: (16.572217ms, 16.30488ms)
-update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.073221ms, 4.000122ms)
-```
-
-
+update_multiple_torrents_in_parallel: Avg/AdjAvg: (4.073221ms, 4.000122ms)`}
+/>
 
 We are benchmarking four scenarios that we think are the most relevant:
 
@@ -216,10 +209,9 @@ The last line `./target/release/aquatic_udp_load_test -p > "load-test-config.tom
 Edit the `load-test-config.toml`:
 
-
-```toml
-# ...
+<CodeBlock lang="toml" code={`
+# ...`}
+/>
 
 Finally you can run the test with:
 
@@ -255,10 +245,9 @@ Just as an example we show the test results with a non dedicated machine.
 **Test result**:
 
-
-```terminal
-Requests out: 388702.94/second
+<CodeBlock lang="terminal" code={`Requests out: 388702.94/second
+  - p100: 357`}
+/>
 
 ## Comparative UDP Benchmarking With Other Trackers
 
diff --git a/src/routes/blog/containerizing-rust-applications-best-practices.md b/src/routes/blog/containerizing-rust-applications-best-practices.md
index ff4c298..620e4bd 100644
--- a/src/routes/blog/containerizing-rust-applications-best-practices.md
+++ b/src/routes/blog/containerizing-rust-applications-best-practices.md
@@ -88,48 +88,40 @@ All of the examples included in this blog post are publicly available in our "[C
 > **_Please Note:_** The actual `Containerfile` for the **Tracker** and **Index** services builds images for both `debug` and `release` modes. For learning purposes we are using a simplified version here which only builds the `release` mode:
 
-
-```dockerfile
-# Extracted example of our Containerfile.
-
+<CodeBlock lang="dockerfile" code={`# Extracted example of our Containerfile.
+CMD ["/usr/bin/full-example"]`}
+/>
 
 The real version in production contains some duplicate stages to build the `debug` mode. Those stages are almost identical to the ones in this example and are therefore omitted. Only some flags and names change.
@@ -216,25 +203,21 @@ A common pattern to build smaller docker images is to use multi-stage Dockerfile
 You can compile your application with all of the Rust tooling and then use the final binary in a slim operating system. This image does not contain the common packages contained in the default tag and only contains the minimal packages needed to run your compiled Rust application.
 
-
-```dockerfile
-# This is the first stage. This image is used to compile the Rust application.
+<CodeBlock lang="dockerfile" code={`# This is the first stage. This image is used to compile the Rust application.
+CMD ["app"]`}
+/>
 
 The example is very simple and you can build and run the image with:
 
@@ -322,35 +305,24 @@ We first create an **empty application configuration** which uses the same depen
 Then we build the application. With these layers you do not need to re-build the dependencies when you change the application code.
 
-
-```dockerfile
-FROM rust:latest as builder
-
-WORKDIR /app
-
-RUN mkdir src && echo "fn main() { println!(\"if you see this, the build broke\"); }" > src/main.rs
-
+<CodeBlock lang="dockerfile" code={`FROM rust:latest as builder\n
+WORKDIR /app\n
+RUN mkdir src && echo "fn main() { println!(\"if you see this, the build broke\"); }" > src/main.rs\n
 # This build step will cache the dependencies as they're not changed
-RUN cargo build --release
-
+RUN cargo build --release\n
 # Now, remove the dummy main.rs and copy your source code
-COPY . /app
-
+COPY . /app\n
 # You'll need to update the last modified of the main.rs file to inform cargo to rebuild it
-RUN touch -a -m ./src/main.rs
-
+RUN touch -a -m ./src/main.rs\n
 # Build the application for release. Since dependencies are cached, this will only build your code
-RUN cargo build --release
-
-CMD ["./target/release/custom-dependencies-cache"]
-```
-
-
+RUN cargo build --release\n
+CMD ["./target/release/custom-dependencies-cache"]`}
+/>
 
 Instead of this custom solution, we use and recommend [cargo chef](https://github.com/LukeMathWalker/cargo-chef) which is a cargo-subcommand that specializes in speeding up Rust Docker builds using Docker layer caching.
 
@@ -358,39 +330,25 @@ Instead of this custom solution, we use and recommend [cargo chef](https://githu
 In this example, we show how to use [cargo chef](https://github.com/LukeMathWalker/cargo-chef), which is the approach we prefer.
 
-
-```dockerfile
-FROM rust:latest as chef
-
-WORKDIR /app
-
+<CodeBlock lang="dockerfile" code={`FROM rust:latest as chef\n
+WORKDIR /app\n
+RUN cargo build --release\n
+CMD ["./target/release/dependencies-cache-with-cargo-chef"]`}
+/>
 
 It does more or less the same as the custom solution: it caches dependencies in a separate layer and has some other [benefits](https://github.com/LukeMathWalker/cargo-chef#benefits-of-cargo-chef).
 
@@ -402,24 +360,19 @@ Cargo Binstall repo: <https://github.com/cargo-bins/cargo-binstall>.
 We are using it to install `cargo chef` and `cargo nextest` packages easily.
 
-
-```dockerfile
-
-FROM rust:latest
+<CodeBlock lang="dockerfile" code={`FROM rust:latest
+CMD ["./target/release/app"]`}
+/>
 
 ## Archiving And Reusing Builds With Cargo Nextest
 
@@ -438,13 +391,11 @@ We are using it for two reasons:
 - **Test partitioning**. We build the application in a docker stage and then run the tests in another stage. This way we can separate the build and test phases.
 - **Passing the binary to the next stage**. After building the application we archive the build artifacts and then we extract them in the next stage to run the tests. Finally we copy the binary to the final "runtime" stage.
 
-
-```dockerfile
-## First stage to install the nextest tool
+<CodeBlock lang="dockerfile" code={`## First stage to install the nextest tool
+CMD ["/app/archiving-and-reusing-builds"]`}
+/>
 
@@ -527,22 +474,15 @@ There are some ways to avoid running the container as `root`.
 We will see all of them.
 
 ### Use the `USER` instruction
 
-
-```dockerfile
-FROM rust:latest
-
-WORKDIR /app
-
+<CodeBlock lang="dockerfile" code={`FROM rust:latest\n
+WORKDIR /app\n
+RUN cargo build --release\n
+USER www-data\n
+CMD ["./target/release/app"]`}
+/>
 
 You can add the `USER` instruction before the last command. In that example we know that the base image already contains the user `www-data`.
 
@@ -554,25 +494,17 @@ www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin`}
 But you can also create a specific user for your application:
 
-
-```dockerfile
-FROM rust:latest
-
-WORKDIR /app
-
+<CodeBlock lang="dockerfile" code={`FROM rust:latest\n
+WORKDIR /app\n
+RUN useradd -r -u 1001 -g appuser appuser\n
+USER appuser\n
+CMD ["./target/release/app"]`}
+/>
 
 Using the `USER` instruction is considered a good practice because by default the container will not run as root. In that example, docker will run the container as the user with the ID `1001`.
 
@@ -617,36 +549,27 @@ With the proposed solutions you would need to rebuild the docker image so that t
 There is an alternative to the previous solutions that makes it possible to **run the container with different user IDs without rebuilding the image**.
 
-
-```dockerfile
-
-## Compile su-exec
+<CodeBlock lang="dockerfile" code={`## Compile su-exec
+CMD ["/usr/local/bin/app"]`}
+/>
 
 This is the approach we use in Torrust. The container runs as `root`, but we always use an entrypoint. That entrypoint creates a new user with an ID provided as an environment variable.
 
@@ -678,51 +601,40 @@ Rust apps can be built in `debug` or `release` mode. The `Containerfile` builds
 We can abstract away the stages:
 
-
-```dockerfile
-## Base Builder Image
+<CodeBlock lang="dockerfile" code={`## Base Builder Image
+# Runtime for release mode. It copies the binary from the \`test\` stage and runs it
+# via the entrypoint added in the \`runtime\` stage.`}
+/>
 
 Let's see each stage individually.
 
@@ -801,10 +713,9 @@ RUN cargo nextest archive --tests --benches --examples --workspace --all-targets
 Now that we have successfully built the application, we can run the tests. We extract the application from the archived artifacts and run them.
 
-
-```dockerfile
-## Extract and Test (release)
+<CodeBlock lang="dockerfile" code={`## Extract and Test (release)
+RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin`}
+/>
 
 Once the application has been built and tested we prepare the runtime. We start from a minimal "distroless" image variant. We add an entrypoint to set up the application and also to make sure we don't use the `root` user to run it. The entrypoint just runs the application provided as an argument; in our case, our application in `debug` or `release` mode, depending on which one you want to run.
 
diff --git a/src/routes/blog/deploying-torrust-to-production.md b/src/routes/blog/deploying-torrust-to-production.md
index a1b29e5..4669387 100644
--- a/src/routes/blog/deploying-torrust-to-production.md
+++ b/src/routes/blog/deploying-torrust-to-production.md
@@ -501,47 +501,35 @@ Notice: you have to edit the file `./storage/proxy/etc/nginx-conf/nginx.conf` no
 
-
-```nginx
-server
+<CodeBlock lang="nginx" code={`server
+}`}
+/>
 
 Make sure you also change the domains:
 
@@ -900,22 +869,17 @@ If you change the config files you will need to restart the services.
 You can also enable debug logging for the Nginx proxy. Edit the file `./storage/proxy/etc/nginx-conf/nginx.conf` and add `error_log` to the server configuration:
 
-
-```nginx
-server
+<CodeBlock lang="nginx" code={`server
+}`}
+/>
 
 If your server does not have much disk space, you could run out of it.
diff --git a/src/routes/blog/how-to-contribute-to-this-site.md b/src/routes/blog/how-to-contribute-to-this-site.md
index 8b5d7e6..5d6b2b5 100644
--- a/src/routes/blog/how-to-contribute-to-this-site.md
+++ b/src/routes/blog/how-to-contribute-to-this-site.md
@@ -66,10 +66,9 @@ To create a new post, create a new folder inside the `src/routes/(blog-article)`
 Inside the `+page.md` file, you must start with the front matter, which is a YAML-like syntax that is used to define metadata for the post. The front matter must be the first thing in the file, and must be separated from the rest of the content by three dashes (`---`). An example of a front matter is:
 
-
-```md
----
+<CodeBlock lang="md" code={`---
+---`}
+/>
 
 ## Managing blog posts
 
diff --git a/src/routes/blog/visualize-tracker-metrics-prometheus-grafana.md b/src/routes/blog/visualize-tracker-metrics-prometheus-grafana.md
index 40ff527..34efb93 100644
--- a/src/routes/blog/visualize-tracker-metrics-prometheus-grafana.md
+++ b/src/routes/blog/visualize-tracker-metrics-prometheus-grafana.md
@@ -61,16 +61,15 @@ We will not include instructions about how to install Prometheus and Grafana. Pl
 These are the new services added to the docker compose configuration we are using in the live demo:
 
-
-```yml
-grafana:
+<CodeBlock lang="yml" code={`grafana:
+  grafana_data: {}`}
+/>
 
@@ -112,28 +107,23 @@ You have to provide a docker compose environment variable `GF_SECURITY_ADMIN_PAS
 The `Nginx` service was also changed a little bit to make sure it's started after the Grafana container. We need that to serve Grafana via Nginx (to use HTTPS).
 
-
-```yml
-proxy:
+<CodeBlock lang="yml" code={`proxy:
+  - grafana`}
+/>
 
 The Prometheus configuration is very simple. We only need to set the Tracker URL.
 
-
-```yml
-global:
-  scrape_interval: 15s # How often to scrape metrics
-
+<CodeBlock lang="yml" code={`global:
+  - targets: ['tracker:1212']`}
+/>
 
diff --git a/svelte.config.js b/svelte.config.js
index 2c24c4d..6592381 100644
--- a/svelte.config.js
+++ b/svelte.config.js
@@ -11,12 +11,11 @@ import rehypePrettyCode from 'rehype-pretty-code';
 const mdsvexOptions = {
 	extensions: ['.md'],
 	rehypePlugins: [
-		rehypeExternalLinks, // Adds 'target' and 'rel' to external links
-		rehypeSlug, // Adds 'id' attributes to Headings (h1,h2,etc)
+		rehypeExternalLinks,
+		rehypeSlug,
 		[
 			rehypeAutolinkHeadings,
 			{
-				// Adds hyperlinks to the headings, requires rehypeSlug
 				behavior: 'prepend',
 				properties: { className: ['heading-link'], title: 'Permalink', ariaHidden: 'true' },
 				content: {
@@ -33,7 +32,6 @@ const mdsvexOptions = {
 	theme: 'github-dark',
 	onVisitLine(node) {
 		if (node.children.length === 0) {
-			// Ensure empty lines are preserved
 			node.children = [{ type: 'text', value: '↹↹↹' }];
 		}
 	}