diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index fdb840c8b9024ace83212636b5936cdfc88b68c3..b65efb027799bbc17914f2d20c759d96216823d6 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -87,6 +87,10 @@ pack-doc:
     - echo "GIT_DESCRIBE=$(git describe --always)" | tee -a .env
     - pipenv run mkdocs build -sd build
     - pushd build
+    - >
+      chromium --headless --disable-gpu --no-sandbox --no-zygote
+      --disable-software-rasterizer --disable-dev-shm-usage
+      --print-to-pdf=picodata_docs.pdf picodata_docs.html
     - tar -cvzf ../$FNAME .
     - popd
     - echo "Picodata doc successfully packed."
diff --git a/Pipfile b/Pipfile
index 6eed0d0c011425456b3cc7db4a4ef4d0186d1b4e..e766d6c09b5edf93e66b7ceae68251495b3285da 100644
--- a/Pipfile
+++ b/Pipfile
@@ -8,12 +8,14 @@ mkdocs = "==1.6.0"
 mkdocs-material = "==9.5.28"
 mkdocs-open-in-new-tab = "==1.0.3"
 pygments = "==2.18.0"
+beautifulsoup4 = "==4.12.3"
 
 [dev-packages]
 flake8 = "*"
 black = "*"
 mypy = "*"
 types-markdown = "*"
+types-beautifulsoup4 = "*"
 
 [requires]
 python_version = "3.12"
diff --git a/Pipfile.lock b/Pipfile.lock
index 9437d88e1d2e0870d80173c670fca196e2ccaeec..6e8d47658982b2380f6c591ca26a9a2345ff7023 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "95c6c052d73d423967b79c380f4d9d28416e2ce77799c888bc76c0dd80b1c9d7"
+            "sha256": "1bc71a1c7d177faac7b2f52d99ee48baf42d4214e3f30f29e8f768bedc9d441d"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -24,6 +24,15 @@
             "markers": "python_version >= '3.8'",
             "version": "==2.15.0"
         },
+        "beautifulsoup4": {
+            "hashes": [
+                "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051",
+                "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"
+            ],
+            "index": "pypi",
+            "markers": "python_full_version >= '3.6.0'",
+            "version": "==4.12.3"
+        },
         "certifi": {
             "hashes": [
                 "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b",
@@ -513,6 +522,31 @@
             "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
             "version": "==1.16.0"
         },
+        "soupsieve": {
+            "hashes": [
+                "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb",
+                "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"
+            ],
+            "markers": "python_version >= '3.8'",
+            "version": "==2.6"
+        },
+        "types-beautifulsoup4": {
+            "hashes": [
+                "sha256:158370d08d0cd448bd11b132a50ff5279237a5d4b5837beba074de152a513059",
+                "sha256:c95e66ce15a4f5f0835f7fbc5cd886321ae8294f977c495424eaf4225307fd30"
+            ],
+            "index": "pypi",
+            "markers": "python_version >= '3.8'",
+            "version": "==4.12.0.20241020"
+        },
+        "types-html5lib": {
+            "hashes": [
+                "sha256:3f1e064d9ed2c289001ae6392c84c93833abb0816165c6ff0abfc304a779f403",
+                "sha256:98042555ff78d9e3a51c77c918b1041acbb7eb6c405408d8a9e150ff5beccafa"
+            ],
+            "markers": "python_version >= '3.8'",
+            "version": "==1.1.11.20241018"
+        },
         "urllib3": {
             "hashes": [
                 "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472",
diff --git a/docker/static/Dockerfile b/docker/static/Dockerfile
index 5f408b2d0ceb01cd4834540197541e61528b14f4..b3dbef29b43815112d6f763e030730c13c7a0434 100644
--- a/docker/static/Dockerfile
+++ b/docker/static/Dockerfile
@@ -1,5 +1,7 @@
 FROM python:3.12
 
+RUN apt-get update && apt-get install -y chromium
+
 ARG IMAGE_DIR=/builds
 WORKDIR $IMAGE_DIR
 
diff --git a/docs/extra/style.css b/docs/extra/style.css
index 655c97566e9bb71c211d60625d4af0c9b6f7dd1a..d143dea22476dbcc5b586f7038bac151499123c0 100644
--- a/docs/extra/style.css
+++ b/docs/extra/style.css
@@ -306,3 +306,62 @@ article > p > img {
 span.keys {
     white-space: nowrap;
 }
+
+@media print {
+    .pd-break-before-page {
+        break-before: page;
+    }
+
+    h1, h2, h3, h4, h5, h6 {
+        break-after: avoid;
+    }
+
+    div.highlight {
+        break-before: avoid-page;
+        break-inside: avoid;
+    }
+
+    article.md-typeset:not(.pd-toc) {
+        zoom: 0.8;
+    }
+
+    p > svg {
+        max-height: 700px;
+        width: auto;
+    }
+}
+
+article.pd-cover {
+    display: flex;
+    flex-direction: column;
+    height: 100vh;
+
+    .wrapper {
+        flex: auto;
+        text-align: center;
+        align-content: center;
+    }
+
+    img {
+        width: 96px;
+        margin-bottom: 0.5em;
+    }
+  }
+
+article.pd-toc.pd-toc {
+    h1 {
+        text-align: center;
+    }
+
+    ul {
+        list-style-type: none;
+    }
+
+    ul li {
+        margin-left: 0;
+    }
+
+    a {
+        color: var(--md-typeset-color);
+    }
+}
diff --git a/hooks/export_to_pdf.py b/hooks/export_to_pdf.py
new file mode 100644
index 0000000000000000000000000000000000000000..38d5037a20942be672eac2ad4fed4c379151d314
--- /dev/null
+++ b/hooks/export_to_pdf.py
@@ -0,0 +1,216 @@
+from bs4 import BeautifulSoup
+from bs4.element import Tag
+from mkdocs.config.defaults import MkDocsConfig
+from urllib.parse import urlsplit
+import os
+import re
+
+
# Nav entries (page source files) that are omitted from the printable build.
excluded_pages = ["sql_index.md", "connectors_index.md"]
+
+
+def exclude_items(items: list) -> list:
+    result: list = []
+
+    for item in items:
+        if isinstance(item, str):
+            if item not in excluded_pages:
+                result.append(item)
+
+        elif isinstance(item, dict):
+            new_item: dict = {}
+            for k, v in item.items():
+                if isinstance(v, str):
+                    if v not in excluded_pages:
+                        new_item[k] = v
+                elif isinstance(v, list):
+                    filtered_list = exclude_items(v)
+                    if filtered_list:
+                        new_item[k] = filtered_list
+            if new_item:
+                result.append(new_item)
+    return result
+
+
def number_items(items: list, prefix: str = "") -> list[str]:
    """Flatten a nav list into hierarchically numbered "N. name" strings.

    Plain string entries become ``"<num>. <entry>"``; dict entries whose
    value is a list become a numbered section header followed by their
    (recursively numbered) children.  Dict entries whose value is a plain
    string still consume a number but emit nothing — mirroring the nav
    shapes this hook expects.

    :param items: nav list of ``str`` and ``{title: value}`` dicts
    :param prefix: dotted number of the enclosing section ("" at top level)
    :return: flat list of numbered entries
    """
    numbered: list[str] = []

    for index, entry in enumerate(items, start=1):
        label = f"{prefix}.{index}" if prefix else str(index)

        if isinstance(entry, str):
            numbered.append(f"{label}. {entry}")
            continue

        if isinstance(entry, dict):
            for title, value in entry.items():
                if isinstance(value, list):
                    numbered.append(f"{label}. {title}")
                    numbered.extend(number_items(value, label))

    return numbered
+
+
def update_href(path: str, href: str) -> str:
    """Rewrite a page-relative *href* into an in-document fragment anchor.

    External links (anything carrying a scheme or a host) are returned
    unchanged.  Relative links are resolved against *path* (the page's
    source directory) and any fragment is appended, yielding an anchor
    that targets the ids this hook assigns in the merged document.

    NOTE(review): resolution uses ``os.path``, so separators are
    platform-dependent; fine for the Linux CI image — confirm if this
    ever runs elsewhere.

    :param path: page path used as the base for relative resolution
    :param href: original link value
    :return: ``"#<resolved-target>"`` for local links, *href* otherwise
    """
    parts = urlsplit(href)

    if parts.scheme or parts.netloc:
        # Skip external links
        return href

    if parts.path:
        target = os.path.normpath(os.path.join(path, parts.path))
    else:
        target = path

    if parts.fragment:
        target = os.path.join(target, parts.fragment)

    return f"#{target}"
+
+
def on_post_build(config: MkDocsConfig):
    """mkdocs hook: assemble the built site into one printable HTML page.

    Runs after the site has been rendered.  Walks the (filtered, numbered)
    nav, loads each page's rendered HTML, rewrites its links and ids so
    they work inside a single merged document, prepends a cover page and a
    table of contents, and writes the result to
    ``<site_dir>/picodata_docs.html`` (which CI then prints to PDF with
    headless Chromium).
    """
    picodata_doc_ver = os.getenv("PICODATA_DOC_VER", "")

    nav = exclude_items(config["nav"])
    nav = number_items(nav)

    # Accumulator document collecting one <article> per nav entry.
    articles: BeautifulSoup = BeautifulSoup("", "html.parser")

    for item in nav:
        if not item.endswith(".md"):
            # Section header with no page of its own: emit a bare <h1>.
            h1 = articles.new_tag("h1")
            h1.string = item

            # Insert a page break before each top-level section
            # (top-level items are numbered "N. ..." — exactly one dot).
            if item.count(".") == 1:
                h1["class"] = ["pd-break-before-page"]

            # Anchor id is the bare section number, e.g. "2" or "2.1".
            h1["id"] = item.split()[0].strip(".")

            article = articles.new_tag("article")
            article["class"] = ["md-content__inner", "md-typeset"]
            article.append(h1)

            articles.append(article)
            continue

        prefix, src_uri = item.split()

        # Map "dir/page.md" to its rendered "dir/page/index.html".
        path, _ = os.path.splitext(src_uri)
        dest_uri = os.path.join(path, "index.html")
        abs_dest_path = os.path.normpath(os.path.join(config["site_dir"], dest_uri))

        with open(abs_dest_path, "r") as f:
            output = f.read()

        soup = BeautifulSoup(output, "html.parser")
        article = soup.find("article")  # type: ignore

        # Remove the "view page source" button (first <a> in the article).
        assert isinstance(article.a, Tag)
        article.a.extract()

        # Remove the feedback form.
        assert isinstance(article.form, Tag)
        article.form.extract()

        # Fix image paths: in the merged document pages sit at the root.
        for tag in article.find_all("img"):
            tag["src"] = tag["src"].replace("../", "")

        # Replace collapsible <details> blocks with admonition callouts
        # (print output cannot expand them).
        for tag in article.find_all("details"):
            tag.name = "div"
            tag["class"].append("admonition")

            admonition_title = tag.summary
            admonition_title.name = "p"
            admonition_title["class"] = ["admonition-title"]

        # Number the <h1> text and override its "id" attribute.
        # NOTE(review): `title` is only bound when the <h1> has at least
        # one plain-text child; if it had none this would raise
        # NameError — confirm that every page's <h1> carries text.
        for tag in article.find_all("h1"):
            for child in tag.children:
                if child.name:
                    continue
                title = f"{prefix} {child}"

            tag.string = title
            tag["id"] = path

        # Remove the "¶" headerlink anchors inside headings.
        for tag in article.find_all("a", "headerlink"):
            tag.extract()

        # Prefix h2–h6 "id" attributes with the page path so they stay
        # unique across the merged document.
        for tag in article.find_all(re.compile(r"^h[2-6]$"), attrs={"id": True}):
            tag["id"] = os.path.join(path, tag["id"])

        # Rewrite relative links as in-document fragment anchors.
        for tag in article.find_all(attrs={"href": True}):
            tag["href"] = update_href(path, tag["href"])

        # Same for relative links inside EBNF diagrams (SVG xlink).
        for tag in article.find_all(attrs={"xlink:href": True}):
            tag["xlink:href"] = update_href(path, tag["xlink:href"])

        # Remove <hr> tags.
        for tag in article.find_all("hr"):
            tag.extract()

        # Pull EBNF diagrams out of their admonition callouts: drop the
        # title and unwrap the surrounding container.
        for tag in article.find_all("p", "admonition-title"):
            p = tag.find_next_sibling()
            if p and p.svg:
                tag.extract()
                p.find_parent().unwrap()

        articles.append(article)

    # Build the table of contents from the collected <h1> elements.
    ul = articles.new_tag("ul")

    for article in articles.find_all("article"):
        h1 = article.h1  # type: ignore
        a = articles.new_tag("a")
        a.string = h1.string
        a["href"] = f"#{h1["id"]}"

        li = articles.new_tag("li")
        li.append(a)

        ul.append(li)

    h1 = articles.new_tag("h1")
    # Runtime string: "Table of contents" heading shown in the PDF (Russian).
    h1.string = "Содержание"

    toc = articles.new_tag("article")
    toc["class"] = ["md-content__inner md-typeset pd-toc"]
    toc.append(h1)
    toc.append(ul)

    # Outer page skeleton (fonts, stylesheets, material markup).
    with open("tools/pdf_templates/main.html", "r") as f:
        output = f.read()

    main = BeautifulSoup(output, "html.parser")

    # Cover page; its <h1> is filled with the documented version.
    with open("tools/pdf_templates/cover.html", "r") as f:
        output = f.read()

    cover = BeautifulSoup(output, "html.parser")

    assert isinstance(cover.h1, Tag)
    cover.h1.string = f"Picodata {picodata_doc_ver}"

    # Assemble: cover, then TOC, then all page articles.
    assert isinstance(main.body, Tag)
    assert isinstance(main.body.div, Tag)
    main.body.div.append(cover)
    main.body.div.append(toc)
    main.body.div.append(articles)

    docs = os.path.join(config["site_dir"], "picodata_docs.html")

    with open(docs, "w") as f:
        f.write(str(main))
diff --git a/mkdocs.yml b/mkdocs.yml
index 35d715035b77807751c8024277414eba7748ff76..a7dc154f26f90a2d4dfed99f99f9531a99c80369 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -303,3 +303,4 @@ hooks:
   - ./hooks/check_sorting.py
   - ./hooks/validate_system_tables.py
   - ./hooks/inline_svg.py
+  - ./hooks/export_to_pdf.py
diff --git a/tools/pdf_templates/cover.html b/tools/pdf_templates/cover.html
new file mode 100644
index 0000000000000000000000000000000000000000..4c4a159ad15bd1734ed86f6bbbe600488f023c3f
--- /dev/null
+++ b/tools/pdf_templates/cover.html
@@ -0,0 +1,9 @@
+<article class="pd-cover">
+  <div class="wrapper"></div>
+  <div class="wrapper md-typeset">
+    <img src="assets/icon.svg">
+    <h1></h1>
+    <h2>Описание программного обеспечения</h2>
+  </div>
+  <div class="wrapper"></div>
+</article>
diff --git a/tools/pdf_templates/main.html b/tools/pdf_templates/main.html
new file mode 100644
index 0000000000000000000000000000000000000000..342832995bea23d2ab6179a15da6d73638321c0b
--- /dev/null
+++ b/tools/pdf_templates/main.html
@@ -0,0 +1,22 @@
+<!doctype html>
+<html lang="ru">
+<head>
+  <meta charset="utf-8">
+  <meta name="viewport" content="width=device-width,initial-scale=1">
+  <style>:root{--md-text-font:"Inter";--md-code-font:"Roboto Mono"}</style>
+  <link rel="preconnect" href="https://fonts.googleapis.com">
+  <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
+  <link href="https://fonts.googleapis.com/css2?family=Montserrat:ital,wght@0,100..900;1,100..900&display=swap" rel="stylesheet">
+  <link href="https://fonts.googleapis.com/css?family=Inter:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&display=fallback" rel="stylesheet">
+  <link href="assets/stylesheets/main.6543a935.min.css" rel="stylesheet">
+  <link href="extra/style.css" rel="stylesheet">
+</head>
+<body dir="ltr">
+  <input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off">
+  <input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off">
+  <header class="md-header md-header--shadow" data-md-component="header"></header>
+  <div class="md-content" data-md-component="content"></div>
+  <script id="__config" type="application/json">{"base": "", "features": ["content.code.annotate"]}</script>
+  <script src="assets/javascripts/bundle.fe8b6f2b.min.js"></script>
+</body>
+</html>