Mirror of https://github.com/jackyzha0/quartz.git (synced 2025-12-01 10:17:57 +01:00)
Compare commits (4 commits): feat/seman ... chore/upda
| Author | SHA1 | Date |
|---|---|---|
| | 7a335c7a5b | |
| | 863a394a37 | |
| | ba2b8bb0c5 | |
| | 79e7e782ac | |
.github/workflows/build-preview.yaml (vendored, 4 changed lines)
@@ -11,12 +11,12 @@ jobs:
    runs-on: ubuntu-latest
    name: Build Preview
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node
        uses: actions/setup-node@v5
        uses: actions/setup-node@v4
        with:
          node-version: 22
.github/workflows/ci.yaml (vendored, 8 changed lines)
@@ -19,12 +19,12 @@ jobs:
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node
        uses: actions/setup-node@v5
        uses: actions/setup-node@v4
        with:
          node-version: 22

@@ -53,11 +53,11 @@ jobs:
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Setup Node
        uses: actions/setup-node@v5
        uses: actions/setup-node@v4
        with:
          node-version: 22
      - name: Get package version
.github/workflows/deploy-preview.yaml (vendored, 2 changed lines)
@@ -18,7 +18,7 @@ jobs:
    name: Deploy Preview to Cloudflare Pages
    steps:
      - name: Download build artifact
        uses: actions/download-artifact@v5
        uses: actions/download-artifact@v4
        id: preview-build-artifact
        with:
          name: preview-build
.github/workflows/docker-build-push.yaml (vendored, 6 changed lines)
@@ -21,11 +21,11 @@ jobs:
          echo "OWNER_LOWERCASE=${OWNER,,}" >> ${GITHUB_ENV}
        env:
          OWNER: "${{ github.repository_owner }}"
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v5.2.0
        uses: rlespinasse/github-slug-action@v5.1.0
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx

@@ -37,7 +37,7 @@ jobs:
            network=host
      - name: Install cosign
        if: github.event_name != 'pull_request'
        uses: sigstore/cosign-installer@v3.10.0
        uses: sigstore/cosign-installer@v3.9.1
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        if: github.event_name != 'pull_request'
@@ -34,8 +34,6 @@ This part of the configuration concerns anything that can affect the whole site.
- `{ provider: 'tinylytics', siteId: '<your-site-id>' }`: use [Tinylytics](https://tinylytics.app/);
- `{ provider: 'cabin' }` or `{ provider: 'cabin', host: 'https://cabin.example.com' }` (custom domain): use [Cabin](https://withcabin.com);
- `{ provider: 'clarity', projectId: '<your-clarity-id-code>' }`: use [Microsoft Clarity](https://clarity.microsoft.com/). The project ID can be found at the top of the overview page.
- `{ provider: 'matomo', siteId: '<your-matomo-id-code>', host: 'matomo.example.com' }`: use [Matomo](https://matomo.org/); specify the host without the protocol.
- `{ provider: 'vercel' }`: use [Vercel Web Analytics](https://vercel.com/docs/concepts/analytics).
- `locale`: used for [[i18n]] and date formatting.
- `baseUrl`: used for sitemaps and RSS feeds that require an absolute URL to know where the canonical 'home' of your site lives. This is normally the deployed URL of your site (e.g. `quartz.jzhao.xyz` for this site). Do not include the protocol (i.e. `https://`) or any leading or trailing slashes.
  - This should also include the subpath if you are [[hosting]] on GitHub Pages without a custom domain. For example, if my repository is `jackyzha0/quartz`, GitHub Pages would deploy to `https://jackyzha0.github.io/quartz` and the `baseUrl` would be `jackyzha0.github.io/quartz`.
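As a concrete illustration, a minimal `quartz.config.ts` sketch wiring one of the providers above together with `locale` and `baseUrl` might look like this (the values are placeholders, not taken from this repository, and most other required options are omitted):

  import { QuartzConfig } from "./quartz/cfg"

  const config: QuartzConfig = {
    configuration: {
      pageTitle: "My Garden",
      locale: "en-US",
      // no protocol, no leading or trailing slashes; include the subpath on GitHub Pages
      baseUrl: "jackyzha0.github.io/quartz",
      // any provider listed above fits here, e.g. Matomo:
      analytics: { provider: "matomo", siteId: "1", host: "matomo.example.com" },
      // ...theme and the remaining required options omitted for brevity
    },
    plugins: { transformers: [], filters: [], emitters: [] },
  }

  export default config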
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -15,7 +15,7 @@ However, if you'd like to publish your site to the world, you need a way to host

## Cloudflare Pages

1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/) and select your account.
2. In Account Home, select **Compute (Workers)** > **Workers & Pages** > **Create application** > **Pages** > **Connect to Git**.
2. In Account Home, select **Workers & Pages** > **Create application** > **Pages** > **Connect to Git**.
3. Select the new GitHub repository that you created and, in the **Set up builds and deployments** section, provide the following information:

| Configuration option | Value |
@@ -14,6 +14,10 @@ This plugin accepts the following configuration options:
- `renderEngine`: the engine used to render LaTeX equations. Can be `"katex"` for [KaTeX](https://katex.org/), `"mathjax"` for [MathJax](https://www.mathjax.org/) [SVG rendering](https://docs.mathjax.org/en/latest/output/svg.html), or `"typst"` for [Typst](https://typst.app/) (a newer way to compose equations). Defaults to KaTeX.
- `customMacros`: custom macros for all LaTeX blocks. It takes the form of key-value pairs where the key is a new command name and the value is the expansion of the macro. For example: `{"\\R": "\\mathbb{R}"}`

> [!note] Typst support
>
> Currently, Typst does not support inline math.

## API

- Category: Transformer
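A minimal sketch of how these options might be passed when registering the transformer in `quartz.config.ts` (the registration mirrors the other `Plugin.*()` calls shown later in this compare; the macro is the example above):

  // inside plugins.transformers in quartz.config.ts
  Plugin.Latex({
    renderEngine: "katex", // or "mathjax" / "typst"
    customMacros: {
      "\\R": "\\mathbb{R}", // lets \R expand to \mathbb{R} in every LaTeX block
    },
  }),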
index.d.ts (vendored, 1 changed line)
@@ -13,4 +13,3 @@ interface CustomEventMap {

type ContentIndex = Record<FullSlug, ContentDetails>
declare const fetchData: Promise<ContentIndex>
declare const semanticCfg: import("./quartz/cfg").GlobalConfiguration["semanticSearch"]
package-lock.json (generated, 1905 changed lines): file diff suppressed because it is too large.
package.json (47 changed lines)
@@ -2,7 +2,7 @@
  "name": "@jackyzha0/quartz",
  "description": "🌱 publish your digital garden and notes as a website",
  "private": true,
  "version": "4.5.2",
  "version": "4.5.1",
  "type": "module",
  "author": "jackyzha0 <j.zhao2k19@gmail.com>",
  "license": "MIT",

@@ -36,37 +36,36 @@
  },
  "dependencies": {
    "@clack/prompts": "^0.11.0",
    "@floating-ui/dom": "^1.7.4",
    "@huggingface/transformers": "^3.7.5",
    "@floating-ui/dom": "^1.7.0",
    "@myriaddreamin/rehype-typst": "^0.6.0",
    "@napi-rs/simple-git": "0.1.22",
    "@napi-rs/simple-git": "0.1.19",
    "@tweenjs/tween.js": "^25.0.0",
    "ansi-truncate": "^1.4.0",
    "@webgpu/types": "^0.1.61",
    "ansi-truncate": "^1.2.0",
    "async-mutex": "^0.5.0",
    "chokidar": "^4.0.3",
    "cli-spinner": "^0.2.10",
    "d3": "^7.9.0",
    "esbuild-sass-plugin": "^3.3.1",
    "flexsearch": "^0.8.205",
    "flexsearch": "0.7.43",
    "github-slugger": "^2.0.0",
    "globby": "^15.0.0",
    "globby": "^14.1.0",
    "gray-matter": "^4.0.3",
    "hast-util-to-html": "^9.0.5",
    "hast-util-to-jsx-runtime": "^2.3.6",
    "hast-util-to-string": "^3.0.1",
    "is-absolute-url": "^5.0.0",
    "is-absolute-url": "^4.0.1",
    "js-yaml": "^4.1.0",
    "lightningcss": "^1.30.2",
    "lightningcss": "^1.30.1",
    "mdast-util-find-and-replace": "^3.0.2",
    "mdast-util-to-hast": "^13.2.0",
    "mdast-util-to-string": "^4.0.0",
    "micromorph": "^0.4.5",
    "minimatch": "^10.0.3",
    "onnxruntime-web": "^1.23.0",
    "pixi.js": "^8.13.2",
    "preact": "^10.27.2",
    "preact-render-to-string": "^6.6.1",
    "pretty-bytes": "^7.1.0",
    "minimatch": "^10.0.1",
    "pixi.js": "^8.9.2",
    "preact": "^10.26.7",
    "preact-render-to-string": "^6.5.13",
    "pretty-bytes": "^7.0.0",
    "pretty-time": "^1.1.0",
    "reading-time": "^1.5.0",
    "rehype-autolink-headings": "^7.1.0",

@@ -85,9 +84,9 @@
    "remark-rehype": "^11.1.2",
    "remark-smartypants": "^3.0.2",
    "rfdc": "^1.4.1",
    "satori": "^0.18.3",
    "satori": "^0.13.1",
    "serve-handler": "^6.1.6",
    "sharp": "^0.34.4",
    "sharp": "^0.34.2",
    "shiki": "^1.26.2",
    "source-map-support": "^0.5.21",
    "to-vfile": "^8.0.0",

@@ -95,22 +94,22 @@
    "unified": "^11.0.5",
    "unist-util-visit": "^5.0.0",
    "vfile": "^6.0.3",
    "workerpool": "^9.3.4",
    "ws": "^8.18.3",
    "workerpool": "^9.2.0",
    "ws": "^8.18.2",
    "yargs": "^18.0.0"
  },
  "devDependencies": {
    "@types/d3": "^7.4.3",
    "@types/hast": "^3.0.4",
    "@types/js-yaml": "^4.0.9",
    "@types/node": "^24.6.0",
    "@types/node": "^22.15.23",
    "@types/pretty-time": "^1.1.5",
    "@types/source-map-support": "^0.5.10",
    "@types/ws": "^8.18.1",
    "@types/yargs": "^17.0.33",
    "esbuild": "^0.25.10",
    "prettier": "^3.6.2",
    "tsx": "^4.20.6",
    "typescript": "^5.9.2"
    "esbuild": "^0.25.5",
    "prettier": "^3.5.3",
    "tsx": "^4.19.4",
    "typescript": "^5.8.3"
  }
}
@@ -1,18 +1,6 @@
import { GlobalConfiguration, QuartzConfig } from "./quartz/cfg"
import { QuartzConfig } from "./quartz/cfg"
import * as Plugin from "./quartz/plugins"

const semanticSearch: GlobalConfiguration["semanticSearch"] = {
  enable: true,
  model: "onnx-community/embeddinggemma-300m-ONNX",
  aot: true,
  dims: 768,
  dtype: "fp32",
  shardSizeRows: 1024,
  hnsw: { M: 16, efConstruction: 200 },
  chunking: { chunkSize: 256, chunkOverlap: 64 },
  vllm: { enable: true, concurrency: 16, batchSize: 128 },
}

/**
 * Quartz 4 Configuration
 *

@@ -64,7 +52,6 @@ const config: QuartzConfig = {
      },
    },
  },
  semanticSearch,
  },
  plugins: {
    transformers: [

@@ -97,7 +84,6 @@ const config: QuartzConfig = {
        enableSiteMap: true,
        enableRSS: true,
      }),
      Plugin.SemanticIndex(semanticSearch),
      Plugin.Assets(),
      Plugin.Static(),
      Plugin.Favicon(),
@@ -151,19 +151,16 @@ async function startWatching(
  const changes: ChangeEvent[] = []
  watcher
    .on("add", (fp) => {
      fp = toPosixPath(fp)
      if (buildData.ignored(fp)) return
      changes.push({ path: fp as FilePath, type: "add" })
      void rebuild(changes, clientRefresh, buildData)
    })
    .on("change", (fp) => {
      fp = toPosixPath(fp)
      if (buildData.ignored(fp)) return
      changes.push({ path: fp as FilePath, type: "change" })
      void rebuild(changes, clientRefresh, buildData)
    })
    .on("unlink", (fp) => {
      fp = toPosixPath(fp)
      if (buildData.ignored(fp)) return
      changes.push({ path: fp as FilePath, type: "delete" })
      void rebuild(changes, clientRefresh, buildData)
@@ -42,14 +42,6 @@ export type Analytics =
      provider: "clarity"
      projectId?: string
    }
  | {
      provider: "matomo"
      host: string
      siteId: string
    }
  | {
      provider: "vercel"
    }

export interface GlobalConfiguration {
  pageTitle: string

@@ -78,34 +70,6 @@ export interface GlobalConfiguration {
   * Region Codes: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
   */
  locale: ValidLocale
  /** Semantic search configuration */
  semanticSearch?: {
    enable: boolean
    model: string
    aot: boolean
    dtype: "fp32" | "fp16"
    dims: number
    shardSizeRows: number
    manifestUrl?: string
    manifestBaseUrl?: string
    disableCache?: boolean
    hnsw: {
      M: number
      efConstruction: number
      efSearch?: number
    }
    chunking: {
      chunkSize: number
      chunkOverlap: number
      noChunking?: boolean
    }
    vllm?: {
      enable: boolean
      vllmUrl?: string
      concurrency: number
      batchSize: number
    }
  }
}

export interface QuartzConfig {
@@ -55,14 +55,11 @@ export type FolderState = {
  collapsed: boolean
}

let numExplorers = 0
export default ((userOpts?: Partial<Options>) => {
  const opts: Options = { ...defaultOptions, ...userOpts }
  const { OverflowList, overflowListAfterDOMLoaded } = OverflowListFactory()

  const Explorer: QuartzComponent = ({ cfg, displayClass }: QuartzComponentProps) => {
    const id = `explorer-${numExplorers++}`

    return (
      <div
        class={classNames(displayClass, "explorer")}

@@ -80,7 +77,7 @@ export default ((userOpts?: Partial<Options>) => {
          type="button"
          class="explorer-toggle mobile-explorer hide-until-loaded"
          data-mobile={true}
          aria-controls={id}
          aria-controls="explorer-content"
        >
          <svg
            xmlns="http://www.w3.org/2000/svg"

@@ -119,7 +116,7 @@ export default ((userOpts?: Partial<Options>) => {
            <polyline points="6 9 12 15 18 9"></polyline>
          </svg>
        </button>
        <div id={id} class="explorer-content" aria-expanded={false} role="group">
        <div class="explorer-content" aria-expanded={false}>
          <OverflowList class="explorer-ul" />
        </div>
        <template id="template-file">
@@ -12,9 +12,9 @@ const OverflowList = ({
  )
}

let numLists = 0
let numExplorers = 0
export default () => {
  const id = `list-${numLists++}`
  const id = `list-${numExplorers++}`

  return {
    OverflowList: (props: JSX.HTMLAttributes<HTMLUListElement>) => (
@@ -7,12 +7,10 @@ import { i18n } from "../i18n"
|
||||
|
||||
export interface SearchOptions {
|
||||
enablePreview: boolean
|
||||
includeButton: boolean
|
||||
}
|
||||
|
||||
const defaultOptions: SearchOptions = {
|
||||
enablePreview: true,
|
||||
includeButton: true,
|
||||
}
|
||||
|
||||
export default ((userOpts?: Partial<SearchOptions>) => {
|
||||
@@ -22,6 +20,7 @@ export default ((userOpts?: Partial<SearchOptions>) => {
|
||||
return (
|
||||
<div class={classNames(displayClass, "search")}>
|
||||
<button class="search-button">
|
||||
<p>{i18n(cfg.locale).components.search.title}</p>
|
||||
<svg role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 19.9 19.7">
|
||||
<title>Search</title>
|
||||
<g class="search-path" fill="none">
|
||||
@@ -29,56 +28,20 @@ export default ((userOpts?: Partial<SearchOptions>) => {
|
||||
<circle cx="8" cy="8" r="7" />
|
||||
</g>
|
||||
</svg>
|
||||
<p>{i18n(cfg.locale).components.search.title}</p>
|
||||
</button>
|
||||
<search class="search-container">
|
||||
<form class="search-space">
|
||||
<div class="input-container">
|
||||
<input
|
||||
autocomplete="off"
|
||||
class="search-bar"
|
||||
name="search"
|
||||
type="text"
|
||||
aria-label={searchPlaceholder}
|
||||
placeholder={searchPlaceholder}
|
||||
/>
|
||||
<div class="search-mode-toggle" role="radiogroup" aria-label="Search mode">
|
||||
<button
|
||||
type="button"
|
||||
class="mode-option"
|
||||
data-mode="lexical"
|
||||
aria-pressed="true"
|
||||
aria-label="Full-text search"
|
||||
>
|
||||
<svg viewBox="0 0 20 20" role="img" aria-hidden="true">
|
||||
<g fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round">
|
||||
<path d="M4 6h12M4 10h8M4 14h6" />
|
||||
</g>
|
||||
</svg>
|
||||
<span class="sr-only">Full-text</span>
|
||||
</button>
|
||||
<button
|
||||
type="button"
|
||||
class="mode-option"
|
||||
data-mode="semantic"
|
||||
aria-pressed="false"
|
||||
aria-label="Semantic search"
|
||||
>
|
||||
<svg viewBox="0 0 20 20" role="img" aria-hidden="true">
|
||||
<g fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round">
|
||||
<circle cx="5.2" cy="10" r="2.4" />
|
||||
<circle cx="14.8" cy="4.8" r="2.1" />
|
||||
<circle cx="14.8" cy="15.2" r="2.1" />
|
||||
<path d="M7.1 8.7l5.2-2.4M7.1 11.3l5.2 2.4M14.8 6.9v6.2" />
|
||||
</g>
|
||||
</svg>
|
||||
<span class="sr-only">Semantic</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<output class="search-layout" data-preview={opts.enablePreview} />
|
||||
</form>
|
||||
</search>
|
||||
<div class="search-container">
|
||||
<div class="search-space">
|
||||
<input
|
||||
autocomplete="off"
|
||||
class="search-bar"
|
||||
name="search"
|
||||
type="text"
|
||||
aria-label={searchPlaceholder}
|
||||
placeholder={searchPlaceholder}
|
||||
/>
|
||||
<div class="search-layout" data-preview={opts.enablePreview}></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ const defaultOptions: Options = {
|
||||
layout: "modern",
|
||||
}
|
||||
|
||||
let numTocs = 0
|
||||
export default ((opts?: Partial<Options>) => {
|
||||
const layout = opts?.layout ?? defaultOptions.layout
|
||||
const { OverflowList, overflowListAfterDOMLoaded } = OverflowListFactory()
|
||||
@@ -30,13 +29,12 @@ export default ((opts?: Partial<Options>) => {
|
||||
return null
|
||||
}
|
||||
|
||||
const id = `toc-${numTocs++}`
|
||||
return (
|
||||
<div class={classNames(displayClass, "toc")}>
|
||||
<button
|
||||
type="button"
|
||||
class={fileData.collapseToc ? "collapsed toc-header" : "toc-header"}
|
||||
aria-controls={id}
|
||||
aria-controls="toc-content"
|
||||
aria-expanded={!fileData.collapseToc}
|
||||
>
|
||||
<h3>{i18n(cfg.locale).components.tableOfContents.title}</h3>
|
||||
@@ -55,10 +53,7 @@ export default ((opts?: Partial<Options>) => {
|
||||
<polyline points="6 9 12 15 18 9"></polyline>
|
||||
</svg>
|
||||
</button>
|
||||
<OverflowList
|
||||
id={id}
|
||||
class={fileData.collapseToc ? "collapsed toc-content" : "toc-content"}
|
||||
>
|
||||
<OverflowList class={fileData.collapseToc ? "collapsed toc-content" : "toc-content"}>
|
||||
{fileData.toc.map((tocEntry) => (
|
||||
<li key={tocEntry.slug} class={`depth-${tocEntry.depth}`}>
|
||||
<a href={`#${tocEntry.slug}`} data-for={tocEntry.slug}>
|
||||
|
||||
@@ -25,7 +25,6 @@ const headerRegex = new RegExp(/h[1-6]/)
|
||||
export function pageResources(
|
||||
baseDir: FullSlug | RelativeURL,
|
||||
staticResources: StaticResources,
|
||||
cfg?: GlobalConfiguration,
|
||||
): StaticResources {
|
||||
const contentIndexPath = joinSegments(baseDir, "static/contentIndex.json")
|
||||
const contentIndexScript = `const fetchData = fetch("${contentIndexPath}").then(data => data.json())`
|
||||
@@ -49,12 +48,6 @@ export function pageResources(
|
||||
spaPreserve: true,
|
||||
script: contentIndexScript,
|
||||
},
|
||||
{
|
||||
loadTime: "beforeDOMReady",
|
||||
contentType: "inline",
|
||||
spaPreserve: true,
|
||||
script: `const semanticCfg = ${JSON.stringify(cfg?.semanticSearch ?? {})};`,
|
||||
},
|
||||
...staticResources.js,
|
||||
],
|
||||
additionalHead: staticResources.additionalHead,
|
||||
@@ -238,9 +231,8 @@ export function renderPage(
|
||||
)
|
||||
|
||||
const lang = componentData.fileData.frontmatter?.lang ?? cfg.locale?.split("-")[0] ?? "en"
|
||||
const direction = i18n(cfg.locale).direction ?? "ltr"
|
||||
const doc = (
|
||||
<html lang={lang} dir={direction}>
|
||||
<html lang={lang}>
|
||||
<Head {...componentData} />
|
||||
<body data-slug={slug}>
|
||||
<div id="quartz-root" class="page">
|
||||
|
||||
@@ -68,6 +68,30 @@ type TweenNode = {
|
||||
stop: () => void
|
||||
}
|
||||
|
||||
// workaround for pixijs webgpu issue: https://github.com/pixijs/pixijs/issues/11389
|
||||
async function determineGraphicsAPI(): Promise<"webgpu" | "webgl"> {
|
||||
const adapter = await navigator.gpu?.requestAdapter().catch(() => null)
|
||||
const device = adapter && (await adapter.requestDevice().catch(() => null))
|
||||
if (!device) {
|
||||
return "webgl"
|
||||
}
|
||||
|
||||
const canvas = document.createElement("canvas")
|
||||
const gl =
|
||||
(canvas.getContext("webgl2") as WebGL2RenderingContext | null) ??
|
||||
(canvas.getContext("webgl") as WebGLRenderingContext | null)
|
||||
|
||||
// we have to return webgl so pixijs automatically falls back to canvas
|
||||
if (!gl) {
|
||||
return "webgl"
|
||||
}
|
||||
|
||||
const webglMaxTextures = gl.getParameter(gl.MAX_TEXTURE_IMAGE_UNITS)
|
||||
const webgpuMaxTextures = device.limits.maxSampledTexturesPerShaderStage
|
||||
|
||||
return webglMaxTextures === webgpuMaxTextures ? "webgpu" : "webgl"
|
||||
}
|
||||
|
||||
async function renderGraph(graph: HTMLElement, fullSlug: FullSlug) {
|
||||
const slug = simplifySlug(fullSlug)
|
||||
const visited = getVisited()
|
||||
@@ -349,6 +373,7 @@ async function renderGraph(graph: HTMLElement, fullSlug: FullSlug) {
|
||||
tweens.forEach((tween) => tween.stop())
|
||||
tweens.clear()
|
||||
|
||||
const pixiPreference = await determineGraphicsAPI()
|
||||
const app = new Application()
|
||||
await app.init({
|
||||
width,
|
||||
@@ -357,7 +382,7 @@ async function renderGraph(graph: HTMLElement, fullSlug: FullSlug) {
|
||||
autoStart: false,
|
||||
autoDensity: true,
|
||||
backgroundAlpha: 0,
|
||||
preference: "webgpu",
|
||||
preference: pixiPreference,
|
||||
resolution: window.devicePixelRatio,
|
||||
eventMode: "static",
|
||||
})
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import FlexSearch, { DefaultDocumentSearchResults, Id } from "flexsearch"
|
||||
import FlexSearch from "flexsearch"
|
||||
import { ContentDetails } from "../../plugins/emitters/contentIndex"
|
||||
import { SemanticClient, type SemanticResult } from "./semantic.inline"
|
||||
import { registerEscapeHandler, removeAllChildren, fetchCanonical } from "./util"
|
||||
import { registerEscapeHandler, removeAllChildren } from "./util"
|
||||
import { FullSlug, normalizeRelativeURLs, resolveRelative } from "../../util/path"
|
||||
|
||||
interface Item {
|
||||
@@ -10,51 +9,42 @@ interface Item {
|
||||
title: string
|
||||
content: string
|
||||
tags: string[]
|
||||
[key: string]: any
|
||||
}
|
||||
|
||||
// Can be expanded with things like "term" in the future
|
||||
type SearchType = "basic" | "tags"
|
||||
type SearchMode = "lexical" | "semantic"
|
||||
const SEARCH_MODE_STORAGE_KEY = "quartz:search:mode"
|
||||
|
||||
const loadStoredSearchMode = (): SearchMode | null => {
|
||||
if (typeof window === "undefined") {
|
||||
return null
|
||||
}
|
||||
|
||||
try {
|
||||
const stored = window.localStorage.getItem(SEARCH_MODE_STORAGE_KEY)
|
||||
return stored === "lexical" || stored === "semantic" ? stored : null
|
||||
} catch (err) {
|
||||
console.warn("[Search] failed to read stored search mode:", err)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
const persistSearchMode = (mode: SearchMode) => {
|
||||
if (typeof window === "undefined") {
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
window.localStorage.setItem(SEARCH_MODE_STORAGE_KEY, mode)
|
||||
} catch (err) {
|
||||
console.warn("[Search] failed to persist search mode:", err)
|
||||
}
|
||||
}
|
||||
|
||||
let searchMode: SearchMode = "lexical"
|
||||
let searchType: SearchType = "basic"
|
||||
let currentSearchTerm: string = ""
|
||||
let rawSearchTerm: string = ""
|
||||
let semantic: SemanticClient | null = null
|
||||
let semanticReady = false
|
||||
let semanticInitFailed = false
|
||||
type SimilarityResult = { item: Item; similarity: number }
|
||||
let chunkMetadata: Record<string, { parentSlug: string; chunkId: number }> = {}
|
||||
let manifestIds: string[] = []
|
||||
const encoder = (str: string) => str.toLowerCase().split(/([^a-z]|[^\x00-\x7F])/)
|
||||
let index = new FlexSearch.Document<Item>({
|
||||
charset: "latin:extra",
|
||||
encode: encoder,
|
||||
document: {
|
||||
id: "id",
|
||||
tag: "tags",
|
||||
index: [
|
||||
{
|
||||
field: "title",
|
||||
tokenize: "forward",
|
||||
},
|
||||
{
|
||||
field: "content",
|
||||
tokenize: "forward",
|
||||
},
|
||||
{
|
||||
field: "tags",
|
||||
tokenize: "forward",
|
||||
},
|
||||
],
|
||||
},
|
||||
})
|
||||
|
||||
const p = new DOMParser()
|
||||
const fetchContentCache: Map<FullSlug, Element[]> = new Map()
|
||||
const contextWindowWords = 30
|
||||
const numSearchResults = 8
|
||||
const numTagResults = 5
|
||||
|
||||
const tokenizeTerm = (term: string) => {
|
||||
const tokens = term.split(/\s+/).filter((t) => t.trim() !== "")
|
||||
const tokenLen = tokens.length
|
||||
@@ -112,102 +102,6 @@ function highlight(searchTerm: string, text: string, trim?: boolean) {
|
||||
}`
|
||||
}
|
||||
|
||||
// To be used with search and everything else with flexsearch
|
||||
const encoder = (str: string) =>
|
||||
str
|
||||
.toLowerCase()
|
||||
.split(/\s+/)
|
||||
.filter((token) => token.length > 0)
|
||||
|
||||
/**
|
||||
* Get parent document slug for a chunk ID
|
||||
*/
|
||||
function getParentSlug(slug: string): string {
|
||||
const meta = chunkMetadata[slug]
|
||||
return meta ? meta.parentSlug : slug
|
||||
}
|
||||
|
||||
/**
|
||||
* Aggregate semantic search results from chunks to documents using RRF
|
||||
* @param results Raw semantic results (chunk-level)
|
||||
* @param slugToDocIndex Map from document slug to index in idDataMap
|
||||
* @returns Object with rrfScores (for ranking) and maxScores (for display)
|
||||
*/
|
||||
function aggregateChunkResults(
|
||||
results: SemanticResult[],
|
||||
slugToDocIndex: Map<FullSlug, number>,
|
||||
): { rrfScores: Map<number, number>; maxScores: Map<number, number> } {
|
||||
// Group chunks by parent document
|
||||
const docChunks = new Map<string, Array<{ score: number }>>()
|
||||
|
||||
results.forEach(({ id, score }) => {
|
||||
// id is an index into manifestIds (the chunk IDs from embeddings)
|
||||
const chunkSlug = manifestIds[id]
|
||||
if (!chunkSlug) return
|
||||
|
||||
// Get parent document slug
|
||||
const parentSlug = getParentSlug(chunkSlug)
|
||||
|
||||
if (!docChunks.has(parentSlug)) {
|
||||
docChunks.set(parentSlug, [])
|
||||
}
|
||||
|
||||
docChunks.get(parentSlug)!.push({ score })
|
||||
})
|
||||
|
||||
// Apply RRF for ranking and track max similarity for display
|
||||
const rrfScores = new Map<number, number>()
|
||||
const maxScores = new Map<number, number>()
|
||||
const RRF_K = 60
|
||||
|
||||
for (const [parentSlug, chunks] of docChunks) {
|
||||
const docIdx = slugToDocIndex.get(parentSlug as FullSlug)
|
||||
if (typeof docIdx !== "number") continue
|
||||
|
||||
// Sort chunks by score descending to assign per-document ranks
|
||||
chunks.sort((a, b) => b.score - a.score)
|
||||
|
||||
// RRF formula: sum(1 / (k + rank)) across all chunks, using per-document ranks
|
||||
const rrfScore = chunks.reduce((sum, _, rank) => sum + 1.0 / (RRF_K + rank), 0)
|
||||
|
||||
// Max similarity score for display (original 0-1 range)
|
||||
const maxScore = chunks[0].score
|
||||
|
||||
rrfScores.set(docIdx, rrfScore)
|
||||
maxScores.set(docIdx, maxScore)
|
||||
}
|
||||
|
||||
return { rrfScores, maxScores }
|
||||
}
|
||||
|
||||
// Initialize the FlexSearch Document instance with the appropriate configuration
|
||||
const index = new FlexSearch.Document<Item>({
|
||||
tokenize: "forward",
|
||||
encode: encoder,
|
||||
document: {
|
||||
id: "id",
|
||||
tag: "tags",
|
||||
index: [
|
||||
{
|
||||
field: "title",
|
||||
tokenize: "forward",
|
||||
},
|
||||
{
|
||||
field: "content",
|
||||
tokenize: "forward",
|
||||
},
|
||||
{
|
||||
field: "tags",
|
||||
tokenize: "forward",
|
||||
},
|
||||
],
|
||||
},
|
||||
})
|
||||
|
||||
const p = new DOMParser()
|
||||
const fetchContentCache: Map<FullSlug, Element[]> = new Map()
|
||||
const numSearchResults = 10
|
||||
const numTagResults = 10
|
||||
function highlightHTML(searchTerm: string, el: HTMLElement) {
|
||||
const p = new DOMParser()
|
||||
const tokenizedTerms = tokenizeTerm(searchTerm)
|
||||
@@ -249,11 +143,7 @@ function highlightHTML(searchTerm: string, el: HTMLElement) {
|
||||
return html.body
|
||||
}
|
||||
|
||||
async function setupSearch(
|
||||
searchElement: HTMLDivElement,
|
||||
currentSlug: FullSlug,
|
||||
data: ContentIndex,
|
||||
) {
|
||||
async function setupSearch(searchElement: Element, currentSlug: FullSlug, data: ContentIndex) {
|
||||
const container = searchElement.querySelector(".search-container") as HTMLElement
|
||||
if (!container) return
|
||||
|
||||
@@ -268,183 +158,12 @@ async function setupSearch(
|
||||
const searchLayout = searchElement.querySelector(".search-layout") as HTMLElement
|
||||
if (!searchLayout) return
|
||||
|
||||
const searchSpace = searchElement?.querySelector(".search-space") as HTMLFormElement
|
||||
if (!searchSpace) return
|
||||
|
||||
// Create semantic search progress bar
|
||||
const progressBar = document.createElement("div")
|
||||
progressBar.className = "semantic-search-progress"
|
||||
progressBar.style.cssText = `
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
height: 2px;
|
||||
width: 0;
|
||||
background: var(--secondary);
|
||||
transition: width 0.3s ease, opacity 0.3s ease;
|
||||
opacity: 0;
|
||||
z-index: 9999;
|
||||
`
|
||||
searchBar.parentElement?.appendChild(progressBar)
|
||||
|
||||
const startSemanticProgress = () => {
|
||||
progressBar.style.opacity = "1"
|
||||
progressBar.style.width = "0"
|
||||
setTimeout(() => {
|
||||
progressBar.style.width = "100%"
|
||||
}, 10)
|
||||
}
|
||||
|
||||
const completeSemanticProgress = () => {
|
||||
progressBar.style.opacity = "0"
|
||||
setTimeout(() => {
|
||||
progressBar.style.width = "0"
|
||||
}, 300)
|
||||
}
|
||||
|
||||
const resetProgressBar = () => {
|
||||
progressBar.style.opacity = "0"
|
||||
progressBar.style.width = "0"
|
||||
}
|
||||
|
||||
const idDataMap = Object.keys(data) as FullSlug[]
|
||||
const slugToIndex = new Map<FullSlug, number>()
|
||||
idDataMap.forEach((slug, idx) => slugToIndex.set(slug, idx))
|
||||
const modeToggle = searchSpace.querySelector(".search-mode-toggle") as HTMLDivElement | null
|
||||
const modeButtons = modeToggle
|
||||
? Array.from(modeToggle.querySelectorAll<HTMLButtonElement>(".mode-option"))
|
||||
: []
|
||||
|
||||
const appendLayout = (el: HTMLElement) => {
|
||||
searchLayout.appendChild(el)
|
||||
}
|
||||
|
||||
const enablePreview = searchLayout.dataset.preview === "true"
|
||||
if (!semantic && !semanticInitFailed) {
|
||||
const client = new SemanticClient(semanticCfg)
|
||||
try {
|
||||
await client.ensureReady()
|
||||
semantic = client
|
||||
semanticReady = true
|
||||
|
||||
// Load chunk metadata and IDs from manifest
|
||||
try {
|
||||
const manifestUrl = "/embeddings/manifest.json"
|
||||
const res = await fetch(manifestUrl)
|
||||
if (res.ok) {
|
||||
const manifest = await res.json()
|
||||
chunkMetadata = manifest.chunkMetadata || {}
|
||||
manifestIds = manifest.ids || []
|
||||
console.debug(
|
||||
`[Search] Loaded manifest: ${manifestIds.length} chunks, ${Object.keys(chunkMetadata).length} chunked documents`,
|
||||
)
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn("[Search] failed to load chunk metadata:", err)
|
||||
chunkMetadata = {}
|
||||
manifestIds = []
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn("[SemanticClient] initialization failed:", err)
|
||||
client.dispose()
|
||||
semantic = null
|
||||
semanticReady = false
|
||||
semanticInitFailed = true
|
||||
}
|
||||
} else if (semantic && !semanticReady) {
|
||||
try {
|
||||
await semantic.ensureReady()
|
||||
semanticReady = true
|
||||
} catch (err) {
|
||||
console.warn("[SemanticClient] became unavailable:", err)
|
||||
semantic.dispose()
|
||||
semantic = null
|
||||
semanticReady = false
|
||||
semanticInitFailed = true
|
||||
}
|
||||
}
|
||||
const storedMode = loadStoredSearchMode()
|
||||
if (storedMode === "semantic") {
|
||||
if (semanticReady) {
|
||||
searchMode = storedMode
|
||||
}
|
||||
} else if (storedMode === "lexical") {
|
||||
searchMode = storedMode
|
||||
}
|
||||
if (!semanticReady && searchMode === "semantic") {
|
||||
searchMode = "lexical"
|
||||
}
|
||||
let searchSeq = 0
|
||||
let runSearchTimer: number | null = null
|
||||
let lastInputAt = 0
|
||||
searchLayout.dataset.mode = searchMode
|
||||
|
||||
const updateModeUI = (mode: SearchMode) => {
|
||||
modeButtons.forEach((button) => {
|
||||
const btnMode = (button.dataset.mode as SearchMode) ?? "lexical"
|
||||
const isActive = btnMode === mode
|
||||
button.classList.toggle("active", isActive)
|
||||
button.setAttribute("aria-pressed", String(isActive))
|
||||
})
|
||||
if (modeToggle) {
|
||||
modeToggle.dataset.mode = mode
|
||||
}
|
||||
searchLayout.dataset.mode = mode
|
||||
}
|
||||
|
||||
const computeDebounceDelay = (term: string): number => {
|
||||
const trimmed = term.trim()
|
||||
const lastTerm = currentSearchTerm
|
||||
const isExtension =
|
||||
lastTerm.length > 0 && trimmed.length > lastTerm.length && trimmed.startsWith(lastTerm)
|
||||
const isRetraction = lastTerm.length > trimmed.length
|
||||
const isReplacement =
|
||||
lastTerm.length > 0 && !trimmed.startsWith(lastTerm) && !lastTerm.startsWith(trimmed)
|
||||
const baseFullQueryDelay = 200
|
||||
const semanticPenalty = searchMode === "semantic" ? 60 : 0
|
||||
|
||||
if (isExtension && trimmed.length > 2) {
|
||||
return baseFullQueryDelay + semanticPenalty
|
||||
}
|
||||
|
||||
if (isReplacement && trimmed.length > 3) {
|
||||
return Math.max(90, baseFullQueryDelay - 80)
|
||||
}
|
||||
|
||||
if (isRetraction) {
|
||||
return 90
|
||||
}
|
||||
|
||||
return baseFullQueryDelay + (searchMode === "semantic" ? 40 : 0)
|
||||
}
|
||||
|
||||
const triggerSearchWithMode = (mode: SearchMode) => {
|
||||
if (mode === "semantic" && !semanticReady) {
|
||||
return
|
||||
}
|
||||
if (searchMode === mode) return
|
||||
searchMode = mode
|
||||
updateModeUI(mode)
|
||||
persistSearchMode(searchMode)
|
||||
if (rawSearchTerm.trim() !== "") {
|
||||
searchLayout.classList.add("display-results")
|
||||
const token = ++searchSeq
|
||||
void runSearch(rawSearchTerm, token)
|
||||
}
|
||||
}
|
||||
|
||||
updateModeUI(searchMode)
|
||||
|
||||
modeButtons.forEach((button) => {
|
||||
const btnMode = (button.dataset.mode as SearchMode) ?? "lexical"
|
||||
if (btnMode === "semantic") {
|
||||
button.disabled = !semanticReady
|
||||
button.setAttribute("aria-disabled", String(!semanticReady))
|
||||
}
|
||||
const handler = () => triggerSearchWithMode(btnMode)
|
||||
button.addEventListener("click", handler)
|
||||
window.addCleanup(() => button.removeEventListener("click", handler))
|
||||
})
|
||||
let preview: HTMLDivElement | undefined = undefined
|
||||
let previewInner: HTMLDivElement | undefined = undefined
|
||||
const results = document.createElement("div")
|
||||
@@ -466,23 +185,20 @@ async function setupSearch(
|
||||
removeAllChildren(preview)
|
||||
}
|
||||
searchLayout.classList.remove("display-results")
|
||||
searchType = "basic" // reset search type after closing
|
||||
searchButton.focus()
|
||||
resetProgressBar()
|
||||
}
|
||||
|
||||
function showSearch(type: SearchType) {
|
||||
function showSearch(searchTypeNew: SearchType) {
|
||||
searchType = searchTypeNew
|
||||
if (sidebar) sidebar.style.zIndex = "1"
|
||||
container.classList.add("active")
|
||||
if (type === "tags") {
|
||||
searchBar.value = "#"
|
||||
rawSearchTerm = "#"
|
||||
}
|
||||
searchBar.focus()
|
||||
}
|
||||
|
||||
let currentHover: HTMLInputElement | null = null
|
||||
|
||||
async function shortcutHandler(e: HTMLElementEventMap["keydown"]) {
|
||||
if ((e.key === "/" || e.key === "k") && (e.ctrlKey || e.metaKey) && !e.shiftKey) {
|
||||
if (e.key === "k" && (e.ctrlKey || e.metaKey) && !e.shiftKey) {
|
||||
e.preventDefault()
|
||||
const searchBarOpen = container.classList.contains("active")
|
||||
searchBarOpen ? hideSearch() : showSearch("basic")
|
||||
@@ -492,6 +208,9 @@ async function setupSearch(
|
||||
e.preventDefault()
|
||||
const searchBarOpen = container.classList.contains("active")
|
||||
searchBarOpen ? hideSearch() : showSearch("tags")
|
||||
|
||||
// add "#" prefix for tag search
|
||||
searchBar.value = "#"
|
||||
return
|
||||
}
|
||||
|
||||
@@ -503,27 +222,18 @@ async function setupSearch(
|
||||
if (!container.classList.contains("active")) return
|
||||
if (e.key === "Enter") {
|
||||
// If result has focus, navigate to that one, otherwise pick first result
|
||||
let anchor: HTMLAnchorElement | undefined
|
||||
if (results.contains(document.activeElement)) {
|
||||
anchor = document.activeElement as HTMLAnchorElement
|
||||
if (anchor.classList.contains("no-match")) return
|
||||
await displayPreview(anchor)
|
||||
e.preventDefault()
|
||||
anchor.click()
|
||||
const active = document.activeElement as HTMLInputElement
|
||||
if (active.classList.contains("no-match")) return
|
||||
await displayPreview(active)
|
||||
active.click()
|
||||
} else {
|
||||
anchor = document.getElementsByClassName("result-card")[0] as HTMLAnchorElement
|
||||
const anchor = document.getElementsByClassName("result-card")[0] as HTMLInputElement | null
|
||||
if (!anchor || anchor.classList.contains("no-match")) return
|
||||
await displayPreview(anchor)
|
||||
e.preventDefault()
|
||||
anchor.click()
|
||||
}
|
||||
if (anchor !== undefined)
|
||||
window.spaNavigate(new URL(new URL(anchor.href).pathname, window.location.toString()))
|
||||
} else if (
|
||||
e.key === "ArrowUp" ||
|
||||
(e.shiftKey && e.key === "Tab") ||
|
||||
(e.ctrlKey && e.key === "p")
|
||||
) {
|
||||
} else if (e.key === "ArrowUp" || (e.shiftKey && e.key === "Tab")) {
|
||||
e.preventDefault()
|
||||
if (results.contains(document.activeElement)) {
|
||||
// If an element in results-container already has focus, focus previous one
|
||||
@@ -536,7 +246,7 @@ async function setupSearch(
|
||||
if (prevResult) currentHover = prevResult
|
||||
await displayPreview(prevResult)
|
||||
}
|
||||
} else if (e.key === "ArrowDown" || e.key === "Tab" || (e.ctrlKey && e.key === "n")) {
|
||||
} else if (e.key === "ArrowDown" || e.key === "Tab") {
|
||||
e.preventDefault()
|
||||
// The results should already have been focused, so we need to find the next one.
|
||||
// The activeElement is the search bar, so we need to find the first result and focus it.
|
||||
@@ -553,33 +263,25 @@ async function setupSearch(
|
||||
}
|
||||
}
|
||||
|
||||
const formatForDisplay = (term: string, id: number, renderType: SearchType) => {
|
||||
const formatForDisplay = (term: string, id: number) => {
|
||||
const slug = idDataMap[id]
|
||||
|
||||
// Check if query contains title words (for boosting exact matches)
|
||||
const queryTokens = tokenizeTerm(term)
|
||||
const titleTokens = tokenizeTerm(data[slug].title ?? "")
|
||||
const titleMatch = titleTokens.some((t) => queryTokens.includes(t))
|
||||
|
||||
return {
|
||||
id,
|
||||
slug,
|
||||
title: renderType === "tags" ? data[slug].title : highlight(term, data[slug].title ?? ""),
|
||||
title: searchType === "tags" ? data[slug].title : highlight(term, data[slug].title ?? ""),
|
||||
content: highlight(term, data[slug].content ?? "", true),
|
||||
tags: highlightTags(term, data[slug].tags, renderType),
|
||||
titleMatch, // Add title match flag for boosting
|
||||
tags: highlightTags(term.substring(1), data[slug].tags),
|
||||
}
|
||||
}
|
||||
|
||||
function highlightTags(term: string, tags: string[], renderType: SearchType) {
|
||||
if (!tags || renderType !== "tags") {
|
||||
function highlightTags(term: string, tags: string[]) {
|
||||
if (!tags || searchType !== "tags") {
|
||||
return []
|
||||
}
|
||||
|
||||
const tagTerm = term.toLowerCase()
|
||||
return tags
|
||||
.map((tag) => {
|
||||
if (tag.toLowerCase().includes(tagTerm)) {
|
||||
if (tag.toLowerCase().includes(term.toLowerCase())) {
|
||||
return `<li><p class="match-tag">#${tag}</p></li>`
|
||||
} else {
|
||||
return `<li><p>#${tag}</p></li>`
|
||||
@@ -592,40 +294,24 @@ async function setupSearch(
|
||||
return new URL(resolveRelative(currentSlug, slug), location.toString())
|
||||
}
|
||||
|
||||
const resultToHTML = ({ item, percent }: { item: Item; percent: number | null }) => {
|
||||
const { slug, title, content, tags, target } = item
|
||||
const resultToHTML = ({ slug, title, content, tags }: Item) => {
|
||||
const htmlTags = tags.length > 0 ? `<ul class="tags">${tags.join("")}</ul>` : ``
|
||||
const itemTile = document.createElement("a")
|
||||
const titleContent = target ? highlight(currentSearchTerm, target) : title
|
||||
const subscript = target ? `<b>${slug}</b>` : ``
|
||||
let percentLabel = "—"
|
||||
let percentAttr = ""
|
||||
if (percent !== null && Number.isFinite(percent)) {
|
||||
const bounded = Math.max(0, Math.min(100, percent))
|
||||
percentLabel = `${bounded.toFixed(1)}%`
|
||||
percentAttr = bounded.toFixed(3)
|
||||
}
|
||||
itemTile.classList.add("result-card")
|
||||
itemTile.id = slug
|
||||
itemTile.href = resolveUrl(slug).toString()
|
||||
itemTile.innerHTML = `<hgroup>
|
||||
<h3>${titleContent}</h3>
|
||||
${subscript}${htmlTags}
|
||||
${searchMode === "semantic" ? `<span class="result-likelihood" title="match likelihood"> ${percentLabel}</span>` : ""}
|
||||
${enablePreview && window.innerWidth > 600 ? "" : `<p>${content}</p>`}
|
||||
</hgroup>`
|
||||
if (percentAttr) itemTile.dataset.scorePercent = percentAttr
|
||||
else delete itemTile.dataset.scorePercent
|
||||
itemTile.innerHTML = `
|
||||
<h3 class="card-title">${title}</h3>
|
||||
${htmlTags}
|
||||
<p class="card-description">${content}</p>
|
||||
`
|
||||
itemTile.addEventListener("click", (event) => {
|
||||
if (event.altKey || event.ctrlKey || event.metaKey || event.shiftKey) return
|
||||
hideSearch()
|
||||
})
|
||||
|
||||
const handler = (evt: MouseEvent) => {
|
||||
if (evt.altKey || evt.ctrlKey || evt.metaKey || evt.shiftKey) return
|
||||
const anchor = evt.currentTarget as HTMLAnchorElement | null
|
||||
if (!anchor) return
|
||||
evt.preventDefault()
|
||||
const href = anchor.getAttribute("href")
|
||||
if (!href) return
|
||||
const url = new URL(href, window.location.toString())
|
||||
window.spaNavigate(url)
|
||||
const handler = (event: MouseEvent) => {
|
||||
if (event.altKey || event.ctrlKey || event.metaKey || event.shiftKey) return
|
||||
hideSearch()
|
||||
}
|
||||
|
||||
@@ -643,22 +329,15 @@ async function setupSearch(
|
||||
return itemTile
|
||||
}
|
||||
|
||||
async function displayResults(finalResults: SimilarityResult[]) {
|
||||
async function displayResults(finalResults: Item[]) {
|
||||
removeAllChildren(results)
|
||||
if (finalResults.length === 0) {
|
||||
results.innerHTML = `<a class="result-card no-match">
|
||||
<h3>No results.</h3>
|
||||
<p>Try another search term?</p>
|
||||
</a>`
|
||||
currentHover = null
|
||||
} else {
|
||||
const decorated = finalResults.map(({ item, similarity }) => {
|
||||
if (!Number.isFinite(similarity)) return { item, percent: null }
|
||||
const bounded = Math.max(-1, Math.min(1, similarity))
|
||||
const percent = ((bounded + 1) / 2) * 100
|
||||
return { item, percent }
|
||||
})
|
||||
results.append(...decorated.map(resultToHTML))
|
||||
results.append(...finalResults.map(resultToHTML))
|
||||
}
|
||||
|
||||
if (finalResults.length === 0 && preview) {
|
||||
@@ -678,8 +357,8 @@ async function setupSearch(
|
||||
return fetchContentCache.get(slug) as Element[]
|
||||
}
|
||||
|
||||
const targetUrl = resolveUrl(slug)
|
||||
const contents = await fetchCanonical(targetUrl)
|
||||
const targetUrl = resolveUrl(slug).toString()
|
||||
const contents = await fetch(targetUrl)
|
||||
.then((res) => res.text())
|
||||
.then((contents) => {
|
||||
if (contents === undefined) {
|
||||
@@ -709,296 +388,73 @@ async function setupSearch(
|
||||
const highlights = [...preview.getElementsByClassName("highlight")].sort(
|
||||
(a, b) => b.innerHTML.length - a.innerHTML.length,
|
||||
)
|
||||
if (highlights.length > 0) {
|
||||
const highlight = highlights[0]
|
||||
const container = preview
|
||||
if (container && highlight) {
|
||||
// Get the relative positions
|
||||
const containerRect = container.getBoundingClientRect()
|
||||
const highlightRect = highlight.getBoundingClientRect()
|
||||
// Calculate the scroll position relative to the container
|
||||
const relativeTop = highlightRect.top - containerRect.top + container.scrollTop - 20 // 20px buffer
|
||||
// Smoothly scroll the container
|
||||
container.scrollTo({
|
||||
top: relativeTop,
|
||||
behavior: "smooth",
|
||||
})
|
||||
}
|
||||
}
|
||||
highlights[0]?.scrollIntoView({ block: "start" })
|
||||
}
|
||||
|
||||
async function runSearch(rawTerm: string, token: number) {
|
||||
async function onType(e: HTMLElementEventMap["input"]) {
|
||||
if (!searchLayout || !index) return
|
||||
const trimmed = rawTerm.trim()
|
||||
if (trimmed === "") {
|
||||
removeAllChildren(results)
|
||||
if (preview) {
|
||||
removeAllChildren(preview)
|
||||
}
|
||||
currentHover = null
|
||||
searchLayout.classList.remove("display-results")
|
||||
resetProgressBar()
|
||||
return
|
||||
}
|
||||
currentSearchTerm = (e.target as HTMLInputElement).value
|
||||
searchLayout.classList.toggle("display-results", currentSearchTerm !== "")
|
||||
searchType = currentSearchTerm.startsWith("#") ? "tags" : "basic"
|
||||
|
||||
const modeForRanking: SearchMode = searchMode
|
||||
const initialType: SearchType = trimmed.startsWith("#") ? "tags" : "basic"
|
||||
let workingType: SearchType = initialType
|
||||
let highlightTerm = trimmed
|
||||
let tagTerm = ""
|
||||
let searchResults: DefaultDocumentSearchResults<Item> = []
|
||||
|
||||
if (initialType === "tags") {
|
||||
tagTerm = trimmed.substring(1).trim()
|
||||
const separatorIndex = tagTerm.indexOf(" ")
|
||||
if (separatorIndex !== -1) {
|
||||
const tag = tagTerm.substring(0, separatorIndex).trim()
|
||||
const query = tagTerm.substring(separatorIndex + 1).trim()
|
||||
const results = await index.searchAsync({
|
||||
query,
|
||||
let searchResults: FlexSearch.SimpleDocumentSearchResultSetUnit[]
|
||||
if (searchType === "tags") {
|
||||
currentSearchTerm = currentSearchTerm.substring(1).trim()
|
||||
const separatorIndex = currentSearchTerm.indexOf(" ")
|
||||
if (separatorIndex != -1) {
|
||||
// search by title and content index and then filter by tag (implemented in flexsearch)
|
||||
const tag = currentSearchTerm.substring(0, separatorIndex)
|
||||
const query = currentSearchTerm.substring(separatorIndex + 1).trim()
|
||||
searchResults = await index.searchAsync({
|
||||
query: query,
|
||||
// return at least 10000 documents, so it is enough to filter them by tag (implemented in flexsearch)
|
||||
limit: Math.max(numSearchResults, 10000),
|
||||
index: ["title", "content"],
|
||||
tag: { tags: tag },
|
||||
tag: tag,
|
||||
})
|
||||
if (token !== searchSeq) return
|
||||
searchResults = Object.values(results)
|
||||
workingType = "basic"
|
||||
highlightTerm = query
|
||||
for (let searchResult of searchResults) {
|
||||
searchResult.result = searchResult.result.slice(0, numSearchResults)
|
||||
}
|
||||
// set search type to basic and remove tag from term for proper highlighting and scroll
|
||||
searchType = "basic"
|
||||
currentSearchTerm = query
|
||||
} else {
|
||||
const results = await index.searchAsync({
|
||||
query: tagTerm,
|
||||
// default search by tags index
|
||||
searchResults = await index.searchAsync({
|
||||
query: currentSearchTerm,
|
||||
limit: numSearchResults,
|
||||
index: ["tags"],
|
||||
})
|
||||
if (token !== searchSeq) return
|
||||
searchResults = Object.values(results)
|
||||
highlightTerm = tagTerm
|
||||
}
|
||||
} else {
|
||||
const results = await index.searchAsync({
|
||||
query: highlightTerm,
|
||||
} else if (searchType === "basic") {
|
||||
searchResults = await index.searchAsync({
|
||||
query: currentSearchTerm,
|
||||
limit: numSearchResults,
|
||||
index: ["title", "content"],
|
||||
})
|
||||
if (token !== searchSeq) return
|
||||
searchResults = Object.values(results)
|
||||
}
|
||||
|
||||
const coerceIds = (hit?: DefaultDocumentSearchResults<Item>[number]): number[] => {
|
||||
if (!hit) return []
|
||||
return hit.result
|
||||
.map((value: Id) => {
|
||||
if (typeof value === "number") {
|
||||
return value
|
||||
}
|
||||
const parsed = Number.parseInt(String(value), 10)
|
||||
return Number.isNaN(parsed) ? null : parsed
|
||||
})
|
||||
.filter((value): value is number => value !== null)
|
||||
}
|
||||
|
||||
const getByField = (field: string): number[] => {
|
||||
const hit = searchResults.find((x) => x.field === field)
|
||||
return coerceIds(hit)
|
||||
const results = searchResults.filter((x) => x.field === field)
|
||||
return results.length === 0 ? [] : ([...results[0].result] as number[])
|
||||
}
|
||||
|
||||
// order titles ahead of content
|
||||
const allIds: Set<number> = new Set([
|
||||
...getByField("title"),
|
||||
...getByField("content"),
|
||||
...getByField("tags"),
|
||||
])
|
||||
|
||||
currentSearchTerm = highlightTerm
|
||||
|
||||
const candidateItems = new Map<string, Item>()
|
||||
const ensureItem = (id: number): Item | null => {
|
||||
const slug = idDataMap[id]
|
||||
if (!slug) return null
|
||||
const cached = candidateItems.get(slug)
|
||||
if (cached) return cached
|
||||
const item = formatForDisplay(highlightTerm, id, workingType)
|
||||
if (item) {
|
||||
candidateItems.set(slug, item)
|
||||
return item
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
const baseIndices: number[] = []
|
||||
for (const id of allIds) {
|
||||
const item = ensureItem(id)
|
||||
if (!item) continue
|
||||
const idx = slugToIndex.get(item.slug)
|
||||
if (typeof idx === "number") {
|
||||
baseIndices.push(idx)
|
||||
}
|
||||
}
|
||||
|
||||
let semanticIds: number[] = []
|
||||
const semanticSimilarity = new Map<number, number>()
|
||||
|
||||
const integrateIds = (ids: number[]) => {
|
||||
ids.forEach((docId) => {
|
||||
ensureItem(docId)
|
||||
})
|
||||
}
|
||||
|
||||
const orchestrator = semanticReady && semantic ? semantic : null
|
||||
|
||||
const resolveSimilarity = (item: Item): number => {
|
||||
const semanticHit = semanticSimilarity.get(item.id)
|
||||
return semanticHit ?? Number.NaN
|
||||
}
|
||||
|
||||
const render = async () => {
|
||||
if (token !== searchSeq) return
|
||||
const useSemantic = semanticReady && semanticIds.length > 0
|
||||
const weights =
|
||||
modeForRanking === "semantic" && useSemantic
|
||||
? { base: 0.3, semantic: 1.0 }
|
||||
: { base: 1.0, semantic: useSemantic ? 0.3 : 0 }
|
||||
const rrf = new Map<string, number>()
|
||||
const push = (ids: number[], weight: number, applyTitleBoost: boolean = false) => {
|
||||
if (!ids.length || weight <= 0) return
|
||||
ids.forEach((docId, rank) => {
|
||||
const slug = idDataMap[docId]
|
||||
if (!slug) return
|
||||
const item = ensureItem(docId)
|
||||
if (!item) return
|
||||
|
||||
// Apply title boost for FlexSearch results (1.5x boost for exact title matches)
|
||||
let effectiveWeight = weight
|
||||
if (applyTitleBoost && item.titleMatch) {
|
||||
effectiveWeight *= 1.5
|
||||
}
|
||||
|
||||
const prev = rrf.get(slug) ?? 0
|
||||
rrf.set(slug, prev + effectiveWeight / (1 + rank))
|
||||
})
|
||||
}
|
||||
|
||||
push(baseIndices, weights.base, true) // FlexSearch with title boost
|
||||
push(semanticIds, weights.semantic, false) // Semantic without boost
|
||||
|
||||
const rankedEntries = Array.from(candidateItems.values())
|
||||
.map((item) => ({ item, score: rrf.get(item.slug) ?? 0 }))
|
||||
.sort((a, b) => b.score - a.score)
|
||||
.slice(0, numSearchResults)
|
||||
|
||||
const displayEntries: SimilarityResult[] = []
|
||||
for (const entry of rankedEntries) {
|
||||
const similarity = resolveSimilarity(entry.item)
|
||||
displayEntries.push({ item: entry.item, similarity })
|
||||
}
|
||||
|
||||
await displayResults(displayEntries)
|
||||
}
|
||||
|
||||
await render()
|
||||
|
||||
if (workingType === "tags" || !orchestrator || !semanticReady || highlightTerm.length < 2) {
|
||||
return
|
||||
}
|
||||
|
||||
const showProgress = modeForRanking === "semantic"
|
||||
if (showProgress) {
|
||||
startSemanticProgress()
|
||||
}
|
||||
|
||||
try {
|
||||
const { semantic: semRes } = await orchestrator.search(
|
||||
highlightTerm,
|
||||
numSearchResults * 3, // Request more chunks to ensure good document coverage
|
||||
)
|
||||
if (token !== searchSeq) {
|
||||
if (showProgress) completeSemanticProgress()
|
||||
return
|
||||
}
|
||||
|
||||
// Aggregate chunk results to document level using RRF
|
||||
const { rrfScores: semRrfScores, maxScores: semMaxScores } = aggregateChunkResults(
|
||||
semRes,
|
||||
slugToIndex,
|
||||
)
|
||||
|
||||
// Use RRF scores for ranking
|
||||
semanticIds = Array.from(semRrfScores.entries())
|
||||
.sort((a, b) => b[1] - a[1])
|
||||
.slice(0, numSearchResults)
|
||||
.map(([docIdx]) => docIdx)
|
||||
|
||||
// Use max chunk similarity for display (0-1 range)
|
||||
semanticSimilarity.clear()
|
||||
semMaxScores.forEach((score, docIdx) => {
|
||||
semanticSimilarity.set(docIdx, score)
|
||||
})
|
||||
|
||||
integrateIds(semanticIds)
|
||||
if (showProgress) completeSemanticProgress()
|
||||
} catch (err) {
|
||||
console.warn("[SemanticClient] search failed:", err)
|
||||
if (showProgress) completeSemanticProgress()
|
||||
orchestrator.dispose()
|
||||
semantic = null
|
||||
semanticReady = false
|
||||
semanticInitFailed = true
|
||||
if (searchMode === "semantic") {
|
||||
searchMode = "lexical"
|
||||
updateModeUI(searchMode)
|
||||
}
|
||||
modeButtons.forEach((button) => {
|
||||
if ((button.dataset.mode as SearchMode) === "semantic") {
|
||||
button.disabled = true
|
||||
button.setAttribute("aria-disabled", "true")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
await render()
|
||||
}
|
||||
|
||||
function onType(e: HTMLElementEventMap["input"]) {
|
||||
if (!searchLayout || !index) return
|
||||
rawSearchTerm = (e.target as HTMLInputElement).value
|
||||
const hasQuery = rawSearchTerm.trim() !== ""
|
||||
searchLayout.classList.toggle("display-results", hasQuery)
|
||||
const term = rawSearchTerm
|
||||
const token = ++searchSeq
|
||||
if (runSearchTimer !== null) {
|
||||
window.clearTimeout(runSearchTimer)
|
||||
runSearchTimer = null
|
||||
}
|
||||
if (!hasQuery) {
|
||||
void runSearch("", token)
|
||||
return
|
||||
}
|
||||
const now = performance.now()
|
||||
lastInputAt = now
|
||||
const delay = computeDebounceDelay(term)
|
||||
const scheduledAt = lastInputAt
|
||||
runSearchTimer = window.setTimeout(() => {
|
||||
if (scheduledAt !== lastInputAt) {
|
||||
return
|
||||
}
|
||||
runSearchTimer = null
|
||||
void runSearch(term, token)
|
||||
}, delay)
|
||||
const finalResults = [...allIds].map((id) => formatForDisplay(currentSearchTerm, id))
|
||||
await displayResults(finalResults)
|
||||
}
|
||||
|
||||
document.addEventListener("keydown", shortcutHandler)
|
||||
window.addCleanup(() => document.removeEventListener("keydown", shortcutHandler))
|
||||
const openHandler = () => showSearch("basic")
|
||||
searchButton.addEventListener("click", openHandler)
|
||||
window.addCleanup(() => searchButton.removeEventListener("click", openHandler))
|
||||
searchButton.addEventListener("click", () => showSearch("basic"))
|
||||
window.addCleanup(() => searchButton.removeEventListener("click", () => showSearch("basic")))
|
||||
searchBar.addEventListener("input", onType)
|
||||
window.addCleanup(() => searchBar.removeEventListener("input", onType))
|
||||
window.addCleanup(() => {
|
||||
if (runSearchTimer !== null) {
|
||||
window.clearTimeout(runSearchTimer)
|
||||
runSearchTimer = null
|
||||
}
|
||||
resetProgressBar()
|
||||
})
|
||||
|
||||
registerEscapeHandler(container, hideSearch)
|
||||
await fillDocument(data)
|
||||
@@ -1006,17 +462,17 @@ async function setupSearch(
|
||||
|
||||
/**
|
||||
* Fills flexsearch document with data
|
||||
* @param index index to fill
|
||||
* @param data data to fill index with
|
||||
*/
|
||||
let indexPopulated = false
|
||||
async function fillDocument(data: ContentIndex) {
|
||||
if (indexPopulated) return
|
||||
let id = 0
|
||||
const promises = []
|
||||
const promises: Array<Promise<unknown>> = []
|
||||
for (const [slug, fileData] of Object.entries<ContentDetails>(data)) {
|
||||
promises.push(
|
||||
//@ts-ignore
|
||||
index.addAsync({
|
||||
index.addAsync(id++, {
|
||||
id,
|
||||
slug: slug as FullSlug,
|
||||
title: fileData.title,
|
||||
@@ -1024,7 +480,6 @@ async function fillDocument(data: ContentIndex) {
|
||||
tags: fileData.tags,
|
||||
}),
|
||||
)
|
||||
id++
|
||||
}
|
||||
|
||||
await Promise.all(promises)
|
||||
@@ -1034,9 +489,7 @@ async function fillDocument(data: ContentIndex) {
|
||||
document.addEventListener("nav", async (e: CustomEventMap["nav"]) => {
|
||||
const currentSlug = e.detail.url
|
||||
const data = await fetchData
|
||||
const searchElement = document.getElementsByClassName(
|
||||
"search",
|
||||
) as HTMLCollectionOf<HTMLDivElement>
|
||||
const searchElement = document.getElementsByClassName("search")
|
||||
for (const element of searchElement) {
|
||||
await setupSearch(element, currentSlug, data)
|
||||
}
|
||||
|
||||
@@ -1,182 +0,0 @@
|
||||
export type SemanticResult = { id: number; score: number }
|
||||
|
||||
type ProgressMessage = {
|
||||
type: "progress"
|
||||
loadedRows: number
|
||||
totalRows: number
|
||||
}
|
||||
|
||||
type ReadyMessage = { type: "ready" }
|
||||
|
||||
type ResultMessage = {
|
||||
type: "search-result"
|
||||
seq: number
|
||||
semantic: SemanticResult[]
|
||||
}
|
||||
|
||||
type ErrorMessage = { type: "error"; seq?: number; message: string }
|
||||
|
||||
type SearchPayload = {
|
||||
semantic: SemanticResult[]
|
||||
}
|
||||
|
||||
type PendingResolver = {
|
||||
resolve: (payload: SearchPayload) => void
|
||||
reject: (err: Error) => void
|
||||
}
|
||||
|
||||
export class SemanticClient {
|
||||
private ready: Promise<void>
|
||||
private resolveReady!: () => void
|
||||
private worker: Worker | null = null
|
||||
private pending = new Map<number, PendingResolver>()
|
||||
private seq = 0
|
||||
private disposed = false
|
||||
private readySettled = false
|
||||
private configured = false
|
||||
private lastError: Error | null = null
|
||||
|
||||
constructor(private cfg?: any) {
|
||||
this.ready = new Promise((resolve) => {
|
||||
this.resolveReady = () => {
|
||||
if (this.readySettled) return
|
||||
this.readySettled = true
|
||||
resolve()
|
||||
}
|
||||
})
|
||||
|
||||
if (this.cfg?.enable === false) {
|
||||
this.lastError = new Error("semantic search disabled by configuration")
|
||||
this.resolveReady()
|
||||
return
|
||||
}
|
||||
|
||||
this.boot()
|
||||
}
|
||||
|
||||
private boot() {
|
||||
try {
|
||||
this.worker = new Worker("/semantic.worker.js", { type: "module" })
|
||||
} catch (err) {
|
||||
this.handleFatal(err)
|
||||
return
|
||||
}
|
||||
this.setupWorker()
|
||||
this.startInit()
|
||||
}
|
||||
|
||||
private setupWorker() {
|
||||
if (!this.worker) return
|
||||
this.worker.onmessage = (
|
||||
event: MessageEvent<ProgressMessage | ReadyMessage | ResultMessage | ErrorMessage>,
|
||||
) => {
|
||||
const msg = event.data
|
||||
if (msg.type === "progress") {
|
||||
// Progress updates during initialization - can be logged if needed
|
||||
return
|
||||
}
|
||||
if (msg.type === "ready") {
|
||||
this.configured = true
|
||||
this.lastError = null
|
||||
this.resolveReady()
|
||||
return
|
||||
}
|
||||
if (msg.type === "search-result") {
|
||||
const pending = this.pending.get(msg.seq)
|
||||
if (pending) {
|
||||
this.pending.delete(msg.seq)
|
||||
pending.resolve({ semantic: msg.semantic ?? [] })
|
||||
}
|
||||
return
|
||||
}
|
||||
if (msg.type === "error") {
|
||||
if (typeof msg.seq === "number") {
|
||||
const pending = this.pending.get(msg.seq)
|
||||
if (pending) {
|
||||
this.pending.delete(msg.seq)
|
||||
pending.reject(new Error(msg.message))
|
||||
}
|
||||
} else {
|
||||
this.handleFatal(msg.message)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private startInit() {
|
||||
if (!this.worker) return
|
||||
const manifestUrl =
|
||||
typeof this.cfg?.manifestUrl === "string" && this.cfg.manifestUrl.length > 0
|
||||
? this.cfg.manifestUrl
|
||||
: "/embeddings/manifest.json"
|
||||
const disableCache = Boolean(this.cfg?.disableCache)
|
||||
const baseUrl =
|
||||
typeof this.cfg?.manifestBaseUrl === "string" ? this.cfg.manifestBaseUrl : undefined
|
||||
this.worker.postMessage({
|
||||
type: "init",
|
||||
cfg: this.cfg,
|
||||
manifestUrl,
|
||||
baseUrl,
|
||||
disableCache,
|
||||
})
|
||||
}
|
||||
|
||||
private rejectAll(err: Error, fatal = false) {
|
||||
for (const [id, pending] of this.pending.entries()) {
|
||||
pending.reject(err)
|
||||
this.pending.delete(id)
|
||||
}
|
||||
if (fatal) {
|
||||
this.lastError = err
|
||||
this.configured = false
|
||||
if (!this.readySettled) {
|
||||
this.resolveReady()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private handleFatal(err: unknown) {
|
||||
const error = err instanceof Error ? err : new Error(String(err))
|
||||
console.error("[SemanticClient] initialization failure:", error)
|
||||
this.rejectAll(error, true)
|
||||
if (this.worker) {
|
||||
this.worker.postMessage({ type: "reset" })
|
||||
this.worker.terminate()
|
||||
this.worker = null
|
||||
}
|
||||
}
|
||||
|
||||
async ensureReady() {
|
||||
await this.ready
|
||||
if (!this.configured) {
|
||||
throw this.lastError ?? new Error("semantic search unavailable")
|
||||
}
|
||||
}
|
||||
|
||||
async search(text: string, k: number): Promise<SearchPayload> {
|
||||
if (this.disposed) {
|
||||
throw new Error("semantic client has been disposed")
|
||||
}
|
||||
await this.ensureReady()
|
||||
if (!this.worker || !this.configured) {
|
||||
throw this.lastError ?? new Error("worker unavailable")
|
||||
}
|
||||
return new Promise<SearchPayload>((resolve, reject) => {
|
||||
const seq = ++this.seq
|
||||
this.pending.set(seq, { resolve, reject })
|
||||
this.worker?.postMessage({ type: "search", text, k, seq })
|
||||
})
|
||||
}
|
||||
|
||||
dispose() {
|
||||
if (this.disposed) return
|
||||
this.disposed = true
|
||||
this.rejectAll(new Error("semantic client disposed"))
|
||||
if (this.worker) {
|
||||
this.worker.postMessage({ type: "reset" })
|
||||
this.worker.terminate()
|
||||
}
|
||||
this.worker = null
|
||||
this.configured = false
|
||||
}
|
||||
}
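// Usage sketch: construct once, await ensureReady(), then call search(text, k)
// for ranked { id, score } hits; dispose() tears the worker down.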
@@ -8,23 +8,24 @@
|
||||
}
|
||||
|
||||
& > .search-button {
|
||||
background-color: transparent;
|
||||
border: 1px var(--lightgray) solid;
|
||||
background-color: color-mix(in srgb, var(--lightgray) 60%, var(--light));
|
||||
border: none;
|
||||
border-radius: 4px;
|
||||
font-family: inherit;
|
||||
font-size: inherit;
|
||||
height: 2rem;
|
||||
padding: 0 1rem 0 0;
|
||||
padding: 0;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
text-align: inherit;
|
||||
cursor: pointer;
|
||||
white-space: nowrap;
|
||||
width: 100%;
|
||||
justify-content: space-between;
|
||||
|
||||
& > p {
|
||||
display: inline;
|
||||
color: var(--gray);
|
||||
padding: 0 1rem;
|
||||
}
|
||||
|
||||
& svg {
|
||||
@@ -35,7 +36,7 @@
|
||||
|
||||
.search-path {
|
||||
stroke: var(--darkgray);
|
||||
stroke-width: 1.5px;
|
||||
stroke-width: 2px;
|
||||
transition: stroke 0.5s ease;
|
||||
}
|
||||
}
|
||||
@@ -77,97 +78,16 @@
|
||||
margin-bottom: 2em;
|
||||
}
|
||||
|
||||
& > .input-container {
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
position: relative;
|
||||
& > input {
|
||||
box-sizing: border-box;
|
||||
padding: 0.5em 1em;
|
||||
font-family: var(--bodyFont);
|
||||
color: var(--dark);
|
||||
font-size: 1.1em;
|
||||
border: 1px solid var(--lightgray);
|
||||
|
||||
.search-bar {
|
||||
flex: 1 1 auto;
|
||||
min-width: 0;
|
||||
box-sizing: border-box;
|
||||
padding: 0.5em 1em;
|
||||
font-family: var(--bodyFont);
|
||||
color: var(--dark);
|
||||
font-size: 1.1em;
|
||||
border: none;
|
||||
background: transparent;
|
||||
|
||||
&:focus {
|
||||
outline: none;
|
||||
}
|
||||
}
|
||||
|
||||
.semantic-search-progress {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
right: 0;
|
||||
height: 2px;
|
||||
background-color: var(--secondary);
|
||||
width: 0;
|
||||
opacity: 0;
|
||||
transition:
|
||||
width 0.3s ease,
|
||||
opacity 0.2s ease;
|
||||
pointer-events: none;
|
||||
}
|
||||
|
||||
.search-mode-toggle {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
border-radius: 9999px;
|
||||
height: 1.4rem;
|
||||
background-color: color-mix(in srgb, var(--darkgray) 12%, transparent);
|
||||
margin-right: 1rem;
|
||||
|
||||
.mode-option {
|
||||
border: none;
|
||||
background: transparent;
|
||||
font: inherit;
|
||||
color: var(--gray);
|
||||
border-radius: 9999px;
|
||||
cursor: pointer;
|
||||
transition:
|
||||
background-color 0.2s ease,
|
||||
color 0.2s ease;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
width: 1.5rem;
|
||||
height: 1.5rem;
|
||||
position: relative;
|
||||
|
||||
&:focus-visible {
|
||||
outline: 2px solid var(--tertiary);
|
||||
outline-offset: 2px;
|
||||
}
|
||||
|
||||
&.active {
|
||||
background-color: var(--secondary);
|
||||
color: var(--light);
|
||||
}
|
||||
|
||||
svg {
|
||||
width: 18px;
|
||||
height: 18px;
|
||||
}
|
||||
|
||||
.sr-only {
|
||||
position: absolute;
|
||||
width: 1px;
|
||||
height: 1px;
|
||||
padding: 0;
|
||||
margin: -1px;
|
||||
overflow: hidden;
|
||||
clip: rect(0, 0, 0, 0);
|
||||
white-space: nowrap;
|
||||
border: 0;
|
||||
}
|
||||
}
|
||||
&:focus {
|
||||
outline: none;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,542 +0,0 @@
|
||||
# /// script
|
||||
# requires-python = ">=3.11"
|
||||
# dependencies = [
|
||||
# "langchain-text-splitters",
|
||||
# "numpy",
|
||||
# "openai",
|
||||
# "sentence-transformers",
|
||||
# "tiktoken",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os, json, argparse, hashlib, math, random, logging
|
||||
|
||||
from pathlib import Path
|
||||
from functools import lru_cache
|
||||
from collections.abc import Iterable
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
import tiktoken, numpy as np
|
||||
|
||||
from openai import OpenAI
|
||||
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
DEFAULT_VLLM_URL = os.environ.get("VLLM_URL") or os.environ.get("VLLM_EMBED_URL") or "http://127.0.0.1:8000/v1"
|
||||
|
||||
|
||||
def resolve_vllm_base_url(url: str) -> str:
|
||||
if not url:
|
||||
raise ValueError("vLLM URL must be non-empty")
|
||||
|
||||
trimmed = url.rstrip("/")
|
||||
if trimmed.endswith("/v1/embeddings"):
|
||||
trimmed = trimmed[: -len("/embeddings")]
|
||||
elif trimmed.endswith("/embeddings"):
|
||||
trimmed = trimmed[: trimmed.rfind("/")]
|
||||
|
||||
if not trimmed.endswith("/v1"):
|
||||
trimmed = f"{trimmed}/v1"
|
||||
|
||||
return trimmed
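# Example (hypothetical hosts): "http://host:8000/v1/embeddings" and "http://host:8000"
# both normalize to "http://host:8000/v1".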
|
||||
|
||||
def load_jsonl(fp: str) -> Iterable[dict]:
|
||||
with open(fp, "r", encoding="utf-8") as f:
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
yield json.loads(line)
|
||||
|
||||
|
||||
def l2_normalize_rows(x: np.ndarray) -> np.ndarray:
|
||||
# x: [N, D]
|
||||
norms = np.linalg.norm(x, ord=2, axis=1, keepdims=True)
|
||||
norms[norms == 0] = 1.0
|
||||
return x / norms
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def get_tiktoken_encoder():
|
||||
# Get the o200k_base tokenizer (GPT-4o) with caching
|
||||
# change this if you want something else.
|
||||
return tiktoken.get_encoding("o200k_base")
|
||||
|
||||
|
||||
def count_tokens(text: str) -> int:
|
||||
# Count tokens using o200k_base encoding
|
||||
encoder = get_tiktoken_encoder()
|
||||
return len(encoder.encode(text))
|
||||
|
||||
|
||||
def get_text_splitter(chunk_size: int, overlap: int):
|
||||
encoder = get_tiktoken_encoder()
|
||||
return RecursiveCharacterTextSplitter(
|
||||
chunk_size=chunk_size * 4, # character approximation
|
||||
chunk_overlap=overlap * 4,
|
||||
separators=["\n\n", "\n", ". ", " ", ""],
|
||||
length_function=lambda t: len(encoder.encode(t)),
|
||||
is_separator_regex=False,
|
||||
)
|
||||
|
||||
|
||||
def chunk_document(
|
||||
doc: dict, max_tokens: int = 512, overlap_tokens: int = 128, min_chunk_size: int = 100
|
||||
) -> list[dict]:
|
||||
"""
|
||||
Chunk a document if it exceeds max_tokens
|
||||
|
||||
Args:
|
||||
doc: {'slug': str, 'title': str, 'text': str}
|
||||
max_tokens: Maximum tokens per chunk
|
||||
overlap_tokens: Overlap between chunks
|
||||
min_chunk_size: Minimum chunk size (avoid tiny chunks)
|
||||
|
||||
Returns:
|
||||
List of chunk dicts with metadata
|
||||
"""
|
||||
text = doc["text"]
|
||||
token_count = count_tokens(text)
|
||||
|
||||
# No chunking needed
|
||||
if token_count <= max_tokens:
|
||||
return [
|
||||
{
|
||||
"slug": doc["slug"],
|
||||
"title": doc.get("title", doc["slug"]),
|
||||
"text": text,
|
||||
"chunk_id": 0,
|
||||
"parent_slug": doc["slug"],
|
||||
"is_chunked": False,
|
||||
}
|
||||
]
|
||||
|
||||
# Apply chunking
|
||||
splitter = get_text_splitter(max_tokens, overlap_tokens)
|
||||
raw_chunks = splitter.split_text(text)
|
||||
|
||||
# Filter out tiny chunks
|
||||
valid_chunks = [c for c in raw_chunks if count_tokens(c) >= min_chunk_size]
|
||||
|
||||
return [
|
||||
{
|
||||
"slug": f"{doc['slug']}#chunk{i}",
|
||||
"title": doc.get("title", doc["slug"]),
|
||||
"text": chunk,
|
||||
"chunk_id": i,
|
||||
"parent_slug": doc["slug"],
|
||||
"is_chunked": True,
|
||||
}
|
||||
for i, chunk in enumerate(valid_chunks)
|
||||
]
|
||||
|
||||
def write_shards(vectors: np.ndarray, shard_size: int, dtype: str, out_dir: Path) -> list[dict]:
|
||||
out_dir.mkdir(parents=True, exist_ok=True)
|
||||
rows, dims = vectors.shape
|
||||
shards_meta: list[dict] = []
|
||||
np_dtype = np.float16 if dtype == "fp16" else np.float32
|
||||
bytes_per_value = np.dtype(np_dtype).itemsize
|
||||
row_offset = 0
|
||||
for si, start in enumerate(range(0, rows, shard_size)):
|
||||
end = min(start + shard_size, rows)
|
||||
shard = vectors[start:end] # [n, dims]
|
||||
bin_path = out_dir / f"vectors-{si:03d}.bin"
|
||||
payload = shard.astype(np_dtype, copy=False).tobytes(order="C")
|
||||
digest = hashlib.sha256(payload).hexdigest()
|
||||
with open(bin_path, "wb") as f:
|
||||
f.write(payload)
|
||||
shard_rows = int(shard.shape[0])
|
||||
shards_meta.append(
|
||||
{
|
||||
"path": f"/embeddings/{bin_path.name}",
|
||||
"rows": shard_rows,
|
||||
"rowOffset": row_offset,
|
||||
"byteLength": len(payload),
|
||||
"sha256": digest,
|
||||
"byteStride": dims * bytes_per_value,
|
||||
},
|
||||
)
|
||||
row_offset += shard_rows
|
||||
return shards_meta
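# Each shard file is the raw row-major bytes of its [rows, dims] slice with no header;
# rowOffset, byteLength, byteStride, and sha256 in the manifest let the client copy it
# straight into the full vector buffer and verify or cache it.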
|
||||
|
||||
def write_hnsw_graph(levels: list[list[list[int]]], rows: int, out_path: Path) -> tuple[list[dict], str]:
|
||||
out_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
offset = 0
|
||||
meta: list[dict] = []
|
||||
digest = hashlib.sha256()
|
||||
with open(out_path, "wb") as f:
|
||||
for lvl in levels:
|
||||
indptr = np.zeros(rows + 1, dtype=np.uint32)
|
||||
edge_accum: list[int] = []
|
||||
for idx in range(rows):
|
||||
neighbors = lvl[idx] if idx < len(lvl) else []
|
||||
indptr[idx + 1] = indptr[idx] + len(neighbors)
|
||||
edge_accum.extend(neighbors)
|
||||
indptr_bytes = indptr.tobytes(order="C")
|
||||
indptr_offset = offset
|
||||
f.write(indptr_bytes)
|
||||
digest.update(indptr_bytes)
|
||||
offset += len(indptr_bytes)
|
||||
|
||||
if edge_accum:
|
||||
indices = np.asarray(edge_accum, dtype=np.uint32)
|
||||
indices_bytes = indices.tobytes(order="C")
|
||||
else:
|
||||
indices = np.zeros(0, dtype=np.uint32)
|
||||
indices_bytes = indices.tobytes(order="C")
|
||||
indices_offset = offset
|
||||
f.write(indices_bytes)
|
||||
digest.update(indices_bytes)
|
||||
offset += len(indices_bytes)
|
||||
|
||||
meta.append(
|
||||
{
|
||||
"level": len(meta),
|
||||
"indptr": {
|
||||
"offset": indptr_offset,
|
||||
"elements": int(indptr.shape[0]),
|
||||
"byteLength": len(indptr_bytes),
|
||||
},
|
||||
"indices": {
|
||||
"offset": indices_offset,
|
||||
"elements": int(indices.shape[0]),
|
||||
"byteLength": len(indices_bytes),
|
||||
},
|
||||
},
|
||||
)
|
||||
return meta, digest.hexdigest()
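# Layout: for every level the file stores a CSR-style pair of uint32 arrays,
# indptr (rows + 1 entries) followed by the flattened neighbor indices; the returned
# metadata records byte offsets so each level can be sliced out without parsing.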
|
||||
|
||||
|
||||
def embed_vllm(
|
||||
texts: list[str],
|
||||
model_id: str,
|
||||
vllm_url: str,
|
||||
batch_size: int = 64,
|
||||
concurrency: int = 8,
|
||||
) -> np.ndarray:
|
||||
base_url = resolve_vllm_base_url(vllm_url)
|
||||
api_key = os.environ.get("VLLM_API_KEY") or os.environ.get("OPENAI_API_KEY") or "not-set"
|
||||
client = OpenAI(base_url=base_url, api_key=api_key, timeout=300)
|
||||
|
||||
def list_available_models() -> list[str]:
|
||||
models: list[str] = []
|
||||
page = client.models.list()
|
||||
models.extend(model.id for model in page.data)
|
||||
while getattr(page, "has_more", False) and page.data:
|
||||
cursor = page.data[-1].id
|
||||
page = client.models.list(after=cursor)
|
||||
models.extend(model.id for model in page.data)
|
||||
return models
|
||||
|
||||
try:
|
||||
available_models = list_available_models()
|
||||
except Exception as exc:
|
||||
raise RuntimeError(f"failed to query {base_url}/models: {exc}") from exc
|
||||
|
||||
if model_id not in available_models:
|
||||
suggestions = ", ".join(sorted(available_models)) if available_models else "<none>"
|
||||
logger.warning(
"model '%s' not served by vLLM at %s. Available models: %s. Falling back to the first available model; semantic search results may differ (ignore this warning if your weights are an ONNX checkpoint of the same model).", model_id, base_url, suggestions,
)
|
||||
model_id = available_models[0]
|
||||
|
||||
# Apply model-specific prefixes for documents (asymmetric search)
|
||||
model_lower = model_id.lower()
|
||||
if "e5" in model_lower:
|
||||
# E5 models: use "passage:" prefix for documents
|
||||
prefixed = [f"passage: {t}" for t in texts]
|
||||
elif "qwen" in model_lower and "embedding" in model_lower:
|
||||
# Qwen3-Embedding: documents use plain text (no prefix)
|
||||
prefixed = texts
|
||||
elif "embeddinggemma" in model_lower:
|
||||
# embeddinggemma: use "title: none | text:" prefix for documents
|
||||
prefixed = [f"title: none | text: {t}" for t in texts]
|
||||
else:
|
||||
# Default: no prefix for unknown models
|
||||
prefixed = texts
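# Note: these are document-side prefixes for asymmetric retrieval; the matching
# query-side prefix (e.g. "query: " for E5-style models) should be applied at search time.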
|
||||
print(
|
||||
"Embedding"
|
||||
f" {len(prefixed)} texts with vLLM"
|
||||
f" (model={model_id}, batch_size={batch_size}, concurrency={concurrency})",
|
||||
)
|
||||
|
||||
# Create batches
|
||||
batches = []
|
||||
for i in range(0, len(prefixed), batch_size):
|
||||
batch = prefixed[i : i + batch_size]
|
||||
batches.append((i, batch))
|
||||
|
||||
# Function to send a single batch request
|
||||
def send_batch(batch_info: tuple[int, list[str]]) -> tuple[int, list[np.ndarray]]:
|
||||
idx, batch = batch_info
|
||||
response = client.embeddings.create(model=model_id, input=batch)
|
||||
embeddings = [np.asarray(item.embedding, dtype=np.float32) for item in response.data]
|
||||
return (idx, embeddings)
|
||||
|
||||
# Send batches concurrently (or sequentially if only 1 batch)
|
||||
results: dict[int, list[np.ndarray]] = {}
|
||||
if len(batches) == 1:
|
||||
# Single batch - no need for threading
|
||||
idx, embeddings = send_batch(batches[0])
|
||||
results[idx] = embeddings
|
||||
else:
|
||||
# Multiple batches - use concurrent requests
|
||||
with ThreadPoolExecutor(max_workers=concurrency) as executor:
|
||||
futures = {executor.submit(send_batch, batch_info): batch_info[0] for batch_info in batches}
|
||||
completed = 0
|
||||
for future in as_completed(futures):
|
||||
idx, embeddings = future.result()
|
||||
results[idx] = embeddings
|
||||
completed += 1
|
||||
if completed % max(1, len(batches) // 10) == 0 or completed == len(batches):
|
||||
print(f" Completed {completed}/{len(batches)} batches ({completed * 100 // len(batches)}%)")
|
||||
|
||||
# Reconstruct in order
|
||||
out: list[np.ndarray] = []
|
||||
for i in sorted(results.keys()):
|
||||
out.extend(results[i])
|
||||
|
||||
return np.stack(out, axis=0)
|
||||
|
||||
|
||||
def embed_hf(texts: list[str], model_id: str, device: str) -> np.ndarray:
|
||||
# Prefer sentence-transformers for E5 and similar embed models
|
||||
from sentence_transformers import SentenceTransformer
|
||||
|
||||
model = SentenceTransformer(model_id, device=device)
|
||||
|
||||
# Apply model-specific prefixes for documents (asymmetric search)
|
||||
model_lower = model_id.lower()
|
||||
if "e5" in model_lower:
|
||||
# E5 models: use "passage:" prefix for documents
|
||||
prefixed = [f"passage: {t}" for t in texts]
|
||||
elif "qwen" in model_lower and "embedding" in model_lower:
|
||||
# Qwen3-Embedding: documents use plain text (no prefix)
|
||||
prefixed = texts
|
||||
elif "embeddinggemma" in model_lower:
|
||||
# embeddinggemma: use "title: none | text:" prefix for documents
|
||||
prefixed = [f"title: none | text: {t}" for t in texts]
|
||||
else:
|
||||
# Default: no prefix for unknown models
|
||||
prefixed = texts
|
||||
|
||||
vecs = model.encode(
|
||||
prefixed,
|
||||
batch_size=64,
|
||||
normalize_embeddings=True,
|
||||
convert_to_numpy=True,
|
||||
show_progress_bar=True,
|
||||
)
|
||||
return vecs.astype(np.float32, copy=False)
|
||||
|
||||
|
||||
def main():
|
||||
ap = argparse.ArgumentParser()
|
||||
ap.add_argument("--jsonl", default="public/embeddings-text.jsonl")
|
||||
ap.add_argument("--model", default=os.environ.get("SEM_MODEL", "intfloat/multilingual-e5-large"))
|
||||
ap.add_argument("--dims", type=int, default=int(os.environ.get("SEM_DIMS", "1024")))
|
||||
ap.add_argument("--dtype", choices=["fp16", "fp32"], default=os.environ.get("SEM_DTYPE", "fp32"))
|
||||
ap.add_argument("--shard-size", type=int, default=int(os.environ.get("SEM_SHARD", "1024")))
|
||||
ap.add_argument("--out", default="public/embeddings")
|
||||
ap.add_argument("--use-vllm", action="store_true", default=bool(os.environ.get("USE_VLLM", "")))
|
||||
ap.add_argument(
|
||||
"--vllm-url",
|
||||
default=DEFAULT_VLLM_URL,
|
||||
help="Base URL for the vLLM OpenAI-compatible server (accepts either /v1 or /v1/embeddings)",
|
||||
)
|
||||
ap.add_argument("--chunk-size", type=int, default=512, help="Max tokens per chunk")
|
||||
ap.add_argument("--chunk-overlap", type=int, default=128, help="Overlap tokens between chunks")
|
||||
ap.add_argument("--no-chunking", action="store_true", help="Disable chunking (embed full docs)")
|
||||
ap.add_argument(
|
||||
"--concurrency",
|
||||
type=int,
|
||||
default=int(os.environ.get("VLLM_CONCURRENCY", "8")),
|
||||
help="Number of concurrent requests to vLLM (default: 8)",
|
||||
)
|
||||
ap.add_argument(
|
||||
"--batch-size",
|
||||
type=int,
|
||||
default=int(os.environ.get("VLLM_BATCH_SIZE", "64")),
|
||||
help="Batch size for vLLM requests (default: 64)",
|
||||
)
|
||||
args = ap.parse_args()
|
||||
|
||||
recs = list(load_jsonl(args.jsonl))
|
||||
if not recs:
|
||||
print("No input found in public/embeddings-text.jsonl; run the site build first to emit JSONL.")
|
||||
return
|
||||
|
||||
# Apply chunking
|
||||
if args.no_chunking:
|
||||
chunks = recs
|
||||
chunk_metadata = {}
|
||||
print(f"Chunking disabled. Processing {len(chunks)} full documents")
|
||||
else:
|
||||
chunks = []
|
||||
chunk_metadata = {}
|
||||
for rec in recs:
|
||||
doc_chunks = chunk_document(rec, max_tokens=args.chunk_size, overlap_tokens=args.chunk_overlap)
|
||||
chunks.extend(doc_chunks)
|
||||
# Build chunk metadata map
|
||||
for chunk in doc_chunks:
|
||||
if chunk["is_chunked"]:
|
||||
chunk_metadata[chunk["slug"]] = {
|
||||
"parentSlug": chunk["parent_slug"],
|
||||
"chunkId": chunk["chunk_id"],
|
||||
}
|
||||
chunked_count = sum(1 for c in chunks if c.get("is_chunked", False))
|
||||
print(f"Chunked {len(recs)} documents into {len(chunks)} chunks ({chunked_count} chunked, {len(chunks) - chunked_count} unchanged)")
|
||||
print(f" Chunk size: {args.chunk_size} tokens, overlap: {args.chunk_overlap} tokens")
|
||||
|
||||
ids = [c["slug"] for c in chunks]
|
||||
titles = [c.get("title", c["slug"]) for c in chunks]
|
||||
texts = [c["text"] for c in chunks]
|
||||
|
||||
if args.use_vllm:
|
||||
vecs = embed_vllm(
|
||||
texts,
|
||||
args.model,
|
||||
args.vllm_url,
|
||||
batch_size=args.batch_size,
|
||||
concurrency=args.concurrency,
|
||||
)
|
||||
else:
|
||||
device = "cuda" if os.environ.get("CUDA_VISIBLE_DEVICES") else "cpu"
|
||||
vecs = embed_hf(texts, args.model, device)
|
||||
|
||||
# Coerce dims and re-normalize
|
||||
if vecs.shape[1] != args.dims:
|
||||
if vecs.shape[1] > args.dims:
|
||||
vecs = vecs[:, : args.dims]
|
||||
else:
|
||||
vecs = np.pad(vecs, ((0, 0), (0, args.dims - vecs.shape[1])))
|
||||
vecs = l2_normalize_rows(vecs.astype(np.float32, copy=False))
|
||||
|
||||
out_dir = Path(args.out)
|
||||
shards = write_shards(vecs, args.shard_size, args.dtype, out_dir)
|
||||
|
||||
# Build a lightweight HNSW graph and store it in a compact binary layout
|
||||
def hnsw_build(data: np.ndarray, M: int = 16, efC: int = 200, seed: int = 0) -> dict:
|
||||
rng = random.Random(seed)
|
||||
N, D = data.shape
|
||||
levels: list[list[list[int]]] = [] # levels[L][i] = neighbors of node i at level L
|
||||
|
||||
# random level assignment using 1/e distribution
|
||||
node_levels = []
|
||||
for _ in range(N):
|
||||
lvl = 0
|
||||
while rng.random() < 1 / math.e:
|
||||
lvl += 1
|
||||
node_levels.append(lvl)
|
||||
max_level = max(node_levels) if N > 0 else 0
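# With promotion probability 1/e, a node reaches level L with probability e^-L,
# so the expected max level grows roughly like ln(N).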
for _ in range(max_level + 1):
|
||||
levels.append([[] for _ in range(N)])
|
||||
|
||||
def sim(i: int, j: int) -> float:
|
||||
return float((data[i] * data[j]).sum())
|
||||
|
||||
entry = 0 if N > 0 else -1
|
||||
|
||||
def search_layer(q: int, ep: int, ef: int, L: int) -> list[int]:
|
||||
if ep < 0:
|
||||
return []
|
||||
visited = set()
|
||||
cand: list[tuple[float, int]] = []
|
||||
top: list[tuple[float, int]] = []
|
||||
def push(node: int):
|
||||
if node in visited:
|
||||
return
|
||||
visited.add(node)
|
||||
cand.append((sim(q, node), node))
|
||||
push(ep)
|
||||
while cand:
|
||||
cand.sort(reverse=True)
|
||||
s, v = cand.pop(0)
|
||||
if len(top) >= ef and s <= top[-1][0]:
|
||||
break
|
||||
top.append((s, v))
|
||||
for u in levels[L][v]:
|
||||
push(u)
|
||||
top.sort(reverse=True)
|
||||
return [n for _, n in top]
|
||||
|
||||
for i in range(N):
|
||||
if i == 0:
|
||||
continue
|
||||
lvl = node_levels[i]
|
||||
ep = entry
|
||||
for L in range(max_level, lvl, -1):
|
||||
c = search_layer(i, ep, 1, L)
|
||||
if c:
|
||||
ep = c[0]
|
||||
for L in range(min(max_level, lvl), -1, -1):
|
||||
W = search_layer(i, ep, efC, L)
|
||||
# Select top M by similarity
|
||||
neigh = sorted(((sim(i, j), j) for j in W if j != i), reverse=True)[:M]
|
||||
for _, e in neigh:
|
||||
if e not in levels[L][i]:
|
||||
levels[L][i].append(e)
|
||||
if i not in levels[L][e]:
|
||||
levels[L][e].append(i)
|
||||
|
||||
# trim neighbors to M
|
||||
for L in range(len(levels)):
|
||||
for i in range(N):
|
||||
if len(levels[L][i]) > M:
|
||||
# keep top M by sim
|
||||
nb = levels[L][i]
|
||||
nb = sorted(nb, key=lambda j: sim(i, j), reverse=True)[:M]
|
||||
levels[L][i] = nb
|
||||
|
||||
return {
|
||||
"M": M,
|
||||
"efConstruction": efC,
|
||||
"entryPoint": entry,
|
||||
"maxLevel": max_level,
|
||||
"levels": levels,
|
||||
}
|
||||
|
||||
hnsw = hnsw_build(vecs, M=16, efC=200)
|
||||
hnsw_meta, hnsw_sha = write_hnsw_graph(hnsw["levels"], int(vecs.shape[0]), out_dir / "hnsw.bin")
|
||||
|
||||
manifest = {
|
||||
"version": 2,
|
||||
"dims": args.dims,
|
||||
"dtype": args.dtype,
|
||||
"normalized": True,
|
||||
"rows": int(vecs.shape[0]),
|
||||
"shardSizeRows": args.shard_size,
|
||||
"vectors": {
|
||||
"dtype": args.dtype,
|
||||
"rows": int(vecs.shape[0]),
|
||||
"dims": args.dims,
|
||||
"shards": shards,
|
||||
},
|
||||
"ids": ids,
|
||||
"titles": titles,
|
||||
"chunkMetadata": chunk_metadata,
|
||||
"hnsw": {
|
||||
"M": hnsw["M"],
|
||||
"efConstruction": hnsw["efConstruction"],
|
||||
"entryPoint": hnsw["entryPoint"],
|
||||
"maxLevel": hnsw["maxLevel"],
|
||||
"graph": {
|
||||
"path": "/embeddings/hnsw.bin",
|
||||
"sha256": hnsw_sha,
|
||||
"levels": hnsw_meta,
|
||||
},
|
||||
},
|
||||
}
|
||||
(out_dir / "manifest.json").write_text(json.dumps(manifest, ensure_ascii=False), encoding="utf-8")
|
||||
print(f"Wrote {len(shards)} vector shard(s), HNSW graph, and manifest to {out_dir}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -5,7 +5,6 @@ export default {
|
||||
title: "غير معنون",
|
||||
description: "لم يتم تقديم أي وصف",
|
||||
},
|
||||
direction: "rtl" as const,
|
||||
components: {
|
||||
callout: {
|
||||
note: "ملاحظة",
|
||||
|
||||
@@ -15,7 +15,7 @@ export default {
|
||||
success: "Erfolg",
|
||||
question: "Frage",
|
||||
warning: "Warnung",
|
||||
failure: "Fehlgeschlagen",
|
||||
failure: "Misserfolg",
|
||||
danger: "Gefahr",
|
||||
bug: "Fehler",
|
||||
example: "Beispiel",
|
||||
@@ -57,7 +57,7 @@ export default {
|
||||
title: "Inhaltsverzeichnis",
|
||||
},
|
||||
contentMeta: {
|
||||
readingTime: ({ minutes }) => `${minutes} Min. Lesezeit`,
|
||||
readingTime: ({ minutes }) => `${minutes} min read`,
|
||||
},
|
||||
},
|
||||
pages: {
|
||||
@@ -68,7 +68,7 @@ export default {
|
||||
error: {
|
||||
title: "Nicht gefunden",
|
||||
notFound: "Diese Seite ist entweder nicht öffentlich oder existiert nicht.",
|
||||
home: "Zur Startseite",
|
||||
home: "Return to Homepage",
|
||||
},
|
||||
folderContent: {
|
||||
folder: "Ordner",
|
||||
|
||||
@@ -21,7 +21,6 @@ export interface Translation {
|
||||
title: string
|
||||
description: string
|
||||
}
|
||||
direction?: "ltr" | "rtl"
|
||||
components: {
|
||||
callout: CalloutTranslation
|
||||
backlinks: {
|
||||
|
||||
@@ -5,7 +5,6 @@ export default {
|
||||
title: "بدون عنوان",
|
||||
description: "توضیح خاصی اضافه نشده است",
|
||||
},
|
||||
direction: "rtl" as const,
|
||||
components: {
|
||||
callout: {
|
||||
note: "یادداشت",
|
||||
|
||||
@@ -51,7 +51,7 @@ export default {
|
||||
},
|
||||
search: {
|
||||
title: "Szukaj",
|
||||
searchBarPlaceholder: "Wpisz frazę wyszukiwania",
|
||||
searchBarPlaceholder: "Search for something",
|
||||
},
|
||||
tableOfContents: {
|
||||
title: "Spis treści",
|
||||
|
||||
@@ -40,7 +40,7 @@ export const NotFoundPage: QuartzEmitterPlugin = () => {
|
||||
description: notFound,
|
||||
frontmatter: { title: notFound, tags: [] },
|
||||
})
|
||||
const externalResources = pageResources(path, resources, ctx.cfg.configuration)
|
||||
const externalResources = pageResources(path, resources)
|
||||
const componentData: QuartzComponentProps = {
|
||||
ctx,
|
||||
fileData: vfile.data,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { FilePath, joinSegments } from "../../util/path"
|
||||
import { QuartzEmitterPlugin } from "../types"
|
||||
import { write } from "./helpers"
|
||||
import fs from "fs"
|
||||
import { styleText } from "util"
|
||||
import { FullSlug } from "../../util/path"
|
||||
|
||||
export function extractDomainFromBaseUrl(baseUrl: string) {
|
||||
const url = new URL(`https://${baseUrl}`)
|
||||
@@ -10,25 +10,20 @@ export function extractDomainFromBaseUrl(baseUrl: string) {
|
||||
|
||||
export const CNAME: QuartzEmitterPlugin = () => ({
|
||||
name: "CNAME",
|
||||
async emit(ctx) {
|
||||
if (!ctx.cfg.configuration.baseUrl) {
|
||||
async emit({ argv, cfg }) {
|
||||
if (!cfg.configuration.baseUrl) {
|
||||
console.warn(
|
||||
styleText("yellow", "CNAME emitter requires `baseUrl` to be set in your configuration"),
|
||||
)
|
||||
return []
|
||||
}
|
||||
const content = extractDomainFromBaseUrl(ctx.cfg.configuration.baseUrl)
|
||||
const path = joinSegments(argv.output, "CNAME")
|
||||
const content = extractDomainFromBaseUrl(cfg.configuration.baseUrl)
|
||||
if (!content) {
|
||||
return []
|
||||
}
|
||||
|
||||
const path = await write({
|
||||
ctx,
|
||||
content,
|
||||
slug: "CNAME" as FullSlug,
|
||||
ext: "",
|
||||
})
|
||||
return [path]
|
||||
await fs.promises.writeFile(path, content)
|
||||
return [path] as FilePath[]
|
||||
},
|
||||
async *partialEmit() {},
|
||||
})
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
import { FullSlug, joinSegments } from "../../util/path"
|
||||
import { QuartzEmitterPlugin } from "../types"
|
||||
import path from "path"
|
||||
import fs from "node:fs/promises"
|
||||
import { globby } from "globby"
|
||||
|
||||
// @ts-ignore
|
||||
import spaRouterScript from "../../components/scripts/spa.inline"
|
||||
@@ -19,7 +16,7 @@ import {
|
||||
processGoogleFonts,
|
||||
} from "../../util/theme"
|
||||
import { Features, transform } from "lightningcss"
|
||||
import { transform as transpile, build as bundle } from "esbuild"
|
||||
import { transform as transpile } from "esbuild"
|
||||
import { write } from "./helpers"
|
||||
|
||||
type ComponentResources = {
|
||||
@@ -204,46 +201,6 @@ function addGlobalPageResources(ctx: BuildCtx, componentResources: ComponentReso
|
||||
})(window, document, "clarity", "script", "${cfg.analytics.projectId}");\`
|
||||
document.head.appendChild(clarityScript)
|
||||
`)
|
||||
} else if (cfg.analytics?.provider === "matomo") {
|
||||
componentResources.afterDOMLoaded.push(`
|
||||
const matomoScript = document.createElement("script");
|
||||
matomoScript.innerHTML = \`
|
||||
let _paq = window._paq = window._paq || [];
|
||||
|
||||
// Track SPA navigation
|
||||
// https://developer.matomo.org/guides/spa-tracking
|
||||
document.addEventListener("nav", () => {
|
||||
_paq.push(['setCustomUrl', location.pathname]);
|
||||
_paq.push(['setDocumentTitle', document.title]);
|
||||
_paq.push(['trackPageView']);
|
||||
});
|
||||
|
||||
_paq.push(['trackPageView']);
|
||||
_paq.push(['enableLinkTracking']);
|
||||
(function() {
|
||||
const u="//${cfg.analytics.host}/";
|
||||
_paq.push(['setTrackerUrl', u+'matomo.php']);
|
||||
_paq.push(['setSiteId', ${cfg.analytics.siteId}]);
|
||||
const d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
|
||||
g.type='text/javascript'; g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
|
||||
})();
|
||||
\`
|
||||
document.head.appendChild(matomoScript);
|
||||
`)
|
||||
} else if (cfg.analytics?.provider === "vercel") {
|
||||
/**
|
||||
* script from {@link https://vercel.com/docs/analytics/quickstart?framework=html#add-the-script-tag-to-your-site|Vercel Docs}
|
||||
*/
|
||||
componentResources.beforeDOMLoaded.push(`
|
||||
window.va = window.va || function () { (window.vaq = window.vaq || []).push(arguments); };
|
||||
`)
|
||||
componentResources.afterDOMLoaded.push(`
|
||||
const vercelInsightsScript = document.createElement("script")
|
||||
vercelInsightsScript.src = "/_vercel/insights/script.js"
|
||||
vercelInsightsScript.defer = true
|
||||
document.head.appendChild(vercelInsightsScript)
|
||||
`)
|
||||
}
|
||||
|
||||
if (cfg.enableSPA) {
|
||||
@@ -360,47 +317,7 @@ export const ComponentResources: QuartzEmitterPlugin = () => {
|
||||
ext: ".js",
|
||||
content: postscript,
|
||||
})
|
||||
|
||||
// Bundle all worker files
|
||||
const workerFiles = await globby(["quartz/**/*.worker.ts"])
|
||||
for (const src of workerFiles) {
|
||||
const result = await bundle({
|
||||
entryPoints: [src],
|
||||
bundle: true,
|
||||
minify: true,
|
||||
platform: "browser",
|
||||
format: "esm",
|
||||
write: false,
|
||||
})
|
||||
const code = result.outputFiles[0].text
|
||||
const name = path.basename(src).replace(/\.ts$/, "")
|
||||
yield write({ ctx, slug: name as FullSlug, ext: ".js", content: code })
|
||||
}
|
||||
},
|
||||
async *partialEmit(ctx, _content, _resources, changeEvents) {
|
||||
// Handle worker file changes in incremental builds
|
||||
for (const changeEvent of changeEvents) {
|
||||
if (!/\.worker\.ts$/.test(changeEvent.path)) continue
|
||||
if (changeEvent.type === "delete") {
|
||||
const name = path.basename(changeEvent.path).replace(/\.ts$/, "")
|
||||
const dest = joinSegments(ctx.argv.output, `${name}.js`)
|
||||
try {
|
||||
await fs.unlink(dest)
|
||||
} catch {}
|
||||
continue
|
||||
}
|
||||
const result = await bundle({
|
||||
entryPoints: [changeEvent.path],
|
||||
bundle: true,
|
||||
minify: true,
|
||||
platform: "browser",
|
||||
format: "esm",
|
||||
write: false,
|
||||
})
|
||||
const code = result.outputFiles[0].text
|
||||
const name = path.basename(changeEvent.path).replace(/\.ts$/, "")
|
||||
yield write({ ctx, slug: name as FullSlug, ext: ".js", content: code })
|
||||
}
|
||||
},
|
||||
async *partialEmit() {},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ function generateRSSFeed(cfg: GlobalConfiguration, idx: ContentIndexMap, limit?:
|
||||
<title>${escapeHTML(content.title)}</title>
|
||||
<link>https://${joinSegments(base, encodeURI(slug))}</link>
|
||||
<guid>https://${joinSegments(base, encodeURI(slug))}</guid>
|
||||
<description><![CDATA[ ${content.richContent ?? content.description} ]]></description>
|
||||
<description>${content.richContent ?? content.description}</description>
|
||||
<pubDate>${content.date?.toUTCString()}</pubDate>
|
||||
</item>`
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ async function processContent(
|
||||
) {
|
||||
const slug = fileData.slug!
|
||||
const cfg = ctx.cfg.configuration
|
||||
const externalResources = pageResources(pathToRoot(slug), resources, ctx.cfg.configuration)
|
||||
const externalResources = pageResources(pathToRoot(slug), resources)
|
||||
const componentData: QuartzComponentProps = {
|
||||
ctx,
|
||||
fileData,
|
||||
|
||||
@@ -38,7 +38,7 @@ async function* processFolderInfo(
|
||||
const slug = joinSegments(folder, "index") as FullSlug
|
||||
const [tree, file] = folderContent
|
||||
const cfg = ctx.cfg.configuration
|
||||
const externalResources = pageResources(pathToRoot(slug), resources, ctx.cfg.configuration)
|
||||
const externalResources = pageResources(pathToRoot(slug), resources)
|
||||
const componentData: QuartzComponentProps = {
|
||||
ctx,
|
||||
fileData: file.data,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
export { ContentPage } from "./contentPage"
|
||||
export { TagPage } from "./tagPage"
|
||||
export { FolderPage } from "./folderPage"
|
||||
export { ContentIndex } from "./contentIndex"
|
||||
export { ContentIndex as ContentIndex } from "./contentIndex"
|
||||
export { AliasRedirects } from "./aliases"
|
||||
export { Assets } from "./assets"
|
||||
export { Static } from "./static"
|
||||
@@ -10,4 +10,3 @@ export { ComponentResources } from "./componentResources"
|
||||
export { NotFoundPage } from "./404"
|
||||
export { CNAME } from "./cname"
|
||||
export { CustomOgImages } from "./ogImage"
|
||||
export { SemanticIndex } from "./semantic"
|
||||
|
||||
@@ -1,235 +0,0 @@
|
||||
import { write } from "./helpers"
|
||||
import { QuartzEmitterPlugin } from "../types"
|
||||
import { FilePath, FullSlug, joinSegments, QUARTZ } from "../../util/path"
|
||||
import { ReadTimeResults } from "reading-time"
|
||||
import { GlobalConfiguration } from "../../cfg"
|
||||
import { spawn } from "child_process"
|
||||
|
||||
const DEFAULT_MODEL_ID = "onnx-community/Qwen3-Embedding-0.6B-ONNX"
|
||||
|
||||
const defaults: GlobalConfiguration["semanticSearch"] = {
|
||||
enable: true,
|
||||
model: DEFAULT_MODEL_ID,
|
||||
aot: false,
|
||||
dims: 1024,
|
||||
dtype: "fp32",
|
||||
shardSizeRows: 1024,
|
||||
hnsw: { M: 16, efConstruction: 200 },
|
||||
chunking: {
|
||||
chunkSize: 512,
|
||||
chunkOverlap: 128,
|
||||
noChunking: false,
|
||||
},
|
||||
vllm: {
|
||||
enable: false,
|
||||
vllmUrl:
|
||||
process.env.VLLM_URL || process.env.VLLM_EMBED_URL || "http://127.0.0.1:8000/v1/embeddings",
|
||||
concurrency: parseInt(process.env.VLLM_CONCURRENCY || "8", 10),
|
||||
batchSize: parseInt(process.env.VLLM_BATCH_SIZE || "64", 10),
|
||||
},
|
||||
}
|
||||
|
||||
type ContentDetails = {
|
||||
slug: string
|
||||
title: string
|
||||
filePath: FilePath
|
||||
content: string
|
||||
readingTime?: Partial<ReadTimeResults>
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if uv is installed
|
||||
*/
|
||||
function checkUvInstalled(): Promise<boolean> {
|
||||
return new Promise((resolve) => {
|
||||
const proc = spawn("uv", ["--version"], { shell: true })
|
||||
proc.on("error", () => resolve(false))
|
||||
proc.on("close", (code) => resolve(code === 0))
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Run the Python embedding build script using uv
|
||||
* Script uses PEP 723 inline metadata for dependency management
|
||||
*/
|
||||
function runEmbedBuild(
|
||||
jsonlPath: string,
|
||||
outDir: string,
|
||||
opts: {
|
||||
model: string
|
||||
dtype: string
|
||||
dims: number
|
||||
shardSizeRows: number
|
||||
chunking: { chunkSize: number; chunkOverlap: number; noChunking: boolean }
|
||||
vllm: { enable: boolean; vllmUrl?: string; concurrency: number; batchSize: number }
|
||||
},
|
||||
): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const scriptPath = joinSegments(QUARTZ, "embed_build.py")
|
||||
const args = [
|
||||
"run",
|
||||
scriptPath,
|
||||
"--jsonl",
|
||||
jsonlPath,
|
||||
"--model",
|
||||
opts.model,
|
||||
"--out",
|
||||
outDir,
|
||||
"--dtype",
|
||||
opts.dtype,
|
||||
"--dims",
|
||||
String(opts.dims),
|
||||
"--shard-size",
|
||||
String(opts.shardSizeRows),
|
||||
"--chunk-size",
|
||||
String(opts.chunking.chunkSize),
|
||||
"--chunk-overlap",
|
||||
String(opts.chunking.chunkOverlap),
|
||||
]
|
||||
|
||||
if (opts.chunking.noChunking) {
|
||||
args.push("--no-chunking")
|
||||
}
|
||||
|
||||
if (opts.vllm.enable) {
|
||||
args.push("--use-vllm")
|
||||
if (opts.vllm.vllmUrl) {
|
||||
args.push("--vllm-url", opts.vllm.vllmUrl)
|
||||
}
|
||||
args.push("--concurrency", String(opts.vllm.concurrency))
|
||||
args.push("--batch-size", String(opts.vllm.batchSize))
|
||||
}
|
||||
|
||||
console.log("\nRunning embedding generation:")
|
||||
console.log(` uv ${args.join(" ")}`)
|
||||
|
||||
const env = { ...process.env }
|
||||
if (opts.vllm.enable && !env.USE_VLLM) {
|
||||
env.USE_VLLM = "1"
|
||||
}
|
||||
|
||||
const proc = spawn("uv", args, {
|
||||
stdio: "inherit",
|
||||
shell: true,
|
||||
env,
|
||||
})
|
||||
|
||||
proc.on("error", (err) => {
|
||||
reject(new Error(`Failed to spawn uv: ${err.message}`))
|
||||
})
|
||||
|
||||
proc.on("close", (code) => {
|
||||
if (code === 0) {
|
||||
console.log("Embedding generation completed successfully")
|
||||
resolve()
|
||||
} else {
|
||||
reject(new Error(`embed_build.py exited with code ${code}`))
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
export const SemanticIndex: QuartzEmitterPlugin<Partial<GlobalConfiguration["semanticSearch"]>> = (
|
||||
opts,
|
||||
) => {
|
||||
const merged = { ...defaults, ...opts }
|
||||
const o = {
|
||||
enable: merged.enable!,
|
||||
model: merged.model!,
|
||||
aot: merged.aot!,
|
||||
dims: merged.dims!,
|
||||
dtype: merged.dtype!,
|
||||
shardSizeRows: merged.shardSizeRows!,
|
||||
hnsw: {
|
||||
M: merged.hnsw?.M ?? defaults.hnsw!.M!,
|
||||
efConstruction: merged.hnsw?.efConstruction ?? defaults.hnsw!.efConstruction!,
|
||||
efSearch: merged.hnsw?.efSearch,
|
||||
},
|
||||
chunking: {
|
||||
chunkSize: merged.chunking?.chunkSize ?? defaults.chunking!.chunkSize!,
|
||||
chunkOverlap: merged.chunking?.chunkOverlap ?? defaults.chunking!.chunkOverlap!,
|
||||
noChunking: merged.chunking?.noChunking ?? defaults.chunking!.noChunking!,
|
||||
},
|
||||
vllm: {
|
||||
enable: merged.vllm?.enable ?? defaults.vllm!.enable!,
|
||||
vllmUrl: merged.vllm?.vllmUrl ?? defaults.vllm!.vllmUrl,
|
||||
concurrency: merged.vllm?.concurrency ?? defaults.vllm!.concurrency!,
|
||||
batchSize: merged.vllm?.batchSize ?? defaults.vllm!.batchSize!,
|
||||
},
|
||||
}
|
||||
|
||||
if (!o.model) {
|
||||
throw new Error("Semantic search requires a model identifier")
|
||||
}
|
||||
|
||||
return {
|
||||
name: "SemanticIndex",
|
||||
getQuartzComponents() {
|
||||
return []
|
||||
},
|
||||
async *partialEmit() {},
|
||||
async *emit(ctx, content, _resources) {
|
||||
if (!o.enable) return
|
||||
|
||||
const docs: ContentDetails[] = []
|
||||
for (const [_, file] of content) {
|
||||
const slug = file.data.slug!
|
||||
const title = file.data.frontmatter?.title ?? slug
|
||||
const text = file.data.text
|
||||
if (text) {
|
||||
docs.push({
|
||||
slug,
|
||||
title,
|
||||
filePath: file.data.filePath!,
|
||||
content: text,
|
||||
readingTime: file.data.readingTime,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Emit JSONL with the exact text used for embeddings
|
||||
const jsonl = docs
|
||||
.map((d) => ({ slug: d.slug, title: d.title, text: d.content }))
|
||||
.map((o) => JSON.stringify(o))
|
||||
.join("\n")
|
||||
|
||||
const jsonlSlug = "embeddings-text" as FullSlug
|
||||
yield write({
|
||||
ctx,
|
||||
slug: jsonlSlug,
|
||||
ext: ".jsonl",
|
||||
content: jsonl,
|
||||
})
|
||||
|
||||
// If aot is false, run the embedding generation script
|
||||
if (!o.aot) {
|
||||
console.log("\nGenerating embeddings (aot=false)...")
|
||||
|
||||
// Check for uv
|
||||
const hasUv = await checkUvInstalled()
|
||||
if (!hasUv) {
|
||||
throw new Error(
|
||||
"uv is required for embedding generation. Install it from https://docs.astral.sh/uv/",
|
||||
)
|
||||
}
|
||||
|
||||
const jsonlPath = joinSegments(ctx.argv.output, "embeddings-text.jsonl")
|
||||
const outDir = joinSegments(ctx.argv.output, "embeddings")
|
||||
|
||||
try {
|
||||
await runEmbedBuild(jsonlPath, outDir, o)
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err)
|
||||
throw new Error(`Embedding generation failed: ${message}`)
|
||||
}
|
||||
} else {
|
||||
console.log(
|
||||
"\nSkipping embedding generation (aot=true). Expecting pre-generated embeddings in public/embeddings/",
|
||||
)
|
||||
}
|
||||
},
|
||||
externalResources(_ctx) {
|
||||
return {}
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -73,7 +73,7 @@ async function processTagPage(
|
||||
const slug = joinSegments("tags", tag) as FullSlug
|
||||
const [tree, file] = tagContent
|
||||
const cfg = ctx.cfg.configuration
|
||||
const externalResources = pageResources(pathToRoot(slug), resources, ctx.cfg.configuration)
|
||||
const externalResources = pageResources(pathToRoot(slug), resources)
|
||||
const componentData: QuartzComponentProps = {
|
||||
ctx,
|
||||
fileData: file.data,
|
||||
|
||||
@@ -488,7 +488,16 @@ export const ObsidianFlavoredMarkdown: QuartzTransformerPlugin<Partial<Options>>
|
||||
{
|
||||
data: { hProperties: { className: ["callout-content"] }, hName: "div" },
|
||||
type: "blockquote",
|
||||
children: [...calloutContent],
|
||||
children: [
|
||||
{
|
||||
data: {
|
||||
hProperties: { className: ["callout-content-inner"] },
|
||||
hName: "div",
|
||||
},
|
||||
type: "blockquote",
|
||||
children: [...calloutContent],
|
||||
},
|
||||
],
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
import { QuartzTransformerPlugin } from "../types"
|
||||
import rehypeRaw from "rehype-raw"
|
||||
import { PluggableList } from "unified"
|
||||
|
||||
export interface Options {
|
||||
/** Replace {{ relref }} with quartz wikilinks []() */
|
||||
@@ -104,9 +102,5 @@ export const OxHugoFlavouredMarkdown: QuartzTransformerPlugin<Partial<Options>>
|
||||
}
|
||||
return src
|
||||
},
|
||||
htmlPlugins() {
|
||||
const plugins: PluggableList = [rehypeRaw]
|
||||
return plugins
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,13 +39,23 @@ li,
|
||||
ol,
|
||||
ul,
|
||||
.katex,
|
||||
.math,
|
||||
.typst-doc,
|
||||
.typst-doc * {
|
||||
.math {
|
||||
color: var(--darkgray);
|
||||
fill: var(--darkgray);
|
||||
overflow-wrap: break-word;
|
||||
text-wrap: pretty;
|
||||
hyphens: auto;
|
||||
}
|
||||
|
||||
p,
|
||||
ul,
|
||||
text,
|
||||
a,
|
||||
li,
|
||||
ol,
|
||||
ul,
|
||||
.katex,
|
||||
.math {
|
||||
overflow-wrap: anywhere;
|
||||
/* tr and td removed from list of selectors for overflow-wrap, allowing them to use default 'normal' property value */
|
||||
}
|
||||
|
||||
.math {
|
||||
@@ -211,7 +221,7 @@ a {
|
||||
}
|
||||
|
||||
& .sidebar {
|
||||
gap: 1.2rem;
|
||||
gap: 2rem;
|
||||
top: 0;
|
||||
box-sizing: border-box;
|
||||
padding: $topSpacing 2rem 2rem 2rem;
|
||||
|
||||
@@ -11,11 +11,14 @@
|
||||
|
||||
& > .callout-content {
|
||||
display: grid;
|
||||
transition: grid-template-rows 0.1s cubic-bezier(0.02, 0.01, 0.47, 1);
|
||||
overflow: hidden;
|
||||
transition: grid-template-rows 0.3s ease;
|
||||
|
||||
& > :first-child {
|
||||
margin-top: 0;
|
||||
& > .callout-content-inner {
|
||||
overflow: hidden;
|
||||
|
||||
& > :first-child {
|
||||
margin-top: 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,19 +121,8 @@
|
||||
--callout-icon: var(--callout-icon-quote);
|
||||
}
|
||||
|
||||
&.is-collapsed {
|
||||
& > .callout-title > .fold-callout-icon {
|
||||
transform: rotateZ(-90deg);
|
||||
}
|
||||
|
||||
.callout-content > :first-child {
|
||||
transition:
|
||||
height 0.1s cubic-bezier(0.02, 0.01, 0.47, 1),
|
||||
margin 0.1s cubic-bezier(0.02, 0.01, 0.47, 1);
|
||||
overflow-y: clip;
|
||||
height: 0;
|
||||
margin-top: -1rem;
|
||||
}
|
||||
&.is-collapsed > .callout-title > .fold-callout-icon {
|
||||
transform: rotateZ(-90deg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,548 +0,0 @@
|
||||
// Unified semantic search worker: handles data loading and query execution
|
||||
import { env, pipeline } from "@huggingface/transformers"
|
||||
import "onnxruntime-web/webgpu"
|
||||
import "onnxruntime-web/wasm"
|
||||
|
||||
export {}
|
||||
|
||||
type VectorShardMeta = {
|
||||
path: string
|
||||
rows: number
|
||||
rowOffset: number
|
||||
byteLength: number
|
||||
sha256?: string
|
||||
byteStride: number
|
||||
}
|
||||
|
||||
type LevelSection = {
|
||||
level: number
|
||||
indptr: { offset: number; elements: number; byteLength: number }
|
||||
indices: { offset: number; elements: number; byteLength: number }
|
||||
}
|
||||
|
||||
type ChunkMetadata = {
|
||||
parentSlug: string
|
||||
chunkId: number
|
||||
}
|
||||
|
||||
type Manifest = {
|
||||
version: number
|
||||
dims: number
|
||||
dtype: string
|
||||
normalized: boolean
|
||||
rows: number
|
||||
shardSizeRows: number
|
||||
vectors: {
|
||||
dtype: string
|
||||
rows: number
|
||||
dims: number
|
||||
shards: VectorShardMeta[]
|
||||
}
|
||||
ids: string[]
|
||||
titles?: string[]
|
||||
chunkMetadata?: Record<string, ChunkMetadata>
|
||||
hnsw: {
|
||||
M: number
|
||||
efConstruction: number
|
||||
entryPoint: number
|
||||
maxLevel: number
|
||||
graph: {
|
||||
path: string
|
||||
sha256?: string
|
||||
levels: LevelSection[]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type InitMessage = {
|
||||
type: "init"
|
||||
cfg: any
|
||||
manifestUrl: string
|
||||
baseUrl?: string
|
||||
disableCache?: boolean
|
||||
}
|
||||
|
||||
type SearchMessage = { type: "search"; text: string; k: number; seq: number }
|
||||
type ResetMessage = { type: "reset" }
|
||||
|
||||
type WorkerMessage = InitMessage | SearchMessage | ResetMessage
|
||||
|
||||
type ReadyMessage = { type: "ready" }
|
||||
|
||||
type ProgressMessage = {
|
||||
type: "progress"
|
||||
loadedRows: number
|
||||
totalRows: number
|
||||
}
|
||||
|
||||
type SearchHit = { id: number; score: number }
|
||||
|
||||
type SearchResultMessage = {
|
||||
type: "search-result"
|
||||
seq: number
|
||||
semantic: SearchHit[]
|
||||
}
|
||||
|
||||
type ErrorMessage = { type: "error"; seq?: number; message: string }
|
||||
|
||||
type WorkerState = "idle" | "loading" | "ready" | "error"
|
||||
|
||||
// IndexedDB configuration
|
||||
const DB_NAME = "semantic-search-cache"
|
||||
const STORE_NAME = "assets"
|
||||
const DB_VERSION = 1
|
||||
const hasIndexedDB = typeof indexedDB !== "undefined"
|
||||
const supportsSharedArrayBuffer = typeof SharedArrayBuffer !== "undefined"
|
||||
|
||||
// State
|
||||
let state: WorkerState = "idle"
|
||||
let manifest: Manifest | null = null
|
||||
let cfg: any = null
|
||||
let vectorsView: Float32Array | null = null
|
||||
let dims = 0
|
||||
let rows = 0
|
||||
let classifier: any = null
|
||||
let envConfigured = false
|
||||
let entryPoint = -1
|
||||
let maxLevel = 0
|
||||
let efDefault = 128
|
||||
let levelGraph: { indptr: Uint32Array; indices: Uint32Array }[] = []
|
||||
let abortController: AbortController | null = null
|
||||
let dbPromise: Promise<IDBDatabase> | null = null
|
||||
|
||||
// IndexedDB helpers
|
||||
function openDatabase(): Promise<IDBDatabase> {
|
||||
if (!hasIndexedDB) {
|
||||
return Promise.reject(new Error("indexedDB unavailable"))
|
||||
}
|
||||
if (!dbPromise) {
|
||||
dbPromise = new Promise((resolve, reject) => {
|
||||
const req = indexedDB.open(DB_NAME, DB_VERSION)
|
||||
req.onupgradeneeded = () => {
|
||||
const db = req.result
|
||||
if (!db.objectStoreNames.contains(STORE_NAME)) {
|
||||
db.createObjectStore(STORE_NAME)
|
||||
}
|
||||
}
|
||||
req.onsuccess = () => resolve(req.result)
|
||||
req.onerror = () => reject(req.error ?? new Error("failed to open cache store"))
|
||||
})
|
||||
}
|
||||
return dbPromise
|
||||
}
|
||||
|
||||
async function readAsset(hash: string): Promise<ArrayBuffer | null> {
|
||||
if (!hasIndexedDB) {
|
||||
return null
|
||||
}
|
||||
const db = await openDatabase()
|
||||
return new Promise((resolve, reject) => {
|
||||
const tx = db.transaction(STORE_NAME, "readonly")
|
||||
const store = tx.objectStore(STORE_NAME)
|
||||
const req = store.get(hash)
|
||||
req.onsuccess = () => {
|
||||
const value = req.result
|
||||
if (value instanceof ArrayBuffer) {
|
||||
resolve(value)
|
||||
} else if (value && value.buffer instanceof ArrayBuffer) {
|
||||
resolve(value.buffer as ArrayBuffer)
|
||||
} else {
|
||||
resolve(null)
|
||||
}
|
||||
}
|
||||
req.onerror = () => reject(req.error ?? new Error("failed to read cached asset"))
|
||||
})
|
||||
}
|
||||
|
||||
async function writeAsset(hash: string, buffer: ArrayBuffer): Promise<void> {
|
||||
if (!hasIndexedDB) {
|
||||
return
|
||||
}
|
||||
const db = await openDatabase()
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
const tx = db.transaction(STORE_NAME, "readwrite")
|
||||
const store = tx.objectStore(STORE_NAME)
|
||||
const req = store.put(buffer, hash)
|
||||
req.onsuccess = () => resolve()
|
||||
req.onerror = () => reject(req.error ?? new Error("failed to cache asset"))
|
||||
})
|
||||
}
|
||||
|
||||
function toAbsolute(path: string, baseUrl?: string): string {
|
||||
if (path.startsWith("http://") || path.startsWith("https://")) {
|
||||
return path
|
||||
}
|
||||
const base = baseUrl ?? self.location.origin
|
||||
return new URL(path, base).toString()
|
||||
}
|
||||
|
||||
async function fetchBinary(
|
||||
path: string,
|
||||
disableCache: boolean,
|
||||
sha?: string,
|
||||
): Promise<ArrayBuffer> {
|
||||
if (!disableCache && sha && hasIndexedDB) {
|
||||
try {
|
||||
const cached = await readAsset(sha)
|
||||
if (cached) {
|
||||
return cached
|
||||
}
|
||||
} catch {
|
||||
// fall through to network fetch on cache errors
|
||||
}
|
||||
}
|
||||
const res = await fetch(path, { signal: abortController?.signal ?? undefined })
|
||||
if (!res.ok) {
|
||||
throw new Error(`failed to fetch ${path}: ${res.status} ${res.statusText}`)
|
||||
}
|
||||
const payload = await res.arrayBuffer()
|
||||
if (!disableCache && sha && hasIndexedDB) {
|
||||
try {
|
||||
await writeAsset(sha, payload)
|
||||
} catch {
|
||||
// ignore cache write failures
|
||||
}
|
||||
}
|
||||
return payload
|
||||
}
|
||||
|
||||
async function populateVectors(
|
||||
manifest: Manifest,
|
||||
baseUrl: string | undefined,
|
||||
disableCache: boolean | undefined,
|
||||
): Promise<{ buffer: Float32Array; rowsLoaded: number }> {
|
||||
if (manifest.vectors.dtype !== "fp32") {
|
||||
throw new Error(`unsupported embedding dtype '${manifest.vectors.dtype}', regenerate with fp32`)
|
||||
}
|
||||
const rows = manifest.rows
|
||||
const dims = manifest.dims
|
||||
const totalBytes = rows * dims * Float32Array.BYTES_PER_ELEMENT
const buffer = supportsSharedArrayBuffer
? new Float32Array(new SharedArrayBuffer(totalBytes))
: new Float32Array(rows * dims) // Float32Array length is in elements, not bytes
|
||||
let loadedRows = 0
|
||||
for (const shard of manifest.vectors.shards) {
|
||||
const absolute = toAbsolute(shard.path, baseUrl)
|
||||
const payload = await fetchBinary(absolute, Boolean(disableCache), shard.sha256)
|
||||
const view = new Float32Array(payload)
|
||||
if (view.length !== shard.rows * dims) {
|
||||
throw new Error(
|
||||
`shard ${shard.path} has mismatched length (expected ${shard.rows * dims}, got ${view.length})`,
|
||||
)
|
||||
}
|
||||
buffer.set(view, shard.rowOffset * dims)
|
||||
loadedRows = Math.min(rows, shard.rowOffset + shard.rows)
|
||||
const progress: ProgressMessage = {
|
||||
type: "progress",
|
||||
loadedRows,
|
||||
totalRows: rows,
|
||||
}
|
||||
self.postMessage(progress)
|
||||
}
|
||||
return { buffer, rowsLoaded: loadedRows }
|
||||
}
|
||||
|
||||
async function populateGraph(
  manifest: Manifest,
  baseUrl: string | undefined,
  disableCache: boolean | undefined,
): Promise<ArrayBuffer> {
  const graphMeta = manifest.hnsw.graph
  const absolute = toAbsolute(graphMeta.path, baseUrl)
  return await fetchBinary(absolute, Boolean(disableCache), graphMeta.sha256)
}

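// transformers.js runtime setup: model weights are always fetched remotely (no local
// model directory inside the worker) and the ONNX WASM binaries are loaded from jsDelivr,
// pinned to the same @huggingface/transformers version that is bundled, so the runtime
// and its .wasm files cannot drift apart.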
function configureRuntimeEnv() {
  if (envConfigured) return
  env.allowLocalModels = false
  env.allowRemoteModels = true
  const wasmBackend = env.backends?.onnx?.wasm
  if (!wasmBackend) {
    throw new Error("transformers.js ONNX runtime backend unavailable")
  }
  const cdnBase = `https://cdn.jsdelivr.net/npm/@huggingface/transformers@${env.version}/dist/`
  wasmBackend.wasmPaths = cdnBase
  envConfigured = true
}

async function ensureEncoder() {
  if (classifier) return
  if (!cfg?.model) {
    throw new Error("semantic worker missing model identifier")
  }
  configureRuntimeEnv()
  const dtype = typeof cfg?.dtype === "string" && cfg.dtype.length > 0 ? cfg.dtype : "fp32"
  const pipelineOpts: Record<string, unknown> = {
    device: "wasm",
    dtype,
    local_files_only: false,
  }
  classifier = await pipeline("feature-extraction", cfg.model, pipelineOpts)
  cfg.dtype = dtype
}

function vectorSlice(id: number): Float32Array {
  if (!vectorsView) {
    throw new Error("vector buffer not configured")
  }
  const start = id * dims
  const end = start + dims
  return vectorsView.subarray(start, end)
}

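// Similarity is a plain dot product. Query vectors are normalized by the pipeline
// (normalize: true in embed below), and the index vectors are assumed to have been
// L2-normalized when the manifest was generated, in which case this equals cosine
// similarity.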
function dot(a: Float32Array, b: Float32Array): number {
  let s = 0
  for (let i = 0; i < dims; i++) {
    s += a[i] * b[i]
  }
  return s
}

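// Each HNSW level is stored in CSR form: indptr[node]..indptr[node + 1] delimits the
// slice of `indices` holding that node's neighbor ids on this level. Out-of-range nodes
// return an empty adjacency list instead of throwing.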
function neighborsFor(level: number, node: number): Uint32Array {
  const meta = levelGraph[level]
  if (!meta) return new Uint32Array()
  const { indptr, indices } = meta
  if (node < 0 || node + 1 >= indptr.length) return new Uint32Array()
  const start = indptr[node]
  const end = indptr[node + 1]
  return indices.subarray(start, end)
}

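// Keeps `arr` sorted by score, highest first, via linear insertion. The candidate queue
// and result lists below stay small (bounded by ef or k), so the O(n) insert is cheap
// enough that a real priority queue is not needed here.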
function insertSortedDescending(arr: SearchHit[], item: SearchHit) {
  let idx = arr.length
  while (idx > 0 && arr[idx - 1].score < item.score) {
    idx -= 1
  }
  arr.splice(idx, 0, item)
}

function bruteForceSearch(query: Float32Array, k: number): SearchHit[] {
  if (!vectorsView) return []
  const hits: SearchHit[] = []
  for (let id = 0; id < rows; id++) {
    const score = dot(query, vectorSlice(id))
    if (hits.length < k) {
      insertSortedDescending(hits, { id, score })
    } else if (score > hits[hits.length - 1].score) {
      insertSortedDescending(hits, { id, score })
      hits.length = k
    }
  }
  return hits
}

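// Two-phase HNSW search. Phase one greedily descends the upper layers, always hopping to
// the neighbor with the best score, until layer 1 yields no further improvement; that
// node seeds phase two, a best-first expansion over layer 0 bounded by
// ef = max(efDefault, k * 10). If the graph never loaded (or the index is empty) the
// exact brute-force scan above is used instead, so results degrade gracefully.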
function hnswSearch(query: Float32Array, k: number): SearchHit[] {
  if (!manifest || !vectorsView || entryPoint < 0 || levelGraph.length === 0) {
    return bruteForceSearch(query, k)
  }
  const ef = Math.max(efDefault, k * 10)
  let ep = entryPoint
  let epScore = dot(query, vectorSlice(ep))
  for (let level = maxLevel; level > 0; level--) {
    let changed = true
    while (changed) {
      changed = false
      const neigh = neighborsFor(level, ep)
      for (let i = 0; i < neigh.length; i++) {
        const candidate = neigh[i]
        if (candidate >= rows) continue
        const score = dot(query, vectorSlice(candidate))
        if (score > epScore) {
          epScore = score
          ep = candidate
          changed = true
        }
      }
    }
  }

  const visited = new Set<number>()
  const candidateQueue: SearchHit[] = []
  const best: SearchHit[] = []
  insertSortedDescending(candidateQueue, { id: ep, score: epScore })
  insertSortedDescending(best, { id: ep, score: epScore })
  visited.add(ep)

  while (candidateQueue.length > 0) {
    const current = candidateQueue.shift()!
    const worstBest = best.length >= ef ? best[best.length - 1].score : -Infinity
    if (current.score < worstBest && best.length >= ef) {
      break
    }
    const neigh = neighborsFor(0, current.id)
    for (let i = 0; i < neigh.length; i++) {
      const candidate = neigh[i]
      if (candidate >= rows || visited.has(candidate)) continue
      visited.add(candidate)
      const score = dot(query, vectorSlice(candidate))
      const hit = { id: candidate, score }
      insertSortedDescending(candidateQueue, hit)
      if (best.length < ef || score > best[best.length - 1].score) {
        insertSortedDescending(best, hit)
        if (best.length > ef) {
          best.pop()
        }
      }
    }
  }

  best.sort((a, b) => b.score - a.score)
  return best.slice(0, k)
}

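// Wraps the text in the prefix each embedding family expects for asymmetric retrieval
// before encoding. Illustrative example (the model name is just an example, not a value
// defined here): with cfg.model = "intfloat/multilingual-e5-small",
// embed("graph view", true) encodes "query: graph view", while documents were embedded
// as "passage: ..." at index build time; unrecognized models pass text through unchanged.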
async function embed(text: string, isQuery: boolean = false): Promise<Float32Array> {
  await ensureEncoder()
  // Apply model-specific prefixes for asymmetric search
  let prefixedText = text
  if (cfg?.model) {
    const modelName = cfg.model.toLowerCase()
    switch (true) {
      case modelName.includes("e5"): {
        // E5 models require query: or passage: prefix
        prefixedText = isQuery ? `query: ${text}` : `passage: ${text}`
        break
      }
      case modelName.includes("qwen") && modelName.includes("embedding"): {
        // Qwen3-Embedding requires task instruction for queries only
        if (isQuery) {
          const task = "Given a web search query, retrieve relevant passages that answer the query"
          prefixedText = `Instruct: ${task}\nQuery: ${text}`
        }
        // Documents use plain text (no prefix)
        break
      }
      case modelName.includes("embeddinggemma"): {
        // embeddinggemma requires specific prefixes
        prefixedText = isQuery
          ? `task: search result | query: ${text}`
          : `title: none | text: ${text}`
        break
      }
      default:
        break
    }
  }
  const out = await classifier(prefixedText, { pooling: "mean", normalize: true })
  const data = Array.from(out?.data ?? out) as number[]
  const vec = new Float32Array(dims)
  for (let i = 0; i < dims; i++) vec[i] = data[i] ?? 0
  return vec
}

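// Initialization: fetch and validate the manifest, stream the vector shards (posting
// progress along the way), pull down the serialized HNSW graph, and wrap each level's
// indptr/indices regions as Uint32Array views over the single graph buffer. Only after
// all of that succeeds does the worker report "ready"; any failure flips the state to
// "error" and is surfaced to the page through the onmessage error path below.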
async function handleInit(msg: InitMessage) {
  if (state === "loading" || state === "ready") {
    throw new Error("worker already initialized or loading")
  }

  state = "loading"
  abortController?.abort()
  abortController = new AbortController()

  try {
    cfg = msg.cfg

    const manifestUrl = toAbsolute(msg.manifestUrl, msg.baseUrl)
    const response = await fetch(manifestUrl, { signal: abortController.signal })
    if (!response.ok) {
      throw new Error(
        `failed to fetch manifest ${manifestUrl}: ${response.status} ${response.statusText}`,
      )
    }
    manifest = (await response.json()) as Manifest

    if (manifest.vectors.dtype !== "fp32") {
      throw new Error(
        `unsupported embedding dtype '${manifest.vectors.dtype}', regenerate with fp32`,
      )
    }

    dims = manifest.dims
    rows = manifest.rows

    const { buffer: vectorBuffer } = await populateVectors(manifest, msg.baseUrl, msg.disableCache)
    vectorsView = vectorBuffer

    const graphBuffer = await populateGraph(manifest, msg.baseUrl, msg.disableCache)

    entryPoint = manifest.hnsw.entryPoint
    maxLevel = manifest.hnsw.maxLevel
    efDefault = Math.max(64, manifest.hnsw.M * 4)
    levelGraph = manifest.hnsw.graph.levels.map((level) => {
      const indptr = new Uint32Array(graphBuffer, level.indptr.offset, level.indptr.elements)
      const indices = new Uint32Array(graphBuffer, level.indices.offset, level.indices.elements)
      return { indptr, indices }
    })

    state = "ready"
    const ready: ReadyMessage = { type: "ready" }
    self.postMessage(ready)
  } catch (err) {
    state = "error"
    throw err
  }
}

async function handleSearch(msg: SearchMessage) {
  if (state !== "ready") {
    throw new Error("worker not ready for search")
  }
  if (!manifest || !vectorsView) {
    throw new Error("semantic worker not configured")
  }

  const queryVec = await embed(msg.text, true)
  const semanticHits = hnswSearch(queryVec, Math.max(1, msg.k))
  const message: SearchResultMessage = {
    type: "search-result",
    seq: msg.seq,
    semantic: semanticHits,
  }
  self.postMessage(message)
}

function handleReset() {
  abortController?.abort()
  abortController = null
  state = "idle"
  manifest = null
  cfg = null
  vectorsView = null
  dims = 0
  rows = 0
  classifier = null
  envConfigured = false
  levelGraph = []
  entryPoint = -1
  maxLevel = 0
}

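// Message protocol, from the page's perspective: send "init" once, wait for "progress"
// and "ready", then send "search" messages and match "search-result" replies by `seq`;
// "reset" tears everything down. A minimal sketch of a caller (illustrative only; the
// worker file name, manifest path, and model id are placeholders, not values defined here):
//
//   const worker = new Worker(new URL("./semanticWorker.ts", import.meta.url), { type: "module" })
//   worker.postMessage({ type: "init", manifestUrl: "static/semantic/manifest.json", cfg: { model: "<model-id>" } })
//   worker.onmessage = (e) => {
//     if (e.data.type === "ready") worker.postMessage({ type: "search", seq: 1, text: "graph view", k: 8 })
//     if (e.data.type === "search-result") console.log(e.data.semantic)
//   }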
self.onmessage = (event: MessageEvent<WorkerMessage>) => {
  const data = event.data

  if (data.type === "reset") {
    handleReset()
    return
  }

  if (data.type === "init") {
    void handleInit(data).catch((err: unknown) => {
      const message: ErrorMessage = {
        type: "error",
        message: err instanceof Error ? err.message : String(err),
      }
      self.postMessage(message)
    })
    return
  }

  if (data.type === "search") {
    void handleSearch(data).catch((err: unknown) => {
      const message: ErrorMessage = {
        type: "error",
        seq: data.seq,
        message: err instanceof Error ? err.message : String(err),
      }
      self.postMessage(message)
    })
  }
}