From df731d193382488197b80fb08cc7f6dddc7cfa61 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 11:09:22 -0700 Subject: [PATCH 01/15] feat: added initial rework of docs --- .gitignore | 1 - app/globals.css | 17 +- app/page.module.css | 8 +- assets/search-index.json | 1837 ++++++++++++++++++++++ components/anchor.tsx | 10 +- components/docs-menu.tsx | 25 +- components/navbar.tsx | 15 +- components/search.tsx | 283 ++-- components/sublink.tsx | 94 +- components/ui/ActiveHashLink.tsx | 6 +- components/ui/collapsible.tsx | 18 +- components/ui/command.tsx | 159 ++ components/ui/dialog.tsx | 25 +- contents/docs/auth.mdx | 22 +- contents/docs/connecting-to-postgres.mdx | 28 +- contents/docs/debug/otel.mdx | 10 +- contents/docs/debug/permissions.mdx | 4 +- contents/docs/debug/replication.mdx | 2 +- contents/docs/debug/slow-queries.mdx | 2 +- contents/docs/errors.mdx | 6 +- contents/docs/offline.mdx | 16 +- contents/docs/postgres-support.mdx | 113 +- contents/docs/react.mdx | 1 - contents/docs/reading-data.mdx | 15 +- contents/docs/release-notes/0.19.mdx | 10 +- contents/docs/release-notes/0.20.mdx | 4 +- contents/docs/release-notes/0.21.mdx | 2 +- contents/docs/samples.mdx | 12 +- contents/docs/zero-cache-config.mdx | 7 +- contents/docs/zero-schema.mdx | 22 +- generate-llms.js | 84 +- lib/generateSearchIndex.ts | 73 +- lib/icons.tsx | 104 ++ lib/markdown.ts | 2 +- lib/rehype-add-copy-button.ts | 2 +- lib/routes-config.ts | 100 -- lib/routes-config.tsx | 148 ++ lib/utils.ts | 12 +- package-lock.json | 498 +++++- package.json | 4 +- tailwind.config.ts | 40 +- 41 files changed, 3293 insertions(+), 548 deletions(-) create mode 100644 assets/search-index.json create mode 100644 components/ui/command.tsx create mode 100644 lib/icons.tsx delete mode 100644 lib/routes-config.ts create mode 100644 lib/routes-config.tsx diff --git a/.gitignore b/.gitignore index aa5e3d21..17245eef 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,6 @@ .DS_Store *.pem /public/llms.txt -/public/search-index.json # debug npm-debug.log* diff --git a/app/globals.css b/app/globals.css index ee7731b6..89727ab6 100644 --- a/app/globals.css +++ b/app/globals.css @@ -24,10 +24,10 @@ --border: 240 5.9% 90%; --input: 240 5.9% 90%; --radius: 0.5rem; - --primary-highlight: rgba(252, 33, 138, 1); + --primary-highlight: 331.2 97.3% 55.9%; + --primary-highlight-foreground: 0 0% 100%; /* Root page */ - --foreground-rgb: 0, 0, 0; --paragraph-color: hsl(0, 0%, 15%); --primary-accent: 39, 252, 174; --code-border: rgba(228, 228, 231, 1); @@ -56,9 +56,10 @@ --border: 240 3.7% 15.9%; --input: 240 3.7% 15.9%; --ring: 240 4.9% 83.9%; + --primary-highlight: 331.2 97.3% 55.9%; + --primary-highlight-foreground: 240 10% 3.9%; /* Root page */ - --foreground-rgb: 255, 255, 255; --paragraph-color: #d4d4d8; --primary-accent: 39, 252, 174; --code-border: rgba(39, 39, 42, 1); @@ -119,9 +120,13 @@ } mark { - color: rgb(var(--foreground-rgb)); - padding: 0.25rem 0.1rem; - background-color: var(--primary-highlight); + color: hsl(var(--primary-highlight-foreground)); + font-weight: 400; + border-radius: 0.25rem; + padding: 0.2rem; + margin: 0 0.05rem; + background-color: hsl(var(--primary-highlight)); + font-weight: 500; } .lightbulb-toggle { diff --git a/app/page.module.css b/app/page.module.css index 61557e6b..44aca6bf 100644 --- a/app/page.module.css +++ b/app/page.module.css @@ -27,7 +27,7 @@ .main a { text-decoration: underline; - color: var(--foreground-rgb); + color: hsl(var(--foreground)); } .main a:hover { @@ -37,7 +37,7 @@ .main 
a.primaryButton { text-decoration: none; - color: rgb(var(--foreground-rgb)); + color: hsl(var(--foreground)); } .main a.primaryButton:hover { @@ -116,7 +116,7 @@ } .main strong { - color: rgb(var(--foreground-rgb)); + color: hsl(var(--foreground)); } .main span.logoName { @@ -181,7 +181,7 @@ p.footerText { margin-top: 1rem; text-align: center; font-size: 0.75rem; - color: rgb(var(--foreground-rgb)); + color: hsl(var(--foreground)); } @media (max-width: 909px) { diff --git a/assets/search-index.json b/assets/search-index.json new file mode 100644 index 00000000..bc74ae66 --- /dev/null +++ b/assets/search-index.json @@ -0,0 +1,1837 @@ +[ + { + "id": "0-add-to-existing-project", + "title": "Add to Existing Project", + "url": "/docs/add-to-existing-project", + "icon": "Plus", + "content": "Zero integrates easily into most JavaScript or TypeScript projects, whether you're using React, Vue, Svelte, Solid, or vanilla JavaScript. Prerequisites A PostgreSQL database with Write-Ahead Logging (WAL) enabled. See Connecting to Postgres for setup instructions. If you are using TypeScript ensure that is set to in tsconfig.json. If this is not set then the advanced types Zero uses do not work as expected. Installation Install the Zero package: Zero's server component depends on , which contains a binary that requires running a postinstall script. Most alternative package managers (non-npm) disable these scripts by default for security reasons. Here's how to enable installation for common alternatives: pnpm For pnpm, either: Run to approve all build scripts, or Add the specific dependency to your : Bun For Bun, add the dependency to your trusted dependencies list: Environment Variables Configure Zero by creating a file in your project root: Replace the placeholders with your database connection details. For more options, see configuration options. Starting the Server Start the Zero server using the CLI: The server runs on port 4848 by default. To verify, open in your browser. If everything is configured correctly, you'll see \"OK\". Defining Your Schema Define your data model schema as described in the Zero schema documentation. Example: If you're using Prisma or Drizzle, you can convert their schemas to Zero schemas using tools listed in the community section. Permissions Update to include permissions for your tables. For example, to allow all users to read and write to the table, add the following: For more details, see permissions. Creating a Zero Instance To create a Zero client instance: In production, avoid hardcoding the server URL. Use environment variables like or . Reading Data To read data, use the method on a from the instance. This creates a materialized view that listens for real-time updates to the data: When the view is no longer needed, ensure you clean up by destroying it: For more details, see Reading Data with ZQL. React React developers can use the hook for seamless integration. See Integrations React for more details. SolidJS For SolidJS, use the function instead of . Refer to Integrations SolidJS for additional information. Other Frameworks For other frameworks, see the UI frameworks documentation. Writing Data Zero supports both simple and advanced data mutations. For basic use cases, use the CRUD mutator: For more complex scenarios, such as custom business logic, use custom mutators to define tailored mutation behavior. Server-Side Rendering (SSR) Zero does not yet support SSR. See SSR for details on disabling SSR for your framework. 
Deployment Ensure all variables are set in the production environment. For Zero cache deployment, see Deployment. For frontend deployment, consult your framework's documentation.", + "headings": [ + { + "text": "Prerequisites", + "id": "prerequisites" + }, + { + "text": "Installation", + "id": "installation" + }, + { + "text": "pnpm", + "id": "pnpm" + }, + { + "text": "Bun", + "id": "bun" + }, + { + "text": "Environment Variables", + "id": "environment-variables" + }, + { + "text": "Starting the Server", + "id": "starting-the-server" + }, + { + "text": "Defining Your Schema", + "id": "defining-your-schema" + }, + { + "text": "Permissions", + "id": "permissions" + }, + { + "text": "Creating a Zero Instance", + "id": "creating-a-zero-instance" + }, + { + "text": "Reading Data", + "id": "reading-data" + }, + { + "text": "React", + "id": "react" + }, + { + "text": "SolidJS", + "id": "solidjs" + }, + { + "text": "Other Frameworks", + "id": "other-frameworks" + }, + { + "text": "Writing Data", + "id": "writing-data" + }, + { + "text": "Server-Side Rendering (SSR)", + "id": "server-side-rendering-ssr" + }, + { + "text": "Deployment", + "id": "deployment" + } + ] + }, + { + "id": "1-auth", + "title": "Authentication", + "url": "/docs/auth", + "icon": "KeyRound", + "content": "Zero uses a JWT-based flow to authenticate connections to zero-cache. Frontend During login: Your API server creates a and sends it to your client. Your client constructs a instance with this token by passing it to the option. Server For to be able to verify the JWT, one of the following environment variables needs to be set: - If your API server uses a symmetric key (secret) to create JWTs then this is that same key. - If your API server uses a private key to create JWTs then this is the corresponding public key, in JWK format. - Many auth providers host the public keys used to verify the JWTs they create at a public URL. If you use a provider that does this, or you publish your own keys publicly, set this to that URL. Refresh The parameter to Zero can also be a function: In this case, Zero will call this function to get a new JWT if verification fails. Client-Side Data Storage Zero stores client-side data in by default, but this is customizable with the parameter: Because multiple users can share the same browser, Zero requires that you provide a parameter on construction: Zero stores each user's data in a different IndexedDB instance. This allows users to quickly switch between multiple users and accounts without resyncing. Add Connection in the left sidebar Search for \"OpenTelemetry\" and select it Click \"Quickstart\" Select \"JavaScript\" Create a new token Copy the environment variables into your file or similar Start Look for logs under \"Drilldown\" > \"Logs\" in left sidebar", + "headings": [ + { + "text": "Grafana Cloud Walkthrough", + "id": "grafana-cloud-walkthrough" + } + ] + }, + { + "id": "7-debug/permissions", + "title": "Debugging Permissions", + "url": "/docs/debug/permissions", + "icon": "ShieldCheck", + "content": "Given that permissions are defined in their own file and internally applied to queries, it might be hard to figure out if or why a permission check is failing. Read Permissions You can use the utility with the flag to see the complete query Zero runs, including read permissions. If the result looks right, the problem may be that Zero is not receiving the that you think it is. You can retrieve a query hash from websocket or server logs, then ask Zero for the details on that specific query. 
Run this command with the same environment you run with. It will use your or configuration to look up the query hash in the cvr database. Write Permissions Look for a level log in the output from like this: Zero prints the row, auth data, and permission policies that was applied to any failed writes.", + "headings": [ + { + "text": "Read Permissions", + "id": "read-permissions" + }, + { + "text": "Write Permissions", + "id": "write-permissions" + } + ] + }, + { + "id": "8-debug/query-asts", + "title": "Query ASTs", + "url": "/docs/debug/query-asts", + "icon": "Workflow", + "content": "An AST (Abstract Syntax Tree) is a representation of a query that is used internally by Zero. It is not meant to be human readable, but it sometimes shows up in logs and other places. If you need to read one of these, save the AST to a json file. Then run the following command: The returned ZQL query will be using server names, rather than client names, to identify columns and tables. If you provide the schema file as an option you will get mapped back to client names: This comes into play if, in your schema.ts, you use the feature to have different names on the client than your backend DB.", + "headings": [] + }, + { + "id": "9-debug/replication", + "title": "Replication", + "url": "/docs/debug/replication", + "icon": "CopyIcon", + "content": "Resetting During development we all do strange things (unsafely changing schemas, removing files, etc.). If the replica ever gets wedged (stops replicating, acts strange) you can wipe it and start over. If you copied your setup from or , you can also run Otherwise you can run (see your file for the replica file location) to clear the contents of the replica. It is always safe to wipe the replica. Wiping will have no impact on your upstream database. Downstream zero-clients will get re-synced when they connect. Inspecting For data to be synced to the client it must first be replicated to . You can check the contents of via: To inspect your Zero database, you have two options: Use our pre-compiled SQLite build as described above Build SQLite from the SQLite branch yourself This will drop you into a shell with which you can use to explore the contents of the replica. Miscellaneous If you see in logs, it’s because you have two zero-cache instances running against dev. One is probably in a background tab somewhere. In production, can run horizontally scaled but on dev it doesn’t run in the config that allows that.", + "headings": [ + { + "text": "Resetting", + "id": "resetting" + }, + { + "text": "Inspecting", + "id": "inspecting" + }, + { + "text": "Miscellaneous", + "id": "miscellaneous" + } + ] + }, + { + "id": "10-debug/slow-queries", + "title": "Slow Queries", + "url": "/docs/debug/slow-queries", + "icon": "Clock", + "content": "In the logs, you may see statements indicating a query is slow: or: Or, you may just notice queries taking longer than expected in the UI. Here are some tips to help debug such slow queries. Check If you are seeing unexpected UI flicker when moving between views, it is likely that the queries backing these views have the default of . Set the to some longer value to keep data cached across navigations. You may alternately want to preload some data at app startup. Check Storage is effectively a database. It requires fast (low latency and high bandwidth) disk access to perform well. If you're running on network attached storage with high latency, or on AWS with low IOPS, then this is the most likely culprit. 
The default deployment of Zero currently uses Fargate which scales IOPS with vCPU. Increasing the vCPU will increase storage throughput and likely resolve the issue. Fly.io provides physically attached SSDs, even for their smallest VMs. Deploying zero-cache there (or any other provider that offers physically attached SSDs) is another option. Locality If you see log lines like: this indicates that is likely deployed too far away from your CVR database. If you did not configure a CVR database URL then this will be your product's Postgres DB. A slow CVR flush can slow down Zero, since it must complete the flush before sending query result(s) to clients. Try moving to be deployed as close as possible to the CVR database. Query Plan If neither (1) nor (2) is a problem, then the query itself is the most likely culprit. The package ships with a query analyzer to help debug this. The analyzer should be run in the directory that contains the file for as it will use the file to find your replica. Example: This will output the query plan and time to execute each phase of that plan. Note that query performance can also be affected by read permissions. See Debugging Permissions for information on how to analyze queries with read permissions applied. /statz makes some internal health statistics available via the endpoint of . In order to access this, you must configure an admin password.", + "headings": [ + { + "text": "Check ", + "id": "check" + }, + { + "text": "Check Storage", + "id": "check-storage" + }, + { + "text": "Locality", + "id": "locality" + }, + { + "text": "Query Plan", + "id": "query-plan" + }, + { + "text": "/statz", + "id": "statz" + } + ] + }, + { + "id": "11-deployment", + "title": "Deploying Zero", + "url": "/docs/deployment", + "icon": "Server", + "content": "To deploy a Zero app, you need to: Deploy your backend database. Most standard Postgres hosts work with Zero. Deploy . We provide a Docker image that can work with most Docker hosts. Deploy your frontend. You can use any hosting service like Vercel or Netlify. This page describes how to deploy . Architecture is a horizontally scalable, stateful web service that maintains a SQLite replica of your Postgres database. It uses this replica to sync ZQL queries to clients over WebSockets. You don't have to know the details of how works to run it, but it helps to know the basic structure. A running is composed of a single node and multiple nodes. It also depends on Postgres, S3, and attached SSD storage. Upstream: Your application's Postgres database. Change DB: A Postgres DB used by Zero to store a recent subset of the Postgres replication log. CVR DB: A Postgres DB used by Zero to store Client View Records (CVRs). CVRs track the state of each synced client. We allow separate DBs so that they can be scaled and tuned independently if desired. S3: Stores a canonical copy of the SQLite replica. File System: Used by both node types to store local copies of the SQLite replica. Can be ephemeral – Zero will re-initialize from S3 on startup. Recommended to use attached SSD storage for best performance. Replication Manager: Serves as the single consumer of the Postgres replication log. Stores a recent subset of the Postgres changelog in the Change DB for catching up ViewSyncers when they initialize. Also maintains the canonical replica, which ViewSyncers initialize from. View Syncers: Handle WebSocket connections from clients and run ZQL queries. Updates CVR DB with the latest state of each client as queries run. 
Uses CVR DB on client connection to compute the initial diff to catch clients up. Topology You should deploy close to your database because the mutation implementation is chatty. In the future, mutations will move out of . When that happens you can deploy geographically distributed and it will double as a read-replica. Updating When run with multiple View Syncer nodes, supports rolling, downtime-free updates. A new Replication Manager takes over the replication stream from the old Replication Manager, and connections from the old View Syncers are gradually drained and absorbed by active View Syncers. Client/Server Version Compatibility Servers are compatible with any client of the same major version, and with clients one major version back. So for example: Server is compatible with client Server is compatible with client Server is compatible with client Server is compatible with client To upgrade Zero to a new major version, first deploy the new zero-cache, then the new frontend. Configuration The image is configured via environment variables. See zero-cache Config for available options. Guide: Multi-Node on SST+AWS SST is our recommended way to deploy Zero. The setup below costs about $35/month. You can scale it up or down as needed by adjusting the number of vCPUs and memory in each task. Setup Upstream Create an upstream Postgres database server somewhere. See Connecting to Postgres for details. Populate the schema and any initial data for your application. Setup AWS See AWS setup guide. The end result should be that you have a dev profile and SSO session defined in your file. Initialize SST Choose \"aws\" for where to deploy. Then overwrite with the following code: Set SST Secrets Configure SST with your Postgres connection string and Zero Auth Secret. Note that if you use JWT-based auth, you'll need to change the environment variables in the file above, then set a different secret here. Deploy This takes about 5-10 minutes. If successful, you should see a URL for the service. This is the URL to pass to the parameter of the constructor on the client. If unsuccessful, you can get detailed logs with . Come find us on Discord and we'll help get you sorted out. Guide: Single-Node on Fly.io Let's deploy the Quickstart app to Fly.io. We'll use Fly.io for both the database and . Setup Quickstart Go through the Quickstart guide to get the app running locally. Setup Fly.io Create an account on Fly.io and install the Fly CLI. Create Postgres app Seed Upstream database Populate the database with initial data and set its to to support replication to . Then restart the database to apply the changes. Create Fly.io app Publish Create a file. Then publish : Deploy Permissions Now is running on Fly.io, but there are no permissions. If you run the app against this , you'll see that no data is returned from any query. To fix this, deploy your permissions: You will need to redo this step every time you change your app's permissions, likely as part of your CI/CD pipeline. Use Remote Now restart the frontend to pick up the env change, and refresh the app. You can stop your local database and as we're not using them anymore. Open the web inspector to verify the app is talking to the remote ! You can deploy the frontend to any standard hosting service like Vercel or Netlify, or even to Fly.io! Deploy Frontend to Vercel If you've followed the above guide and deployed to fly, you can simply run: to deploy your frontend to Vercel. Explaining the arguments above -- - The secret to create and verify JWTs.
This is the same secret that was used when deploying zero-cache to fly. - The URL the frontend will call to talk to the zero-cache server. This is the URL of the fly app. Guide: Multi-Node on Raw AWS S3 Bucket Create an S3 bucket. uses S3 to backup its SQLite replica so that it survives task restarts. Fargate Services Run as two Fargate services (using the same rocicorp/zero docker image): replication-manager config: Task count: 1 view-syncer config: Task count: N You can also use dynamic scaling Notes Standard rolling restarts are fine for both services Set and appropriately so that the total connections from both running and updating (e.g. DesiredCount * MaximumPercent) do not exceed your database’s . The component of the URL is an arbitrary path component that can be modified to reset the replica (e.g. a date, a number, etc.). Setting this to a new path is the multi-node equivalent of deleting the replica file to resync. Note: does not manage cleanup of old generations. The serves requests on port 4849. Routing from the to the is handled internally by storing data in the . Fargate ephemeral storage is used for the replica. The default size is 20GB. This can be increased up to 200GB Allocate at least twice the size of the database to support the internal VACUUM operation. Guide: $PLATFORM Where should we deploy Zero next?? Let us know on Discord!", + "headings": [ + { + "text": "Architecture", + "id": "architecture" + }, + { + "text": "Topology", + "id": "topology" + }, + { + "text": "Updating", + "id": "updating" + }, + { + "text": "Client/Server Version Compatibility", + "id": "client-server-version-compatibility" + }, + { + "text": "Configuration", + "id": "configuration" + }, + { + "text": "Guide: Multi-Node on SST+AWS", + "id": "guide-multi-node-on-sst-aws" + }, + { + "text": "Setup Upstream", + "id": "setup-upstream" + }, + { + "text": "Setup AWS", + "id": "setup-aws" + }, + { + "text": "Initialize SST", + "id": "initialize-sst" + }, + { + "text": "Set SST Secrets", + "id": "set-sst-secrets" + }, + { + "text": "Deploy", + "id": "deploy" + }, + { + "text": "Guide: Single-Node on Fly.io", + "id": "guide-single-node-on-fly-io" + }, + { + "text": "Setup Quickstart", + "id": "setup-quickstart" + }, + { + "text": "Setup Fly.io", + "id": "setup-fly-io" + }, + { + "text": "Create Postgres app", + "id": "create-postgres-app" + }, + { + "text": "Seed Upstream database", + "id": "seed-upstream-database" + }, + { + "text": "Create Fly.io app", + "id": "create-fly-io-app" + }, + { + "text": "Publish ", + "id": "publish" + }, + { + "text": "Deploy Permissions", + "id": "deploy-permissions" + }, + { + "text": "Use Remote ", + "id": "use-remote" + }, + { + "text": "Deploy Frontend to Vercel", + "id": "deploy-frontend-to-vercel" + }, + { + "text": "Guide: Multi-Node on Raw AWS", + "id": "guide-multi-node-on-raw-aws" + }, + { + "text": "S3 Bucket", + "id": "s3-bucket" + }, + { + "text": "Fargate Services", + "id": "fargate-services" + }, + { + "text": "replication-manager", + "id": "replication-manager" + }, + { + "text": "view-syncer", + "id": "view-syncer" + }, + { + "text": "Notes", + "id": "notes" + }, + { + "text": "Guide: $PLATFORM", + "id": "guide-platform" + } + ] + }, + { + "id": "12-errors", + "title": "Error Handling", + "url": "/docs/errors", + "icon": "FileCode", + "content": "Errors from mutators and queries are thrown in response to method calls where possible, but many Zero errors occur asynchronously, during sync. 
You can catch these errors with the constructor parameter: You can use this to send errors to Sentry, show custom UI, etc. The first parameter to is a descriptive message. Additional parameters provide more detail, for example an object (with a stack), or a JSON object.", + "headings": [] + }, + { + "id": "13-introduction", + "title": "Welcome to Zero Alpha", + "url": "/docs/introduction", + "icon": "BookOpen", + "content": "Zero is a new kind of sync engine powered by queries. Rather than syncing entire tables to the client, or using static rules to carefully specify what to sync, you just write queries directly in your client code. Queries can access the entire backend database. Zero caches the data for queries locally on the device, and reuses that data automatically to answer future queries whenever possible. For typical applications, the result is that almost all queries are answered locally, instantly. It feels like you have access to the entire backend database directly from the client in memory. Occasionally, when you do a more specific query, Zero falls back to the server. But this happens automatically without any extra work required. Zero is made possible by a custom streaming query engine we built called ZQL, which uses Incremental View Maintenance on both client and server to efficiently keep large, complex queries up to date. Status Zero is in alpha. There are still some rough edges, and to run it, you need to deploy it yourself to AWS or similar. Even so, Zero is already quite fun to work with. We are using it ourselves to build our very own Linear-style bug tracker. We find that Zero is already much more productive than alternatives, even having to occasionally work around a missing feature. If you are building a new web app that needs to be fast and reactive, and can do the deployment yourself, it's a great time to get started with Zero. We're working toward a beta release and full production readiness this year.", + "headings": [ + { + "text": "Status", + "id": "status" + } + ] + }, + { + "id": "14-llms", + "title": "Welcome, 🤖!", + "url": "/docs/llms", + "icon": "Sparkle", + "content": "Are you an LLM? Do you like long walks through vector space and late-night tokenization? Or maybe you're a friend of an LLM, just trying to make life a little easier for the contextually challenged? Either way, you're in the right place! Stream on over to llms.txt for the text-only version of these docs.", + "headings": [] + }, + { + "id": "15-offline", + "title": "Offline", + "url": "/docs/offline", + "icon": "Unplug", + "content": "Zero currently supports offline reads, but not writes. We plan to support offline writes in the future, but we don't have a timeline for that yet. The lack of offline writes is often surprising to people familiar with sync engines, because offline is usually touted as something that comes for free with these tools. This page explains why Zero doesn't currently support offline writes, how we recommend you handle connectivity loss, and our future plans in this area. Offline Writes are a UX Problem While Zero can technically queue offline writes and replay them when reconnected (this happens by default in any sync engine, and is what Zero does today), that fact doesn't make supporting offline writes much easier. That's because a really hard part of offline writes is in handling conflicts, and no software tool can make that problem go away. For example, imagine two users are editing an article about cats. 
One goes offline and does a bunch of work on the article, while the other decides that the article should actually be about dogs and rewrites it. When the offline user reconnects, there is no way that any software algorithm can automatically resolve their conflict. One or the other of them is going to be upset. And while the above example may sound extreme, you can construct similar situations with the majority of common applications. Just take your own application and ask yourself what should really happen if one user takes their device offline for a week and makes arbitrarily complex changes while other users are working online. People who work on sync engines and related tools often say that offline is just extreme lag, but that's only true at a technical level. At a human level, being \"offline\" for a few seconds is very different from being offline for a few hours. The difference is how much knowledge you have about what your collaborators are doing, and how much of your work can be lost. The only way to support offline writes in general is to either: Make the logical datamodel append-only (i.e., users can create and mark tasks done, but cannot edit or delete them). Support custom UX to allow users to fork and merge conflicts when they occur. Only support editing from a single device. None of these is free. Building a good offline UX is a lot of work, and most of that work is borne by application developers. … And a Schema Problem But it's not just users that can diverge from each other. The server software and database schema can also diverge arbitrarily far from the client while the client is disconnected. When the client comes back online, the changes made may no longer be processable by the application, or may have a different effect than the user intended. So to support long offline periods, the server must also maintain backward compatibility with clients indefinitely. Similarly, the server can never reject an offline write (i.e., due to validation) because that could lead to a user losing huge amounts of work. … And a Sync Engine Problem Supporting offline writes also requires work in the sync engine. In Zero, there are a few specific impacts: The Zero client itself can get out of date while offline. On reconnect, the app might reload with a new version of the client. This new version must be able to read and process old data from arbitrarily long ago. An arbitrarily large number of pending mutations can be built up. These mutations must be replayed on reconnect, which can take a long time. When processing mutations on the server we must consider what should happen if the database or application server is temporarily unavailable. We need to treat that kind of error differently from a validation error. These problems are surmountable, but they require significant effort. Their solutions might also be in tension with other goals of the sync engine, like online performance and scalability. These tradeoffs will take time to work through. Zero's Position For all of the above reasons, we plan to disable offline writes in Zero for beta. When the Zero client loses connection to for several minutes (or when cannot reach the customer API server), it will enter a special offline mode. In this mode, all writes to Zero will throw. While we recognize that offline writes would be useful for some applications, the reality is that for most of the apps we want to support, the user is online the vast majority of the time and the cost to support offline is extremely high.
There is simply more value in making the online experience great first. We would like to revisit this in the future and really think through how to design APIs and patterns that allow developers to make successful offline-enabled apps. But it's not our priority right now. Dealing with Offline Today Until Zero disables offline writes automatically, we recommend using the parameter to the constructor to detect connection loss and disable editing manually in your UI. Even More Information Lies I was Told About Collaborative Editing: a detailed overview of the challenges around offline writes in any collaborative editing system. This Zero Discord thread covers some challenges specifically in the context of Zero. Patchwork by Ink & Switch is new and interesting research around how to support offline writes well in collaborative systems.",
    "headings": [
      {
        "text": "Offline Writes are a UX Problem",
        "id": "offline-writes-are-a-ux-problem"
      },
      {
        "text": "… And a Schema Problem",
        "id": "and-a-schema-problem"
      },
      {
        "text": "… And a Sync Engine Problem",
        "id": "and-a-sync-engine-problem"
      },
      {
        "text": "Zero's Position",
        "id": "zero-s-position"
      },
      {
        "text": "Dealing with Offline Today",
        "id": "dealing-with-offline-today"
      },
      {
        "text": "Even More Information",
        "id": "even-more-information"
      }
    ]
  },
  {
    "id": "16-open-source",
    "title": "Zero is Open Source Software",
    "url": "/docs/open-source",
    "icon": "CircleDashed",
    "content": "Specifically, the Zero client and server are Apache-2 licensed. You can use, modify, host, and distribute them freely: https://github.com/rocicorp/mono/blob/main/LICENSE Business Model We plan to commercialize Zero in the future by offering a hosted service for people who do not want to run it themselves. We expect to charge prices for this roughly comparable to today's database hosting services. We'll also offer white-glove service to help enterprises run within their own infrastructure. These plans may change as we develop Zero further. For example, we may also build closed-source companion software – similar to how Docker, Inc. charges for team access to Docker Desktop. But we have no plans to ever change the licensing of the core product: We're building a general-purpose sync engine for the entire web, and we can only do that if the core remains completely open.",
    "headings": [
      {
        "text": "Business Model",
        "id": "business-model"
      }
    ]
  },
  {
    "id": "17-overview",
    "title": "Concepts (How Zero Works)",
    "url": "/docs/overview",
    "icon": "FileCode",
    "content": "",
    "headings": []
  },
  {
    "id": "18-permissions",
    "title": "Permissions",
    "url": "/docs/permissions",
    "icon": "ShieldCheck",
    "content": "Permissions are expressed using ZQL and run automatically with every read and write. Define Permissions Permissions are defined in using the function. Here's an example of limiting reads to members of an organization and deletes to only the creator of an issue: returns a policy object for each table in the schema. Each policy defines a ruleset for the operations that are possible on a table: , , , and . Access is Denied by Default If you don't specify any rules for an operation, it is denied by default. This is an important safety feature that helps ensure data isn't accidentally exposed. To enable full access to an action (i.e., during development) use the helper: To do this for all actions, use : Permission Evaluation Zero permissions are \"compiled\" into a JSON-based format at build-time.
This file is stored in the table of your upstream database. Like other tables, it replicates live down to . then parses this file, and applies the encoded rules to every read and write operation. The end result is that you can't really use most features of JS in these rules. Specifically you cannot: Iterate over properties or array elements in the auth token Use any JS features beyond property access of Use any conditional or global state Basically only property access is allowed. This is really confusing and we're working on a better solution. Permission Deployment During development, permissions are compiled and uploaded to your database completely automatically as part of the script. For production, you need to call within your app to update the permissions in the production database whenever they change. You would typically do this as part of your normal schema migration or CI process. For example, the SST deployment script for zbugs looks like this: See the SST Deployment Guide for more details. Rules Each operation on a policy has a ruleset containing zero or more rules. A rule is just a TypeScript function that receives the logged in user's and generates a ZQL where expression. At least one rule in a ruleset must return a row for the operation to be allowed. Select Permissions You can limit the data a user can read by specifying a ruleset. Select permissions act like filters. If a user does not have permission to read a row, it will be filtered out of the result set. It will not generate an error. For example, imagine a select permission that restricts reads to only issues created by the user: If the issue table has two rows, one created by the user and one by someone else, the user will only see the row they created in any queries. Insert Permissions You can limit what rows can be inserted and by whom by specifying an ruleset. Insert rules are evaluated after the entity is inserted. So if they query the database, they will see the inserted row present. If any rule in the insert ruleset returns a row, the insert is allowed. Here's an example of an insert rule that disallows inserting users that have the role 'admin'. Update Permissions There are two types of update rulesets: and . Both rulesets must pass for an update to be allowed. rules see the version of a row before the mutation is applied. This is useful for things like checking whether a user owns an entity before editing it. rules see the version of a row after the mutation is applied. This is useful for things like ensuring a user can only mark themselves as the creator of an entity and not other users. Like other rulesets, and default to . This means that every table must define both these rulesets in order for any updates to be allowed. For example, the following ruleset allows an issue's owner to edit, but not re-assign the issue. The rule enforces that the current user still own the issue after edit. This ruleset allows an issue's owner to edit and re-assign the issue: And this allows anyone to edit an issue, but only if they also assign it to themselves. Useful for enforcing \"patches welcome\"? 🙃 Delete Permissions Delete permissions work in the same way as permissions except they run before the delete is applied. So if a delete rule queries the database, it will see that the deleted row is present. If any rule in the ruleset returns a row, the delete is allowed. Debugging See Debugging Permissions. 
Examples See hello-zero for a simple example of write auth and zbugs for a much more involved one.",
    "headings": [
      {
        "text": "Define Permissions",
        "id": "define-permissions"
      },
      {
        "text": "Access is Denied by Default",
        "id": "access-is-denied-by-default"
      },
      {
        "text": "Permission Evaluation",
        "id": "permission-evaluation"
      },
      {
        "text": "Permission Deployment",
        "id": "permission-deployment"
      },
      {
        "text": "Rules",
        "id": "rules"
      },
      {
        "text": "Select Permissions",
        "id": "select-permissions"
      },
      {
        "text": "Insert Permissions",
        "id": "insert-permissions"
      },
      {
        "text": "Update Permissions",
        "id": "update-permissions"
      },
      {
        "text": "Delete Permissions",
        "id": "delete-permissions"
      },
      {
        "text": "Debugging",
        "id": "debugging"
      },
      {
        "text": "Examples",
        "id": "examples"
      }
    ]
  },
  {
    "id": "19-postgres-support",
    "title": "Supported Postgres Features",
    "url": "/docs/postgres-support",
    "icon": "Blend",
    "content": "Postgres has a massive feature set, and Zero supports a growing subset of it. Object Names Table and column names must begin with a letter or underscore This can be followed by letters, numbers, underscores, and hyphens Regex: The column name is reserved for internal use Object Types Tables are synced Views are not synced generated columns are synced All other generated columns are not synced Indexes aren't synced per se, but we do implicitly add indexes to the replica that match the upstream indexes. In the future, this will be customizable. Column Types Zero will sync arrays to the client, but there is no support for filtering or joining on array elements yet in ZQL. Other Postgres column types aren’t supported. They will be ignored when replicating (the synced data will be missing that column) and you will get a warning when starts up. If your schema has a pg type not listed here, you can support it in Zero by using a trigger to map it to some type that Zero can support. For example if you have a GIS polygon type in the column , you can use a trigger to map it to a column. You could either use another trigger to map in the reverse direction to support changes for writes, or you could use a custom mutator to write to the polygon type directly on the server. Let us know if the lack of a particular column type is hindering your use of Zero. It can likely be added. Column Defaults Default values are allowed in the Postgres schema, but there currently is no way to use them from a Zero app. An mutation requires all columns to be specified, except when columns are nullable (in which case, they default to null). Since there is no way to leave non-nullable columns off the insert on the client, there is no way for PG to apply the default. This is a known issue and will be fixed in the future. IDs It is strongly recommended to use client-generated random strings like uuid, ulid, nanoid, etc. for primary keys. This makes optimistic creation and updates much easier. You could sync the highest value seen for that table, but there are race conditions and it is possible for that ID to be taken by the time the creation makes it to the server. Your database can resolve this and assign the next ID, but now the relationships you created optimistically will be against the wrong row. Blech. GUIDs make a lot more sense in synced applications. If your table has a natural key you can use that and it has fewer problems. But there is still the chance for a conflict.
Imagine you are modeling orgs and you choose domainName as the natural key. It is possible for a race to happen and when the creation gets to the server, somebody has already chosen that domain name. In that case, the best thing to do is reject the write and show the user an error. If you want to have a short auto-incrementing numeric ID for UX reasons (i.e., a bug number), that is possible: Primary Keys Each table synced with Zero must have either a primary key or at least one unique index. This is needed so that Zero can identify rows during sync, to distinguish between an edit and a remove/add. Multi-column primary and foreign keys are supported. Limiting Replication You can use Permissions to limit tables and rows from replicating to Zero. In the near future, you'll also be able to use Permissions to limit individual columns. Until then, a workaround is to use the Postgres publication feature to control the tables and columns that are replicated into . In your pg schema setup, create a Postgres with the tables and columns you want: Then, specify this publication in the App Publications option. (By default, Zero creates a publication that publishes the entire public schema.) To limit what is synced from the replica to actual clients (e.g., web browsers) you can use read permissions. Schema changes Most Postgres schema changes are supported as is. Two cases require special handling: Adding columns Adding a column with a non-constant value is not supported. This includes any expression with parentheses, as well as the special functions , , and (due to a constraint of SQLite). However, the value of an existing column can be changed to any value, including non-constant expressions. To achieve the desired column default: Add the column with no value Backfill the column with desired values Set the column's value Changing publications Postgres allows you to change published tables/columns with an statement. Zero automatically adjusts the table schemas on the replica, but it does not receive the pre-existing data. To stream the pre-existing data to Zero, make an innocuous after adding the tables/columns to the publication: Self-Referential Relationships See zero-schema", + "headings": [ + { + "text": "Object Names", + "id": "object-names" + }, + { + "text": "Object Types", + "id": "object-types" + }, + { + "text": "Column Types", + "id": "column-types" + }, + { + "text": "Column Defaults", + "id": "column-defaults" + }, + { + "text": "IDs", + "id": "ids" + }, + { + "text": "Primary Keys", + "id": "primary-keys" + }, + { + "text": "Limiting Replication", + "id": "limiting-replication" + }, + { + "text": "Schema changes", + "id": "schema-changes" + }, + { + "text": "Adding columns", + "id": "adding-columns" + }, + { + "text": "Changing publications", + "id": "changing-publications" + }, + { + "text": "Self-Referential Relationships", + "id": "self-referential-relationships" + } + ] + }, + { + "id": "20-quickstart", + "title": "Quickstart", + "url": "/docs/quickstart", + "icon": "CirclePlay", + "content": "Prerequisites Docker Node 20+ Run In one terminal, install and start the database: In a second terminal, start : In a final terminal, start the UI: Quick Overview is a demo app that allows querying over a small dataset of fake messages between early Zero users. Here are some things to try: Press the Add Messages button to add messages to the UI. Any logged-in or anonymous users are allowed to add messages. Press the Remove Messages button to remove messages. 
Only logged-in users are allowed to remove messages. You can hold shift to bypass the UI warning and see that write access control is being enforced server-side – the UI flickers as the optimistic write happens instantly and is then reverted by the server. Press login to login as a random user, then the remove button will work. Open two different browsers and see how fast sync propagates changes. Add a filter using the From and Contains controls. Notice that filters are fully dynamic and synced. Edit a message by pressing the pencil icon. You can only edit messages from the user you’re logged in as. As before you can attempt to bypass by holding shift. Check out the SQL schema for this database in . Login to the database with (or any other pg viewer) and delete or alter a row. Observe that it deletes from UI automatically. Detailed Walkthrough Deployment You can deploy Zero apps to most cloud providers that support Docker and Postgres. See Deployment for more information.", + "headings": [ + { + "text": "Prerequisites", + "id": "prerequisites" + }, + { + "text": "Run", + "id": "run" + }, + { + "text": "Quick Overview", + "id": "quick-overview" + }, + { + "text": "Detailed Walkthrough", + "id": "detailed-walkthrough" + }, + { + "text": "Deployment", + "id": "deployment" + } + ] + }, + { + "id": "21-react", + "title": "React", + "url": "/docs/react", + "icon": "React", + "content": "Zero has built-in support for React. Here’s what basic usage looks like: ZeroProvider The hook must be used within a component. The component is responsible for creating and destroying instances reactively. You can also pass a instance to the if you want to control the lifecycle of the instance yourself: Complete quickstart here: https://github.com/rocicorp/hello-zero", + "headings": [ + { + "text": "ZeroProvider", + "id": "zeroprovider" + } + ] + }, + { + "id": "22-reading-data", + "title": "Reading Data with ZQL", + "url": "/docs/reading-data", + "icon": "ArrowDown", + "content": "ZQL is Zero’s query language. Inspired by SQL, ZQL is expressed in TypeScript with heavy use of the builder pattern. If you have used Drizzle or Kysely, ZQL will feel familiar. ZQL queries are composed of one or more clauses that are chained together into a query. Unlike queries in classic databases, the result of a ZQL query is a view that updates automatically and efficiently as the underlying data changes. You can call a query’s method to get a view, but more typically you run queries via some framework-specific bindings. For example see for React or SolidJS. ZQL caches values and returns them multiple times. If you modify a value returned from ZQL, you will modify it everywhere it is used. This can lead to subtle bugs. JavaScript and TypeScript lack true immutable types so we use to help enforce it. But it's easy to cast away the accidentally. In the future, we'll all returned data in mode to help prevent this. Select ZQL queries start by selecting a table. There is no way to select a subset of columns; ZQL queries always return the entire row (modulo column permissions). This is a design tradeoff that allows Zero to better reuse the row locally for future queries. This also makes it easier to share types between different parts of the code. Ordering You can sort query results by adding an clause: Multiple clauses can be present, in which case the data is sorted by those clauses in order: All queries in ZQL have a default final order of their primary key. 
Assuming the table has a primary key on the column, then: Limit You can limit the number of rows to return with : Paging You can start the results at or after a particular row with : By default is exclusive - it returns rows starting after the supplied reference row. This is what you usually want for paging. If you want inclusive results, you can do: Getting a Single Result If you want exactly zero or one results, use the clause. This causes ZQL to return rather than . overrides any clause that is also present. Relationships You can query related rows using relationships that are defined in your Zero schema. Relationships are returned as hierarchical data. In the above example, each row will have a field which is itself an array of the corresponding comments row. You can fetch multiple relationships in a single query: Refining Relationships By default all matching relationship rows are returned, but this can be refined. The method accepts an optional second function which is itself a query. This relationship query can have all the same clauses that top-level queries can have. You can sometimes work around this by making the junction relationship explicit, depending on your schema and usage. Nested Relationships You can nest relationships arbitrarily: Where You can filter a query with : The first parameter is always a column name from the table being queried. Intellisense will offer available options (sourced from your Zero Schema). Comparison Operators Where supports the following comparison operators: | Operator | Allowed Operand Types | Description | | ---------------------------------------- | ----------------------------- | ------------------------------------------------------------------------ | | , | boolean, number, string | JS strict equal (===) semantics | | , , , | number | JS number compare semantics | | , , , | string | SQL-compatible / | | , | boolean, number, string | RHS must be array. Returns true if rhs contains lhs by JS strict equals. | | , | boolean, number, string, null | Same as but also works for | TypeScript will restrict you from using operators with types that don’t make sense – you can’t use with for example. Equals is the Default Comparison Operator Because comparing by is so common, you can leave it out and defaults to . Comparing to As in SQL, ZQL’s is not equal to itself (). This is required to make join semantics work: if you’re joining on you do not want an employee in no organization to match an org that hasn’t yet been assigned an ID. When you purposely want to compare to ZQL supports and operators that work just like in SQL: TypeScript will prevent you from comparing to with other operators. Compound Filters The argument to can also be a callback that returns a complex expression: is short for compare and works the same as at the top-level except that it can’t be chained and it only accepts comparison operators (no relationship filters – see below). Note that chaining is also a one-level : Relationship Filters Your filter can also test properties of relationships. Currently the only supported test is existence: The argument to is a relationship, so just like other relationships it can be refined with a query: As with querying relationships, relationship filters can be arbitrarily nested: The helper is also provided which can be used with , , , and to build compound filters that check relationship existence: Data Lifetime and Reuse Zero reuses data synced from prior queries to answer new queries when possible. This is what enables instant UI transitions. 
But what controls the lifetime of this client-side data? How can you know whether any particular query will return instant results? How can you know whether those results will be up to date or stale? The answer is that the data on the client is simply the union of rows returned from queries which are currently syncing. Once a row is no longer returned by any syncing query, it is removed from the client. Thus, there is never any stale data in Zero. So when you are thinking about whether a query is going to return results instantly, you should think about what other queries are syncing, not about what data is local. Data exists locally if and only if there is a query syncing that returns that data. A cache has a random set of rows with a random set of versions. There is no expectation that the cache any particular rows, or that the rows' have matching versions. Rows are simply updated as they are fetched. A replica, by contrast, is eagerly updated, whether or not any client has requested a row. A replica is always very close to up-to-date, and always self-consistent. Zero is a partial replica because it only replicates rows that are returned by syncing queries. Query Lifecycle Queries can be either active or backgrounded. An active query is one that is currently being used by the application. Backgrounded queries are not currently in use, but continue syncing in case they are needed again soon. Active queries are created one of three ways: The app calls to get a . The app uses a platform binding like React's . The app calls to sync larger queries without a view. Active queries sync until they are deactivated. The way this happens depends on how the query was created: For queries, the UI calls on the view. For , the UI unmounts the component (which calls under the covers). For , the UI calls on the return value of . Background Queries By default a deactivated query stops syncing immediately. But it's often useful to keep queries syncing beyond deactivation in case the UI needs the same or a similar query in the near future. This is accomplished with the parameter: The parameter specifies how long the app developer wishes the query to run in the background. The following formats are allowed (where is a positive integer): | Format | Meaning | | --------- | ------------------------------------------------------------------------------------ | | | No backgrounding. Query will immediately stop when deactivated. This is the default. | | | Number of seconds. | | | Number of minutes. | | | Number of hours. | | | Number of days. | | | Number of years. | | | Query will never be stopped. | If the UI re-requests a background query, it becomes an active query again. Since the query was syncing in the background, the very first synchronous result that the UI receives after reactivation will be up-to-date with the server (i.e., it will have of ). Just like other types of queries, the data from background queries is available for use by new queries. A common pattern is to preload a subset of most commonly needed data with and then do more specific queries from the UI with, e.g., . Most often the preloaded data will be able to answer user queries, but if not, the new query will be answered by the server and backgrounded for a day in case the user revisits it. Client Capacity Management Zero has a default soft limit of 20,000 rows on the client-side, or about 20MB of data assuming 1KB rows. This limit can be increased with the flag, but we do not recommend setting it higher than 100,000.
Initial sync will be slow, slowing down initial app load. Because storage in browser tabs is unreliable, initial sync can occur surprisingly often. We want to answer queries instantly as often as possible. This requires client-side data in memory on the main thread. If we have to page to disk, we may as well go to the network and reduce complexity. Even though Zero's queries are very efficient, they do still have some cost, especially hydration. Massive client-side storage would result in hydrating tons of queries that are unlikely to be used every time the app starts. Most importantly, no matter how much data you store on the client, there will be cases where you have to fallback to the server: Some users might have huge amounts of data. Some users might have tiny amounts of available client storage. You will likely want the app to start fast and sync in the background. Because you have to be able to fallback to server the question becomes what is the right amount of data to store on the client?, not how can I store the absolute max possible data on the client? The goal with Zero is to answer 99% of queries on the client from memory. The remaining 1% of queries can fallback gracefully to the server. 20,000 rows was chosen somewhat arbitrarily as a number of rows that was likely to be able to do this for many applications. There is no hard limit at 20,000 or 100,000. Nothing terrible happens if you go above. The thing to keep in mind is that: All those queries will revalidate every time your app boots. All data synced to the client is in memory in JS. Here is how this limit is managed: Active queries are never destroyed, even if the limit is exceeded. Developers are expected to keep active queries well under the limit. The value counts from the moment a query deactivates. Backgrounded queries are destroyed immediately when the is reached, even if the limit hasn't been reached. If the client exceeds its limit, Zero will destroy backgrounded queries, least-recently-used first, until the store is under the limit again. Thinking in Queries Although IVM is a very efficient way to keep queries up to date relative to re-running them, it isn't free. You still need to think about how many queries you are creating, how long they are kept alive, and how expensive they are. This is why Zero defaults to not backgrounding queries and doesn't try to aggressively fill its client datastore to capacity. You should put some thought into what queries you want to run in the background, and for how long. Zero currently provides a few basic tools to understand the cost of your queries: The client logs a warning for slow query materializations. Look for in your logs. The default threshold is (including network) but this is configurable with the parameter. The client logs the materialization time of all queries at the level. Look for in your logs. The server logs a warning for slow query materializations. Look for in your logs. The default threshold is but this is configurable with the configuration parameter. We will be adding more tools over time. Completeness Zero returns whatever data it has on the client immediately for a query, then falls back to the server for any missing data. Sometimes it's useful to know the difference between these two types of results. To do so, use the from : The possible values of are currently and . The value is currently only returned when Zero has received the server result. 
But in the future, Zero will be able to return this result type when it knows that all possible data for this query is already available locally. Additionally, we plan to add a result for when the data is known to be a prefix of the complete result. See Consistency for more information. Handling Missing Data It is inevitable that there will be cases where the requested data cannot be found. Because Zero returns local results immediately, and server results asynchronously, displaying \"not found\" / 404 UI can be slightly tricky. If you just use a simple existence check, you will often see the 404 UI flicker while the server result loads: The way to do this correctly is to only display the \"not found\" UI when the result type is . This way, the 404 page is slow, but pages with data are still just as fast. Listening to Changes Currently, the way to listen for changes in query results is not ideal. You can add a listener to a materialized view, which receives the new data and result as parameters: However, this method maintains its own materialized view in memory, which is wasteful. It also doesn't allow for granular listening to events like and of rows. A better way is to create your own view without actually storing the data, which also allows you to listen to specific events. Again, the API is not good and will be improved in the future. (see View implementations in or ) Preloading Almost all Zero apps will want to preload some data in order to maximize the feel of instantaneous UI transitions. In Zero, preloading is done via queries – the same queries you use in the UI and for auth. However, because preload queries are usually much larger than a screenful of UI, Zero provides a special helper to avoid the overhead of materializing the result into JS objects: Running Queries Once Usually subscribing to a query is what you want in a reactive UI, but every so often you'll need to run a query just once. To do this, use the method: By default, only returns results that are currently available on the client. That is, it returns the data that would be given for . If you want to wait for the server to return results, pass to : This is the same as saying or . Consistency Zero always syncs a consistent partial replica of the backend database to the client. This avoids many common consistency issues that come up in classic web applications. But there are still some consistency issues to be aware of when using Zero. For example, imagine that you have a bug database with 10k issues. You preload the first 1k issues sorted by created. The user then does a query of issues assigned to themselves, sorted by created. Among the 1k issues that were preloaded, imagine 100 are found that match the query. Since the data we preloaded is in the same order as this query, we are guaranteed that any local results found will be a prefix of the server results. The resulting UX is nice: the user will see initial results to the query instantly. If more results are found server-side, those results are guaranteed to sort below the local results. There's no shuffling of results when the server response comes in. Now imagine that the user switches the sort to ‘sort by modified’. This new query will run locally, and will again find some local matches. But it is now unlikely that the local results found are a prefix of the server results. When the server result comes in, the user will probably see the results shuffle around. 
To avoid this annoying effect, what you should do in this example is also preload the first 1k issues sorted by modified desc. In general for any query shape you intend to do, you should preload the first results for that query shape with no filters, in each sort you intend to use. In the future, we will be implementing a consistency model that fixes these issues automatically. We will prevent Zero from returning local data when that data is not known to be a prefix of the server result. Once the consistency model is implemented, preloading can be thought of as purely a performance thing, and not required to avoid unsightly flickering.", + "headings": [ + { + "text": "Select", + "id": "select" + }, + { + "text": "Ordering", + "id": "ordering" + }, + { + "text": "Limit", + "id": "limit" + }, + { + "text": "Paging", + "id": "paging" + }, + { + "text": "Getting a Single Result", + "id": "getting-a-single-result" + }, + { + "text": "Relationships", + "id": "relationships" + }, + { + "text": "Refining Relationships", + "id": "refining-relationships" + }, + { + "text": "Nested Relationships", + "id": "nested-relationships" + }, + { + "text": "Where", + "id": "where" + }, + { + "text": "Comparison Operators", + "id": "comparison-operators" + }, + { + "text": "Equals is the Default Comparison Operator", + "id": "equals-is-the-default-comparison-operator" + }, + { + "text": "Comparing to ", + "id": "comparing-to" + }, + { + "text": "Compound Filters", + "id": "compound-filters" + }, + { + "text": "Relationship Filters", + "id": "relationship-filters" + }, + { + "text": "Data Lifetime and Reuse", + "id": "data-lifetime-and-reuse" + }, + { + "text": "Query Lifecycle", + "id": "query-lifecycle" + }, + { + "text": "Background Queries", + "id": "background-queries" + }, + { + "text": "Client Capacity Management", + "id": "client-capacity-management" + }, + { + "text": "Thinking in Queries", + "id": "thinking-in-queries" + }, + { + "text": "Completeness", + "id": "completeness" + }, + { + "text": "Handling Missing Data", + "id": "handling-missing-data" + }, + { + "text": "Listening to Changes", + "id": "listening-to-changes" + }, + { + "text": "Preloading", + "id": "preloading" + }, + { + "text": "Running Queries Once", + "id": "running-queries-once" + }, + { + "text": "Consistency", + "id": "consistency" + } + ] + }, + { + "id": "23-release-notes/0.1", + "title": "Zero 0.1", + "url": "/docs/release-notes/0.1", + "icon": "FileCode", + "content": "Breaking changes The name of some config keys in changed: → → → → Changed default port of to . So your app startup should look like . Features Print a warning to js console when Zero constructor param is or zero-cache should now correctly bind to both ipv4 and ipv6 loopback addresses. This should fix the issue where using to connect to zero-cache on some systems did not work. Check for presence of early in startup of . Print a clear error to catch people accidentally running Zero under SSR. Fix annoying error in js console in React strict mode from constructing and closing Replicache in quick succession. Source tree fixes These only apply if you were working in the Rocicorp monorepo. 
Fixed issue where zbugs didn’t rebuild when zero dependency changed - generally zbugs build normally again The zero binary has the right permissions bit so you don’t have to chmod u+x after build Remove overloaded name in use-query.tsx (thanks Scott 🙃)", + "headings": [ + { + "text": "Breaking changes", + "id": "breaking-changes" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Source tree fixes", + "id": "source-tree-fixes" + } + ] + }, + { + "id": "24-release-notes/0.10", + "title": "Zero 0.10", + "url": "/docs/release-notes/0.10", + "icon": "FileCode", + "content": "Install Features None. Fixes Remove top-level await from . Various logging improvements. Don't throw error when unavailable on server. Support building on Windows (running on Windows still doesn't work) Breaking Changes None.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "25-release-notes/0.11", + "title": "Zero 0.11", + "url": "/docs/release-notes/0.11", + "icon": "FileCode", + "content": "Install Features Windows should work a lot better now. Thank you very much to aexylus and Sergio Leon for the testing and contributions here. Support nested property access in JWT auth tokens (docs). Make initial sync configurable (docs). Add query result type to SolidJS (docs) Docker image now contains native amd64 and arm64 binaries. Add constructor parameter to enable multiple instances for same . Fixes Many, many fixes, including: Fix downstream replication of primitive values Fix replication of messages Fix large storage use for idle pg instances Add runtime sanity checks for when a table is referenced but not synced Fix for multitenant Breaking Changes The addition of result types to SolidJS is a breaking API change on SolidJS only. See the changes to for upgrade example.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "26-release-notes/0.12", + "title": "Zero 0.12", + "url": "/docs/release-notes/0.12", + "icon": "FileCode", + "content": "Install Features Schemas now support circular relationships (docs). Added and schema helpers to default relationship type (docs). Support for syncing tables without a primary key as long as there is a unique index. This enables Prisma's implicit many-to-many relations (docs). Zero has been confirmed to work with Aurora and Google Cloud SQL (docs) Client bundle size reduced from 55kb to 47kb (-15%). Fixes Windows: was spawning emptying terminals and leaving listeners connected on exit. Incorrect warning in about enums not being supported. Failure to handle the primary key of Postgres tables changing. Incorrect results when is before in query (bug). Error: The inferred type of '...' cannot be named without a reference to .... Error: insufficient upstream connections. Several causes of flicker in React. Incorrect values for when unloading and loading a query quickly (bug). Error: Postgres is missing the column '...' but that column was part of a row. Pointless initial empty render in React when data is already available in memory. Error: Expected string at ... Got array during auth. incorrectly allows comparing to with the operator (bug). SolidJS: Only call once per transaction. 
Breaking Changes The schema definition syntax has changed to support circular relationships. See the changes to and for upgrade examples.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "27-release-notes/0.13", + "title": "Zero 0.13", + "url": "/docs/release-notes/0.13", + "icon": "FileCode", + "content": "Install Features Multinode deployment for horizontal scalability and zero-downtime deploys (docs). SST Deployment Guide (docs). Plain AWS Deployment Guide (docs). Various exports for external libraries Remove build hash from docker version for consistency with npm (discussion) Fixes Move heartbeat monitoring to separate path, not port Type instantiation is excessively deep and possibly infinite (bug). 20x improvement to performance (discussion) Breaking Changes Removing the hash from the version is a breaking change if you had scripts relying on that. Moving the heartbeat monitor to a path is a breaking change for deployments that were using that.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "28-release-notes/0.14", + "title": "Zero 0.14", + "url": "/docs/release-notes/0.14", + "icon": "FileCode", + "content": "Install Features Use to map column or tables to a different name (docs). Sync from muliple Postgres schemas (docs) Fixes not working when unset (bug) Error: \"single output already exists\" in hello-zero-solid (bug) helper doesn't work with query having (bug) Partitioned Postgres tables not replicating Breaking Changes None.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "29-release-notes/0.15", + "title": "Zero 0.15", + "url": "/docs/release-notes/0.15", + "icon": "FileCode", + "content": "Install Upgrade Guide This release changes the way that permissions are sent to the server. Before, permissions were sent to the server by setting the or environment variables, which include the permissions. In 0.15, these variables go away and are replaced by a new command: . This command writes the permissions to a new table in the upstream database. This design allows live permission updates, without restarting the server. It also solves problems with max env var size that users were seeing. This release also flips the default permission from to for all rules. To upgrade your app: See the changes to hello-zero or hello-zero-solid for how to update your permissions. Remove the and environment variables from your setup. They aren't used anymore. Use to deploy permissions when necessary. You can hook this up to your CI to automate it. See the zbugs implementation as an example. Features Live-updating permissions (docs). Permissions now default to deny rather than allow (docs). 
Fixes Multiple in same query not working (PR) Allow overlapped mutators (bug) \"Immutable type too deep\" error (PR) Log server version at startup (PR) Eliminate quadratic CVR writes (PR) Handle in the replication stream (PR) Make the auto-reset required error more prominent (PR) Add recommendation when schema load fails (PR) Throw error if multiple auth options set (PR) Handle NULL characters in JSON columns (PR) Breaking Changes Making permissions deny by default breaks existing apps. To fix add or other appropriate permissions for your tables. See docs. The and environment variables are no longer used. Remove them from your setup and use instead.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrade Guide", + "id": "upgrade-guide" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "30-release-notes/0.16", + "title": "Zero 0.16", + "url": "/docs/release-notes/0.16", + "icon": "FileCode", + "content": "Install Upgrading See the upgrade from hello-zero or hello-zero-solid for an example. Features Documented how to use lambdas to deploy permissions in SST, rather than needing CI/CD to have access to Postgres. (doc – search for \"\"). Added simple debugging logs for read and write permissions (doc). Fixes Improve performance of initial sync about 2x (PR 1, PR 2). should allow array arguments (Report, PR). Export (Report). Fix false-positive in schema change detection (Report, PR). Fix writes of numeric types (Report, PR) Fix bug where litestream was creating way too many files in s3 (PR) Fix memory leak in change-streamer noticeable under high write load (PR) Fix error (PR) Correctly handle optional booleans (PR) Ignore indexes with unpublished columns (PR) Breaking Changes None.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrading", + "id": "upgrading" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "31-release-notes/0.17", + "title": "Zero 0.17", + "url": "/docs/release-notes/0.17", + "icon": "FileCode", + "content": "Install Upgrading See the upgrade from hello-zero or hello-zero-solid for an example. Features Queries now take an optional argument. This argument backgrounds queries for some time after the app stops using them. Background queries continue syncing so they are instantly ready if the UI re-requests them. The data from background queries is also available to be used by new queries where possible (doc). Structural schema versioning. This is TypeScript, why are we versioning with numbers like cave-people?? We got rid of concept entirely and now determine schema compatibility completely automatically, TS-stylie (doc). Permissions now scoped to \"apps\". You can now have different Zero \"apps\" talking to the same upstream database. Each app gets completely separate configuration and permissions. This should also enable previewing (each preview would be its own app). Apps replace the existing \"shard\" concept (doc). Initial replication is over 5x faster, up to about 50MB/second or 15k row/second in our tests. Added warnings for slow hydration in both client and server (doc). is now enabled by default for databases that don't support event triggers (doc). 
Default and databases to , so that you don't have to specify them in the common case where they are the same as upstream. This docs site now has search! Fixes Certain kinds of many:many joins were causing assertions Certain kinds of queries were causing consistency issues Support for PostgreSQL tables We now print a stack trace during close at level to enable debugging errors where Zero is accessed after close. We now print a warning when is missing rather than throwing. This makes it a little easier to use Zero in SSR setups. We now reset implicitly in a few edge cases rather than halting replication. Fixed a deadlock in . Breaking Changes now returns its result via promise. This is required for compatibility with upcoming custom mutators, but also will allow us to wait for server results in the future (though that (still 😢) doesn't exist yet).", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrading", + "id": "upgrading" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "32-release-notes/0.18", + "title": "Zero 0.18", + "url": "/docs/release-notes/0.18", + "icon": "FileCode", + "content": "Install Upgrading To try out custom mutators, see the changes to hello-zero-solid. Features Custom Mutators! Finally! Define arbitrary write operations in code (doc). Added inspector API for debugging sync, queries, and client storage (doc). Added tool to debug query performance (doc). Added tool to debug permissions (doc). Added script to prettify Zero's internal AST format (doc). Fixes Added backpressure to to protect against Postgres moving faster than we can push to clients (PR). has been deprecated. got folded into and got folded into (PR). Support DDL changes (PR) Allow to continue running while a new one re-replicates. (PR). Improve replication performance for some schema changes (PR). Make the log level of configurable (PR) Bind to the expression builder (PR) Fix error (PR) Fix in Expo (thanks !) (PR). Fix Vue bindings ref counting bug. Bindings no longer need to pass (PR). Fix CVR ownership takeover race conditions (PR). Support in degraded-mode pg providers (PR). Handle corrupt sqlite db by re-replicating (PR). Don't send useless pokes to clients that are unchanged (PR). Add to queries using a relation that is marked (PR). Export Breaking Changes None.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrading", + "id": "upgrading" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "33-release-notes/0.19", + "title": "Zero 0.19", + "url": "/docs/release-notes/0.19", + "icon": "FileCode", + "content": "Install Upgrading If you use custom mutators, please see hello-zero-solid for how to update your push endpoint. If you use SolidJS, please switch to . If you are , you should switch to to be consistent with . If you were using a 0.19 canary, the property returns error by rejection again (like 0.18 did). Sorry about the thrash here. Features Add a param to so it can wait for server results (doc, bug) is now for consistency with , old API still works but deprecated (doc) Improve speed of litestream restore by about 7x Increase replication speed when using JSON by about 25% Add options to to apply permissions and auth data (doc). 
Add option to to to delay connecting to upstram until first connection (doc) Add endpoint for getting some health statistics from a running Zero instance (doc) Fixes Support passing to (PR) Fix layering in to better support custom db implementations (thanks Erik Munson!) (PR) Fix socket disconnects in GCP (PR) Quote Postgres enum types to preserve casing (report) : Return for empty result set when using : Allow accessing tables in non-public schemas : Allow where is to match client behavior Fix broken replication when updating a key that is part of a unique (but non-PK) index : Rename to to fit Solid naming conventions (old name deprecated) Resync when publications are missing (PR) Fix missing in (PR) Fix timezone shift when writing to / and server is non-UTC timezone (thanks Tom Jenkinson!) (PR) Bound time spent in incremental updates to 1/2 hydration time Fix being off by 1000 in some cases 😬 (PR) : Relationships nested in a junction relationship were not working correctly (PR) Custom mutators: Due to multitab, client can receive multiple responses for same mutation Fix deadlock that could happen when pushing on a closed websocket (PR) Fix incorrect shutdown under heavy CPU load (thanks Erik Munson!) (PR) Fix case where deletes were getting reverted (thanks for reproduction Marc MacLeod!) (PR) : Incorrect handling of self-join, and not exists is not supported on the client re-auth on 401s returned by push endpoint Added constructor parameter to allow passing query params to the push endpoint (doc) Breaking Changes The structure of setting up a has changed slightly. See push endpoint setup or upgrade guide. Not technically a breaking change from 0.18, but if you were using 0.19 canaries, the property returns error by rejection again (like 0.18 did) (doc).", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrading", + "id": "upgrading" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "34-release-notes/0.2", + "title": "Zero 0.2", + "url": "/docs/release-notes/0.2", + "icon": "FileCode", + "content": "Breaking changes None Features “Skip mode”: zero-cache now skips columns with unsupported datatypes. A warning is printed out when this happens: This makes it easy to use zero-cache with existing schemas that have columns Zero can’t handle. You can pair this with Postgres triggers to easily translate unsupported types into something Zero can sync. Zero now supports compound primary keys. You no longer need to include an extraneous column on the junction tables. Fixes Change the way Zero detects unsupported environments to work in One (and any other supported env). Before, Zero was looking for WebSocket and indexedDB early on, but indexedDB won’t be present on RN as SQLite will be used. Instead look for indexedDB only at use. Require Node v20 explicitly in package.json to prevent accidentally compiling better-sqlite3 with different Node version than running with. Ensure error messages early in startup get printed out before shutting down in multiprocess mode. Docs Factored out the sample app from the docs into its own Github repo so you can just download it and poke around if you prefer that. Source tree fixes Run zero-cache from source. You no longer have to build before running , it picks up the changes automatically. 
zbugs Numerous polish/styling fixes Change default to ‘open’ bugs Add ‘assignee’ field", + "headings": [ + { + "text": "Breaking changes", + "id": "breaking-changes" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Docs", + "id": "docs" + }, + { + "text": "Source tree fixes", + "id": "source-tree-fixes" + }, + { + "text": "zbugs", + "id": "zbugs" + } + ] + }, + { + "id": "35-release-notes/0.20", + "title": "Zero 0.20", + "url": "/docs/release-notes/0.20", + "icon": "FileCode", + "content": "Install Upgrading There are two config changes for multinode deployments: Required: Remove 's env var and replace it with . Optional: Change the env var from being passed to both and nodes to being passed only to . This config is no longer needed by (and is ignored by it). See hello-zero for an upgrade example using SST. Additionally, the , feature was removed. We do not think anyone was using it, but if you were please reach out to us for options. Features Supabase is now fully supported. After upgrading, you should see that schema changes are incremental and don't reset the replica (docs). Improve performance of single-key reads on client. Scale depends on size of data but 100x improvement is common (PR). Implement short-circuiting for queries. Because of permissions, one or more branches of would often be empty, turning the entire into a full-table scan. 100x improvement on chinook test dataset (PR). Remove DNF conversion. This was intended to make consistency easier in the future, but was resulting in some queries exploding in size (PR, bug). Autodiscovery for . nodes now find using the Postgres database, and no longer need an internal load balancer. See the new config in the deployment docs (PR). Make specific to . nodes now ignore this config and learn it from instead. This makes restarting replication less error-prone (PR, discussion). OpenTelemetry support (docs). Fixes Allow dots in column names (only works with custom mutators) (PR). Fix websocket liveness check to avoid false negatives when busy (PR). Fix unhandled exception in when processing query eviction (PR). Keep microsecond precision across timezones (PR). Fix unhandled exception in during (PR). Fix in (PR). Mutators: assert provided columns actually exist (PR). Fix ordering of columns in replicated index (PR). Use a shorter keepalive for replication stream for compat with Neon (PR). Allow destructuring in (PR). Add flow control for large change DB transactions (PR). Fix handling of pg types with params (char, varchar, numeric, etc) (PR). Support and in (PR). Breaking Changes The autodiscovery feature for is a breaking change for multinode deployments. See the upgrade instructions for details. The config was removed 🫗. The config was removed. It is no longer needed because initial sync now adapts to available memory automatically.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrading", + "id": "upgrading" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "36-release-notes/0.21", + "title": "Zero 0.21", + "url": "/docs/release-notes/0.21", + "icon": "FileCode", + "content": "Install Upgrading There is one breaking change in this release, but we think it is unlikely to affect anyone since the results were wrong already – the change just makes the error explicit. 
See hello-zero for an example of using arrays and the new features. Features New \"ztunes\" sample using TanStack, Drizzle, Better Auth, and Fly.io (docs). Add initial support for Postgres arrays (docs, bug). Improved React lifecycle management with (docs, PR). Expose instances automatically at (docs, PR). Add to (PR). Technically a bug fix, but this was so annoying I'm calling it a feature: now correctly supports the up/down arrow keys (commit). Another super annoying fix: logs from are now level-colored (PR). Fixes Lazy-load otel. This was causing problems with (PR). Initial replication is now memory-bounded (PR). Change the way otel starts up in to not rely on (PR). Use existing as the threshold for rather than hardcoded 200ms. Fix race condition starting up in multinode deployments (PR). Avoid site-local IPv6 addresses in auto-discovery (PR). Many z2s fixes found by fuzzing (PRs: 4415, 4416, 4417, 4421, 4422, 4423). Don't load prettier in . This was causing problems when prettier config was cjs. (PR). Don't hydrate system relationships in . This was causing incorrect results. (PR). Fix memory leaks from not cleaning up and (PR). Fix handling of invalid websocket requests that were crashing server. (PR). Remove red error text when missing (PR). Allow to startup without schema file, but print a warning (PR). Log a warning when auth token exceeds max allowed header size (PR). Breaking Changes Using and in many-to-many relationships now throws an error. It didn't work before but did the wrong thing silently. Now it throws a runtime error. See docs, bug.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrading", + "id": "upgrading" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "37-release-notes/0.3", + "title": "Zero 0.3", + "url": "/docs/release-notes/0.3", + "icon": "FileCode", + "content": "Install Breaking changes zero.config file is now TypeScript, not JSON. See: https://github.com/rocicorp/hello-zero/blob/07c08b1f86b526a96e281ee65af672f52a59bcee/zero.config.ts. Features Schema Migrations: Zero now has first-class support for schema migration (documentation). Write Permissions: First-class write permissions based on ZQL (documentation). Date/Time related types: Zero now natively supports the TIMESTAMP and DATE Postgres types (sample app, documentation). SolidJS: We now have first-class support for SolidJS (documentation). Intellisense for Schema Definition: Introduce and helper functions to enable intellisense when defining shemas. See Sample App. : Add helper to properly escape strings for use in filters. See Sample App. New QuickStart App: Entirely rewrote the setup/sample flow to (a) make it much faster to get started playing with Zero, and (b) demonstrate more features. Fixes The package now downloads a prebuilt sqlite instead of compiling it locally. This significantly speeds up install. Support RDS configuration. Fixed bug where sibling subqueries could be lost on edit changes. Fixes to error handling to ensure zero-cache prints errors when crashing in multiprocess mode. If zero-cache hears from a client with an unknown CVR/cookie, zero-cache forces that client to reset itself and reload automatically. Useful during development when server-state is frequently getting cleared. Docs Started work to make real docs. Not quite done yet. 
zbugs https://bugs.rocicorp.dev/ (pw: zql) Improve startup perf: ~3s → ~1.5s Hawaii ↔ US East. More work to do here but good progress. Responsive design for mobile. “Short IDs”: Bugs now have a short numeric ID, not a random hash. See Demo Video. First-class label picker. Unread indicators. Finish j/k support for paging through issues. It’s now “search-aware”, it pages through issues in order of search you clicked through to detail page in. Text search (slash to activate — needs better discoverability) Emojis on issues and comments Sort controls on list view remove fps meter temporarily numerous other UI polish", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Breaking changes", + "id": "breaking-changes" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Docs", + "id": "docs" + }, + { + "text": "zbugs", + "id": "zbugs" + } + ] + }, + { + "id": "38-release-notes/0.4", + "title": "Zero 0.4", + "url": "/docs/release-notes/0.4", + "icon": "FileCode", + "content": "Install Breaking changes The changes modified the client/server protocol. You’ll need to restart zero-cache and clear browser data after updating. Added , , and to ZQL (documentation). Added method (documentation). Fixes Use method in zero-solid to improve performance when multiple updates happen in same frame. To take advantage of this you must use the helper from , instead of instantiating Zero directly. See the solid sample app. Postgres tables that were reserved words in SQLite but not Postgres caused crash during replication. was not matching correctly in the case of multiline subjects. Upstream database and zero database can now be same Postgres db (don’t need separate ports). Docs nothing notable zbugs Use to run text search over both titles and bodies prevent j/k in emoji preload emojis", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Breaking changes", + "id": "breaking-changes" + }, + { + "text": "Added , , and to ZQL ().", + "id": "added-and-to-zql" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Docs", + "id": "docs" + }, + { + "text": "zbugs", + "id": "zbugs" + } + ] + }, + { + "id": "39-release-notes/0.5", + "title": "Zero 0.5", + "url": "/docs/release-notes/0.5", + "icon": "FileCode", + "content": "Install Breaking changes and moved to subpackage. This is in preparation to moving authorization into the schema file. helper type was renamed and moved into . Basically: Features Added support for JSON columns in Postgres (documentation). Zero pacakage now includes , which can be used to explore our sqlite files (documentation). Fixes We were not correctly replicating the type, despite documenting that we were. Docs nothing notable zbugs nothing notable", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Breaking changes", + "id": "breaking-changes" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Docs", + "id": "docs" + }, + { + "text": "zbugs", + "id": "zbugs" + } + ] + }, + { + "id": "40-release-notes/0.6", + "title": "Zero 0.6", + "url": "/docs/release-notes/0.6", + "icon": "FileCode", + "content": "Install Upgrade Guide This release is a bit harder to upgrade to than previous alphas. 
For a step-by-step guide, please refer to the commits that upgrade the React and Solid quickstart apps: Upgrading hello-zero from Zero 0.5 to 0.6 Upgrading hello-zero-solid from Zero 0.5 to 0.6 Breaking Changes Totally new configuration system. is no more – config is now via env vars (documentation). Permissions rules moved into schema (documentation). Renamed CRUD mutators to be consistent with SQL naming (bug, documentation). Removed from ZQL. It wasn’t doing anything (documentation) Moved batch mutation to its own method. Before the field also doubled as a method. This made intellisense hard to understand since had all the tables as fields but also all the fields of a function. Features Relationship filters. Queries can now include (bug, documentation). Reworked syntax for compound filters, including ergonomically building expressions with dynamic number of clauses (bug, documentation). Support using Postgres databases without superuser access for smaller apps (documentation). Support for running client under Cloudflare Durable Objects (documentation). Reworked support for / to properly support optional fields (bug, documentation). Added / to ZQL to support checking for null (bug, documentation). Improved intellisense for mutators. Added flag and environment variable (bug, documentation). Default max connections of zero-cache more conservatively so that it should fit with even common small Postgres configurations. now accepts requests with any base path, not just . The parameter to the client constructor can now be a host () or a host with a single path component (). These two changes together allow hosting on same domain with an app that already uses the prefix (bug). Allow Postgres columns with default values, but don’t sync them (documentation). The utility now accepts all the same flags and arguments that does (documentation). zbugs Added tooltip describing who submitted which emoji reactions Updated implementation of label, assignee, and owner filters to use relationship filters Updated text filter implementation to use to search description and comments too Docs Added new ZQL reference Added new mutators reference Added new config reference", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Upgrade Guide", + "id": "upgrade-guide" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "zbugs", + "id": "zbugs" + }, + { + "text": "Docs", + "id": "docs" + } + ] + }, + { + "id": "41-release-notes/0.7", + "title": "Zero 0.7", + "url": "/docs/release-notes/0.7", + "icon": "FileCode", + "content": "Install Features Read permissions. You can now control read access to data using ZQL (docs). Deployment. We now have a single-node Docker container (docs). Future work will add multinode support. Compound FKs. Zero already supported compound primary keys, but now it also supports compound foreign keys (docs). Schema DX: Columns types can use bare strings now if is not needed (example). PK can be a single string in the common case where it’s non-compound (example). Breaking Changes Several changes to . See update to for overview. Details: was renamed to to avoid confusion with authentication. The way that many:many relationships are defined has changed to be more general and easy to remember. See example. The signature of and the related rule functions have changed: Now rules return an expression instead of full query. 
This was required to make read permissions work and we did it for write permissions for consitency (see example). The policy now has two child policies: and . The rules we used to have were . They run before a change and can be used to validate a user has permission to change a row. The rules run after and can be used to limit the changes a user is allowed to make. The file should export an object having two fields: and . The way that is consumed has also changed. Rather than directly reading the typescript source, we compile it to JSON and read that. should now point to a JSON file, not . It defaults to which we’ve found to be pretty useful so you’ll probably just remove this key from your entirely. Use to generate the JSON. You must currently do this manually each time you change the schema, we will automate it soon. zbugs Comments now have permalinks. Implementing permalinks in a synced SPA is fun! Private issues. Zbugs now supports private (to team only) issues. I wonder what’s in them … 👀. Docs The docs have moved. Please don’t use Notion anymore, they won’t be updated.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + }, + { + "text": "zbugs", + "id": "zbugs" + }, + { + "text": "Docs", + "id": "docs" + } + ] + }, + { + "id": "42-release-notes/0.8", + "title": "Zero 0.8", + "url": "/docs/release-notes/0.8", + "icon": "FileCode", + "content": "Install See the changes to hello-zero or hello-zero-solid for example updates. Features Schema Autobuild. There's now a script that automatically rebuilds the schema and restarts on changes to . (docs) Result Type. You can now tell whether a query is complete or partial. (docs) Enums. Enums are now supported in Postgres schemas and on client. (docs) Custom Types. You can define custom JSON types in your schema. (docs) OTEL Tracing. Initial tracing support. (docs) timestampz. Add support for Postgres column type. (docs) SSLMode. You can disable TLS when connects to DB with . (docs) Permission Helpers. and helpers were added to make these cases more readable. (docs) Multitenant Support. A single can now front separate Postgres databases. This is useful for customers that have one \"dev\" database in production per-developer. (docs) Fixes Crash with JSON Columns. Fixed a crash when a JSON column was used in a Zero app with write permissions (bug) Better Connection Error Reporting. Some connection errors would cause to exit silently. Now they are returned to client and logged. Breaking Changes in React now returns a 2-tuple of where is an object with a field. in write permissions for renamed to for consistency. renamed to to not be so silly long.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "43-release-notes/0.9", + "title": "Zero 0.9", + "url": "/docs/release-notes/0.9", + "icon": "FileCode", + "content": "Install See the changes to hello-zero or hello-zero-solid for example updates. Features JWK Support. For auth, you can now specify a JWK containing a public key, or a JWKS url to support autodiscovery of keys. (docs) UUID column. Zero now supports the Postgres column type. (docs) Fixes Readonly Values. Type of values returned from Zero queries are marked . 
The system always considered them readonly, but now the types reflect that. (docs) Breaking Changes The config has been renamed to for consistency with the new JWK-related keys. If you were using the old name, you'll need to update your file. All values returned by Zero are now . You'll probably have to add this TS modifier various places. If you find yourself casting away you probably should be cloing the value instead.", + "headings": [ + { + "text": "Install", + "id": "install" + }, + { + "text": "Features", + "id": "features" + }, + { + "text": "Fixes", + "id": "fixes" + }, + { + "text": "Breaking Changes", + "id": "breaking-changes" + } + ] + }, + { + "id": "44-release-notes/index", + "title": "Release Notes", + "url": "/docs/release-notes/index", + "icon": "FileCode", + "content": "Zero 0.21: PG arrays, TanStack starter, and more Zero 0.20: Full Supabase support, performance improvements Zero 0.19: Many, many bugfixes and cleanups Zero 0.18: Custom Mutators Zero 0.17: Background Queries Zero 0.16: Lambda-Based Permission Deployment Zero 0.15: Live Permission Updates Zero 0.14: Name Mapping and Multischema Zero 0.13: Multinode and SST Zero 0.12: Circular Relationships Zero 0.11: Windows Zero 0.10: Remove Top-Level Await Zero 0.9: JWK Support Zero 0.8: Schema Autobuild, Result Types, and Enums Zero 0.7: Read Perms and Docker Zero 0.6: Relationship Filters Zero 0.5: JSON Columns Zero 0.4: Compound Filters Zero 0.3: Schema Migrations and Write Perms Zero 0.2: Skip Mode and Computed PKs Zero 0.1: First Release", + "headings": [] + }, + { + "id": "45-reporting-bugs", + "title": "Reporting Bugs", + "url": "/docs/reporting-bugs", + "icon": "BadgeAlert", + "content": "zbugs You can use zbugs! (password: ) Our own bug tracker built from the ground up on Zero. Discord Alternately just pinging us on Discord is great too.", + "headings": [ + { + "text": "zbugs", + "id": "zbugs" + }, + { + "text": "Discord", + "id": "discord" + } + ] + }, + { + "id": "46-roadmap", + "title": "Roadmap", + "url": "/docs/roadmap", + "icon": "Map", + "content": "Alpha (EOY ‘24) ~~Schema migration~~ ~~Write permissions~~ ~~Solid support~~ ~~Replica sqlite files not browsable with standard sqlite3 program~~ ~~Relationship filters - currently you can put relationships in the ‘select’ part of the query, but not the ‘where’ part. Relationship filters commonly needed to, i.e., find all issues with particular label.~~ ~~Multi-column primary keys~~ ~~Read permissions~~ ~~Docs for easily deploying Zero on your own AWS or Fly.io account~~ ~~Up to 20MB client-side and 1GB server-side per-replica~~ Beta (Q2 ‘25) ~~Custom mutators~~ Cell-level read permissions (already exist for write) First-class support for React Native ~~Ability to wait for authoritative results~~ Aggregations (count, sum, min, max, group-by, etc) Consistency. See: Consistency. 
This will also improve startup perf since apps won’t have to be so conservative in what they preload: ~~Cache size management: evict things from client-side cache to stay under size~~ Reduce zero client bundle size to < 40KB Up to 20 MB client-side and 100 GB server-side per-replica GA Vector-based text search Extensive testing using randomized query generation and dst External audit of design and impl Ability to lock queries down to only expected forms for security Additional databases beside Postgres SaaS", + "headings": [ + { + "text": "Alpha (EOY ‘24)", + "id": "alpha-eoy-24" + }, + { + "text": "Beta (Q2 ‘25)", + "id": "beta-q2-25" + }, + { + "text": "GA", + "id": "ga" + } + ] + }, + { + "id": "47-samples", + "title": "Samples", + "url": "/docs/samples", + "icon": "SwatchBook", + "content": "zbugs A complete Linear-style bug tracker. Not just a demo app, this is our actual live bug db. We use it every day and depend on it. Demo: https://bugs.rocicorp.dev/ Stack: Vite/Fastify/React/AWS Source: https://github.com/rocicorp/mono/tree/main/apps/zbugs Features: Instant reads and writes, realtime updates, Github auth, write permissions, read permissions, custom mutators, complex filters, unread indicators, basic text search, emojis, short numeric bug IDs, notifications, and more. ztunes An ecommerce store built with Zero, TanStack, Drizzle, and PlanetScale for Postgres. Demo: https://ztunes.rocicorp.dev/ Stack: TanStack/Drizzle/Better Auth/Fly.io Source: https://github.com/rocicorp/ztunes Features: 88k artists, 200k albums, single-command dev, full drizzle integration, text search, read permissions, write permissions. hello-zero Simple quickstart for Zero/React. Stack: Vite/Hono/React Source: https://github.com/rocicorp/hello-zero Docs: Quickstart Features: Instant reads and writes, realtime updates. hello-zero-solid Simple quickstart for Zero/SolidJS. Stack: Vite/Hono/SolidJS Source: https://github.com/rocicorp/hello-zero-solid Features: Instant reads and writes, realtime updates, custom mutators. hello-zero-do Shows how to use the Zero client from a Cloudflare Durable Objects. This sample runs within a Durable Object and monitors changes to a Zero query. This can be used to do things like send notifications, update external services, etc. Stack: Vite/Hono/React/Cloudflare Workers Source: https://github.com/rocicorp/hello-zero-do", + "headings": [ + { + "text": "zbugs", + "id": "zbugs" + }, + { + "text": "ztunes", + "id": "ztunes" + }, + { + "text": "hello-zero", + "id": "hello-zero" + }, + { + "text": "hello-zero-solid", + "id": "hello-zero-solid" + }, + { + "text": "hello-zero-do", + "id": "hello-zero-do" + } + ] + }, + { + "id": "48-solidjs", + "title": "SolidJS", + "url": "/docs/solidjs", + "icon": "SolidJS", + "content": "Zero has built-in support for Solid. Here’s what basic usage looks like: Complete quickstart here: https://github.com/rocicorp/hello-zero-solid", + "headings": [] + }, + { + "id": "49-writing-data", + "title": "Writing Data with Mutators", + "url": "/docs/writing-data", + "icon": "ArrowUp", + "content": "Zero generates basic CRUD mutators for every table you sync. Mutators are available at : Insert Create new records with : Optional fields can be set to to explicitly set the new field to . They can also be set to to take the default value (which is often but can also be some generated value server-side). Upsert Create new records or update existing ones with : supports the same / semantics for optional fields that does (see above). Update Update an existing record. 
Does nothing if the specified record (by PK) does not exist. You can pass a partial, leaving fields out that you don’t want to change. For example here we leave the username the same: Delete Delete an existing record. Does nothing if specified record does not exist. You can read more about it in Authentication. Batch Mutate You can do multiple CRUD mutates in a single batch. If any of the mutations fails, all will. They also all appear together atomically in a single transaction to other clients.", + "headings": [ + { + "text": "Insert", + "id": "insert" + }, + { + "text": "Upsert", + "id": "upsert" + }, + { + "text": "Update", + "id": "update" + }, + { + "text": "Delete", + "id": "delete" + }, + { + "text": "Batch Mutate", + "id": "batch-mutate" + } + ] + }, + { + "id": "50-zero-cache-config", + "title": "zero-cache Config", + "url": "/docs/zero-cache-config", + "icon": "Cog", + "content": "is configured either via CLI flag or environment variable. There is no separate file. You can also see all available flags by running . Required Flags Auth One of Auth JWK, Auth JWK URL, or Auth Secret must be specified. See Authentication for more details. Replica File File path to the SQLite replica that zero-cache maintains. This can be lost, but if it is, zero-cache will have to re-replicate next time it starts up. flag: env: required: Upstream DB The \"upstream\" authoritative postgres database. In the future we will support other types of upstream besides PG. flag: env: required: Optional Flags Admin Password A password used to administer zero-cache server, for example to access the endpoint. flag: env: required: App ID Unique identifier for the app. Multiple zero-cache apps can run on a single upstream database, each of which is isolated from the others, with its own permissions, sharding (future feature), and change/cvr databases. The metadata of an app is stored in an upstream schema with the same name, e.g. , and the metadata for each app shard, e.g. client and mutation ids, is stored in the schema. (Currently there is only a single \"0\" shard, but this will change with sharding). The CVR and Change data are managed in schemas named and , respectively, allowing multiple apps and shards to share the same database instance (e.g. a Postgres \"cluster\") for CVR and Change management. Due to constraints on replication slot names, an App ID may only consist of lower-case letters, numbers, and the underscore character. Note that this option is used by both and . flag: env: default: App Publications Postgres PUBLICATIONs that define the tables and columns to replicate. Publication names may not begin with an underscore, as zero reserves that prefix for internal use. If unspecified, zero-cache will create and use an internal publication that publishes all tables in the public schema, i.e.: Note that once an app has begun syncing data, this list of publications cannot be changed, and zero-cache will refuse to start if a specified value differs from what was originally synced. To use a different set of publications, a new app should be created. flag: env: default: Auth JWK A public key in JWK format used to verify JWTs. Only one of jwk, jwksUrl and secret may be set. flag: env: required: Auth JWK URL A URL that returns a JWK set used to verify JWTs. Only one of jwk, jwksUrl and secret may be set. flag: env: required: Auto Reset Automatically wipe and resync the replica when replication is halted. 
This situation can occur for configurations in which the upstream database provider prohibits event trigger creation, preventing the zero-cache from being able to correctly replicate schema changes. For such configurations, an upstream schema change will instead result in halting replication with an error indicating that the replica needs to be reset. When auto-reset is enabled, zero-cache will respond to such situations by shutting down, and when restarted, resetting the replica and all synced clients. This is a heavy-weight operation and can result in user-visible slowness or downtime if compute resources are scarce. flag: env: default: Auth Secret A symmetric key used to verify JWTs. Only one of jwk, jwksUrl and secret may be set. flag: env: required: Change DB The Postgres database used to store recent replication log entries, in order to sync multiple view-syncers without requiring multiple replication slots on the upstream database. If unspecified, the upstream-db will be used. flag: env: required: Change Max Connections The maximum number of connections to open to the change database. This is used by the change-streamer for catching up zero-cache replication subscriptions. flag: env: default: Change Streamer Mode The mode for running or connecting to the change-streamer: : runs the change-streamer and shuts down when another change-streamer takes over the replication slot. This is appropriate in a single-node configuration, or for the replication-manager in a multi-node configuration. : connects to the change-streamer as internally advertised in the change-db. This is appropriate for the view-syncers in a multi-node flag: env: default: Change Streamer Port The port on which the change-streamer runs. This is an internal protocol between the replication-manager and zero-cache, which runs in the same process in local development. If unspecified, defaults to --port + 1. flag: env: required: CVR DB The Postgres database used to store CVRs. CVRs (client view records) keep track of the data synced to clients in order to determine the diff to send on reconnect. If unspecified, the upstream-db will be used. flag: env: required: CVR Max Connections The maximum number of connections to open to the CVR database. This is divided evenly amongst sync workers. Note that this number must allow for at least one connection per sync worker, or zero-cache will fail to start. See num-sync-workers. flag: env: default: Initial Sync Table Copy Workers The number of parallel workers used to copy tables during initial sync. Each worker copies a single table at a time, fetching rows in batches of . flag: env: default: Lazy Startup Delay starting the majority of zero-cache until first request. This is mainly intended to avoid connecting to Postgres replication stream until the first request is received, which can be useful i.e., for preview instances. Currently only supported in single-node mode. flag: env: default: Litestream Executable Path to the litestream executable. This option has no effect if litestream-backup-url is unspecified. flag: env: required: Litestream Config Path Path to the litestream yaml config file. 
zero-cache will run this with its environment variables, which can be referenced in the file via substitution, for example: ZERO_REPLICA_FILE for the db Path ZERO_LITESTREAM_BACKUP_LOCATION for the db replica url ZERO_LITESTREAM_LOG_LEVEL for the log Level ZERO_LOG_FORMAT for the log type flag: env: default: Litestream Log Level flag: env: default: values: , , , Litestream Backup URL The location of the litestream backup, usually an s3:// URL. This is only consulted by the replication-manager. view-syncers receive this information from the replication-manager. flag: env: required: Litestream Checkpoint Threshold MB The size of the WAL file at which to perform an SQlite checkpoint to apply the writes in the WAL to the main database file. Each checkpoint creates a new WAL segment file that will be backed up by litestream. Smaller thresholds may improve read performance, at the expense of creating more files to download when restoring the replica from the backup. flag: env: default: Litestream Incremental Backup Interval Minutes The interval between incremental backups of the replica. Shorter intervals reduce the amount of change history that needs to be replayed when catching up a new view-syncer, at the expense of increasing the number of files needed to download for the initial litestream restore. flag: env: default: Litestream Snapshot Backup Interval Hours The interval between snapshot backups of the replica. Snapshot backups make a full copy of the database to a new litestream generation. This improves restore time at the expense of bandwidth. Applications with a large database and low write rate can increase this interval to reduce network usage for backups (litestream defaults to 24 hours). flag: env: default: Litestream Restore Parallelism The number of WAL files to download in parallel when performing the initial restore of the replica from the backup. flag: env: default: Log Format Use text for developer-friendly console logging and json for consumption by structured-logging services. flag: env: default: values: , Log IVM Sampling How often to collect IVM metrics. 1 out of N requests will be sampled where N is this value. flag: env: default: Log Level Sets the logging level for the application. flag: env: default: values: , , , Log Slow Hydrate Threshold The number of milliseconds a query hydration must take to print a slow warning. flag: env: default: Log Slow Row Threshold The number of ms a row must take to fetch from table-source before it is considered slow. flag: env: default: Log Trace Collector The URL of the trace collector to which to send trace data. Traces are sent over http. Port defaults to 4318 for most collectors. flag: env: required: Number of Sync Workers The number of processes to use for view syncing. Leave this unset to use the maximum available parallelism. If set to 0, the server runs without sync workers, which is the configuration for running the replication-manager. flag: env: required: Per User Mutation Limit Max The maximum mutations per user within the specified windowMs. flag: env: required: Per User Mutation Limit Window (ms) The sliding window over which the perUserMutationLimitMax is enforced. flag: env: default: Port The port for sync connections. flag: env: default: Push URL The URL of the API server to which zero-cache will push mutations. Required if you use custom mutators. flag: env: required: Query Hydration Stats Track and log the number of rows considered by query hydrations which take longer than log-slow-hydrate-threshold milliseconds. 
This is useful for debugging and performance tuning. flag: env: required: Replica Vacuum Interval Hours Performs a VACUUM at server startup if the specified number of hours has elapsed since the last VACUUM (or initial-sync). The VACUUM operation is heavyweight and requires double the size of the db in disk space. If unspecified, VACUUM operations are not performed. flag: env: required: Server Version The version string outputted to logs when the server starts up. flag: env: required: Storage DB Temp Dir Temporary directory for IVM operator storage. Leave unset to use . flag: env: required: Target Client Row Count A soft limit on the number of rows Zero will keep on the client. 20k is a good default value for most applications, and we do not recommend exceeding 100k. See Client Capacity Management for more details. flag: env: default: Task ID Globally unique identifier for the zero-cache instance. Setting this to a platform specific task identifier can be useful for debugging. If unspecified, zero-cache will attempt to extract the TaskARN if run from within an AWS ECS container, and otherwise use a random string. flag: env: required: Upstream Max Connections The maximum number of connections to open to the upstream database for committing mutations. This is divided evenly amongst sync workers. In addition to this number, zero-cache uses one connection for the replication stream. Note that this number must allow for at least one connection per sync worker, or zero-cache will fail to start. See num-sync-workers. flag: env: default:", + "headings": [ + { + "text": "Required Flags", + "id": "required-flags" + }, + { + "text": "Auth", + "id": "auth" + }, + { + "text": "Replica File", + "id": "replica-file" + }, + { + "text": "Upstream DB", + "id": "upstream-db" + }, + { + "text": "Optional Flags", + "id": "optional-flags" + }, + { + "text": "Admin Password", + "id": "admin-password" + }, + { + "text": "App ID", + "id": "app-id" + }, + { + "text": "App Publications", + "id": "app-publications" + }, + { + "text": "Auth JWK", + "id": "auth-jwk" + }, + { + "text": "Auth JWK URL", + "id": "auth-jwk-url" + }, + { + "text": "Auto Reset", + "id": "auto-reset" + }, + { + "text": "Auth Secret", + "id": "auth-secret" + }, + { + "text": "Change DB", + "id": "change-db" + }, + { + "text": "Change Max Connections", + "id": "change-max-connections" + }, + { + "text": "Change Streamer Mode", + "id": "change-streamer-mode" + }, + { + "text": "Change Streamer Port", + "id": "change-streamer-port" + }, + { + "text": "CVR DB", + "id": "cvr-db" + }, + { + "text": "CVR Max Connections", + "id": "cvr-max-connections" + }, + { + "text": "Initial Sync Table Copy Workers", + "id": "initial-sync-table-copy-workers" + }, + { + "text": "Lazy Startup", + "id": "lazy-startup" + }, + { + "text": "Litestream Executable", + "id": "litestream-executable" + }, + { + "text": "Litestream Config Path", + "id": "litestream-config-path" + }, + { + "text": "Litestream Log Level", + "id": "litestream-log-level" + }, + { + "text": "Litestream Backup URL", + "id": "litestream-backup-url" + }, + { + "text": "Litestream Checkpoint Threshold MB", + "id": "litestream-checkpoint-threshold-mb" + }, + { + "text": "Litestream Incremental Backup Interval Minutes", + "id": "litestream-incremental-backup-interval-minutes" + }, + { + "text": "Litestream Snapshot Backup Interval Hours", + "id": "litestream-snapshot-backup-interval-hours" + }, + { + "text": "Litestream Restore Parallelism", + "id": "litestream-restore-parallelism" + }, + { + "text": 
"Log Format", + "id": "log-format" + }, + { + "text": "Log IVM Sampling", + "id": "log-ivm-sampling" + }, + { + "text": "Log Level", + "id": "log-level" + }, + { + "text": "Log Slow Hydrate Threshold", + "id": "log-slow-hydrate-threshold" + }, + { + "text": "Log Slow Row Threshold", + "id": "log-slow-row-threshold" + }, + { + "text": "Log Trace Collector", + "id": "log-trace-collector" + }, + { + "text": "Number of Sync Workers", + "id": "number-of-sync-workers" + }, + { + "text": "Per User Mutation Limit Max", + "id": "per-user-mutation-limit-max" + }, + { + "text": "Per User Mutation Limit Window (ms)", + "id": "per-user-mutation-limit-window-ms" + }, + { + "text": "Port", + "id": "port" + }, + { + "text": "Push URL", + "id": "push-url" + }, + { + "text": "Query Hydration Stats", + "id": "query-hydration-stats" + }, + { + "text": "Replica Vacuum Interval Hours", + "id": "replica-vacuum-interval-hours" + }, + { + "text": "Server Version", + "id": "server-version" + }, + { + "text": "Storage DB Temp Dir", + "id": "storage-db-temp-dir" + }, + { + "text": "Target Client Row Count", + "id": "target-client-row-count" + }, + { + "text": "Task ID", + "id": "task-id" + }, + { + "text": "Upstream Max Connections", + "id": "upstream-max-connections" + } + ] + }, + { + "id": "51-zero-schema", + "title": "Zero Schema", + "url": "/docs/zero-schema", + "icon": "Blocks", + "content": "Zero applications have both a database schema (the normal backend database schema that all web apps have) and a Zero schema. The purpose of the Zero schema is to: Provide typesafety for ZQL queries Define first-class relationships between tables Define permissions for access control & { absolute?: boolean; activeClassName?: string; + hoverClassName?: string; disabled?: boolean; }; @@ -31,7 +32,14 @@ export default function Anchor({
{children}
); return ( - + {children} ); diff --git a/components/docs-menu.tsx b/components/docs-menu.tsx index 46353a97..29d3dd97 100644 --- a/components/docs-menu.tsx +++ b/components/docs-menu.tsx @@ -1,25 +1,34 @@ import {ROUTES} from '@/lib/routes-config'; -import SubLink from './sublink'; import Link from 'next/link'; -import {ModeToggle} from './theme-toggle'; -import GithubLogo from './logos/Github'; -import DiscordLogo from './logos/Discord'; import BlueskyLogo from './logos/Bluesky'; +import DiscordLogo from './logos/Discord'; +import GithubLogo from './logos/Github'; import TwitterLogo from './logos/Twitter'; +import SubLink from './sublink'; +import {ModeToggle} from './theme-toggle'; + export default function DocsMenu({isSheet = false}) { return ( -
+
{ROUTES.map((item, index) => { const modifiedItems = { ...item, - href: `/docs${item.href}`, + href: `/docs${item.href ?? ''}`, level: 0, isSheet, + noLink: item.href === null, }; - return ; + return ( +
+ + {index < ROUTES.length - 1 && ( +
+ )} +
+ ); })} -
+

Made by{' '} diff --git a/components/navbar.tsx b/components/navbar.tsx index ce3c9c18..21938d10 100644 --- a/components/navbar.tsx +++ b/components/navbar.tsx @@ -1,15 +1,16 @@ import Link from 'next/link'; import {SheetLeftbar} from './leftbar'; -import Search from './search'; import {page_routes} from '@/lib/routes-config'; import ZeroAlphaLogo from './logos/ZeroAlpha'; +import dynamic from 'next/dynamic'; -export const NAVLINKS = [ - { - title: 'Docs', - href: `/docs${page_routes[0].href}`, - }, -]; +const Search = dynamic(() => import('./search'), { + loading: () => ( +

+ ), +}); + +export const NAVLINKS = [{title: 'Docs', href: `/docs${page_routes[0].href}`}]; export function Navbar() { return ( diff --git a/components/search.tsx b/components/search.tsx index ff7f642b..f92a518d 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -2,17 +2,29 @@ import { Dialog, - DialogClose, DialogContent, + DialogDescription, DialogHeader, DialogTitle, DialogTrigger, } from '@/components/ui/dialog'; import {Input} from '@/components/ui/input'; -import {ScrollArea} from '@/components/ui/scroll-area'; -import {CommandIcon, FileIcon, SearchIcon} from 'lucide-react'; +import {IconKey, icons} from '@/lib/icons'; +import {cn} from '@/lib/utils'; +import searchIndex from '@/assets/search-index.json'; +import {CommandIcon, SearchIcon} from 'lucide-react'; import lunr from 'lunr'; -import React, {useEffect, useRef, useState} from 'react'; +import {useRouter} from 'next/navigation'; +import React, {useEffect, useState} from 'react'; +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList, +} from './ui/command'; + const Anchor = React.forwardRef>( ({children, ...props}, ref) => { return ( @@ -30,10 +42,8 @@ interface SearchDocument { title: string; content: string; url: string; - headings: { - text: string; - id: string; - }[]; + icon: IconKey; + headings: {text: string; id: string}[]; } interface SearchResult extends SearchDocument { @@ -64,44 +74,37 @@ function extractSnippet( return `...${snippet}...`; } -// Store Lunr index & search data globally (to avoid rebuilding on each render) -let lunrIndex: lunr.Index | null = null; -let searchDocs: SearchDocument[] = []; +const searchDocs = Array.isArray(searchIndex) + ? (searchIndex as SearchDocument[]) + : []; export default function Search() { const [searchedInput, setSearchedInput] = useState(''); const [isOpen, setIsOpen] = useState(false); const [searchResults, setSearchResults] = useState([]); - const [highlightIndex, setHighlightIndex] = useState(0); - const resultRefs = useRef<(HTMLAnchorElement | null)[]>([]); - const scrollContainerRef = useRef(null); + const [lunrIndex, setLunrIndex] = useState(null); + + const router = useRouter(); // Load search index on component mount useEffect(() => { - async function loadSearchIndex() { - const response = await fetch('/search-index.json'); - searchDocs = await response.json(); - - // Create Lunr index - lunrIndex = lunr(b => { - b.ref('id'); - b.field('title', {boost: 10}); // Prioritize document title - b.field('content'); - b.field('headings', {boost: 9}); // Prioritize headings - - for (const doc of searchDocs) { - b.add({ - ...doc, - title: doc.title?.toLowerCase() ?? '', - headings: doc.headings.map(h => h.text.toLowerCase()).join(' '), // Convert headings to searchable string - }); - } - }); - } + // Create Lunr index + const newLunrIndex = lunr(b => { + b.ref('id'); + b.field('title', {boost: 10}); // Prioritize document title + b.field('content'); + b.field('headings', {boost: 9}); // Prioritize headings + + for (const doc of searchDocs) { + b.add({ + ...doc, + title: doc.title?.toLowerCase() ?? '', + headings: doc.headings.map(h => h.text.toLowerCase()).join(' '), // Convert headings to searchable string + }); + } + }); - if (!lunrIndex) { - loadSearchIndex(); - } + setLunrIndex(newLunrIndex); }, []); useEffect(() => { @@ -174,11 +177,7 @@ export default function Search() { validHeadings.find(h => h.index > snippetIndex) ?? 
doc.headings[0]; - return { - ...doc, - snippet, - snippetId: finalHeading?.id || '', - }; + return {...doc, snippet, snippetId: finalHeading?.id || ''}; }); // Add an extra result if the search term exactly matches a document title @@ -194,82 +193,38 @@ export default function Search() { }); } - setSearchResults(results.filter(Boolean) as SearchResult[]); + // Deduplicate results by ID + const uniqueResults = results + .filter(Boolean) + .reduce((acc, result) => { + if (!acc.has(result!.id)) { + acc.set(result!.id, result!); + } + return acc; + }, new Map()); + + setSearchResults(Array.from(uniqueResults.values())); } catch (error) { console.error('Lunr.js Query Error:', error); setSearchResults([]); } - }, 200); // Debounce time: 200ms + }, 50); return () => clearTimeout(delayDebounce); }, [searchedInput]); - // Reset highlight index on new search - useEffect(() => { - setHighlightIndex(0); - }, [searchedInput]); - - // Ensure the active search result scrolls into view when highlighted - useEffect(() => { - if ( - isOpen && - resultRefs.current[highlightIndex] && - scrollContainerRef.current - ) { - const selectedItem = resultRefs.current[highlightIndex]; - const container = scrollContainerRef.current; - - if (selectedItem && container) { - const itemTop = selectedItem.offsetTop; - const itemHeight = selectedItem.offsetHeight; - const containerScrollTop = container.scrollTop; - const containerHeight = container.clientHeight; - - // If the selected item is above the visible area, scroll up - if (itemTop < containerScrollTop) { - container.scrollTo({top: itemTop, behavior: 'smooth'}); - } - // If the selected item is below the visible area, scroll down - else if (itemTop + itemHeight > containerScrollTop + containerHeight) { - container.scrollTo({ - top: itemTop + itemHeight - containerHeight, - behavior: 'smooth', - }); - } - } - } - }, [highlightIndex, isOpen]); - - // Listen for CMD+K (Mac) or CTRL+K (Windows) and keyboard navigation when search dialog is open - useEffect(() => { - function handleKeyDown(event: KeyboardEvent) { - if (isOpen) { - if (event.key === 'ArrowDown') { - event.preventDefault(); - setHighlightIndex(prev => (prev + 1) % searchResults.length); - } else if (event.key === 'ArrowUp') { - event.preventDefault(); - setHighlightIndex(prev => - prev > 0 ? prev - 1 : searchResults.length - 1, - ); - } else if ( - event.key === 'Enter' && - searchResults[highlightIndex] != null - ) { - event.preventDefault(); - setIsOpen(false); // Close search dialog - window.location.href = `${searchResults[highlightIndex].url}#${searchResults[highlightIndex].snippetId || searchResults[highlightIndex].headings[0]?.id}`; - } + // Toggle the menu when ⌘K is pressed + React.useEffect(() => { + const down = (e: KeyboardEvent) => { + if (e.key === 'k' && (e.metaKey || e.ctrlKey)) { + e.preventDefault(); + setIsOpen(open => !open); } - if ((event.metaKey || event.ctrlKey) && event.key.toLowerCase() === 'k') { - event.preventDefault(); - setIsOpen(true); - } - } + }; - window.addEventListener('keydown', handleKeyDown); - return () => window.removeEventListener('keydown', handleKeyDown); - }, [isOpen, highlightIndex, searchResults]); + document.addEventListener('keydown', down); + return () => document.removeEventListener('keydown', down); + }, []); return ( -
+
setSearchedInput(e.target.value)} /> -
- +
+ K
- - Search - - + Search + + Search the contents of the documentation for Zero + + + + + setSearchedInput(e.target.value)} - placeholder="Type something to search..." - autoFocus - className="h-14 px-6 bg-transparent border-b text-[14px] outline-none" + onValueChange={setSearchedInput} + showCloseButton={true} /> - - - {searchResults.length === 0 && searchedInput && ( -

- No results found for{' '} - "{searchedInput}" -

- )} - - -
- {searchResults.map((item, index) => ( - - { - resultRefs.current[index] = el; - }} - className={`w-full ${index === highlightIndex ? 'bg-gray-200 dark:bg-gray-700 search-selected' : ''}`} - href={ - item.snippetId ? `${item.url}#${item.snippetId}` : item.url - } - onClick={() => setIsOpen(false)} // Close the search dialog - > -
- {' '} - {item.title} -
- {item.snippet && ( -

- )} - - - ))} -

-
+ + {searchedInput && No results found.} + {searchResults.length > 0 && ( + + {searchResults.map(item => { + const Icon = icons[item?.icon ?? 'FileCode']; + + return ( + { + setIsOpen(false); + setSearchedInput(''); + setSearchResults([]); + const url = item.snippetId + ? `${item.url}#${item.snippetId}` + : item.url; + router.push(url); + }} + className={cn('flex flex-col items-start gap-1 py-3')} + > +
+ + {item.title} +
+ {item.snippet && ( +

+ )} + + ); + })} + + )} + +

); diff --git a/components/sublink.tsx b/components/sublink.tsx index b0711e40..d479f0f1 100644 --- a/components/sublink.tsx +++ b/components/sublink.tsx @@ -1,31 +1,60 @@ 'use client'; -import {EachRoute} from '@/lib/routes-config'; -import Anchor from './anchor'; import { Collapsible, CollapsibleContent, CollapsibleTrigger, } from '@/components/ui/collapsible'; -import {cn} from '@/lib/utils'; import {SheetClose} from '@/components/ui/sheet'; +import {icons} from '@/lib/icons'; +import {EachRoute} from '@/lib/routes-config'; +import {cn} from '@/lib/utils'; +import {ChevronRight} from 'lucide-react'; +import {usePathname} from 'next/navigation'; +import {useEffect, useMemo, useState} from 'react'; +import Anchor from './anchor'; import {Button} from './ui/button'; -import {ChevronDown, ChevronRight} from 'lucide-react'; -import {useState} from 'react'; export default function SubLink({ title, href, items, - noLink, level, isSheet, new: isNew, -}: EachRoute & {level: number; isSheet: boolean}) { - const [isOpen, setIsOpen] = useState(level == 0); + defaultOpen, + icon, + noLink, +}: EachRoute & {level: number; isSheet: boolean; noLink: boolean}) { + const path = usePathname(); + const itemsIncludesPath = useMemo( + () => items?.some(item => item.href && path.endsWith(item.href)), + [items, path], + ); + + const [isOpen, setIsOpen] = useState( + itemsIncludesPath ? true : (defaultOpen ?? level == 0), + ); + + useEffect(() => { + if (itemsIncludesPath) { + setIsOpen(true); + } + }, [itemsIncludesPath]); + + const Icon = icon ? icons[icon] : null; const Comp = ( - + + {Icon && ( +
+ +
+ )} {title} {isNew && ( @@ -42,46 +71,53 @@ export default function SubLink({ Comp ) ) : ( -

{title}

+

+ {Icon && ( +
+ +
+ )} + {title} +

); if (!items) { - return
{titleOrLink}
; + return titleOrLink; } return ( -
+
-
- {titleOrLink} - - - -
+
+
0 && 'pl-4 border-l ml-1', )} > {items?.map(innerLink => { const modifiedItems = { ...innerLink, - href: `${href + innerLink.href}`, + href: `${href ?? ''}${innerLink.href ?? ''}`, level: level + 1, isSheet, + noLink: false, }; return ; })} diff --git a/components/ui/ActiveHashLink.tsx b/components/ui/ActiveHashLink.tsx index 8c53d494..dd1ed611 100644 --- a/components/ui/ActiveHashLink.tsx +++ b/components/ui/ActiveHashLink.tsx @@ -36,7 +36,11 @@ export const ActiveHashLink: React.FC = ({ {children} diff --git a/components/ui/collapsible.tsx b/components/ui/collapsible.tsx index cc6292ab..50ee4a84 100644 --- a/components/ui/collapsible.tsx +++ b/components/ui/collapsible.tsx @@ -1,11 +1,27 @@ 'use client'; +import * as React from 'react'; import * as CollapsiblePrimitive from '@radix-ui/react-collapsible'; +import {cn} from '@/lib/utils'; const Collapsible = CollapsiblePrimitive.Root; const CollapsibleTrigger = CollapsiblePrimitive.CollapsibleTrigger; -const CollapsibleContent = CollapsiblePrimitive.CollapsibleContent; +const CollapsibleContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({className, ...props}, ref) => ( + +)); +CollapsibleContent.displayName = + CollapsiblePrimitive.CollapsibleContent.displayName; export {Collapsible, CollapsibleTrigger, CollapsibleContent}; diff --git a/components/ui/command.tsx b/components/ui/command.tsx new file mode 100644 index 00000000..a0adff97 --- /dev/null +++ b/components/ui/command.tsx @@ -0,0 +1,159 @@ +'use client'; + +import {Command as CommandPrimitive} from 'cmdk'; +import {SearchIcon} from 'lucide-react'; +import * as React from 'react'; + +import {DialogClose} from '@/components/ui/dialog'; +import {cn} from '@/lib/utils'; + +function Command({ + className, + ...props +}: React.ComponentProps) { + return ( + + ); +} + +function CommandInput({ + className, + showCloseButton = false, + ...props +}: React.ComponentProps & { + showCloseButton?: boolean; +}) { + return ( +
+ + + {showCloseButton && ( + +
+ Esc +
+ Close +
+ )} +
+ ); +} + +function CommandList({ + className, + showBorder, + ...props +}: React.ComponentProps & { + showBorder?: boolean; +}) { + return ( + + ); +} + +function CommandEmpty({ + ...props +}: React.ComponentProps) { + return ( + + ); +} + +function CommandGroup({ + className, + ...props +}: React.ComponentProps) { + return ( + + ); +} + +function CommandSeparator({ + className, + ...props +}: React.ComponentProps) { + return ( + + ); +} + +function CommandItem({ + className, + ...props +}: React.ComponentProps) { + return ( + + ); +} + +function CommandShortcut({className, ...props}: React.ComponentProps<'span'>) { + return ( + + ); +} + +export { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList, + CommandSeparator, + CommandShortcut, +}; diff --git a/components/ui/dialog.tsx b/components/ui/dialog.tsx index ddf22279..89067bfd 100644 --- a/components/ui/dialog.tsx +++ b/components/ui/dialog.tsx @@ -20,7 +20,7 @@ const DialogOverlay = React.forwardRef< , - React.ComponentPropsWithoutRef ->(({className, children, ...props}, ref) => ( + React.ComponentPropsWithoutRef & { + showCloseButton?: boolean; + } +>(({className, children, showCloseButton = true, ...props}, ref) => ( {children} - - {/* */} -
- Esc -
- Close -
+ {showCloseButton && ( + +
+ Esc +
+ Close +
+ )}
)); diff --git a/contents/docs/auth.mdx b/contents/docs/auth.mdx index 10736e92..2ce8593d 100644 --- a/contents/docs/auth.mdx +++ b/contents/docs/auth.mdx @@ -77,8 +77,15 @@ const zero = new Zero({ Zero stores each user's data in a different IndexedDB instance. This allows users to quickly switch between multiple users and accounts without resyncing. - -All users that have access to a browser profile have access to the same IndexedDB instances. There is nothing that Zero can do about this – users can just open the folder where the data is stored and look inside it. + + All users that have access to a browser profile have access to the same + IndexedDB instances. There is nothing that Zero can do about this – users can + just open the folder where the data is stored and look inside it. If your application is unauthenticated, or if you don't need fast user switching, you can just set `userID` to a constant like `anon` or `guest`: @@ -99,18 +106,21 @@ const zero = new Zero({ storageKey: "my-app", }); ``` - + If specified, `storageKey` is concatenated along with `userID` and other internal Zero information to form a unique IndexedDB database name. - + ## Logging Out When a user logs out, you should consider what should happen to the synced data. - + If you do nothing, the synced data will be left on the device. The next login will be a little faster because Zero doesn't have to resync that dta from scratch. But also, the data will be left on the device indefinitely which could be undesirable for privacy and security. - If you instead want to clear data on logout, Zero provides the `dropAllDatabases` function: +If you instead want to clear data on logout, Zero provides the `dropAllDatabases` function: ```ts import {dropAllDatabases} from '@rocicorp/zero'; diff --git a/contents/docs/connecting-to-postgres.mdx b/contents/docs/connecting-to-postgres.mdx index fb183501..e8a57b29 100644 --- a/contents/docs/connecting-to-postgres.mdx +++ b/contents/docs/connecting-to-postgres.mdx @@ -6,19 +6,19 @@ In the future, Zero will work with many different backend databases. 
Today only Here are some common Postgres options and what we know about their support level: -| Postgres | Support Status | -| --------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| AWS RDS | ✅ | -| AWS Aurora | ✅  v15.6+ | -| Google Cloud SQL | ✅  See [notes below](#google-cloud-sql) | -| [Fly.io](http://Fly.io) Postgres | ✅  See [notes below](#flyio) | -| Neon | ✅  See [notes below](#neon) | -| Postgres.app | ✅ | -| postgres:16.2-alpine docker image | ✅ | -| Supabase | ✅  See [notes below](#supabase) | -| PlanetScale for Postgres | 🤷‍♂️  No [event triggers](#event-triggers), see [notes below](#planetscale-for-postgres) | -| Render | 🤷‍♂️  No [event triggers](#event-triggers) | -| Heroku | 🤷‍♂️  No [event triggers](#event-triggers) | +| Postgres | Support Status | +| --------------------------------- | ------------------------------------------------------------------------------------------------ | +| AWS RDS | ✅ | +| AWS Aurora | ✅  v15.6+ | +| Google Cloud SQL | ✅  See [notes below](#google-cloud-sql) | +| [Fly.io](http://Fly.io) Postgres | ✅  See [notes below](#flyio) | +| Neon | ✅  See [notes below](#neon) | +| Postgres.app | ✅ | +| postgres:16.2-alpine docker image | ✅ | +| Supabase | ✅  See [notes below](#supabase) | +| PlanetScale for Postgres | 🤷‍♂️  No [event triggers](#event-triggers), see [notes below](#planetscale-for-postgres) | +| Render | 🤷‍♂️  No [event triggers](#event-triggers) | +| Heroku | 🤷‍♂️  No [event triggers](#event-triggers) | ## Event Triggers @@ -110,6 +110,6 @@ Because Zero keeps an open connection to Postgres to replicate changes, as long For production databases that have enough usage to always be running anyway, this is fine. But for smaller applications that would otherwise not always be running, this can create a surprisingly high bill. You may want to choose a provider that charge a flat monthly rate instead. -Also some users choose Neon because they hope to use branching for previews. Note that Zero doesn't support this usage model well yet, and if not done with care, Zero can end up keeping each Neon *preview* branch running too 😳. +Also some users choose Neon because they hope to use branching for previews. Note that Zero doesn't support this usage model well yet, and if not done with care, Zero can end up keeping each Neon _preview_ branch running too 😳. We are actively working on better preview support. diff --git a/contents/docs/debug/otel.mdx b/contents/docs/debug/otel.mdx index d6bfc742..ae1492a2 100644 --- a/contents/docs/debug/otel.mdx +++ b/contents/docs/debug/otel.mdx @@ -19,14 +19,14 @@ Here are instructions to setup [Grafana Cloud](https://grafana.com/oss/grafana/) 1. Sign up for [Grafana Cloud (Free Tier)](https://grafana.com/auth/sign-up/create-user?pg=login) 2. Click Connections > Add Connection in the left sidebar - ![add-connection](/images/debugging/otel/add-connection.png) + ![add-connection](/images/debugging/otel/add-connection.png) 3. Search for "OpenTelemetry" and select it 4. Click "Quickstart" - ![quickstart](/images/debugging/otel/quickstart.png) + ![quickstart](/images/debugging/otel/quickstart.png) 5. Select "JavaScript" - ![javascript](/images/debugging/otel/javascript.png) + ![javascript](/images/debugging/otel/javascript.png) 6. Create a new token 7. Copy the environment variables into your `.env` file or similar - ![copy-env](/images/debugging/otel/env.png) + ![copy-env](/images/debugging/otel/env.png) 8. 
Start `zero-cache` -9. Look for logs under "Drilldown" > "Logs" in left sidebar \ No newline at end of file +9. Look for logs under "Drilldown" > "Logs" in left sidebar diff --git a/contents/docs/debug/permissions.mdx b/contents/docs/debug/permissions.mdx index d3567df1..c8027c77 100644 --- a/contents/docs/debug/permissions.mdx +++ b/contents/docs/debug/permissions.mdx @@ -29,7 +29,9 @@ npx analyze-query ``` - The printed query can be different than the source ZQL string, because it is rebuilt from the query AST. But it should be logically equivalent to the query you wrote. + The printed query can be different than the source ZQL string, because it is + rebuilt from the query AST. But it should be logically equivalent to the query + you wrote. ## Write Permissions diff --git a/contents/docs/debug/replication.mdx b/contents/docs/debug/replication.mdx index d6283894..c5c53318 100644 --- a/contents/docs/debug/replication.mdx +++ b/contents/docs/debug/replication.mdx @@ -26,7 +26,7 @@ To inspect your Zero database, you have two options: 1. Use our pre-compiled SQLite build `@rocicorp/zero-sqlite3` as described above 2. Build SQLite from the SQLite `bedrock` branch yourself - + This will drop you into a `sqlite3` shell with which you can use to explore the contents of the replica. diff --git a/contents/docs/debug/slow-queries.mdx b/contents/docs/debug/slow-queries.mdx index bd995c88..b1a4406d 100644 --- a/contents/docs/debug/slow-queries.mdx +++ b/contents/docs/debug/slow-queries.mdx @@ -71,4 +71,4 @@ Note that query performance can also be affected by read permissions. See [Debug ## /statz -`zero-cache` makes some internal health statistics available via the `/statz` endpoint of `zero-cache`. In order to access this, you must configure an [admin password](/docs/zero-cache-config#admin-password). \ No newline at end of file +`zero-cache` makes some internal health statistics available via the `/statz` endpoint of `zero-cache`. In order to access this, you must configure an [admin password](/docs/zero-cache-config#admin-password). diff --git a/contents/docs/errors.mdx b/contents/docs/errors.mdx index b585cf30..f4cabc53 100644 --- a/contents/docs/errors.mdx +++ b/contents/docs/errors.mdx @@ -20,5 +20,7 @@ You can use this to send errors to Sentry, show custom UI, etc. The first parameter to `onError` is a descriptive message. Additional parameters provide more detail, for example an `Error` object (with a stack), or a JSON object. -If you implement `onError`, errors will no longer be sent to devtools by default. If you also want errors sent to the devtools console, you must call `console.error()` inside your `onError` handler. - \ No newline at end of file + If you implement `onError`, errors will no longer be sent to devtools by + default. If you also want errors sent to the devtools console, you must call + `console.error()` inside your `onError` handler. + diff --git a/contents/docs/offline.mdx b/contents/docs/offline.mdx index 89cc78af..98ca0f4d 100644 --- a/contents/docs/offline.mdx +++ b/contents/docs/offline.mdx @@ -1,10 +1,12 @@ --- title: Offline --- + Zero currently supports offline reads, but not writes. We plan to support offline writes in the future, but we don't have a timeline for that yet. -Offline writes *are* currently accepted by Zero, and sent when you come back online. But we plan to disable this soon. + Offline writes *are* currently accepted by Zero, and sent when you come back + online. But we plan to disable this soon. 
The lack of offline writes is often surprising to people familiar with sync engines, because offline is usually touted as something that comes for free with these tools. @@ -19,7 +21,7 @@ For example, imagine two users are editing an article about cats. One goes offli And while the above example may sound extreme, you can construct similar situations with the majority of common applications. Just take your own application and ask yourself what should really happen if one user takes their device offline for a week and makes arbitrarily complex changes while other users are working online. -People who work on sync engines and related tools often say that *offline is just extreme lag*, but that's only true at a technical level. At a human level, being "offline" for a few seconds is very different from being offline for a few hours. The difference is how much knowledge you have about what your collaborators are doing, and how much of your work can be lost. +People who work on sync engines and related tools often say that _offline is just extreme lag_, but that's only true at a technical level. At a human level, being "offline" for a few seconds is very different from being offline for a few hours. The difference is how much knowledge you have about what your collaborators are doing, and how much of your work can be lost. The only way to support offline writes in general is to either: @@ -27,7 +29,7 @@ The only way to support offline writes in general is to either: 2. Support custom UX to allow users to fork and merge conflicts when they occur. 3. Only support editing from a single device. -None of these is *free*. Buiding a good offline UX is a lot of work, and most of that work is borne by application developers. +None of these is _free_. Buiding a good offline UX is a lot of work, and most of that work is borne by application developers. ## … And a Schema Problem @@ -51,7 +53,7 @@ These problems are surmountable, but significant effort. Their solutions might a For all of the above reasons, we plan to disable offline writes in Zero for beta. -When the Zero client loses connection to `zero-cache` for several minutes (or when `zero-cache` cannot reach the customer API server), it will enter a special *offline mode*. In this mode, all writes to Zero will throw. +When the Zero client loses connection to `zero-cache` for several minutes (or when `zero-cache` cannot reach the customer API server), it will enter a special _offline mode_. In this mode, all writes to Zero will throw. While we recognize that offline writes would be useful for some applications, the reality is that for most of the apps we want to support, the user is online the vast majority of the time and the cost to support offline is extremely high. There is simply more value in making the online experience great first. @@ -63,6 +65,6 @@ Until Zero disables offline writes automatically, we recomment using the `onOnli ## Even More Information -* [Lies I was Told About Collaborative Editing](https://www.moment.dev/blog/lies-i-was-told-pt-1): a detailed overview of the challenges around offline writes in any collaborative editing system. -* This [Zero Discord thread](https://discord.com/channels/830183651022471199/1363957701331390669/1364260786108633198) covers some challenges specifically in the context of Zero. -* [Patchwork](https://www.inkandswitch.com/patchwork/notebook/) by Ink & Switch is new and interesting research around how to support offline writes well in collaborative systems. 
+- [Lies I was Told About Collaborative Editing](https://www.moment.dev/blog/lies-i-was-told-pt-1): a detailed overview of the challenges around offline writes in any collaborative editing system. +- This [Zero Discord thread](https://discord.com/channels/830183651022471199/1363957701331390669/1364260786108633198) covers some challenges specifically in the context of Zero. +- [Patchwork](https://www.inkandswitch.com/patchwork/notebook/) by Ink & Switch is new and interesting research around how to support offline writes well in collaborative systems. diff --git a/contents/docs/postgres-support.mdx b/contents/docs/postgres-support.mdx index bb67b029..1636ff3e 100644 --- a/contents/docs/postgres-support.mdx +++ b/contents/docs/postgres-support.mdx @@ -2,12 +2,12 @@ title: Supported Postgres Features --- -Postgres has a massive feature set, of which Zero supports a growings subset. +Postgres has a massive feature set, and Zero supports a growing subset of it. ## Object Names - Table and column names must begin with a letter or underscore - - This can be followed letters, numbers, underscores, and hyphens + - This can be followed by letters, numbers, underscores, and hyphens - Regex: `/^[A-Za-z_]+[A-Za-z0-9_-]*$/` - The column name `_0_version` is reserved for internal use @@ -17,7 +17,7 @@ Postgres has a massive feature set, of which Zero supports a growings subset. - Views are not synced - `identity` generated columns are synced - All other generated columns are not synced -- Indexes aren’t _synced_ per-se but we do implicitly add indexes to the replica that match the upstream indexes. In the future this will be customizable. +- Indexes aren't _synced_ per-se, but we do implicitly add indexes to the replica that match the upstream indexes. In the future, this will be customizable. ## Column Types @@ -32,38 +32,89 @@ Postgres has a massive feature set, of which Zero supports a growings subset. All numeric types - number - number + + number + + + number + - char, varchar, text, uuid - string - string + + char, varchar, text,{' '} + uuid + + + string + + + string + - bool - boolean - boolean + + bool + + + boolean + + + boolean + - date, timestamp, timestampz - number - number + + date, timestamp, timestampz + + + number + + + number + - json, jsonb - json - JSONValue + + json, jsonb + + + json + + + JSONValue + - enum - enumeration - string + + enum + + + enumeration + + + string + - T[] where T is a supported Postgres type (but please see ⚠️ below) - json<U[]> where U is the schema.ts type for T - V[] where V is the JS/TS type for T + + T[]{' '} + + where T is a supported Postgres type (but please see ⚠️ + below) + + + + json<U[]>{' '} + + where U is the schema.ts type for T + + + + V[]{' '} + + where V is the JS/TS type for T + + @@ -74,8 +125,6 @@ Zero will sync arrays to the client, but there is no support for filtering or jo - - Other Postgres column types aren’t supported. They will be ignored when replicating (the synced data will be missing that column) and you will get a warning when `zero-cache` starts up. If your schema has a pg type not listed here, you can support it in Zero by using a trigger to map it to some type that Zero can support. For example if you have a [GIS polygon type](https://www.postgresql.org/docs/current/datatype-geometric.html#DATATYPE-POLYGON) in the column `my_poly polygon`, you can use a trigger to map it to a `my_poly_json json` column. 
You could either use another trigger to map in the reverse direction to support changes for writes, or you could use a [custom mutator](./custom-mutators) to write to the polygon type directly on the server. @@ -84,11 +133,13 @@ Let us know if the lack of a particular column type is hindering your use of Zer ## Column Defaults -Default values are allowed in the Postgres schema but there currently is no way to use them from a Zero app. The create mutation requires all columns to be specified, except when columns are nullable (in which case,they default to null). Since there is no way to leave non-nullable columns off the insert, there is no way for PG to apply the default. This is a known issue and will be fixed in the future. +Default values are allowed in the Postgres schema, but there currently is no way to use them from a Zero app. + +An `insert()` mutation requires all columns to be specified, except when columns are nullable (in which case, they default to null). Since there is no way to leave non-nullable columns off the insert on the client, there is no way for PG to apply the default. This is a known issue and will be fixed in the future. ## IDs -It is strongly recommended that primary keys be client-generated random strings like [uuid](https://www.npmjs.com/package/uuid), [ulid](https://www.npmjs.com/package/ulid), [nanoid](https://www.npmjs.com/package/nanoid), etc. This makes optimistic creation and updates much easier. +It is strongly recommended to use client-generated random strings like [uuid](https://www.npmjs.com/package/uuid), [ulid](https://www.npmjs.com/package/ulid), [nanoid](https://www.npmjs.com/package/nanoid), etc for primary keys. This makes optimistic creation and updates much easier. Imagine that the PK of your table is an auto-incrementing integer. If you optimistically create an entity of this type, you will have to give it some ID – the type will require it locally, but also if you want to optimistically create relationships to this row you’ll need an ID. @@ -101,19 +152,17 @@ If your table has a natural key you can use that and it has less problems. But t -If you want to have a short auto-incrementing numeric ID for ux reasons (ie, a bug number), that is possible – See [Demo Video](https://discord.com/channels/830183651022471199/1288232858795769917/1298114323272568852)! +If you want to have a short auto-incrementing numeric ID for UX reasons (i.e., a bug number), that is possible - see [this video](https://discord.com/channels/830183651022471199/1288232858795769917/1298114323272568852). ## Primary Keys -Each table synced with Zero must have either a primary key or at least one unique index. - -This is needed so that Zero can identify rows during sync, to distinguish between an edit and a remove/add. +Each table synced with Zero must have either a primary key or at least one unique index. This is needed so that Zero can identify rows during sync, to distinguish between an edit and a remove/add. Multi-column primary and foreign keys are supported. ## Limiting Replication -You can use [Permissions](permissions) to limit tables and rows from replicating to Zero. In the near future you’ll also be able to use Permissions to limit individual columns. +You can use [Permissions](permissions) to limit tables and rows from replicating to Zero. In the near future, you'll also be able to use Permissions to limit individual columns. 
Until then, a workaround is to use the Postgres [_publication_](https://www.postgresql.org/docs/current/sql-createpublication.html) feature to control the tables and columns that are replicated into `zero-cache`. diff --git a/contents/docs/react.mdx b/contents/docs/react.mdx index 33256eb5..681e7894 100644 --- a/contents/docs/react.mdx +++ b/contents/docs/react.mdx @@ -81,7 +81,6 @@ You can also pass a `Zero` instance to the `ZeroProvider` if you want to control ``` - Complete quickstart here: https://github.com/rocicorp/hello-zero diff --git a/contents/docs/reading-data.mdx b/contents/docs/reading-data.mdx index 1a57fea4..b719b26e 100644 --- a/contents/docs/reading-data.mdx +++ b/contents/docs/reading-data.mdx @@ -145,6 +145,7 @@ This _relationship query_ can have all the same clauses that top-level queries c Using `orderBy` or `limit` in a relationship that goes through a junction table (i.e., a many-to-many relationship) is not currently supported and will throw a runtime error. See [bug 3527](https://bugs.rocicorp.dev/issue/3527). You can sometimes work around this by making the junction relationship explicit, depending on your schema and usage. + ### Nested Relationships @@ -154,10 +155,8 @@ You can nest relationships arbitrarily: ```tsx // Get all issues, first 100 comments for each (ordered by modified,desc), // and for each comment all of its reactions. -z.query.issue.related( - 'comments', q => q.orderBy('modified', 'desc').limit(100).related( - 'reactions') - ) +z.query.issue.related('comments', q => + q.orderBy('modified', 'desc').limit(100).related('reactions'), ); ``` @@ -280,7 +279,7 @@ z.query.issue.where({cmp, or, exists} => Zero reuses data synced from prior queries to answer new queries when possible. This is what enables instant UI transitions. -But what controls the lifetime of this client-side data? How can you know whether any partiular query will return instant results? How can you know whether those results will be up to date or stale? +But what controls the lifetime of this client-side data? How can you know whether any particular query will return instant results? How can you know whether those results will be up to date or stale? The answer is that the data on the client is simply the union of rows returned from queries which are currently syncing. Once a row is no longer returned by any syncing query, it is removed from the client. Thus, there is never any stale data in Zero. @@ -356,7 +355,7 @@ Contrary to the design of other sync engines, we believe that storing tons of da - Initial sync will be slow, slowing down initial app load. - Because storage in browser tabs is unreliable, initial sync can occur surprisingly often. - We want to answer queries _instantly_ as often as possible. This requires client-side data in memory on the main thread. If we have to page to disk, we may as well go to the network and reduce complexity. -- Even though Zero's queries are very efficient, they do still have some cost, expecially hydration. Massive client-side storage would result in hydrating tons of queries that are unlikely to be used every time the app starts. +- Even though Zero's queries are very efficient, they do still have some cost, especially hydration. Massive client-side storage would result in hydrating tons of queries that are unlikely to be used every time the app starts. 
Most importantly, no matter how much data you store on the client, there will be cases where you have to fallback to the server: @@ -370,7 +369,7 @@ The goal with Zero is to answer 99% of queries on the client from memory. The re There is no hard limit at 20,000 or 100,000. Nothing terrible happens if you go above. The thing to keep in mind is that: -1. All those queries will revalidate everytime your app boots. +1. All those queries will revalidate every time your app boots. 2. All data synced to the client is in memory in JS. @@ -414,7 +413,7 @@ The `complete` value is currently only returned when Zero has received the serve ## Handling Missing Data -It is inevetable that there will be cases where the requested data cannot be found. Because Zero returns local results immediately, and server results asynchronously, displaying "not found" / 404 UI can be slightly tricky. If you just use a simple existence check, you will often see the 404 UI flicker while the server result loads: +It is inevitable that there will be cases where the requested data cannot be found. Because Zero returns local results immediately, and server results asynchronously, displaying "not found" / 404 UI can be slightly tricky. If you just use a simple existence check, you will often see the 404 UI flicker while the server result loads: ```tsx const [issue, issuesResult] = useQuery( diff --git a/contents/docs/release-notes/0.19.mdx b/contents/docs/release-notes/0.19.mdx index 3ce07fe0..48288fbf 100644 --- a/contents/docs/release-notes/0.19.mdx +++ b/contents/docs/release-notes/0.19.mdx @@ -11,10 +11,10 @@ npm install @rocicorp/zero@0.19 ## Upgrading -* If you use custom mutators, please see [hello-zero-solid](https://github.com/rocicorp/hello-zero-solid/pull/18/files) for how to update your push endpoint. -* If you use SolidJS, please switch to [`createQuery`](https://github.com/rocicorp/hello-zero-solid/pull/18/files). -* If you are `awaiting z.mutate.foo.bar()`, you should [switch to `await z.mutate.foo.bar().client`](/docs/custom-mutators#waiting-for-mutator-result) to be consistent with `.server`. -* If you were using a 0.19 canary, the `.server` property [returns error by rejection again](/docs/custom-mutators#waiting-for-mutator-result) (like 0.18 did). Sorry about the thrash here. +- If you use custom mutators, please see [hello-zero-solid](https://github.com/rocicorp/hello-zero-solid/pull/18/files) for how to update your push endpoint. +- If you use SolidJS, please switch to [`createQuery`](https://github.com/rocicorp/hello-zero-solid/pull/18/files). +- If you are `awaiting z.mutate.foo.bar()`, you should [switch to `await z.mutate.foo.bar().client`](/docs/custom-mutators#waiting-for-mutator-result) to be consistent with `.server`. +- If you were using a 0.19 canary, the `.server` property [returns error by rejection again](/docs/custom-mutators#waiting-for-mutator-result) (like 0.18 did). Sorry about the thrash here. ## Features @@ -45,7 +45,7 @@ npm install @rocicorp/zero@0.19 - `z2s`: Relationships nested in a junction relationship were not working correctly ([PR](https://github.com/rocicorp/mono/pull/4221)) - Custom mutators: Due to multitab, client can receive multiple responses for same mutation - Fix deadlock that could happen when pushing on a closed websocket ([PR](https://github.com/rocicorp/mono/pull/4256)) -- Fix incorrect shutdown under heavy CPU load (thanks Erik Munson!) ([PR](https://github.com/rocicorp/mono/pull/4252)) +- Fix incorrect shutdown under heavy CPU load (thanks Erik Munson!) 
([PR](https://github.com/rocicorp/mono/pull/4252)) - Fix case where deletes were getting reverted (thanks for reproduction Marc MacLeod!) ([PR](https://github.com/rocicorp/mono/pull/4282)) - `z2s`: Incorrect handling of self-join, and not exists - `not(exists())` is not supported on the client diff --git a/contents/docs/release-notes/0.20.mdx b/contents/docs/release-notes/0.20.mdx index f9f86342..33377c93 100644 --- a/contents/docs/release-notes/0.20.mdx +++ b/contents/docs/release-notes/0.20.mdx @@ -13,8 +13,8 @@ npm install @rocicorp/zero@0.20 There are two config changes for multinode deployments: -* **Required:** Remove `view-syncer`'s `ZERO_CHANGE_STREAMER_URI` env var and replace it with `ZERO_CHANGE_STREAMER_MODE: "discover"`. -* **Optional:** Change the `ZERO_LITESTREAM_BACKUP_URL` env var from being passed to both `replication-manager` and `view-syncer` nodes to being passed only to `replication-manager`. This config is no longer needed by `view-syncer` (and is ignored by it). +- **Required:** Remove `view-syncer`'s `ZERO_CHANGE_STREAMER_URI` env var and replace it with `ZERO_CHANGE_STREAMER_MODE: "discover"`. +- **Optional:** Change the `ZERO_LITESTREAM_BACKUP_URL` env var from being passed to both `replication-manager` and `view-syncer` nodes to being passed only to `replication-manager`. This config is no longer needed by `view-syncer` (and is ignored by it). See [hello-zero](https://github.com/rocicorp/hello-zero/pull/40/files#diff-6ae5efcb80db51f4b137515e1674caaf950405317e84e2961cf38f9d3a1804b9) for an upgrade example using SST. diff --git a/contents/docs/release-notes/0.21.mdx b/contents/docs/release-notes/0.21.mdx index e15bcf62..3da33cd3 100644 --- a/contents/docs/release-notes/0.21.mdx +++ b/contents/docs/release-notes/0.21.mdx @@ -17,7 +17,7 @@ See [hello-zero](https://github.com/rocicorp/hello-zero/pull/44) for an example ## Features -- New *"ztunes"* sample using TanStack, Drizzle, Better Auth, and Fly.io ([docs](/samples#ztunes)). +- New _"ztunes"_ sample using TanStack, Drizzle, Better Auth, and Fly.io ([docs](/samples#ztunes)). - Add initial support for Postgres arrays ([docs](/docs/postgres-support#column-types), [bug](https://bugs.rocicorp.dev/issue/3617)). - Improved React lifecycle management with `ZeroProvider` ([docs](/docs/react#zero-provider), [PR](https://github.com/rocicorp/mono/pull/4527)). - Expose `Zero` instances automatically at `__zero` ([docs](/docs/debug/inspector#creating-an-inspector), [PR](https://github.com/rocicorp/mono/pull/4526)). diff --git a/contents/docs/samples.mdx b/contents/docs/samples.mdx index 2d389594..8957cf61 100644 --- a/contents/docs/samples.mdx +++ b/contents/docs/samples.mdx @@ -7,7 +7,11 @@ title: Samples A complete Linear-style bug tracker. Not just a demo app, this is our actual live bug db. We use it every day and depend on it. - zbugs + zbugs **Demo:** https://bugs.rocicorp.dev/
@@ -20,7 +24,11 @@ A complete Linear-style bug tracker. Not just a demo app, this is our actual liv An ecommerce store built with Zero, TanStack, Drizzle, and PlanetScale for Postgres. - ztunes + ztunes **Demo:** https://ztunes.rocicorp.dev/
diff --git a/contents/docs/zero-cache-config.mdx b/contents/docs/zero-cache-config.mdx index ce524720..53061de3 100644 --- a/contents/docs/zero-cache-config.mdx +++ b/contents/docs/zero-cache-config.mdx @@ -121,13 +121,14 @@ env: `ZERO_CHANGE_MAX_CONNS`
default: `5` ### Change Streamer Mode + The mode for running or connecting to the change-streamer: -* `dedicated`: runs the change-streamer and shuts down when another +- `dedicated`: runs the change-streamer and shuts down when another change-streamer takes over the replication slot. This is appropriate in a single-node configuration, or for the replication-manager in a multi-node configuration. -* `discover`: connects to the change-streamer as internally advertised in the +- `discover`: connects to the change-streamer as internally advertised in the change-db. This is appropriate for the view-syncers in a multi-node configuration. flag: `--change-streamer-mode`
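A minimal sketch of how the two modes might map onto nodes in a multi-node deployment. Only the `ZERO_CHANGE_STREAMER_MODE` values come from the documentation above; the per-node grouping is illustrative:

```bash
# replication-manager node: runs the change-streamer itself
ZERO_CHANGE_STREAMER_MODE=dedicated

# view-syncer nodes: connect to the change-streamer advertised in the change-db
ZERO_CHANGE_STREAMER_MODE=discover
```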
@@ -210,7 +211,7 @@ values: `debug`, `info`, `warn`, `error` ### Litestream Backup URL -The location of the litestream backup, usually an s3:// URL. This is only consulted by the replication-manager. view-syncers receive this information from the replication-manager. +The location of the litestream backup, usually an s3:// URL. This is only consulted by the replication-manager. view-syncers receive this information from the replication-manager. flag: `--litestream-backup-url`
env: `ZERO_LITESTREAM_BACKUP_URL`
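Continuing the sketch above, the backup URL is only consulted by the replication-manager, so it only needs to be set there; view-syncers receive the location from the replication-manager at runtime. The bucket name and path below are placeholders:

```bash
# set on the replication-manager only
ZERO_LITESTREAM_BACKUP_URL=s3://example-bucket/zero-replica

# view-syncer nodes omit this variable entirely
```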
diff --git a/contents/docs/zero-schema.mdx b/contents/docs/zero-schema.mdx index 4b3d6c9d..2f2ef13d 100644 --- a/contents/docs/zero-schema.mdx +++ b/contents/docs/zero-schema.mdx @@ -8,7 +8,11 @@ Zero applications have both a _database schema_ (the normal backend database sch 2. Define first-class relationships between tables 3. Define permissions for access control - + [Community-contributed converters](./community#database-tools) exist for Prisma and Drizzle that generate the tables and relationships. It is good to know how the underlying Zero schemas work, however, for debugging and @@ -19,7 +23,7 @@ This page describes using the schema to define your tables, columns, and relatio ## Defining the Zero Schema -The Zero schema is encoded in a TypeScript file that is conventionally called `schema.ts` file. For example, see [the schema file for`hello-zero`](https://github.com/rocicorp/hello-zero/blob/main/src/schema.ts). +The Zero schema is encoded in a TypeScript file that is conventionally called `schema.ts` file. For example, see [the schema file for `hello-zero`](https://github.com/rocicorp/hello-zero/blob/main/src/schema.ts). ## Table Schemas @@ -230,16 +234,10 @@ Use `createSchema` to define the entire Zero schema: ```tsx import {createSchema} from '@rocicorp/zero'; -export const schema = createSchema( - { - tables: [user, medium, message], - relationships: [ - userRelationships, - mediumRelationships, - messageRelationships, - ], - }, -); +export const schema = createSchema({ + tables: [user, medium, message], + relationships: [userRelationships, mediumRelationships, messageRelationships], +}); ``` ## Migrations diff --git a/generate-llms.js b/generate-llms.js index f8a71cde..f0144e68 100755 --- a/generate-llms.js +++ b/generate-llms.js @@ -3,17 +3,17 @@ const fs = require('fs'); const path = require('path'); -// Extract page paths from routes-config.ts +// Extract page paths from routes-config.tsx function extractPagesFromRoutesConfig() { - const routesConfigPath = path.join(__dirname, 'lib', 'routes-config.ts'); + const routesConfigPath = path.join(__dirname, 'lib', 'routes-config.tsx'); const content = fs.readFileSync(routesConfigPath, 'utf8'); - + // Extract all href values const matches = [...content.matchAll(/href:\s*['"]([^'"]*)['"]/g)]; return matches .map(match => match[1]) .filter(href => href !== '') - .map(href => href.startsWith('/') ? href.substring(1) : href) + .map(href => (href.startsWith('/') ? 
href.substring(1) : href)) .filter(href => href !== ''); } @@ -22,17 +22,17 @@ function extractLinksFromMarkdown(content, docsDir) { // Match markdown link pattern [text](/docs/path) or [text](path) const linkRegex = /\[.*?\]\((?:\/docs\/)?([^)]+)\)/g; const links = new Set(); - + let match; while ((match = linkRegex.exec(content)) !== null) { const link = match[1].trim(); - + // Only consider links that seem to point to other docs if (!link.startsWith('http') && !link.startsWith('#')) { // Check if this is likely a markdown file (either directly or index.mdx in directory) const mdxPath = path.join(docsDir, `${link}.mdx`); const dirIndexPath = path.join(docsDir, link, 'index.mdx'); - + if (fs.existsSync(mdxPath)) { links.add(link); } else if (fs.existsSync(dirIndexPath)) { @@ -40,7 +40,7 @@ function extractLinksFromMarkdown(content, docsDir) { } } } - + return [...links]; } @@ -50,19 +50,21 @@ function processFile(filePath, allText, processedFiles) { if (!fs.existsSync(filePath)) { return null; } - + const content = fs.readFileSync(filePath, 'utf8'); const extractedText = content.replace(/^---[\s\S]*?---/m, '').trim(); - + // Get the relative path for the file header const relativePath = filePath.split('/contents/docs/')[1]; allText += `--- ${relativePath} ---\n\n`; allText += extractedText + '\n\n\n'; - + processedFiles.add(relativePath); - return { content, allText }; + return {content, allText}; } catch (error) { - console.warn(`Warning: Error processing file ${filePath}: ${error.message}`); + console.warn( + `Warning: Error processing file ${filePath}: ${error.message}`, + ); return null; } } @@ -70,33 +72,33 @@ function processFile(filePath, allText, processedFiles) { // Generate the llms.txt file function generateLLMsFile() { console.log('Generating llms.txt file...'); - + const docsDir = path.join(__dirname, 'contents', 'docs'); const outputFile = path.join(__dirname, 'public', 'llms.txt'); - + try { - // Get ordered pages from routes-config.ts + // Get ordered pages from routes-config.tsx const tocPages = extractPagesFromRoutesConfig(); - console.log(`Found ${tocPages.length} pages in routes-config.ts`); - + console.log(`Found ${tocPages.length} pages in routes-config.tsx`); + let allText = ''; let processedFiles = new Set(); let linkedPages = new Set(); - + // First process all pages from TOC for (const page of tocPages) { // Check if this is a direct .mdx file const mdxPath = path.join(docsDir, `${page}.mdx`); - + // Check if this is a directory with index.mdx const dirIndexPath = path.join(docsDir, page, 'index.mdx'); - + if (fs.existsSync(mdxPath)) { // It's a direct .mdx file const result = processFile(mdxPath, allText, processedFiles); if (result) { allText = result.allText; - + // Extract links from this file const links = extractLinksFromMarkdown(result.content, docsDir); links.forEach(link => linkedPages.add(link)); @@ -106,7 +108,7 @@ function generateLLMsFile() { const result = processFile(dirIndexPath, allText, processedFiles); if (result) { allText = result.allText; - + // Extract links from this file const links = extractLinksFromMarkdown(result.content, docsDir); links.forEach(link => linkedPages.add(link)); @@ -115,53 +117,59 @@ function generateLLMsFile() { console.warn(`Warning: Neither ${mdxPath} nor ${dirIndexPath} found`); } } - + // Now process linked pages that weren't in the TOC const processedFromToc = processedFiles.size; - console.log(`Processed ${processedFromToc} files from TOC, now checking for linked files...`); - + console.log( + `Processed 
${processedFromToc} files from TOC, now checking for linked files...`, + ); + // Keep processing until no more new links are found let moreLinksFound = true; let iterations = 0; const maxIterations = 5; // Prevent infinite loops - + while (moreLinksFound && iterations < maxIterations) { iterations++; const prevProcessedCount = processedFiles.size; const newLinkedPages = new Set(); - + for (const link of linkedPages) { // Skip if already processed if (processedFiles.has(`${link}.mdx`)) continue; - + // Process this linked file const mdxPath = path.join(docsDir, `${link}.mdx`); - + const result = processFile(mdxPath, allText, processedFiles); if (result) { allText = result.allText; - + // Extract more links from this file const moreLinks = extractLinksFromMarkdown(result.content, docsDir); moreLinks.forEach(l => newLinkedPages.add(l)); } } - + // Add new found links to our set newLinkedPages.forEach(link => linkedPages.add(link)); - + // Check if we processed any new files in this iteration if (processedFiles.size === prevProcessedCount) { moreLinksFound = false; } } - + // Write the extracted text to llms.txt fs.writeFileSync(outputFile, allText.trim()); - - console.log(`Successfully generated llms.txt with content from ${processedFiles.size} files:`); + + console.log( + `Successfully generated llms.txt with content from ${processedFiles.size} files:`, + ); console.log(`- ${processedFromToc} files from TOC`); - console.log(`- ${processedFiles.size - processedFromToc} additional linked files`); + console.log( + `- ${processedFiles.size - processedFromToc} additional linked files`, + ); } catch (error) { console.error('Error generating llms.txt:', error); process.exit(1); @@ -169,4 +177,4 @@ function generateLLMsFile() { } // Run the function -generateLLMsFile(); \ No newline at end of file +generateLLMsFile(); diff --git a/lib/generateSearchIndex.ts b/lib/generateSearchIndex.ts index c44a5f43..881540e1 100644 --- a/lib/generateSearchIndex.ts +++ b/lib/generateSearchIndex.ts @@ -5,10 +5,14 @@ import remarkParse from 'remark-parse'; import remarkStringify from 'remark-stringify'; import {unified} from 'unified'; import {visit} from 'unist-util-visit'; +import {IconKey} from './icons'; +import {page_routes} from './routes-config'; +import {toString} from 'mdast-util-to-string'; +import {Root} from 'mdast'; // Define the root directory where docs are stored const DOCS_ROOT = path.join(process.cwd(), 'contents/docs'); -const OUTPUT_FILE = path.join(process.cwd(), 'public', 'search-index.json'); +const OUTPUT_FILE = path.join(process.cwd(), 'assets', 'search-index.json'); // Define the structure of a document for indexing interface SearchDocument { @@ -16,10 +20,8 @@ interface SearchDocument { title: string; content: string; url: string; - headings: { - text: string; - id: string; - }[]; + icon: IconKey; + headings: {text: string; id: string}[]; } /** @@ -45,11 +47,9 @@ function getAllMDXFiles(dir: string): string[] { /** * Extract headings with IDs from MDX content */ -function extractHeadings(content: string): {text: string; id: string}[] { +function extractHeadings(tree: Root): {text: string; id: string}[] { const headings: {text: string; id: string}[] = []; - const tree = unified().use(remarkParse).parse(content); - visit(tree, 'heading', (node: any) => { const text = node.children .filter((child: any) => child.type === 'text') @@ -70,6 +70,42 @@ function extractHeadings(content: string): {text: string; id: string}[] { return headings; } +function markdownToText(tree: Root) { + let out = ''; + 
let lastNodeWasBlock = false; + + visit(tree, (node, _index, _parent) => { + if (node.type === 'text') { + // Add spacing before text if the previous node was a block element + if ( + lastNodeWasBlock && + out.length > 0 && + !out.endsWith(' ') && + !out.endsWith('\n') + ) { + out += ' '; + } + out += node.value; + lastNodeWasBlock = false; + } else if (node.type === 'break') { + // Line breaks should add a space + out += ' '; + lastNodeWasBlock = false; + } else if ( + ['paragraph', 'heading', 'listItem', 'blockquote', 'code'].includes( + node.type, + ) + ) { + // Mark that we've encountered a block element + lastNodeWasBlock = true; + } + }); + + return out; +} + +let index = 0; + /** * Extracts content from an MDX file and processes it into plain text */ @@ -77,14 +113,14 @@ async function extractTextFromMDX(filePath: string): Promise { const fileContent = fs.readFileSync(filePath, 'utf-8'); const {content, data} = matter(fileContent); // Extract frontmatter - // Convert MDX content to plain text (ignoring JSX) - const plainText = await unified() - .use(remarkParse) - .use(remarkStringify) - .process(content); + // Convert MDX content to mdast + const mdast = unified().use(remarkParse).parse(content); + + // Convert mdast to plain text + const plainText = markdownToText(mdast); // Extract headings with IDs - const headings = extractHeadings(content); + const headings = extractHeadings(mdast); // Derive a URL from the file name const pathWithoutExtension = path @@ -92,11 +128,16 @@ async function extractTextFromMDX(filePath: string): Promise { .replace(/\.mdx$/, ''); const url = `/docs/${pathWithoutExtension}`; + const route = page_routes.find( + route => route.href && url.endsWith(route.href), + ); + return { - id: pathWithoutExtension, // Use file name as ID + id: `${index++}-${pathWithoutExtension}`, // Use file name as ID title: data.title || pathWithoutExtension, // Use frontmatter title or fallback to path url, - content: plainText.toString().replace(/\n+/g, ' ').trim(), + icon: route?.icon ?? 
'FileCode', + content: plainText.replace(/\n+/g, ' ').replace(/\s+/g, ' ').trim(), headings, // Include extracted headings with IDs }; } diff --git a/lib/icons.tsx b/lib/icons.tsx new file mode 100644 index 00000000..6422ef90 --- /dev/null +++ b/lib/icons.tsx @@ -0,0 +1,104 @@ +import { + ArrowDown, + ArrowUp, + ArrowUpWideNarrow, + BadgeAlert, + Blend, + Blocks, + BookOpen, + Cable, + ChartLine, + CircleDashed, + CircleFadingArrowUp, + CirclePlay, + Clock, + Code2, + Cog, + CopyIcon, + Database, + Eclipse, + File, + FileCode, + KeyRound, + Link2, + Map, + Plus, + Puzzle, + RefreshCcw, + SearchIcon, + Server, + Share2, + ShieldCheck, + ShieldQuestion, + Sparkle, + SwatchBook, + Terminal, + Unplug, + Users2, + Workflow, +} from 'lucide-react'; + +export const icons = { + ArrowDown, + ArrowUp, + ArrowUpWideNarrow, + BadgeAlert, + Blend, + Blocks, + BookOpen, + Cable, + ChartLine, + CircleDashed, + CircleFadingArrowUp, + CirclePlay, + Clock, + Code2, + Cog, + CopyIcon, + Database, + Eclipse, + FileCode, + KeyRound, + Link2, + Map, + Plus, + Puzzle, + React: (props: React.SVGProps) => ( + + + + ), + RefreshCcw, + SearchIcon, + Server, + Share2, + ShieldCheck, + ShieldQuestion, + SolidJS: (props: React.SVGProps) => ( + + + + ), + Sparkle, + SwatchBook, + Terminal, + Unplug, + Users2, + Workflow, +} as const satisfies Record< + string, + React.ComponentType> +>; + +export type IconKey = keyof typeof icons; diff --git a/lib/markdown.ts b/lib/markdown.ts index 9c0d0ffb..76de37fb 100644 --- a/lib/markdown.ts +++ b/lib/markdown.ts @@ -1,7 +1,7 @@ import path from 'path'; import {promises as fs} from 'fs'; import {compileMDX} from 'next-mdx-remote/rsc'; -import {page_routes, ROUTES} from './routes-config'; +import {page_routes} from './routes-config'; import remarkGfm from 'remark-gfm'; import rehypePrism from 'rehype-prism-plus'; import rehypeAutolinkHeadings from 'rehype-autolink-headings'; diff --git a/lib/rehype-add-copy-button.ts b/lib/rehype-add-copy-button.ts index 6962a368..f17d1a24 100644 --- a/lib/rehype-add-copy-button.ts +++ b/lib/rehype-add-copy-button.ts @@ -16,7 +16,7 @@ const rehypeAddCopyButton: Plugin = () => { type: 'element', tagName: 'button', properties: { - className: ['copy-button'], + className: ['font-regular copy-button'], }, children: [{type: 'text', value: 'Copy'}], }; diff --git a/lib/routes-config.ts b/lib/routes-config.ts deleted file mode 100644 index 70964f4b..00000000 --- a/lib/routes-config.ts +++ /dev/null @@ -1,100 +0,0 @@ -// for page navigation & to sort on leftbar - -export type EachRoute = { - title: string; - href: string; - noLink?: true; - new?: boolean; - items?: EachRoute[]; -}; - -export const ROUTES: EachRoute[] = [ - { - title: 'Welcome', - href: '', - noLink: true, - new: false, - items: [ - {title: 'Introduction', href: '/introduction'}, - {title: 'Quickstart', href: '/quickstart'}, - {title: 'Add to Existing Project', href: '/add-to-existing-project'}, - {title: 'Samples', href: '/samples'}, - ], - }, - - { - title: 'Using Zero', - href: '', - noLink: true, - items: [ - //TODO - //{title: 'How Zero Works', href: '/overview'}, - {title: 'Connecting to Postgres', href: '/connecting-to-postgres'}, - {title: 'Supported Postgres Features', href: '/postgres-support'}, - {title: 'Zero Schema', href: '/zero-schema'}, - {title: 'Reading Data with ZQL', href: '/reading-data'}, - {title: 'Writing Data with Mutators', href: '/writing-data'}, - {title: 'Custom Mutators', href: '/custom-mutators'}, - {title: 'Authentication', href: '/auth'}, - {title: 
'Permissions', href: '/permissions'}, - {title: 'ZQL on the Server', href: '/zql-on-the-server'}, - {title: 'Offline', href: '/offline'}, - {title: 'Deployment', href: '/deployment'}, - {title: '`zero-cache` Config', href: '/zero-cache-config'}, - ], - }, - - { - title: 'Integrations', - href: '', - noLink: true, - items: [ - {title: 'React', href: '/react'}, - {title: 'SolidJS', href: '/solidjs'}, - {title: 'Community', href: '/community'}, - ], - }, - - { - title: 'Debugging', - href: '', - noLink: true, - items: [ - {title: 'Inspector API', href: '/debug/inspector'}, - {title: 'Permissions', href: '/debug/permissions'}, - {title: 'Slow Queries', href: '/debug/slow-queries'}, - {title: 'Replication', href: '/debug/replication'}, - {title: 'Query ASTs', href: '/debug/query-asts'}, - {title: 'OpenTelemetry', href: '/debug/otel'}, - ], - }, - - { - title: 'Meta', - href: '', - noLink: true, - items: [ - {title: 'Roadmap', href: '/roadmap'}, - {title: 'Release Notes', href: '/release-notes'}, - {title: 'Reporting Bugs', href: '/reporting-bugs'}, - {title: 'Open Source', href: '/open-source'}, - {title: 'LLMs', href: '/llms'}, - ], - }, -]; - -type Page = {title: string; href: string}; - -function getRecurrsiveAllLinks(node: EachRoute) { - const ans: Page[] = []; - if (!node.noLink) { - ans.push({title: node.title, href: node.href}); - } - node.items?.forEach(subNode => { - const temp = {...subNode, href: `${node.href}${subNode.href}`}; - ans.push(...getRecurrsiveAllLinks(temp)); - }); - return ans; -} - -export const page_routes = ROUTES.map(it => getRecurrsiveAllLinks(it)).flat(); diff --git a/lib/routes-config.tsx b/lib/routes-config.tsx new file mode 100644 index 00000000..4ed2255e --- /dev/null +++ b/lib/routes-config.tsx @@ -0,0 +1,148 @@ +// for page navigation & to sort on leftbar + +import {IconKey} from './icons'; + +export type EachRoute = { + title: string; + href: string | null; + new?: boolean; + items?: EachRoute[]; + defaultOpen?: boolean; + icon: IconKey; +}; + +export const ROUTES = [ + { + title: 'Welcome', + href: null, + new: false, + icon: 'Terminal', + items: [ + {title: 'Introduction', href: '/introduction', icon: 'BookOpen'}, + {title: 'Quickstart', href: '/quickstart', icon: 'CirclePlay'}, + { + title: 'Add to Existing Project', + href: '/add-to-existing-project', + icon: 'Plus', + }, + {title: 'Samples', href: '/samples', icon: 'SwatchBook'}, + ], + }, + + { + title: 'Concepts', + href: null, + icon: 'Eclipse', + items: [ + //TODO + //{title: 'How Zero Works', href: '/overview'}, + {title: 'Schema', href: '/zero-schema', icon: 'Blocks'}, + {title: 'Reading Data (ZQL)', href: '/reading-data', icon: 'ArrowDown'}, + { + title: 'Writing Data (Mutators)', + href: '/writing-data', + icon: 'ArrowUp', + }, + { + title: 'Custom Mutators', + href: '/custom-mutators', + icon: 'ArrowUpWideNarrow', + }, + {title: 'Authentication', href: '/auth', icon: 'KeyRound'}, + {title: 'Permissions', href: '/permissions', icon: 'ShieldCheck'}, + {title: 'Sharing ZQL', href: '/zql-on-the-server', icon: 'Share2'}, + {title: 'Offline', href: '/offline', icon: 'Unplug'}, + ], + }, + + { + title: 'Postgres', + href: null, + defaultOpen: false, + icon: 'Database', + items: [ + { + title: 'Provider Support', + href: '/connecting-to-postgres', + icon: 'Cable', + }, + { + title: 'Feature Compatibility', + href: '/postgres-support', + icon: 'Blend', + }, + ], + }, + + { + title: 'Integrations', + href: null, + defaultOpen: false, + icon: 'Link2', + items: [ + {title: 'React', href: '/react', icon: 
'React'}, + {title: 'SolidJS', href: '/solidjs', icon: 'SolidJS'}, + {title: 'Community', href: '/community', icon: 'Users2'}, + ], + }, + + { + title: 'Deployment', + href: null, + defaultOpen: false, + icon: 'CircleFadingArrowUp', + items: [ + {title: 'Overview', href: '/deployment', icon: 'Server'}, + {title: 'Runtime Config', href: '/zero-cache-config', icon: 'Cog'}, + ], + }, + + { + title: 'Debugging', + href: null, + defaultOpen: false, + icon: 'Code2', + items: [ + {title: 'Inspector API', href: '/debug/inspector', icon: 'SearchIcon'}, + { + title: 'Permissions', + href: '/debug/permissions', + icon: 'ShieldQuestion', + }, + {title: 'Slow Queries', href: '/debug/slow-queries', icon: 'Clock'}, + {title: 'Replication', href: '/debug/replication', icon: 'CopyIcon'}, + {title: 'Query ASTs', href: '/debug/query-asts', icon: 'Workflow'}, + {title: 'OpenTelemetry', href: '/debug/otel', icon: 'ChartLine'}, + ], + }, + + { + title: 'Meta', + href: null, + defaultOpen: false, + icon: 'Puzzle', + items: [ + {title: 'Roadmap', href: '/roadmap', icon: 'Map'}, + {title: 'Release Notes', href: '/release-notes', icon: 'RefreshCcw'}, + {title: 'Reporting Bugs', href: '/reporting-bugs', icon: 'BadgeAlert'}, + {title: 'Open Source', href: '/open-source', icon: 'CircleDashed'}, + {title: 'LLMs', href: '/llms', icon: 'Sparkle'}, + ], + }, +] as const satisfies EachRoute[]; + +type Page = {title: string; href: string; icon: IconKey}; + +function getRecursiveAllLinks(node: EachRoute) { + const ans: Page[] = []; + if (node.href) { + ans.push({title: node.title, href: node.href, icon: node.icon}); + } + node.items?.forEach(subNode => { + const temp = {...subNode, href: `${node.href ?? ''}${subNode.href ?? ''}`}; + ans.push(...getRecursiveAllLinks(temp)); + }); + return ans; +} + +export const page_routes = ROUTES.map(it => getRecursiveAllLinks(it)).flat(); diff --git a/lib/utils.ts b/lib/utils.ts index 7209a828..2746c0d2 100644 --- a/lib/utils.ts +++ b/lib/utils.ts @@ -18,28 +18,28 @@ export function helperSearch( query: string, node: EachRoute, prefix: string, - currenLevel: number, + currentLevel: number, maxLevel?: number, ) { const res: EachRoute[] = []; let parentHas = false; - const nextLink = `${prefix}${node.href}`; - if (!node.noLink && node.title.toLowerCase().includes(query.toLowerCase())) { + const nextLink = `${prefix}${node.href ?? ''}`; + if (node.href && node.title.toLowerCase().includes(query.toLowerCase())) { res.push({...node, items: undefined, href: nextLink}); parentHas = true; } - const goNext = maxLevel ? currenLevel < maxLevel : true; + const goNext = maxLevel ? 
currentLevel < maxLevel : true; if (goNext) node.items?.forEach(item => { const innerRes = helperSearch( query, item, nextLink, - currenLevel + 1, + currentLevel + 1, maxLevel, ); - if (!!innerRes.length && !parentHas && !node.noLink) { + if (!!innerRes.length && !parentHas && node.href) { res.push({...node, items: undefined, href: nextLink}); parentHas = true; } diff --git a/package-lock.json b/package-lock.json index 454a08b8..55170100 100644 --- a/package-lock.json +++ b/package-lock.json @@ -16,9 +16,11 @@ "broken-link-checker": "^0.7.8", "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", + "cmdk": "^1.1.1", "gray-matter": "^4.0.3", "lucide-react": "^0.435.0", "lunr": "^2.3.9", + "mdast-util-to-string": "^4.0.0", "next": "^14.2.6", "next-mdx-remote": "^5.0.0", "next-themes": "^0.3.0", @@ -1209,24 +1211,194 @@ } }, "node_modules/@radix-ui/react-dialog": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.14.tgz", + "integrity": "sha512-+CpweKjqpzTmwRwcYECQcNYbI8V9VSQt0SNFKeEBLgfucbsLssU6Ppq7wUdNXEGb573bMjFhVjKVll8rmV6zMw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.2", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.10", + "@radix-ui/react-focus-guards": "1.1.2", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.4", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/primitive": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/primitive/-/primitive-1.1.2.tgz", + "integrity": "sha512-XnbHrrprsNqZKQhStrSwgRUQzoCI1glLzdw79xiZPoofhGICeZRSQ3dIxAKH1gb3OHfNf4d6f+vAv3kil2eggA==", + "license": "MIT" + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-context": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context/-/react-context-1.1.2.tgz", + "integrity": "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-dismissable-layer": { + "version": "1.1.10", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.10.tgz", + "integrity": "sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.2", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-guards": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-guards/-/react-focus-guards-1.1.2.tgz", + "integrity": "sha512-fyjAACV62oPV925xFCrH8DR5xWhg9KYtJT4s3u54jxp+L/hbpTY2kIeEFFbFe+a/HCE94zGQMZLIpVTPVZDhaA==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-focus-scope": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-id": { "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.1.tgz", - "integrity": "sha512-zysS+iU4YP3STKNS6USvFVqI4qqx8EpiwmT5TuCApVEBca+eRCbONi4EgzfNSuVnOXvC5UPHHMjs8RXO6DH9Bg==", + "resolved": "https://registry.npmjs.org/@radix-ui/react-id/-/react-id-1.1.1.tgz", + "integrity": "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + "license": "MIT", "dependencies": { - "@radix-ui/primitive": "1.1.0", - "@radix-ui/react-compose-refs": "1.1.0", - "@radix-ui/react-context": "1.1.0", - "@radix-ui/react-dismissable-layer": "1.1.0", - "@radix-ui/react-focus-guards": "1.1.0", - "@radix-ui/react-focus-scope": "1.1.0", - "@radix-ui/react-id": "1.1.0", - "@radix-ui/react-portal": "1.1.1", - "@radix-ui/react-presence": "1.1.0", - "@radix-ui/react-primitive": "2.0.0", - "@radix-ui/react-slot": "1.1.0", - "@radix-ui/react-use-controllable-state": "1.1.0", - "aria-hidden": "^1.1.1", - "react-remove-scroll": "2.5.7" + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + 
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-portal": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-presence": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-presence/-/react-presence-1.1.4.tgz", + "integrity": "sha512-ueDqRbdc4/bkaQT3GIpLQssRlFgWaL/U2z/S31qRwwLWoxHLgry3SIfCwhxeQNbirEUXFa+lq3RL3oBYXtcmIA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", @@ -1243,6 +1415,139 @@ } } }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-callback-ref": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz", + "integrity": "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-controllable-state": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-controllable-state/-/react-use-controllable-state-1.2.2.tgz", + "integrity": "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + "license": "MIT", + "dependencies": { + 
"@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-escape-keydown": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.1.tgz", + "integrity": "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-callback-ref": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-dialog/node_modules/react-remove-scroll": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.1.tgz", + "integrity": "sha512-HpMh8+oahmIdOuS5aFKKY6Pyog+FNaZV/XyJOq7b4YFwsFHe5yYfdbIalI4k3vU2nSDql7YskmUseHsRrJqIPA==", + "license": "MIT", + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-direction": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.0.tgz", @@ -1612,6 +1917,39 @@ } } }, + "node_modules/@radix-ui/react-use-effect-event": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-effect-event/-/react-use-effect-event-0.0.2.tgz", + "integrity": "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-use-layout-effect": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-use-effect-event/node_modules/@radix-ui/react-use-layout-effect": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-use-layout-effect/-/react-use-layout-effect-1.1.1.tgz", + "integrity": "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, 
"node_modules/@radix-ui/react-use-escape-keydown": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@radix-ui/react-use-escape-keydown/-/react-use-escape-keydown-1.1.0.tgz", @@ -2859,6 +3197,78 @@ "node": ">=6" } }, + "node_modules/cmdk": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz", + "integrity": "sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.6", + "@radix-ui/react-id": "^1.1.0", + "@radix-ui/react-primitive": "^2.0.2" + }, + "peerDependencies": { + "react": "^18 || ^19 || ^19.0.0-rc", + "react-dom": "^18 || ^19 || ^19.0.0-rc" + } + }, + "node_modules/cmdk/node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "license": "MIT", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/cmdk/node_modules/@radix-ui/react-primitive": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-slot": "1.2.3" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, + "node_modules/cmdk/node_modules/@radix-ui/react-slot": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + "license": "MIT", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/collapse-white-space": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", @@ -4904,14 +5314,6 @@ "node": ">= 0.4" } }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "dependencies": { - "loose-envify": "^1.0.0" - } - }, "node_modules/is-alphabetical": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", @@ -5985,6 +6387,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "license": "MIT", "dependencies": { "@types/mdast": "^4.0.0" }, @@ -7721,19 +8124,20 @@ } }, 
"node_modules/react-remove-scroll-bar": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.6.tgz", - "integrity": "sha512-DtSYaao4mBmX+HDo5YWYdBWQwYIQQshUV/dVxFxK+KM26Wjwp1gZ6rv6OC3oujI6Bfu6Xyg3TwK533AQutsn/g==", + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.8.tgz", + "integrity": "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + "license": "MIT", "dependencies": { - "react-style-singleton": "^2.2.1", + "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "engines": { "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "peerDependenciesMeta": { "@types/react": { @@ -7742,20 +8146,20 @@ } }, "node_modules/react-style-singleton": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", - "integrity": "sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.3.tgz", + "integrity": "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + "license": "MIT", "dependencies": { "get-nonce": "^1.0.0", - "invariant": "^2.2.4", "tslib": "^2.0.0" }, "engines": { "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -9421,9 +9825,10 @@ } }, "node_modules/use-callback-ref": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.2.tgz", - "integrity": "sha512-elOQwe6Q8gqZgDA8mrh44qRTQqpIHDcZ3hXTLjBe1i4ph8XpNJnO+aQf3NaG+lriLopI4HMx9VjQLfPQ6vhnoA==", + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.3.tgz", + "integrity": "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + "license": "MIT", "dependencies": { "tslib": "^2.0.0" }, @@ -9431,8 +9836,8 @@ "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "peerDependenciesMeta": { "@types/react": { @@ -9441,9 +9846,10 @@ } }, "node_modules/use-sidecar": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", - "integrity": "sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz", + "integrity": "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + "license": "MIT", "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" @@ -9452,8 +9858,8 @@ "node": ">=10" }, "peerDependencies": { - "@types/react": "^16.9.0 || ^17.0.0 || ^18.0.0", - "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, 
"peerDependenciesMeta": { "@types/react": { diff --git a/package.json b/package.json index 8f11fb01..4a2928d5 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,7 @@ "build:llms": "node generate-llms.js", "start": "npm run build:search && next start", "lint": "next lint", - "format": "prettier --write .", + "format": "prettier --write **/*.{ts,tsx,jsjson,md}", "links": "blc http://localhost:3000/ -ro --exclude 'https://discord.rocicorp.dev/' --exclude 'https://bsky.app/profile/zero.rocicorp.dev' --exclude 'https://x.com/rocicorp_zero'" }, "dependencies": { @@ -21,9 +21,11 @@ "broken-link-checker": "^0.7.8", "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", + "cmdk": "^1.1.1", "gray-matter": "^4.0.3", "lucide-react": "^0.435.0", "lunr": "^2.3.9", + "mdast-util-to-string": "^4.0.0", "next": "^14.2.6", "next-mdx-remote": "^5.0.0", "next-themes": "^0.3.0", diff --git a/tailwind.config.ts b/tailwind.config.ts index 63868f9b..2d4d91fb 100644 --- a/tailwind.config.ts +++ b/tailwind.config.ts @@ -19,7 +19,7 @@ const config = { }, }, extend: { - screens:{ + screens: { xxs: '320px', xs: '420px', }, @@ -69,17 +69,43 @@ const config = { }, keyframes: { 'accordion-down': { - from: {height: '0'}, - to: {height: 'var(--radix-accordion-content-height)'}, + from: {height: '0', opacity: '0', filter: 'blur(5px)'}, + to: { + height: 'var(--radix-accordion-content-height)', + opacity: '1', + filter: 'blur(0px)', + }, }, 'accordion-up': { - from: {height: 'var(--radix-accordion-content-height)'}, - to: {height: '0'}, + from: { + height: 'var(--radix-accordion-content-height)', + opacity: '1', + filter: 'blur(0px)', + }, + to: {height: '0', opacity: '0', filter: 'blur(5px)'}, + }, + 'collapsible-down': { + from: {height: '0', opacity: '0', filter: 'blur(5px)'}, + to: { + height: 'var(--radix-collapsible-content-height)', + opacity: '1', + filter: 'blur(0px)', + }, + }, + 'collapsible-up': { + from: { + height: 'var(--radix-collapsible-content-height)', + opacity: '1', + filter: 'blur(0px)', + }, + to: {height: '0', opacity: '0', filter: 'blur(5px)'}, }, }, animation: { - 'accordion-down': 'accordion-down 0.2s ease-out', - 'accordion-up': 'accordion-up 0.2s ease-out', + 'accordion-down': 'accordion-down 0.15s ease-in', + 'accordion-up': 'accordion-up 0.15s ease-in', + 'collapsible-down': 'collapsible-down 0.15s ease-in', + 'collapsible-up': 'collapsible-up 0.15s ease-in', }, }, }, From 60fcd0a14ae20845f9958fa8c4bff22c9f1cbf35 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 11:17:39 -0700 Subject: [PATCH 02/15] fix: formatting --- assets/search-index.json | 2 +- contents/docs/debug/replication.mdx | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/assets/search-index.json b/assets/search-index.json index bc74ae66..54c9ab9f 100644 --- a/assets/search-index.json +++ b/assets/search-index.json @@ -627,7 +627,7 @@ "title": "Supported Postgres Features", "url": "/docs/postgres-support", "icon": "Blend", - "content": "Postgres has a massive feature set, and Zero supports a growing subset of it. Object Names Table and column names must begin with a letter or underscore This can be followed by letters, numbers, underscores, and hyphens Regex: The column name is reserved for internal use Object Types Tables are synced Views are not synced generated columns are synced All other generated columns are not synced Indexes aren't synced per-se, but we do implicitly add indexes to the replica that match the upstream indexes. In the future, this will be customizable. 
Column Types Zero will sync arrays to the client, but there is no support for filtering or joining on array elements yet in ZQL. Other Postgres column types aren’t supported. They will be ignored when replicating (the synced data will be missing that column) and you will get a warning when starts up. If your schema has a pg type not listed here, you can support it in Zero by using a trigger to map it to some type that Zero can support. For example if you have a GIS polygon type in the column , you can use a trigger to map it to a column. You could either use another trigger to map in the reverse direction to support changes for writes, or you could use a custom mutator to write to the polygon type directly on the server. Let us know if the lack of a particular column type is hindering your use of Zero. It can likely be added. Column Defaults Default values are allowed in the Postgres schema, but there currently is no way to use them from a Zero app. An mutation requires all columns to be specified, except when columns are nullable (in which case, they default to null). Since there is no way to leave non-nullable columns off the insert on the client, there is no way for PG to apply the default. This is a known issue and will be fixed in the future. IDs It is strongly recommended to use client-generated random strings like uuid, ulid, nanoid, etc for primary keys. This makes optimistic creation and updates much easier. You could sync the highest value seen for that table, but there are race conditions and it is possible for that ID to be taken by the time the creation makes it to the server. Your database can resolve this and assign the next ID, but now the relationships you created optimistically will be against the wrong row. Blech. GUIDs makes a lot more sense in synced applications. If your table has a natural key you can use that and it has less problems. But there is still the chance for a conflict. Imagine you are modeling orgs and you choose domainName as the natural key. It is possible for a race to happen and when the creation gets to the server, somebody has already chosen that domain name. In that case, the best thing to do is reject the write and show the user an error. If you want to have a short auto-incrementing numeric ID for UX reasons (i.e., a bug number), that is possible: Primary Keys Each table synced with Zero must have either a primary key or at least one unique index. This is needed so that Zero can identify rows during sync, to distinguish between an edit and a remove/add. Multi-column primary and foreign keys are supported. Limiting Replication You can use Permissions to limit tables and rows from replicating to Zero. In the near future, you'll also be able to use Permissions to limit individual columns. Until then, a workaround is to use the Postgres publication feature to control the tables and columns that are replicated into . In your pg schema setup, create a Postgres with the tables and columns you want: Then, specify this publication in the App Publications option. (By default, Zero creates a publication that publishes the entire public schema.) To limit what is synced from the replica to actual clients (e.g., web browsers) you can use read permissions. Schema changes Most Postgres schema changes are supported as is. Two cases require special handling: Adding columns Adding a column with a non-constant value is not supported. This includes any expression with parentheses, as well as the special functions , , and (due to a constraint of SQLite). 
However, the value of an existing column can be changed to any value, including non-constant expressions. To achieve the desired column default: Add the column with no value Backfill the column with desired values Set the column's value Changing publications Postgres allows you to change published tables/columns with an statement. Zero automatically adjusts the table schemas on the replica, but it does not receive the pre-existing data. To stream the pre-existing data to Zero, make an innocuous after adding the tables/columns to the publication: Self-Referential Relationships See zero-schema", + "content": "Postgres has a massive feature set, and Zero supports a growing subset of it. Object Names Table and column names must begin with a letter or underscore This can be followed by letters, numbers, underscores, and hyphens Regex: The column name is reserved for internal use Object Types Tables are synced Views are not synced generated columns are synced All other generated columns are not synced Indexes aren't synced per-se, but we do implicitly add indexes to the replica that match the upstream indexes. In the future, this will be customizable. Column Types Zero will sync arrays to the client, but there is no support for filtering or joining on array elements yet in ZQL. Other Postgres column types aren’t supported. They will be ignored when replicating (the synced data will be missing that column) and you will get a warning when starts up. If your schema has a pg type not listed here, you can support it in Zero by using a trigger to map it to some type that Zero can support. For example if you have a GIS polygon type in the column , you can use a trigger to map it to a column. You could either use another trigger to map in the reverse direction to support changes for writes, or you could use a custom mutator to write to the polygon type directly on the server. Let us know if the lack of a particular column type is hindering your use of Zero. It can likely be added. Column Defaults Default values are allowed in the Postgres schema, but there currently is no way to use them from a Zero app. An mutation requires all columns to be specified, except when columns are nullable (in which case, they default to null). Since there is no way to leave non-nullable columns off the insert on the client, there is no way for PG to apply the default. This is a known issue and will be fixed in the future. IDs It is strongly recommended to use client-generated random strings like uuid, ulid, nanoid, etc for primary keys. This makes optimistic creation and updates much easier. You could sync the highest value seen for that table, but there are race conditions and it is possible for that ID to be taken by the time the creation makes it to the server. Your database can resolve this and assign the next ID, but now the relationships you created optimistically will be against the wrong row. Blech. GUIDs makes a lot more sense in synced applications. If your table has a natural key you can use that and it has less problems. But there is still the chance for a conflict. Imagine you are modeling orgs and you choose domainName as the natural key. It is possible for a race to happen and when the creation gets to the server, somebody has already chosen that domain name. In that case, the best thing to do is reject the write and show the user an error. If you want to have a short auto-incrementing numeric ID for UX reasons (i.e., a bug number), that is possible - see this video. 
Primary Keys Each table synced with Zero must have either a primary key or at least one unique index. This is needed so that Zero can identify rows during sync, to distinguish between an edit and a remove/add. Multi-column primary and foreign keys are supported. Limiting Replication You can use Permissions to limit tables and rows from replicating to Zero. In the near future, you'll also be able to use Permissions to limit individual columns. Until then, a workaround is to use the Postgres publication feature to control the tables and columns that are replicated into . In your pg schema setup, create a Postgres with the tables and columns you want: Then, specify this publication in the App Publications option. (By default, Zero creates a publication that publishes the entire public schema.) To limit what is synced from the replica to actual clients (e.g., web browsers) you can use read permissions. Schema changes Most Postgres schema changes are supported as is. Two cases require special handling: Adding columns Adding a column with a non-constant value is not supported. This includes any expression with parentheses, as well as the special functions , , and (due to a constraint of SQLite). However, the value of an existing column can be changed to any value, including non-constant expressions. To achieve the desired column default: Add the column with no value Backfill the column with desired values Set the column's value Changing publications Postgres allows you to change published tables/columns with an statement. Zero automatically adjusts the table schemas on the replica, but it does not receive the pre-existing data. To stream the pre-existing data to Zero, make an innocuous after adding the tables/columns to the publication: Self-Referential Relationships See zero-schema", "headings": [ { "text": "Object Names", diff --git a/contents/docs/debug/replication.mdx b/contents/docs/debug/replication.mdx index c5c53318..5c55500c 100644 --- a/contents/docs/debug/replication.mdx +++ b/contents/docs/debug/replication.mdx @@ -26,7 +26,8 @@ To inspect your Zero database, you have two options: 1. Use our pre-compiled SQLite build `@rocicorp/zero-sqlite3` as described above 2. Build SQLite from the SQLite `bedrock` branch yourself - + + This will drop you into a `sqlite3` shell with which you can use to explore the contents of the replica. 
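The `assets/search-index.json` entries cleaned up above are consumed entirely in the browser: `components/search.tsx` (patched further below) imports the file together with `lunr` and queries it client-side. A minimal sketch of that flow, with the field boosts and the example query being illustrative assumptions rather than the component's exact configuration:

```tsx
import lunr from 'lunr';
import searchIndex from '@/assets/search-index.json';

// Build an in-memory lunr index over the generated documents.
// Only the declared fields are indexed; the extra properties (url, icon,
// headings) ride along and are looked up again by `ref` after a search.
const idx = lunr(function () {
  this.ref('id');
  this.field('title', {boost: 10}); // assumed boost, for illustration only
  this.field('content');

  for (const doc of searchIndex) {
    this.add({id: doc.id, title: doc.title, content: doc.content});
  }
});

// A query returns [{ref, score, matchData}]; map `ref` back to the documents.
const hits = idx
  .search('replication')
  .map(r => searchIndex.find(doc => doc.id === r.ref));
```

Because the index is prebuilt at build time, the later patches in this series only adjust how results are deduplicated and rendered; no server endpoint is involved.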
From 6de3fbd8d55855ab0df8c28046dcdb8b2795c210 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 11:18:37 -0700 Subject: [PATCH 03/15] chore: lint --- assets/search-index.json | 2 +- components/search.tsx | 14 ++++++-------- package.json | 2 +- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/assets/search-index.json b/assets/search-index.json index 54c9ab9f..53b998a5 100644 --- a/assets/search-index.json +++ b/assets/search-index.json @@ -1834,4 +1834,4 @@ } ] } -] \ No newline at end of file +] diff --git a/components/search.tsx b/components/search.tsx index f92a518d..49c30c6a 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -194,14 +194,12 @@ export default function Search() { } // Deduplicate results by ID - const uniqueResults = results - .filter(Boolean) - .reduce((acc, result) => { - if (!acc.has(result!.id)) { - acc.set(result!.id, result!); - } - return acc; - }, new Map()); + const uniqueResults = results.filter(Boolean).reduce((acc, result) => { + if (!acc.has(result!.id)) { + acc.set(result!.id, result!); + } + return acc; + }, new Map()); setSearchResults(Array.from(uniqueResults.values())); } catch (error) { diff --git a/package.json b/package.json index 4a2928d5..76d18b23 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,7 @@ "build:llms": "node generate-llms.js", "start": "npm run build:search && next start", "lint": "next lint", - "format": "prettier --write **/*.{ts,tsx,jsjson,md}", + "format": "prettier --write **/*.{ts,tsx,js,json,md,mdx}", "links": "blc http://localhost:3000/ -ro --exclude 'https://discord.rocicorp.dev/' --exclude 'https://bsky.app/profile/zero.rocicorp.dev' --exclude 'https://x.com/rocicorp_zero'" }, "dependencies": { From 89659e9bfe4407f0d496849af30c8eb84e9142fa Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 12:37:28 -0700 Subject: [PATCH 04/15] fix: added fixes for styling --- app/globals.css | 12 +++---- app/page.module.css | 27 --------------- app/page.tsx | 72 ++++++++++++++++++++++++++-------------- components/search.tsx | 5 +-- components/ui/button.tsx | 6 ++-- components/ui/kbd.tsx | 20 +++++++++++ tailwind.config.ts | 4 +++ 7 files changed, 84 insertions(+), 62 deletions(-) create mode 100644 components/ui/kbd.tsx diff --git a/app/globals.css b/app/globals.css index 89727ab6..9330158c 100644 --- a/app/globals.css +++ b/app/globals.css @@ -21,7 +21,7 @@ --accent-foreground: 240 5.9% 10%; --destructive: 0 84.2% 60.2%; --destructive-foreground: 0 0% 98%; - --border: 240 5.9% 90%; + --border: 240 5.9% 85%; --input: 240 5.9% 90%; --radius: 0.5rem; --primary-highlight: 331.2 97.3% 55.9%; @@ -53,7 +53,7 @@ --accent-foreground: 0 0% 98%; --destructive: 0 62.8% 30.6%; --destructive-foreground: 0 0% 98%; - --border: 240 3.7% 15.9%; + --border: 240 3.7% 20%; --input: 240 3.7% 15.9%; --ring: 240 4.9% 83.9%; --primary-highlight: 331.2 97.3% 55.9%; @@ -203,8 +203,8 @@ mark { } .new-badge { - color: var(--primary-highlight); - border-color: var(--primary-highlight); + color: hsl(var(--primary-highlight)); + border-color: hsl(var(--primary-highlight)); } .search-shortcut { @@ -276,7 +276,7 @@ mark { } .prose code { - color: var(--primary-highlight) !important; + color: hsl(var(--primary-highlight)) !important; font-weight: 400; border-radius: 0.5rem !important; padding-left: 0.3rem !important; @@ -481,7 +481,7 @@ mark { } .button-link:hover { - background-color: var(--primary-highlight) !important; + background-color: hsl(var(--primary-highlight)) !important; } 
.button-link.pagination-button { diff --git a/app/page.module.css index 44aca6bf..01c81d6e 100644 --- a/app/page.module.css +++ b/app/page.module.css @@ -25,33 +25,6 @@ margin: 4rem 0 2rem; } -.main a { -  text-decoration: underline; -  color: hsl(var(--foreground)); -} - -.main a:hover { -  background: hsla(0, 0%, 100%, 0.3); -  outline: 2px solid hsla(0, 0%, 100%, 0.3); -} - -.main a.primaryButton { -  text-decoration: none; -  color: hsl(var(--foreground)); -} - -.main a.primaryButton:hover { -  background: hsl(var(--accent)); -  outline: none; -  color: rgba(255, 255, 255, 1); -} - -.ctaContainer { -  display: flex; -  width: 100%; -  justify-content: center; -} - .highlight { color: rgb(var(--primary-accent)); } diff --git a/app/page.tsx index e5e9e374..983273ba 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -1,15 +1,36 @@ -import Link from 'next/link'; -import Image from 'next/image'; -import styles from './page.module.css'; -import ResponsiveImage from '@/components/ui/responsive-image'; +'use client'; + import CodeBlock from '@/components/CodeBlock'; -import {Button} from '@/components/ui/button'; -import {cn} from '@/lib/utils'; -import ZeroAlphaLogo from '@/components/logos/ZeroAlpha'; +import GithubLogo from '@/components/logos/Github'; import RocicorpLogo from '@/components/logos/Rocicorp'; +import ZeroAlphaLogo from '@/components/logos/ZeroAlpha'; +import { Button } from '@/components/ui/button'; +import Kbd from '@/components/ui/kbd'; +import ResponsiveImage from '@/components/ui/responsive-image'; +import { cn } from '@/lib/utils'; +import Link from 'next/link'; +import { useRouter } from 'next/navigation'; +import React from 'react'; +import styles from './page.module.css'; export default function Home() { -  // Code block +  const router = useRouter(); + +  // Keyboard shortcuts: 'd' jumps to the docs, 'g' opens the GitHub repo +  React.useEffect(() => { +    const down = (e: KeyboardEvent) => { +      if (e.key === 'd') { +        e.preventDefault(); +        router.push('/docs/introduction'); +      } else if (e.key === 'g') { +        e.preventDefault(); +        router.push('https://github.com/rocicorp/mono#zero'); +      } +    }; + +    document.addEventListener('keydown', down); +    return () => document.removeEventListener('keydown', down); +  }, [router]); const exampleCode = `function Playlist({id}: {id: string}) { // This usually resolves *instantly*, and updates reactively
@@ -151,24 +177,21 @@ export default function Home() { Zero is currently in public alpha. It's got a few rough edges, and you have to deploy it yourself, but it's already remarkably fun. We're using it ourselves for Zero's{' '} - official bug tracker and we - find it much more productive than the alternatives. + + official bug tracker + {' '} + and we find it much more productive than the alternatives.

Ready to start? You can have your first app in production in about 20 minutes.


-

- -

+
@@ -179,7 +202,6 @@ export default function Home() { />
-
); } diff --git a/components/search.tsx b/components/search.tsx index 49c30c6a..6e5a4004 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -24,6 +24,7 @@ import { CommandItem, CommandList, } from './ui/command'; +import Kbd from './ui/kbd'; const Anchor = React.forwardRef>( ({children, ...props}, ref) => { @@ -242,10 +243,10 @@ export default function Search() { value={searchedInput} onChange={e => setSearchedInput(e.target.value)} /> -
+ K -
+
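The hunk above swaps the search dialog's keyboard-shortcut hint over to the new `Kbd` wrapper that this patch adds in `components/ui/kbd.tsx`. A minimal usage sketch, with the class names and children chosen purely for illustration:

```tsx
// Hypothetical usage of the <Kbd> wrapper; the positioning classes and the
// icon size here are assumptions, not the component's real markup.
import {CommandIcon} from 'lucide-react';
import Kbd from '@/components/ui/kbd';

export function SearchShortcutHint() {
  return (
    <Kbd className="absolute right-2 top-1/2 -translate-y-1/2">
      <CommandIcon className="w-3 h-3" />
      K
    </Kbd>
  );
}
```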
diff --git a/components/ui/button.tsx b/components/ui/button.tsx index 5690125c..f6f414f0 100644 --- a/components/ui/button.tsx +++ b/components/ui/button.tsx @@ -5,7 +5,7 @@ import {cva, type VariantProps} from 'class-variance-authority'; import {cn} from '@/lib/utils'; const buttonVariants = cva( - 'inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50', + 'inline-flex items-center no-underline justify-center whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50', { variants: { variant: { @@ -13,11 +13,13 @@ const buttonVariants = cva( destructive: 'bg-destructive text-destructive-foreground hover:bg-destructive/90', outline: - 'border border-input bg-background hover:bg-accent hover:text-accent-foreground button-link', + 'border border-input bg-background hover:bg-accent hover:text-accent-foreground', //button-link secondary: 'bg-secondary text-secondary-foreground hover:bg-secondary/80', ghost: 'hover:bg-accent hover:text-accent-foreground text-foreground', link: 'text-primary underline-offset-4 hover:underline', + primary: + 'border border-input bg-background hover:bg-primary-highlight hover:text-primary-highlight-foreground', }, size: { default: 'h-10 px-4 py-2', diff --git a/components/ui/kbd.tsx b/components/ui/kbd.tsx new file mode 100644 index 00000000..36b571a2 --- /dev/null +++ b/components/ui/kbd.tsx @@ -0,0 +1,20 @@ +import {cn} from '@/lib/utils'; + +export default function Kbd({ + children, + className, +}: { + children: React.ReactNode; + className?: string; +}) { + return ( + + {children} + + ); +} diff --git a/tailwind.config.ts b/tailwind.config.ts index 2d4d91fb..d44fb98c 100644 --- a/tailwind.config.ts +++ b/tailwind.config.ts @@ -57,6 +57,10 @@ const config = { DEFAULT: 'hsl(var(--card))', foreground: 'hsl(var(--card-foreground))', }, + 'primary-highlight': { + DEFAULT: 'hsl(var(--primary-highlight))', + foreground: 'hsl(var(--primary-highlight-foreground))', + }, }, borderRadius: { lg: 'var(--radius)', From 092140fed898fff183eda0cf44ca1795f6f6d6b9 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 12:54:48 -0700 Subject: [PATCH 05/15] fix: fixes from testing --- app/globals.css | 10 +--------- app/page.tsx | 6 +++--- components/anchor.tsx | 1 - components/search.tsx | 17 ++++++++--------- components/ui/button.tsx | 2 +- 5 files changed, 13 insertions(+), 23 deletions(-) diff --git a/app/globals.css b/app/globals.css index 9330158c..cd688eda 100644 --- a/app/globals.css +++ b/app/globals.css @@ -57,7 +57,7 @@ --input: 240 3.7% 15.9%; --ring: 240 4.9% 83.9%; --primary-highlight: 331.2 97.3% 55.9%; - --primary-highlight-foreground: 240 10% 3.9%; + --primary-highlight-foreground: 0 0% 100%; /* Root page */ --paragraph-color: #d4d4d8; @@ -111,14 +111,6 @@ } } -.search-snippet { - padding: 0.5rem; -} - -.search-selected { - border-radius: 0.25rem; -} - mark { color: hsl(var(--primary-highlight-foreground)); font-weight: 400; diff --git a/app/page.tsx b/app/page.tsx index 983273ba..6a5c73fc 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -4,12 +4,12 @@ import CodeBlock from '@/components/CodeBlock'; import GithubLogo from '@/components/logos/Github'; import 
RocicorpLogo from '@/components/logos/Rocicorp'; import ZeroAlphaLogo from '@/components/logos/ZeroAlpha'; -import { Button } from '@/components/ui/button'; +import {Button} from '@/components/ui/button'; import Kbd from '@/components/ui/kbd'; import ResponsiveImage from '@/components/ui/responsive-image'; -import { cn } from '@/lib/utils'; +import {cn} from '@/lib/utils'; import Link from 'next/link'; -import { useRouter } from 'next/navigation'; +import {useRouter} from 'next/navigation'; import React from 'react'; import styles from './page.module.css'; diff --git a/components/anchor.tsx b/components/anchor.tsx index 8c3ff23d..59d54619 100644 --- a/components/anchor.tsx +++ b/components/anchor.tsx @@ -8,7 +8,6 @@ import {ComponentProps} from 'react'; type AnchorProps = ComponentProps & { absolute?: boolean; activeClassName?: string; - hoverClassName?: string; disabled?: boolean; }; diff --git a/components/search.tsx b/components/search.tsx index 6e5a4004..2f11e3df 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -207,7 +207,7 @@ export default function Search() { console.error('Lunr.js Query Error:', error); setSearchResults([]); } - }, 50); + }, 20); return () => clearTimeout(delayDebounce); }, [searchedInput]); @@ -260,10 +260,7 @@ export default function Search() { showCloseButton={false} className={cn('overflow-hidden p-0')} > - + {searchedInput && No results found.} {searchResults.length > 0 && ( - + {searchResults.map(item => { const Icon = icons[item?.icon ?? 'FileCode']; @@ -290,15 +287,17 @@ export default function Search() { : item.url; router.push(url); }} - className={cn('flex flex-col items-start gap-1 py-3')} + className={cn( + 'flex flex-col items-start gap-2 py-3 px-4 rounded-none', + )} >
- + {item.title}
{item.snippet && (

)} diff --git a/components/ui/button.tsx b/components/ui/button.tsx index f6f414f0..ebae8701 100644 --- a/components/ui/button.tsx +++ b/components/ui/button.tsx @@ -19,7 +19,7 @@ const buttonVariants = cva( ghost: 'hover:bg-accent hover:text-accent-foreground text-foreground', link: 'text-primary underline-offset-4 hover:underline', primary: - 'border border-input bg-background hover:bg-primary-highlight hover:text-primary-highlight-foreground', + 'border border-input bg-background hover:bg-primary-highlight hover:text-primary-highlight-foreground hover:border-primary-highlight', }, size: { default: 'h-10 px-4 py-2', From 21ba9a5fe69e576a5c0c2f703a00bc15c3a6ab1b Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 13:09:13 -0700 Subject: [PATCH 06/15] fix: fixes from testing --- components/search.tsx | 4 ++-- components/sublink.tsx | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/components/search.tsx b/components/search.tsx index 2f11e3df..91d7dae9 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -292,12 +292,12 @@ export default function Search() { )} >

- +
{item.title}
{item.snippet && (

)} diff --git a/components/sublink.tsx b/components/sublink.tsx index d479f0f1..063f5255 100644 --- a/components/sublink.tsx +++ b/components/sublink.tsx @@ -71,7 +71,7 @@ export default function SubLink({ Comp ) ) : ( -

+

{Icon && (
From e760e05f8ca19875de1d1646b657436b307b8118 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 13:15:01 -0700 Subject: [PATCH 07/15] fix: make cmdk loop --- components/search.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/search.tsx b/components/search.tsx index 91d7dae9..f8d3a985 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -260,7 +260,7 @@ export default function Search() { showCloseButton={false} className={cn('overflow-hidden p-0')} > - + Date: Wed, 9 Jul 2025 13:35:33 -0700 Subject: [PATCH 08/15] fix: added prefetching --- components/search.tsx | 51 +++++++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/components/search.tsx b/components/search.tsx index f8d3a985..31fb2683 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -15,7 +15,7 @@ import searchIndex from '@/assets/search-index.json'; import {CommandIcon, SearchIcon} from 'lucide-react'; import lunr from 'lunr'; import {useRouter} from 'next/navigation'; -import React, {useEffect, useState} from 'react'; +import React, {useEffect, useMemo, useState} from 'react'; import { Command, CommandEmpty, @@ -25,6 +25,7 @@ import { CommandList, } from './ui/command'; import Kbd from './ui/kbd'; +import {useCommandState} from 'cmdk'; const Anchor = React.forwardRef>( ({children, ...props}, ref) => { @@ -50,6 +51,7 @@ interface SearchDocument { interface SearchResult extends SearchDocument { snippet?: string; snippetId?: string; + composedUrl: string; } function extractSnippet( @@ -195,12 +197,20 @@ export default function Search() { } // Deduplicate results by ID - const uniqueResults = results.filter(Boolean).reduce((acc, result) => { - if (!acc.has(result!.id)) { - acc.set(result!.id, result!); - } - return acc; - }, new Map()); + const uniqueResults = results + .filter(result => result !== null) + .map(result => ({ + ...result, + composedUrl: result.snippetId + ? `${result.url}#${result.snippetId}` + : result.url, + })) + .reduce((acc, result) => { + if (!acc.has(result.id)) { + acc.set(result.id, result); + } + return acc; + }, new Map()); setSearchResults(Array.from(uniqueResults.values())); } catch (error) { @@ -282,17 +292,16 @@ export default function Search() { setIsOpen(false); setSearchedInput(''); setSearchResults([]); - const url = item.snippetId - ? `${item.url}#${item.snippetId}` - : item.url; - router.push(url); + router.push(item.composedUrl); }} className={cn( 'flex flex-col items-start gap-2 py-3 px-4 rounded-none', )} >
-
+
+ +
{item.title}
{item.snippet && ( @@ -307,8 +316,26 @@ export default function Search() { )} +
); } + +const PreloadCurrentItem = () => { + const value = useCommandState(state => state.value); + const router = useRouter(); + + const activeItem = useMemo(() => { + return searchDocs.find(item => item.id === value); + }, [value, searchDocs]); + + useEffect(() => { + if (activeItem) { + router.prefetch(activeItem.url); + } + }, [activeItem]); + + return <>; +}; From 6f0e85638b4906052e39b824de50d8783dc8b08b Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 13:37:34 -0700 Subject: [PATCH 09/15] fix: types --- assets/search-index.json | 2 +- components/search.tsx | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/assets/search-index.json b/assets/search-index.json index 53b998a5..54c9ab9f 100644 --- a/assets/search-index.json +++ b/assets/search-index.json @@ -1834,4 +1834,4 @@ } ] } -] +] \ No newline at end of file diff --git a/components/search.tsx b/components/search.tsx index 31fb2683..18ea1578 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -198,7 +198,8 @@ export default function Search() { // Deduplicate results by ID const uniqueResults = results - .filter(result => result !== null) + .filter(Boolean) + .map(result => result!) .map(result => ({ ...result, composedUrl: result.snippetId From 7f869ba64dc3745d9fd13cc8099475c16054d155 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 13:51:18 -0700 Subject: [PATCH 10/15] fix: revert rename --- lib/{routes-config.tsx => routes-config.ts} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lib/{routes-config.tsx => routes-config.ts} (100%) diff --git a/lib/routes-config.tsx b/lib/routes-config.ts similarity index 100% rename from lib/routes-config.tsx rename to lib/routes-config.ts From 629bf066e292c029864e05c95037d0fc70602e36 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 13:55:30 -0700 Subject: [PATCH 11/15] fix: routes config --- generate-llms.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/generate-llms.js b/generate-llms.js index f0144e68..5e8a88d2 100755 --- a/generate-llms.js +++ b/generate-llms.js @@ -3,9 +3,9 @@ const fs = require('fs'); const path = require('path'); -// Extract page paths from routes-config.tsx +// Extract page paths from routes-config.ts function extractPagesFromRoutesConfig() { - const routesConfigPath = path.join(__dirname, 'lib', 'routes-config.tsx'); + const routesConfigPath = path.join(__dirname, 'lib', 'routes-config.ts'); const content = fs.readFileSync(routesConfigPath, 'utf8'); // Extract all href values @@ -77,9 +77,9 @@ function generateLLMsFile() { const outputFile = path.join(__dirname, 'public', 'llms.txt'); try { - // Get ordered pages from routes-config.tsx + // Get ordered pages from routes-config.ts const tocPages = extractPagesFromRoutesConfig(); - console.log(`Found ${tocPages.length} pages in routes-config.tsx`); + console.log(`Found ${tocPages.length} pages in routes-config.ts`); let allText = ''; let processedFiles = new Set(); From 40a4f1fb7c5c22bd59baed245ab83a296c313d52 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 15:36:45 -0700 Subject: [PATCH 12/15] fix: missing css --- app/globals.css | 25 ------------------------- components/Pagination.tsx | 19 +++++++++---------- components/ui/button.tsx | 2 +- 3 files changed, 10 insertions(+), 36 deletions(-) diff --git a/app/globals.css b/app/globals.css index cd688eda..28d16fdc 100644 --- a/app/globals.css +++ b/app/globals.css @@ -472,31 +472,6 @@ mark { cursor: pointer; } -.button-link:hover { - 
background-color: hsl(var(--primary-highlight)) !important; -} - -.button-link.pagination-button { - transition: all 0.3s ease; - white-space: normal; - height: fit-content; - padding: 1rem; - border-color: hsl(var(--border)); -} - -.button-link.pagination-button-prev { - text-align: left; -} - -.button-link.pagination-button-next { - text-align: right; -} - -.button-link.pagination-button:hover { - background-color: inherit !important; - border-color: hsl(var(--foreground) / 30%); -} - .prose a.button-link, .prose a.heading-link { text-decoration: none; diff --git a/components/Pagination.tsx b/components/Pagination.tsx index 971a6349..bbfbd073 100644 --- a/components/Pagination.tsx +++ b/components/Pagination.tsx @@ -2,6 +2,7 @@ import {getPreviousNext} from '@/lib/markdown'; import {ChevronLeftIcon, ChevronRightIcon} from 'lucide-react'; import Link from 'next/link'; import {buttonVariants} from './ui/button'; +import {cn} from '@/lib/utils'; export default function Pagination({pathname}: {pathname: string}) { const res = getPreviousNext(pathname); @@ -11,11 +12,10 @@ export default function Pagination({pathname}: {pathname: string}) {
{res.prev && ( @@ -29,11 +29,10 @@ export default function Pagination({pathname}: {pathname: string}) {
{res.next && ( diff --git a/components/ui/button.tsx b/components/ui/button.tsx index ebae8701..3a6c13df 100644 --- a/components/ui/button.tsx +++ b/components/ui/button.tsx @@ -13,7 +13,7 @@ const buttonVariants = cva( destructive: 'bg-destructive text-destructive-foreground hover:bg-destructive/90', outline: - 'border border-input bg-background hover:bg-accent hover:text-accent-foreground', //button-link + 'border border-input bg-background hover:bg-accent hover:text-accent-foreground', secondary: 'bg-secondary text-secondary-foreground hover:bg-secondary/80', ghost: 'hover:bg-accent hover:text-accent-foreground text-foreground', From 9461b6e2a8ea024b8673e60f16cc8682cdb7fb47 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Wed, 9 Jul 2025 16:37:58 -0700 Subject: [PATCH 13/15] feat: added further hotkeys --- app/docs/[[...slug]]/page.tsx | 25 ++++++-------- app/page.tsx | 26 ++++++-------- components/Pagination.tsx | 64 +++++++++++++++++++++++++---------- components/search.tsx | 23 ++++++------- components/toc.tsx | 60 ++++++++++++++++++++++++++------ components/ui/kbd.tsx | 22 ++++++------ lib/markdown.ts | 21 +++++------- package-lock.json | 14 ++++++++ package.json | 1 + 9 files changed, 163 insertions(+), 93 deletions(-) diff --git a/app/docs/[[...slug]]/page.tsx b/app/docs/[[...slug]]/page.tsx index 893d8912..f58cf5ca 100644 --- a/app/docs/[[...slug]]/page.tsx +++ b/app/docs/[[...slug]]/page.tsx @@ -2,17 +2,19 @@ import Toc from '@/components/toc'; import Pagination from '@/components/Pagination'; import {page_routes} from '@/lib/routes-config'; import {notFound} from 'next/navigation'; -import {getDocsForSlug} from '@/lib/markdown'; +import {getDocsForSlug, getDocsTocs, getPreviousNext} from '@/lib/markdown'; import {Typography} from '@/components/typography'; import CopyContent from '@/components/ui/copy-content'; -type PageProps = { - params: {slug: string[]}; -}; +type PageProps = {params: {slug: string[]}}; export default async function DocsPage({params: {slug = []}}: PageProps) { const pathName = slug.join('/'); - const res = await getDocsForSlug(pathName); + const [res, tocs, previousNext] = await Promise.all([ + getDocsForSlug(pathName), + getDocsTocs(pathName), + getPreviousNext(pathName), + ]); if (!res) notFound(); @@ -26,10 +28,10 @@ export default async function DocsPage({params: {slug = []}}: PageProps) {

{/* Wrap content with CopyableContent */} - +
- +
); } @@ -39,14 +41,9 @@ export async function generateMetadata({params: {slug = []}}: PageProps) { const res = await getDocsForSlug(pathName); if (!res) return null; const {frontmatter} = res; - return { - title: frontmatter.title, - description: frontmatter.description, - }; + return {title: frontmatter.title, description: frontmatter.description}; } export function generateStaticParams() { - return page_routes.map(item => ({ - slug: item.href.split('/').slice(1), - })); + return page_routes.map(item => ({slug: item.href.split('/').slice(1)})); } diff --git a/app/page.tsx b/app/page.tsx index 6a5c73fc..77b9ff18 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -10,26 +10,20 @@ import ResponsiveImage from '@/components/ui/responsive-image'; import {cn} from '@/lib/utils'; import Link from 'next/link'; import {useRouter} from 'next/navigation'; -import React from 'react'; +import {useEffect} from 'react'; +import {useHotkeys} from 'react-hotkeys-hook'; import styles from './page.module.css'; export default function Home() { const router = useRouter(); - // Toggle the menu when ⌘K is pressed - React.useEffect(() => { - const down = (e: KeyboardEvent) => { - if (e.key === 'd') { - e.preventDefault(); - router.push('/docs/introduction'); - } else if (e.key === 'g') { - e.preventDefault(); - router.push('https://github.com/rocicorp/mono#zero'); - } - }; - - document.addEventListener('keydown', down); - return () => document.removeEventListener('keydown', down); + useHotkeys('d', () => router.push('/docs/introduction')); + useHotkeys('g', () => + window.open('https://github.com/rocicorp/mono#zero', '_blank'), + ); + + useEffect(() => { + router.prefetch('/docs/introduction'); }, [router]); const exampleCode = `function Playlist({id}: {id: string}) { @@ -95,7 +89,7 @@ export default function Home() { size="default" asChild > - + DocsD diff --git a/components/Pagination.tsx b/components/Pagination.tsx index bbfbd073..62002b8f 100644 --- a/components/Pagination.tsx +++ b/components/Pagination.tsx @@ -1,45 +1,75 @@ -import {getPreviousNext} from '@/lib/markdown'; -import {ChevronLeftIcon, ChevronRightIcon} from 'lucide-react'; +'use client'; + +import type {getPreviousNext} from '@/lib/markdown'; +import {cn} from '@/lib/utils'; import Link from 'next/link'; +import {useRouter} from 'next/navigation'; +import {useEffect} from 'react'; +import {useHotkeys} from 'react-hotkeys-hook'; import {buttonVariants} from './ui/button'; -import {cn} from '@/lib/utils'; +import Kbd from './ui/kbd'; + +export default function Pagination({ + previousNext, +}: { + previousNext: ReturnType; +}) { + const router = useRouter(); + + useEffect(() => { + if (previousNext.next) { + router.prefetch(`/docs${previousNext.next.href}`); + } + if (previousNext.prev) { + router.prefetch(`/docs${previousNext.prev.href}`); + } + }, [router]); + + useHotkeys('j', () => { + if (previousNext.next) { + router.push(`/docs${previousNext.next.href}`); + } + }); -export default function Pagination({pathname}: {pathname: string}) { - const res = getPreviousNext(pathname); + useHotkeys('k', () => { + if (previousNext.prev) { + router.push(`/docs${previousNext.prev.href}`); + } + }); return (
- {res.prev && ( + {previousNext.prev && ( - - + + K Previous - {res.prev.title} + {previousNext.prev.title} )}
- {res.next && ( + {previousNext.next && ( - + Next - + J - {res.next.title} + {previousNext.next.title} )}
diff --git a/components/search.tsx b/components/search.tsx index 18ea1578..5dd0296b 100644 --- a/components/search.tsx +++ b/components/search.tsx @@ -1,5 +1,6 @@ 'use client'; +import searchIndex from '@/assets/search-index.json'; import { Dialog, DialogContent, @@ -11,11 +12,12 @@ import { import {Input} from '@/components/ui/input'; import {IconKey, icons} from '@/lib/icons'; import {cn} from '@/lib/utils'; -import searchIndex from '@/assets/search-index.json'; +import {useCommandState} from 'cmdk'; import {CommandIcon, SearchIcon} from 'lucide-react'; import lunr from 'lunr'; import {useRouter} from 'next/navigation'; import React, {useEffect, useMemo, useState} from 'react'; +import {useHotkeys} from 'react-hotkeys-hook'; import { Command, CommandEmpty, @@ -25,7 +27,6 @@ import { CommandList, } from './ui/command'; import Kbd from './ui/kbd'; -import {useCommandState} from 'cmdk'; const Anchor = React.forwardRef>( ({children, ...props}, ref) => { @@ -224,17 +225,13 @@ export default function Search() { }, [searchedInput]); // Toggle the menu when ⌘K is pressed - React.useEffect(() => { - const down = (e: KeyboardEvent) => { - if (e.key === 'k' && (e.metaKey || e.ctrlKey)) { - e.preventDefault(); - setIsOpen(open => !open); - } - }; - - document.addEventListener('keydown', down); - return () => document.removeEventListener('keydown', down); - }, []); + useHotkeys( + 'meta+k', + () => { + setIsOpen(open => !open); + }, + {enableOnFormTags: true, enableOnContentEditable: true}, + ); return ( (tocs.length > 1 ? tocs.filter(toc => toc.level === 2) : []), + [tocs], + ); + + const mappedTocs = useMemo( + () => + tocs.map(({href, level, text}, index) => ({ + href, + level, + text, + index, + hotkey: level === 2 ? level2Tocs.findIndex(t => t.href === href) : null, + })), + [tocs, level2Tocs], + ); + + useHotkeys(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], e => { + const hotkeyIndex = parseInt(e.key); -export default async function Toc({path}: {path: string}) { - const tocs = await getDocsTocs(path); + const level2Toc = level2Tocs[hotkeyIndex]; + if (level2Toc) { + router.push(level2Toc.href); + } + }); return (
@@ -14,18 +50,22 @@ export default async function Toc({path}: {path: string}) {

On this page

- {tocs.map(({href, level, text}) => ( + {mappedTocs.map(({href, level, text, hotkey}) => ( {text} + {hotkey !== null && hotkey >= 0 && hotkey < 10 && ( + {hotkey} + )} ))}
diff --git a/components/ui/kbd.tsx b/components/ui/kbd.tsx index 36b571a2..77caa1cb 100644 --- a/components/ui/kbd.tsx +++ b/components/ui/kbd.tsx @@ -1,20 +1,22 @@ +import {cva} from 'class-variance-authority'; import {cn} from '@/lib/utils'; +const kbdVariants = cva( + 'flex-shrink-0 text-xs text-foreground items-center font-medium font-mono lining-nums gap-0.5 bg-input rounded-sm border border-border', + { + variants: {size: {sm: 'text-xs px-1.5 py-0.5', md: 'text-xs px-2 py-1'}}, + defaultVariants: {size: 'md'}, + }, +); + export default function Kbd({ children, className, + size = 'md', }: { children: React.ReactNode; className?: string; + size?: 'sm' | 'md'; }) { - return ( - - {children} - - ); + return {children}; } diff --git a/lib/markdown.ts b/lib/markdown.ts index 76de37fb..02ae5714 100644 --- a/lib/markdown.ts +++ b/lib/markdown.ts @@ -16,18 +16,10 @@ import Video from '@/components/ui/Video'; import {Button} from '@/components/ui/button'; import {sluggify} from './utils'; -const components = { - Note, - ImageLightbox, - Video, - Button, -}; +const components = {Note, ImageLightbox, Video, Button}; // Define the structure of the frontmatter -type BaseMdxFrontmatter = { - title: string; - description: string; -}; +type BaseMdxFrontmatter = {title: string; description: string}; // Parse MDX content with the given plugins async function parseMdx(rawMdx: string) { @@ -90,11 +82,14 @@ export async function getDocsTocs(slug: string) { return extractedHeadings; } -export function getPreviousNext(path: string) { +export function getPreviousNext(path: string): { + prev: (typeof page_routes)[number] | null; + next: (typeof page_routes)[number] | null; +} { const index = page_routes.findIndex(({href}) => href == `/${path}`); return { - prev: page_routes[index - 1], - next: page_routes[index + 1], + prev: page_routes[index - 1] ?? null, + next: page_routes[index + 1] ?? 
null, }; } diff --git a/package-lock.json b/package-lock.json index 55170100..96da7beb 100644 --- a/package-lock.json +++ b/package-lock.json @@ -26,6 +26,7 @@ "next-themes": "^0.3.0", "react": "^18.3.1", "react-dom": "^18.3.1", + "react-hotkeys-hook": "^5.1.0", "rehype-autolink-headings": "^7.1.0", "rehype-code-titles": "^1.2.0", "rehype-prism-plus": "^2.0.0", @@ -8093,6 +8094,19 @@ "node": "*" } }, + "node_modules/react-hotkeys-hook": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/react-hotkeys-hook/-/react-hotkeys-hook-5.1.0.tgz", + "integrity": "sha512-GCNGXjBzV9buOS3REoQFmSmE4WTvBhYQ0YrAeeMZI83bhXg3dRWsLHXDutcVDdEjwJqJCxk5iewWYX5LtFUd7g==", + "license": "MIT", + "workspaces": [ + "packages/*" + ], + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, "node_modules/react-is": { "version": "16.13.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", diff --git a/package.json b/package.json index 76d18b23..dca935db 100644 --- a/package.json +++ b/package.json @@ -31,6 +31,7 @@ "next-themes": "^0.3.0", "react": "^18.3.1", "react-dom": "^18.3.1", + "react-hotkeys-hook": "^5.1.0", "rehype-autolink-headings": "^7.1.0", "rehype-code-titles": "^1.2.0", "rehype-prism-plus": "^2.0.0", From a1dd6d35088b80bc895b08c14fa092e12b209f08 Mon Sep 17 00:00:00 2001 From: Chase Adams Date: Thu, 10 Jul 2025 11:09:49 -0700 Subject: [PATCH 14/15] fix: added fixes based on feedback --- app/docs/[[...slug]]/page.tsx | 4 +- app/docs/layout.tsx | 4 +- app/globals.css | 8 +- app/page.module.css | 2 +- app/page.tsx | 54 ++-- components/Pagination.tsx | 67 +++-- components/docs-menu.tsx | 11 +- components/leftbar.tsx | 15 +- components/search.tsx | 5 +- components/sublink.tsx | 2 +- components/toc.tsx | 45 ++-- components/ui/ActiveHashLink.tsx | 3 +- components/ui/hotkey-tooltip.tsx | 52 ++++ components/ui/tooltip.tsx | 61 +++++ lib/icons.tsx | 17 +- lib/routes-config.ts | 38 +-- package-lock.json | 448 +++++++++++++++++++++++++++++++ package.json | 1 + 18 files changed, 717 insertions(+), 120 deletions(-) create mode 100644 components/ui/hotkey-tooltip.tsx create mode 100644 components/ui/tooltip.tsx diff --git a/app/docs/[[...slug]]/page.tsx b/app/docs/[[...slug]]/page.tsx index f58cf5ca..1c104b0b 100644 --- a/app/docs/[[...slug]]/page.tsx +++ b/app/docs/[[...slug]]/page.tsx @@ -19,8 +19,8 @@ export default async function DocsPage({params: {slug = []}}: PageProps) { if (!res) notFound(); return ( -
-
+
+

{res.frontmatter.title}

diff --git a/app/docs/layout.tsx b/app/docs/layout.tsx index 7041e241..16ab553d 100644 --- a/app/docs/layout.tsx +++ b/app/docs/layout.tsx @@ -6,9 +6,9 @@ export default function DocsLayout({ children: React.ReactNode; }>) { return ( -

+
-
{children}
{' '} + {children}
); } diff --git a/app/globals.css b/app/globals.css index 28d16fdc..1e6f9d2e 100644 --- a/app/globals.css +++ b/app/globals.css @@ -40,7 +40,7 @@ --foreground: 0 0% 98%; --card: 240 10% 3.9%; --card-foreground: 0 0% 98%; - --popover: 240 10% 3.9%; + --popover: 240 10% 6.3%; --popover-foreground: 0 0% 98%; --primary: 0 0% 98%; --primary-foreground: 240 5.9% 10%; @@ -113,7 +113,6 @@ mark { color: hsl(var(--primary-highlight-foreground)); - font-weight: 400; border-radius: 0.25rem; padding: 0.2rem; margin: 0 0.05rem; @@ -208,11 +207,6 @@ mark { top: 4rem !important; } -.leftbar-aside { - max-height: calc(100vh - 72px); - overflow-y: scroll; -} - .footer-container { color: hsl(var(--foreground) / 50%); font-size: 0.875rem; diff --git a/app/page.module.css b/app/page.module.css index 01c81d6e..0f6ee16a 100644 --- a/app/page.module.css +++ b/app/page.module.css @@ -118,7 +118,7 @@ } .footer { - margin: 8rem 0 0; + margin: 8rem 0 2rem; align-self: center; } diff --git a/app/page.tsx b/app/page.tsx index 77b9ff18..26677b88 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -1,11 +1,10 @@ 'use client'; import CodeBlock from '@/components/CodeBlock'; -import GithubLogo from '@/components/logos/Github'; import RocicorpLogo from '@/components/logos/Rocicorp'; import ZeroAlphaLogo from '@/components/logos/ZeroAlpha'; import {Button} from '@/components/ui/button'; -import Kbd from '@/components/ui/kbd'; +import {HotkeyTooltip} from '@/components/ui/hotkey-tooltip'; import ResponsiveImage from '@/components/ui/responsive-image'; import {cn} from '@/lib/utils'; import Link from 'next/link'; @@ -71,28 +70,39 @@ export default function Home() { />
- - - + + + - - DocsD - - + +
diff --git a/components/Pagination.tsx b/components/Pagination.tsx index 62002b8f..f8b11343 100644 --- a/components/Pagination.tsx +++ b/components/Pagination.tsx @@ -2,12 +2,13 @@ import type {getPreviousNext} from '@/lib/markdown'; import {cn} from '@/lib/utils'; +import {ChevronLeftIcon, ChevronRightIcon} from 'lucide-react'; import Link from 'next/link'; import {useRouter} from 'next/navigation'; import {useEffect} from 'react'; import {useHotkeys} from 'react-hotkeys-hook'; import {buttonVariants} from './ui/button'; -import Kbd from './ui/kbd'; +import {HotkeyTooltip} from './ui/hotkey-tooltip'; export default function Pagination({ previousNext, @@ -41,36 +42,52 @@ export default function Pagination({
{previousNext.prev && ( - - - K - Previous - - {previousNext.prev.title} - + + + + Previous + + {previousNext.prev.title} + + )}
{previousNext.next && ( - - - Next - J - - {previousNext.next.title} - + + + Next + + + {previousNext.next.title} + + )}
diff --git a/components/docs-menu.tsx b/components/docs-menu.tsx index 29d3dd97..24ecf333 100644 --- a/components/docs-menu.tsx +++ b/components/docs-menu.tsx @@ -9,7 +9,7 @@ import {ModeToggle} from './theme-toggle'; export default function DocsMenu({isSheet = false}) { return ( -
+
{ROUTES.map((item, index) => { const modifiedItems = { ...item, @@ -18,14 +18,7 @@ export default function DocsMenu({isSheet = false}) { isSheet, noLink: item.href === null, }; - return ( -
- - {index < ROUTES.length - 1 && ( -
- )} -
- ); + return ; })}
diff --git a/components/leftbar.tsx b/components/leftbar.tsx index 46beee83..b4209e1f 100644 --- a/components/leftbar.tsx +++ b/components/leftbar.tsx @@ -6,16 +6,19 @@ import { SheetTitle, SheetTrigger, } from '@/components/ui/sheet'; -import {Button} from './ui/button'; -import {buttonVariants} from './ui/button'; import {AlignLeftIcon} from 'lucide-react'; import DocsMenu from './docs-menu'; -import Image from 'next/image'; -import Link from 'next/link'; +import {Button} from './ui/button'; +import {cn} from '@/lib/utils'; -export function Leftbar() { +export function Leftbar({className}: {className?: string}) { return ( -